1 | Sun May 30 18:43:46 PDT 2010 Kevan Carstensen <kevan@isnotajoke.com> |
---|
2 | * Code cleanup |
---|
3 | |
---|
4 | - Change 'readv' to 'readvs' in remote_slot_readv in the storage |
---|
5 | server, to more adequately convey what the argument is. |
---|
6 | |
---|
7 | Mon May 31 15:49:00 PDT 2010 Kevan Carstensen <kevan@isnotajoke.com> |
---|
8 | * Add tests for new MDMF proxies |
---|
9 | |
---|
10 | Mon May 31 16:10:09 PDT 2010 Kevan Carstensen <kevan@isnotajoke.com> |
---|
11 | * Add objects for MDMF shares in support of a new segmented uploader |
---|
12 | |
---|
13 | New patches: |
---|
14 | |
---|
15 | [Code cleanup |
---|
16 | Kevan Carstensen <kevan@isnotajoke.com>**20100531014346 |
---|
17 | Ignore-this: 697378037e83290267f108a4a88b8776 |
---|
18 | |
---|
19 | - Change 'readv' to 'readvs' in remote_slot_readv in the storage |
---|
20 | server, to more adequately convey what the argument is. |
---|
21 | ] { |
---|
22 | hunk ./src/allmydata/storage/server.py 569 |
---|
23 | self) |
---|
24 | return share |
---|
25 | |
---|
26 | - def remote_slot_readv(self, storage_index, shares, readv): |
---|
27 | + def remote_slot_readv(self, storage_index, shares, readvs): |
---|
28 | start = time.time() |
---|
29 | self.count("readv") |
---|
30 | si_s = si_b2a(storage_index) |
---|
31 | hunk ./src/allmydata/storage/server.py 590 |
---|
32 | if sharenum in shares or not shares: |
---|
33 | filename = os.path.join(bucketdir, sharenum_s) |
---|
34 | msf = MutableShareFile(filename, self) |
---|
35 | - datavs[sharenum] = msf.readv(readv) |
---|
36 | + datavs[sharenum] = msf.readv(readvs) |
---|
37 | log.msg("returning shares %s" % (datavs.keys(),), |
---|
38 | facility="tahoe.storage", level=log.NOISY, parent=lp) |
---|
39 | self.add_latency("readv", time.time() - start) |
---|
40 | } |
---|
41 | [Add tests for new MDMF proxies |
---|
42 | Kevan Carstensen <kevan@isnotajoke.com>**20100531224900 |
---|
43 | Ignore-this: 34c9b2afba71cb63228d48d4d2a72195 |
---|
44 | ] { |
---|
45 | hunk ./src/allmydata/test/test_storage.py 2 |
---|
46 | |
---|
47 | -import time, os.path, stat, re, simplejson, struct |
---|
48 | +import time, os.path, stat, re, simplejson, struct, shutil |
---|
49 | |
---|
50 | from twisted.trial import unittest |
---|
51 | |
---|
52 | hunk ./src/allmydata/test/test_storage.py 22 |
---|
53 | from allmydata.storage.expirer import LeaseCheckingCrawler |
---|
54 | from allmydata.immutable.layout import WriteBucketProxy, WriteBucketProxy_v2, \ |
---|
55 | ReadBucketProxy |
---|
56 | +from allmydata.mutable.layout import MDMFSlotWriteProxy, MDMFSlotReadProxy, \ |
---|
57 | + LayoutInvalid |
---|
58 | from allmydata.interfaces import BadWriteEnablerError |
---|
59 | hunk ./src/allmydata/test/test_storage.py 25 |
---|
60 | -from allmydata.test.common import LoggingServiceParent |
---|
61 | +from allmydata.test.common import LoggingServiceParent, ShouldFailMixin |
---|
62 | from allmydata.test.common_web import WebRenderingMixin |
---|
63 | from allmydata.web.storage import StorageStatus, remove_prefix |
---|
64 | |
---|
65 | hunk ./src/allmydata/test/test_storage.py 1285 |
---|
66 | self.failUnless(os.path.exists(prefixdir)) |
---|
67 | self.failIf(os.path.exists(bucketdir)) |
---|
68 | |
---|
69 | + |
---|
70 | +class MDMFProxies(unittest.TestCase, ShouldFailMixin): |
---|
71 | + def setUp(self): |
---|
72 | + self.sparent = LoggingServiceParent() |
---|
73 | + self._lease_secret = itertools.count() |
---|
74 | + self.ss = self.create("MDMFProxies storage test server") |
---|
75 | + self.rref = RemoteBucket() |
---|
76 | + self.rref.target = self.ss |
---|
77 | + self.secrets = (self.write_enabler("we_secret"), |
---|
78 | + self.renew_secret("renew_secret"), |
---|
79 | + self.cancel_secret("cancel_secret")) |
---|
80 | + self.block = "aa" |
---|
81 | + self.salt = "a" * 16 |
---|
82 | + self.block_hash = "a" * 32 |
---|
83 | + self.block_hash_tree = [self.block_hash for i in xrange(6)] |
---|
84 | + self.share_hash = self.block_hash |
---|
85 | + self.share_hash_chain = dict([(i, self.share_hash) for i in xrange(6)]) |
---|
86 | + self.signature = "foobarbaz" |
---|
87 | + self.verification_key = "vvvvvv" |
---|
88 | + self.encprivkey = "private" |
---|
89 | + self.root_hash = self.block_hash |
---|
90 | + self.salt_hash = self.root_hash |
---|
91 | + self.block_hash_tree_s = self.serialize_blockhashes(self.block_hash_tree) |
---|
92 | + self.share_hash_chain_s = self.serialize_sharehashes(self.share_hash_chain) |
---|
93 | + |
---|
94 | + |
---|
95 | + def tearDown(self): |
---|
96 | + self.sparent.stopService() |
---|
97 | + shutil.rmtree(self.workdir("MDMFProxies storage test server")) |
---|
98 | + |
---|
99 | + |
---|
100 | + def write_enabler(self, we_tag): |
---|
101 | + return hashutil.tagged_hash("we_blah", we_tag) |
---|
102 | + |
---|
103 | + |
---|
104 | + def renew_secret(self, tag): |
---|
105 | + return hashutil.tagged_hash("renew_blah", str(tag)) |
---|
106 | + |
---|
107 | + |
---|
108 | + def cancel_secret(self, tag): |
---|
109 | + return hashutil.tagged_hash("cancel_blah", str(tag)) |
---|
110 | + |
---|
111 | + |
---|
112 | + def workdir(self, name): |
---|
113 | + basedir = os.path.join("storage", "MutableServer", name) |
---|
114 | + return basedir |
---|
115 | + |
---|
116 | + |
---|
117 | + def create(self, name): |
---|
118 | + workdir = self.workdir(name) |
---|
119 | + ss = StorageServer(workdir, "\x00" * 20) |
---|
120 | + ss.setServiceParent(self.sparent) |
---|
121 | + return ss |
---|
122 | + |
---|
123 | + |
---|
124 | + def write_test_share_to_server(self, |
---|
125 | + storage_index, |
---|
126 | + tail_segment=False): |
---|
127 | + """ |
---|
128 | + I write some data for the read tests to read to self.ss |
---|
129 | + |
---|
130 | + If tail_segment=True, then I will write a share that has a |
---|
131 | + smaller tail segment than other segments. |
---|
132 | + """ |
---|
133 | + write = self.ss.remote_slot_testv_and_readv_and_writev |
---|
134 | + # Start with the checkstring |
---|
135 | + data = struct.pack(">BQ32s32s", |
---|
136 | + 1, |
---|
137 | + 0, |
---|
138 | + self.root_hash, |
---|
139 | + self.salt_hash) |
---|
140 | + self.checkstring = data |
---|
141 | + # Next, the encoding parameters |
---|
142 | + if tail_segment: |
---|
143 | + data += struct.pack(">BBQQ", |
---|
144 | + 3, |
---|
145 | + 10, |
---|
146 | + 6, |
---|
147 | + 33) |
---|
148 | + else: |
---|
149 | + data += struct.pack(">BBQQ", |
---|
150 | + 3, |
---|
151 | + 10, |
---|
152 | + 6, |
---|
153 | + 36) |
---|
154 | + # Now we'll build the offsets. |
---|
155 | + # The header -- everything up to the salts -- is 143 bytes long. |
---|
156 | + # The shares come after the salts. |
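---|
| + # (For reference, that 143-byte figure matches the header fields |
---|
| + # packed in this test: 1 (version) + 8 (seqnum) + 32 (root hash) + |
---|
| + # 32 (salt hash) + 1 (k) + 1 (N) + 8 (segment size) + 8 (data |
---|
| + # length) + 4 (share data offset, "L") + 6 * 8 (remaining "Q" |
---|
| + # offsets) = 143.) |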
---|
157 | + salts = self.salt * 6 |
---|
158 | + share_offset = 143 + len(salts) |
---|
159 | + if tail_segment: |
---|
160 | + sharedata = self.block * 6 |
---|
161 | + else: |
---|
162 | + sharedata = self.block * 6 + "a" |
---|
163 | + # The encrypted private key comes after the shares |
---|
164 | + encrypted_private_key_offset = share_offset + len(sharedata) |
---|
165 | + # The blockhashes come after the private key |
---|
166 | + blockhashes_offset = encrypted_private_key_offset + len(self.encprivkey) |
---|
167 | + # The sharehashes come after the blockhashes |
---|
168 | + sharehashes_offset = blockhashes_offset + len(self.block_hash_tree_s) |
---|
169 | + # The signature comes after the share hash chain |
---|
170 | + signature_offset = sharehashes_offset + len(self.share_hash_chain_s) |
---|
171 | + # The verification key comes after the signature |
---|
172 | + verification_offset = signature_offset + len(self.signature) |
---|
173 | + # The EOF comes after the verification key |
---|
174 | + eof_offset = verification_offset + len(self.verification_key) |
---|
175 | + data += struct.pack(">LQQQQQQ", |
---|
176 | + share_offset, |
---|
177 | + encrypted_private_key_offset, |
---|
178 | + blockhashes_offset, |
---|
179 | + sharehashes_offset, |
---|
180 | + signature_offset, |
---|
181 | + verification_offset, |
---|
182 | + eof_offset) |
---|
183 | + # Next, we'll add in the salts, |
---|
184 | + data += salts |
---|
185 | + # the share data, |
---|
186 | + data += sharedata |
---|
187 | + # the private key, |
---|
188 | + data += self.encprivkey |
---|
189 | + # the block hash tree, |
---|
190 | + data += self.block_hash_tree_s |
---|
191 | + # the share hash chain, |
---|
192 | + data += self.share_hash_chain_s |
---|
193 | + # the signature, |
---|
194 | + data += self.signature |
---|
195 | + # and the verification key |
---|
196 | + data += self.verification_key |
---|
197 | + |
---|
198 | + # Finally, we write the whole thing to the storage server in one |
---|
199 | + # pass. |
---|
200 | + testvs = [(0, 1, "eq", "")] |
---|
201 | + tws = {} |
---|
202 | + tws[0] = (testvs, [(0, data)], None) |
---|
203 | + readv = [(0, 1)] |
---|
204 | + results = write(storage_index, self.secrets, tws, readv) |
---|
205 | + self.failUnless(results[0]) |
---|
206 | + |
---|
207 | + |
---|
208 | + def test_read(self): |
---|
209 | + self.write_test_share_to_server("si1") |
---|
210 | + mr = MDMFSlotReadProxy(self.rref, self.secrets, "si1", 0) |
---|
211 | + # Check that every method equals what we expect it to. |
---|
212 | + d = defer.succeed(None) |
---|
213 | + def _check_block_and_salt((block, salt)): |
---|
214 | + self.failUnlessEqual(block, self.block) |
---|
215 | + self.failUnlessEqual(salt, self.salt) |
---|
216 | + |
---|
217 | + for i in xrange(6): |
---|
218 | + d.addCallback(lambda ignored, i=i: |
---|
219 | + mr.get_block_and_salt(i)) |
---|
220 | + d.addCallback(_check_block_and_salt) |
---|
221 | + |
---|
222 | + d.addCallback(lambda ignored: |
---|
223 | + mr.get_encprivkey()) |
---|
224 | + d.addCallback(lambda encprivkey: |
---|
225 | + self.failUnlessEqual(self.encprivkey, encprivkey)) |
---|
226 | + |
---|
227 | + d.addCallback(lambda ignored: |
---|
228 | + mr.get_blockhashes()) |
---|
229 | + d.addCallback(lambda blockhashes: |
---|
230 | + self.failUnlessEqual(self.block_hash_tree, blockhashes)) |
---|
231 | + |
---|
232 | + d.addCallback(lambda ignored: |
---|
233 | + mr.get_sharehashes()) |
---|
234 | + d.addCallback(lambda sharehashes: |
---|
235 | + self.failUnlessEqual(self.share_hash_chain, sharehashes)) |
---|
236 | + |
---|
237 | + d.addCallback(lambda ignored: |
---|
238 | + mr.get_signature()) |
---|
239 | + d.addCallback(lambda signature: |
---|
240 | + self.failUnlessEqual(signature, self.signature)) |
---|
241 | + |
---|
242 | + d.addCallback(lambda ignored: |
---|
243 | + mr.get_verification_key()) |
---|
244 | + d.addCallback(lambda verification_key: |
---|
245 | + self.failUnlessEqual(verification_key, self.verification_key)) |
---|
246 | + |
---|
247 | + d.addCallback(lambda ignored: |
---|
248 | + mr.get_seqnum()) |
---|
249 | + d.addCallback(lambda seqnum: |
---|
250 | + self.failUnlessEqual(seqnum, 0)) |
---|
251 | + |
---|
252 | + d.addCallback(lambda ignored: |
---|
253 | + mr.get_root_hash()) |
---|
254 | + d.addCallback(lambda root_hash: |
---|
255 | + self.failUnlessEqual(self.root_hash, root_hash)) |
---|
256 | + |
---|
257 | + d.addCallback(lambda ignored: |
---|
258 | + mr.get_salt_hash()) |
---|
259 | + d.addCallback(lambda salt_hash: |
---|
260 | + self.failUnlessEqual(self.salt_hash, salt_hash)) |
---|
261 | + |
---|
262 | + d.addCallback(lambda ignored: |
---|
263 | + mr.get_seqnum()) |
---|
264 | + d.addCallback(lambda seqnum: |
---|
265 | + self.failUnlessEqual(0, seqnum)) |
---|
266 | + |
---|
267 | + d.addCallback(lambda ignored: |
---|
268 | + mr.get_encoding_parameters()) |
---|
269 | + def _check_encoding_parameters((k, n, segsize, datalen)): |
---|
270 | + self.failUnlessEqual(k, 3) |
---|
271 | + self.failUnlessEqual(n, 10) |
---|
272 | + self.failUnlessEqual(segsize, 6) |
---|
273 | + self.failUnlessEqual(datalen, 36) |
---|
274 | + d.addCallback(_check_encoding_parameters) |
---|
275 | + |
---|
276 | + d.addCallback(lambda ignored: |
---|
277 | + mr.get_checkstring()) |
---|
278 | + d.addCallback(lambda checkstring: |
---|
279 | + self.failUnlessEqual(checkstring, self.checkstring)) |
---|
280 | + return d |
---|
281 | + |
---|
282 | + |
---|
283 | + def test_read_with_different_tail_segment_size(self): |
---|
284 | + self.write_test_share_to_server("si1", tail_segment=True) |
---|
285 | + mr = MDMFSlotReadProxy(self.rref, self.secrets, "si1", 0) |
---|
286 | + d = mr.get_block_and_salt(5) |
---|
287 | + def _check_tail_segment(results): |
---|
288 | + block, salt = results |
---|
289 | + self.failUnlessEqual(len(block), 1) |
---|
290 | + self.failUnlessEqual(block, "a") |
---|
291 | + d.addCallback(_check_tail_segment) |
---|
292 | + return d |
---|
293 | + |
---|
294 | + |
---|
295 | + def test_get_block_with_invalid_segnum(self): |
---|
296 | + self.write_test_share_to_server("si1") |
---|
297 | + mr = MDMFSlotReadProxy(self.rref, self.secrets, "si1", 0) |
---|
298 | + d = defer.succeed(None) |
---|
299 | + d.addCallback(lambda ignored: |
---|
300 | + self.shouldFail(LayoutInvalid, "test invalid segnum", |
---|
301 | + None, |
---|
302 | + mr.get_block_and_salt, 7)) |
---|
303 | + return d |
---|
304 | + |
---|
305 | + |
---|
306 | + def test_get_encoding_parameters_first(self): |
---|
307 | + self.write_test_share_to_server("si1") |
---|
308 | + mr = MDMFSlotReadProxy(self.rref, self.secrets, "si1", 0) |
---|
309 | + d = mr.get_encoding_parameters() |
---|
310 | + def _check_encoding_parameters((k, n, segment_size, datalen)): |
---|
311 | + self.failUnlessEqual(k, 3) |
---|
312 | + self.failUnlessEqual(n, 10) |
---|
313 | + self.failUnlessEqual(segment_size, 6) |
---|
314 | + self.failUnlessEqual(datalen, 36) |
---|
315 | + d.addCallback(_check_encoding_parameters) |
---|
316 | + return d |
---|
317 | + |
---|
318 | + |
---|
319 | + def test_get_seqnum_first(self): |
---|
320 | + self.write_test_share_to_server("si1") |
---|
321 | + mr = MDMFSlotReadProxy(self.rref, self.secrets, "si1", 0) |
---|
322 | + d = mr.get_seqnum() |
---|
323 | + d.addCallback(lambda seqnum: |
---|
324 | + self.failUnlessEqual(seqnum, 0)) |
---|
325 | + return d |
---|
326 | + |
---|
327 | + |
---|
328 | + def test_get_root_hash_first(self): |
---|
329 | + self.write_test_share_to_server("si1") |
---|
330 | + mr = MDMFSlotReadProxy(self.rref, self.secrets, "si1", 0) |
---|
331 | + d = mr.get_root_hash() |
---|
332 | + d.addCallback(lambda root_hash: |
---|
333 | + self.failUnlessEqual(root_hash, self.root_hash)) |
---|
334 | + return d |
---|
335 | + |
---|
336 | + |
---|
337 | + def test_get_salt_hash_first(self): |
---|
338 | + self.write_test_share_to_server("si1") |
---|
339 | + mr = MDMFSlotReadProxy(self.rref, self.secrets, "si1", 0) |
---|
340 | + d = mr.get_salt_hash() |
---|
341 | + d.addCallback(lambda salt_hash: |
---|
342 | + self.failUnlessEqual(salt_hash, self.salt_hash)) |
---|
343 | + return d |
---|
344 | + |
---|
345 | + |
---|
346 | + def test_get_checkstring_first(self): |
---|
347 | + self.write_test_share_to_server("si1") |
---|
348 | + mr = MDMFSlotReadProxy(self.rref, self.secrets, "si1", 0) |
---|
349 | + d = mr.get_checkstring() |
---|
350 | + d.addCallback(lambda checkstring: |
---|
351 | + self.failUnlessEqual(checkstring, self.checkstring)) |
---|
352 | + return d |
---|
353 | + |
---|
354 | + |
---|
355 | + def test_write_read_vectors(self): |
---|
356 | + # When we write, the storage server returns a read vector along |
---|
357 | + # with the write result. If a write fails because |
---|
358 | + # the test vectors failed, this read vector can help us to |
---|
359 | + # diagnose the problem. This test ensures that the read vector |
---|
360 | + # is working appropriately. |
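---|
| + # (As the assertions below assume, the returned read vector maps |
---|
| + # share numbers to lists of read strings, e.g. {0: [<checkstring>]}.) |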
---|
361 | + mw = self._make_new_mw("si1", 0) |
---|
362 | + d = defer.succeed(None) |
---|
363 | + |
---|
364 | + # Write one block. This should return a checkstring of nothing, |
---|
365 | + # since there is no data there yet. |
---|
366 | + d.addCallback(lambda ignored: |
---|
367 | + mw.put_block(self.block, 0, self.salt)) |
---|
368 | + def _check_first_write(results): |
---|
369 | + result, readvs = results |
---|
370 | + self.failUnless(result) |
---|
371 | + self.failIf(readvs) |
---|
372 | + d.addCallback(_check_first_write) |
---|
373 | + # Now, there should be a different checkstring returned when |
---|
374 | + # we write other blocks |
---|
375 | + d.addCallback(lambda ignored: |
---|
376 | + mw.put_block(self.block, 1, self.salt)) |
---|
377 | + def _check_next_write(results): |
---|
378 | + result, readvs = results |
---|
379 | + self.failUnless(result) |
---|
380 | + self.expected_checkstring = mw.get_checkstring() |
---|
381 | + self.failUnlessIn(0, readvs) |
---|
382 | + self.failUnlessEqual(readvs[0][0], self.expected_checkstring) |
---|
383 | + d.addCallback(_check_next_write) |
---|
384 | + # Add the other four shares |
---|
385 | + for i in xrange(2, 6): |
---|
386 | + d.addCallback(lambda ignored, i=i: |
---|
387 | + mw.put_block(self.block, i, self.salt)) |
---|
388 | + d.addCallback(_check_next_write) |
---|
389 | + # Add the encrypted private key |
---|
390 | + d.addCallback(lambda ignored: |
---|
391 | + mw.put_encprivkey(self.encprivkey)) |
---|
392 | + d.addCallback(_check_next_write) |
---|
393 | + # Add the block hash tree and share hash tree |
---|
394 | + d.addCallback(lambda ignored: |
---|
395 | + mw.put_blockhashes(self.block_hash_tree)) |
---|
396 | + d.addCallback(_check_next_write) |
---|
397 | + d.addCallback(lambda ignored: |
---|
398 | + mw.put_sharehashes(self.share_hash_chain)) |
---|
399 | + d.addCallback(_check_next_write) |
---|
400 | + # Add the root hash and the salt hash. This should change the |
---|
401 | + # checkstring, but not in a way that we'll be able to see right |
---|
402 | + # now, since the read vectors are applied before the write |
---|
403 | + # vectors. |
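---|
| + # (That is, the readv returned with this write still shows the old |
---|
| + # checkstring; only the next write will read back the new one, which |
---|
| + # is what _check_old_testv_after_new_one_is_written verifies below.) |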
---|
404 | + d.addCallback(lambda ignored: |
---|
405 | + mw.put_root_and_salt_hashes(self.root_hash, self.salt_hash)) |
---|
406 | + def _check_old_testv_after_new_one_is_written(results): |
---|
407 | + result, readvs = results |
---|
408 | + self.failUnless(result) |
---|
409 | + self.failUnlessIn(0, readvs) |
---|
410 | + self.failUnlessEqual(self.expected_checkstring, |
---|
411 | + readvs[0][0]) |
---|
412 | + new_checkstring = mw.get_checkstring() |
---|
413 | + self.failIfEqual(new_checkstring, |
---|
414 | + readvs[0][0]) |
---|
415 | + d.addCallback(_check_old_testv_after_new_one_is_written) |
---|
416 | + # Now add the signature. This should succeed, meaning that the |
---|
417 | + # data gets written and the read vector matches what the writer |
---|
418 | + # thinks should be there. |
---|
419 | + d.addCallback(lambda ignored: |
---|
420 | + mw.put_signature(self.signature)) |
---|
421 | + d.addCallback(_check_next_write) |
---|
422 | + # The checkstring remains the same for the rest of the process. |
---|
423 | + return d |
---|
424 | + |
---|
425 | + |
---|
426 | + def test_blockhashes_after_share_hash_chain(self): |
---|
427 | + mw = self._make_new_mw("si1", 0) |
---|
428 | + d = defer.succeed(None) |
---|
429 | + # Put everything up to and including the share hash chain |
---|
430 | + for i in xrange(6): |
---|
431 | + d.addCallback(lambda ignored, i=i: |
---|
432 | + mw.put_block(self.block, i, self.salt)) |
---|
433 | + d.addCallback(lambda ignored: |
---|
434 | + mw.put_encprivkey(self.encprivkey)) |
---|
435 | + d.addCallback(lambda ignored: |
---|
436 | + mw.put_blockhashes(self.block_hash_tree)) |
---|
437 | + d.addCallback(lambda ignored: |
---|
438 | + mw.put_sharehashes(self.share_hash_chain)) |
---|
439 | + # Now try to put a block hash tree after the share hash chain. |
---|
440 | + # This won't necessarily overwrite the share hash chain, but it |
---|
441 | + # is a bad idea in general -- if we write one that is anything |
---|
442 | + # other than the exact size of the initial one, we will either |
---|
443 | + # overwrite the share hash chain, or give the reader (who uses |
---|
444 | + # the offset of the share hash chain as an end boundary) a |
---|
445 | + # shorter tree than they know to read, which will result in them |
---|
446 | + # reading junk. There is little reason to support it as a use |
---|
447 | + # case, so we should disallow it altogether. |
---|
448 | + d.addCallback(lambda ignored: |
---|
449 | + self.shouldFail(LayoutInvalid, "test same blockhashes", |
---|
450 | + None, |
---|
451 | + mw.put_blockhashes, self.block_hash_tree)) |
---|
452 | + return d |
---|
453 | + |
---|
454 | + |
---|
455 | + def test_encprivkey_after_blockhashes(self): |
---|
456 | + mw = self._make_new_mw("si1", 0) |
---|
457 | + d = defer.succeed(None) |
---|
458 | + # Put everything up to and including the block hash tree |
---|
459 | + for i in xrange(6): |
---|
460 | + d.addCallback(lambda ignored, i=i: |
---|
461 | + mw.put_block(self.block, i, self.salt)) |
---|
462 | + d.addCallback(lambda ignored: |
---|
463 | + mw.put_encprivkey(self.encprivkey)) |
---|
464 | + d.addCallback(lambda ignored: |
---|
465 | + mw.put_blockhashes(self.block_hash_tree)) |
---|
466 | + d.addCallback(lambda ignored: |
---|
467 | + self.shouldFail(LayoutInvalid, "out of order private key", |
---|
468 | + None, |
---|
469 | + mw.put_encprivkey, self.encprivkey)) |
---|
470 | + return d |
---|
471 | + |
---|
472 | + |
---|
473 | + def test_share_hash_chain_after_signature(self): |
---|
474 | + mw = self._make_new_mw("si1", 0) |
---|
475 | + d = defer.succeed(None) |
---|
476 | + # Put everything up to and including the signature |
---|
477 | + for i in xrange(6): |
---|
478 | + d.addCallback(lambda ignored, i=i: |
---|
479 | + mw.put_block(self.block, i, self.salt)) |
---|
480 | + d.addCallback(lambda ignored: |
---|
481 | + mw.put_encprivkey(self.encprivkey)) |
---|
482 | + d.addCallback(lambda ignored: |
---|
483 | + mw.put_blockhashes(self.block_hash_tree)) |
---|
484 | + d.addCallback(lambda ignored: |
---|
485 | + mw.put_sharehashes(self.share_hash_chain)) |
---|
486 | + d.addCallback(lambda ignored: |
---|
487 | + mw.put_root_and_salt_hashes(self.root_hash, self.salt_hash)) |
---|
488 | + d.addCallback(lambda ignored: |
---|
489 | + mw.put_signature(self.signature)) |
---|
490 | + # Now try to put the share hash chain again. This should fail |
---|
491 | + d.addCallback(lambda ignored: |
---|
492 | + self.shouldFail(LayoutInvalid, "out of order share hash chain", |
---|
493 | + None, |
---|
494 | + mw.put_sharehashes, self.share_hash_chain)) |
---|
495 | + return d |
---|
496 | + |
---|
497 | + |
---|
498 | + def test_signature_after_verification_key(self): |
---|
499 | + mw = self._make_new_mw("si1", 0) |
---|
500 | + d = defer.succeed(None) |
---|
501 | + # Put everything up to and including the verification key. |
---|
502 | + for i in xrange(6): |
---|
503 | + d.addCallback(lambda ignored, i=i: |
---|
504 | + mw.put_block(self.block, i, self.salt)) |
---|
505 | + d.addCallback(lambda ignored: |
---|
506 | + mw.put_encprivkey(self.encprivkey)) |
---|
507 | + d.addCallback(lambda ignored: |
---|
508 | + mw.put_blockhashes(self.block_hash_tree)) |
---|
509 | + d.addCallback(lambda ignored: |
---|
510 | + mw.put_sharehashes(self.share_hash_chain)) |
---|
511 | + d.addCallback(lambda ignored: |
---|
512 | + mw.put_root_and_salt_hashes(self.root_hash, self.salt_hash)) |
---|
513 | + d.addCallback(lambda ignored: |
---|
514 | + mw.put_signature(self.signature)) |
---|
515 | + d.addCallback(lambda ignored: |
---|
516 | + mw.put_verification_key(self.verification_key)) |
---|
517 | + # Now try to put the signature again. This should fail |
---|
518 | + d.addCallback(lambda ignored: |
---|
519 | + self.shouldFail(LayoutInvalid, "signature after verification", |
---|
520 | + None, |
---|
521 | + mw.put_signature, self.signature)) |
---|
522 | + return d |
---|
523 | + |
---|
524 | + |
---|
525 | + def test_uncoordinated_write(self): |
---|
526 | + # Make two mutable writers, both pointing to the same storage |
---|
527 | + # server, both at the same storage index, and try writing to the |
---|
528 | + # same share. |
---|
529 | + mw1 = self._make_new_mw("si1", 0) |
---|
530 | + mw2 = self._make_new_mw("si1", 0) |
---|
531 | + d = defer.succeed(None) |
---|
532 | + def _check_success(results): |
---|
533 | + result, readvs = results |
---|
534 | + self.failUnless(result) |
---|
535 | + |
---|
536 | + def _check_failure(results): |
---|
537 | + result, readvs = results |
---|
538 | + self.failIf(result) |
---|
539 | + |
---|
540 | + d.addCallback(lambda ignored: |
---|
541 | + mw1.put_block(self.block, 0, self.salt)) |
---|
542 | + d.addCallback(_check_success) |
---|
543 | + d.addCallback(lambda ignored: |
---|
544 | + mw2.put_block(self.block, 0, self.salt)) |
---|
545 | + d.addCallback(_check_failure) |
---|
546 | + return d |
---|
547 | + |
---|
548 | + |
---|
549 | + def test_invalid_salt_size(self): |
---|
550 | + # Salts need to be 16 bytes in size. Writes that attempt to |
---|
551 | + # write more or less than this should be rejected. |
---|
552 | + mw = self._make_new_mw("si1", 0) |
---|
553 | + invalid_salt = "a" * 17 # 17 bytes |
---|
554 | + another_invalid_salt = "b" * 15 # 15 bytes |
---|
555 | + d = defer.succeed(None) |
---|
556 | + d.addCallback(lambda ignored: |
---|
557 | + self.shouldFail(LayoutInvalid, "salt too big", |
---|
558 | + None, |
---|
559 | + mw.put_block, self.block, 0, invalid_salt)) |
---|
560 | + d.addCallback(lambda ignored: |
---|
561 | + self.shouldFail(LayoutInvalid, "salt too small", |
---|
562 | + None, |
---|
563 | + mw.put_block, self.block, 0, |
---|
564 | + another_invalid_salt)) |
---|
565 | + return d |
---|
566 | + |
---|
567 | + |
---|
568 | + def test_write_test_vectors(self): |
---|
569 | + # If we give the write proxy a bogus test vector at |
---|
570 | + # any point during the process, it should fail to write. |
---|
571 | + mw = self._make_new_mw("si1", 0) |
---|
572 | + mw.set_checkstring("this is a lie") |
---|
573 | + # The initial write should be expecting to find the improbable |
---|
574 | + # checkstring above in place; finding nothing, it should fail. |
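---|
| + # (Here the checkstring stands in for the test vector: judging by |
---|
| + # write_test_share_to_server above, it is the packed (version, |
---|
| + # sequence number, root hash, salt hash) prefix of the header that |
---|
| + # the server compares against the existing share before writing.) |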
---|
575 | + d = defer.succeed(None) |
---|
576 | + d.addCallback(lambda ignored: |
---|
577 | + mw.put_block(self.block, 0, self.salt)) |
---|
578 | + def _check_failure(results): |
---|
579 | + result, readv = results |
---|
580 | + self.failIf(result) |
---|
581 | + d.addCallback(_check_failure) |
---|
582 | + # Now set the checkstring to the empty string, which |
---|
583 | + # indicates that no share is there. |
---|
584 | + d.addCallback(lambda ignored: |
---|
585 | + mw.set_checkstring("")) |
---|
586 | + d.addCallback(lambda ignored: |
---|
587 | + mw.put_block(self.block, 0, self.salt)) |
---|
588 | + def _check_success(results): |
---|
589 | + result, readv = results |
---|
590 | + self.failUnless(result) |
---|
591 | + d.addCallback(_check_success) |
---|
592 | + # Now set the checkstring to something wrong |
---|
593 | + d.addCallback(lambda ignored: |
---|
594 | + mw.set_checkstring("something wrong")) |
---|
595 | + # This should fail to do anything |
---|
596 | + d.addCallback(lambda ignored: |
---|
597 | + mw.put_block(self.block, 1, self.salt)) |
---|
598 | + d.addCallback(_check_failure) |
---|
599 | + # Now set it back to what it should be. |
---|
600 | + d.addCallback(lambda ignored: |
---|
601 | + mw.set_checkstring(mw.get_checkstring())) |
---|
602 | + for i in xrange(1, 6): |
---|
603 | + d.addCallback(lambda ignored, i=i: |
---|
604 | + mw.put_block(self.block, i, self.salt)) |
---|
605 | + d.addCallback(_check_success) |
---|
606 | + d.addCallback(lambda ignored: |
---|
607 | + mw.put_encprivkey(self.encprivkey)) |
---|
608 | + d.addCallback(_check_success) |
---|
609 | + d.addCallback(lambda ignored: |
---|
610 | + mw.put_blockhashes(self.block_hash_tree)) |
---|
611 | + d.addCallback(_check_success) |
---|
612 | + d.addCallback(lambda ignored: |
---|
613 | + mw.put_sharehashes(self.share_hash_chain)) |
---|
614 | + d.addCallback(_check_success) |
---|
615 | + def _keep_old_checkstring(ignored): |
---|
616 | + self.old_checkstring = mw.get_checkstring() |
---|
617 | + mw.set_checkstring("foobarbaz") |
---|
618 | + d.addCallback(_keep_old_checkstring) |
---|
619 | + d.addCallback(lambda ignored: |
---|
620 | + mw.put_root_and_salt_hashes(self.root_hash, self.salt_hash)) |
---|
621 | + d.addCallback(_check_failure) |
---|
622 | + d.addCallback(lambda ignored: |
---|
623 | + self.failUnlessEqual(self.old_checkstring, mw.get_checkstring())) |
---|
624 | + def _restore_old_checkstring(ignored): |
---|
625 | + mw.set_checkstring(self.old_checkstring) |
---|
626 | + d.addCallback(_restore_old_checkstring) |
---|
627 | + d.addCallback(lambda ignored: |
---|
628 | + mw.put_root_and_salt_hashes(self.root_hash, self.salt_hash)) |
---|
629 | + # The checkstring should have been set appropriately for us on |
---|
630 | + # the last write; if we try to change it to something else, |
---|
631 | + # that change should cause the verification key step to fail. |
---|
632 | + d.addCallback(lambda ignored: |
---|
633 | + mw.set_checkstring("something else")) |
---|
634 | + d.addCallback(lambda ignored: |
---|
635 | + mw.put_signature(self.signature)) |
---|
636 | + d.addCallback(_check_failure) |
---|
637 | + d.addCallback(lambda ignored: |
---|
638 | + mw.set_checkstring(mw.get_checkstring())) |
---|
639 | + d.addCallback(lambda ignored: |
---|
640 | + mw.put_signature(self.signature)) |
---|
641 | + d.addCallback(_check_success) |
---|
642 | + d.addCallback(lambda ignored: |
---|
643 | + mw.put_verification_key(self.verification_key)) |
---|
644 | + d.addCallback(_check_success) |
---|
645 | + return d |
---|
646 | + |
---|
647 | + |
---|
648 | + def test_offset_only_set_on_success(self): |
---|
649 | + # The write proxy should be smart enough to detect when a write |
---|
650 | + # has failed, and to temper its definition of progress based on |
---|
651 | + # that. |
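---|
| + # (Concretely: a put that fails its test vector should not advance |
---|
| + # the proxy's idea of where the next field belongs, so the later |
---|
| + # out-of-order put below is still rejected with LayoutInvalid.) |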
---|
652 | + mw = self._make_new_mw("si1", 0) |
---|
653 | + d = defer.succeed(None) |
---|
654 | + for i in xrange(1, 6): |
---|
655 | + d.addCallback(lambda ignored, i=i: |
---|
656 | + mw.put_block(self.block, i, self.salt)) |
---|
657 | + def _break_checkstring(ignored): |
---|
658 | + self._old_checkstring = mw.get_checkstring() |
---|
659 | + mw.set_checkstring("foobarbaz") |
---|
660 | + |
---|
661 | + def _fix_checkstring(ignored): |
---|
662 | + mw.set_checkstring(self._old_checkstring) |
---|
663 | + |
---|
664 | + d.addCallback(_break_checkstring) |
---|
665 | + |
---|
666 | + # Setting the encrypted private key shouldn't work now, which is |
---|
667 | + # to be expected and is tested elsewhere. We also want to make |
---|
668 | + # sure that we can't add the block hash tree after a failed |
---|
669 | + # write of this sort. |
---|
670 | + d.addCallback(lambda ignored: |
---|
671 | + mw.put_encprivkey(self.encprivkey)) |
---|
672 | + d.addCallback(lambda ignored: |
---|
673 | + self.shouldFail(LayoutInvalid, "test out-of-order blockhashes", |
---|
674 | + None, |
---|
675 | + mw.put_blockhashes, self.block_hash_tree)) |
---|
676 | + d.addCallback(_fix_checkstring) |
---|
677 | + d.addCallback(lambda ignored: |
---|
678 | + mw.put_encprivkey(self.encprivkey)) |
---|
679 | + d.addCallback(_break_checkstring) |
---|
680 | + d.addCallback(lambda ignored: |
---|
681 | + mw.put_blockhashes(self.block_hash_tree)) |
---|
682 | + d.addCallback(lambda ignored: |
---|
683 | + self.shouldFail(LayoutInvalid, "test out-of-order sharehashes", |
---|
684 | + None, |
---|
685 | + mw.put_sharehashes, self.share_hash_chain)) |
---|
686 | + d.addCallback(_fix_checkstring) |
---|
687 | + d.addCallback(lambda ignored: |
---|
688 | + mw.put_blockhashes(self.block_hash_tree)) |
---|
689 | + d.addCallback(_break_checkstring) |
---|
690 | + d.addCallback(lambda ignored: |
---|
691 | + mw.put_sharehashes(self.share_hash_chain)) |
---|
692 | + d.addCallback(lambda ignored: |
---|
693 | + self.shouldFail(LayoutInvalid, "out-of-order root hash", |
---|
694 | + None, |
---|
695 | + mw.put_root_and_salt_hashes, |
---|
696 | + self.root_hash, self.salt_hash)) |
---|
697 | + d.addCallback(_fix_checkstring) |
---|
698 | + d.addCallback(lambda ignored: |
---|
699 | + mw.put_sharehashes(self.share_hash_chain)) |
---|
700 | + d.addCallback(_break_checkstring) |
---|
701 | + d.addCallback(lambda ignored: |
---|
702 | + mw.put_root_and_salt_hashes(self.root_hash, self.salt_hash)) |
---|
703 | + d.addCallback(lambda ignored: |
---|
704 | + self.shouldFail(LayoutInvalid, "out-of-order signature", |
---|
705 | + None, |
---|
706 | + mw.put_signature, self.signature)) |
---|
707 | + d.addCallback(_fix_checkstring) |
---|
708 | + d.addCallback(lambda ignored: |
---|
709 | + mw.put_root_and_salt_hashes(self.root_hash, self.salt_hash)) |
---|
710 | + d.addCallback(_break_checkstring) |
---|
711 | + d.addCallback(lambda ignored: |
---|
712 | + mw.put_signature(self.signature)) |
---|
713 | + d.addCallback(lambda ignored: |
---|
714 | + self.shouldFail(LayoutInvalid, "out-of-order verification key", |
---|
715 | + None, |
---|
716 | + mw.put_verification_key, |
---|
717 | + self.verification_key)) |
---|
718 | + d.addCallback(_fix_checkstring) |
---|
719 | + d.addCallback(lambda ignored: |
---|
720 | + mw.put_signature(self.signature)) |
---|
721 | + d.addCallback(_break_checkstring) |
---|
722 | + d.addCallback(lambda ignored: |
---|
723 | + mw.put_verification_key(self.verification_key)) |
---|
724 | + d.addCallback(lambda ignored: |
---|
725 | + self.shouldFail(LayoutInvalid, "out-of-order finish", |
---|
726 | + None, |
---|
727 | + mw.finish_publishing)) |
---|
728 | + return d |
---|
729 | + |
---|
730 | + |
---|
731 | + def serialize_blockhashes(self, blockhashes): |
---|
732 | + return "".join(blockhashes) |
---|
733 | + |
---|
734 | + |
---|
735 | + def serialize_sharehashes(self, sharehashes): |
---|
736 | + return "".join([struct.pack(">H32s", i, sharehashes[i]) |
---|
737 | + for i in sorted(sharehashes.keys())]) |
---|
738 | + |
---|
739 | + |
---|
740 | + def test_write(self): |
---|
741 | + # This translates to a file with 6 6-byte segments, and with 2-byte |
---|
742 | + # blocks. |
---|
743 | + mw = self._make_new_mw("si1", 0) |
---|
744 | + mw2 = self._make_new_mw("si1", 1) |
---|
745 | + # Test writing some blocks. |
---|
746 | + read = self.ss.remote_slot_readv |
---|
747 | + def _check_block_write(i, share): |
---|
748 | + self.failUnlessEqual(read("si1", [share], [(239 + (i * 2), 2)]), |
---|
749 | + {share: [self.block]}) |
---|
750 | + self.failUnlessEqual(read("si1", [share], [(143 + (i * 16), 16)]), |
---|
751 | + {share: [self.salt]}) |
---|
752 | + d = defer.succeed(None) |
---|
753 | + for i in xrange(6): |
---|
754 | + d.addCallback(lambda ignored, i=i: |
---|
755 | + mw.put_block(self.block, i, self.salt)) |
---|
756 | + d.addCallback(lambda ignored, i=i: |
---|
757 | + _check_block_write(i, 0)) |
---|
758 | + # Now try the same thing, but with share 1 instead of share 0. |
---|
759 | + for i in xrange(6): |
---|
760 | + d.addCallback(lambda ignored, i=i: |
---|
761 | + mw2.put_block(self.block, i, self.salt)) |
---|
762 | + d.addCallback(lambda ignored, i=i: |
---|
763 | + _check_block_write(i, 1)) |
---|
764 | + |
---|
765 | + def _spy_on_results(results): |
---|
766 | + print read("si1", [], [(0, 40000000)]) |
---|
767 | + return results |
---|
768 | + |
---|
769 | + # Next, we make a fake encrypted private key, and put it onto the |
---|
770 | + # storage server. |
---|
771 | + d.addCallback(lambda ignored: |
---|
772 | + mw.put_encprivkey(self.encprivkey)) |
---|
773 | + # So far, we have: |
---|
774 | + # header: 143 bytes |
---|
775 | + # salts: 16 * 6 = 96 bytes |
---|
776 | + # blocks: 2 * 6 = 12 bytes |
---|
777 | + # = 251 bytes |
---|
778 | + expected_private_key_offset = 251 |
---|
779 | + self.failUnlessEqual(len(self.encprivkey), 7) |
---|
780 | + d.addCallback(lambda ignored: |
---|
781 | + self.failUnlessEqual(read("si1", [0], [(251, 7)]), |
---|
782 | + {0: [self.encprivkey]})) |
---|
783 | + |
---|
784 | + # Next, we put a fake block hash tree. |
---|
785 | + d.addCallback(lambda ignored: |
---|
786 | + mw.put_blockhashes(self.block_hash_tree)) |
---|
787 | + # The block hash tree got inserted at: |
---|
788 | + # header + salts + blocks: 251 bytes |
---|
789 | + # encrypted private key: 7 bytes |
---|
790 | + # = 258 bytes |
---|
791 | + expected_block_hash_offset = 258 |
---|
792 | + self.failUnlessEqual(len(self.block_hash_tree_s), 32 * 6) |
---|
793 | + d.addCallback(lambda ignored: |
---|
794 | + self.failUnlessEqual(read("si1", [0], [(expected_block_hash_offset, 32 * 6)]), |
---|
795 | + {0: [self.block_hash_tree_s]})) |
---|
796 | + |
---|
797 | + # Next, put a fake share hash chain |
---|
798 | + d.addCallback(lambda ignored: |
---|
799 | + mw.put_sharehashes(self.share_hash_chain)) |
---|
800 | + # The share hash chain got inserted at: |
---|
801 | + # header + salts + blocks + private key = 258 bytes |
---|
802 | + # block hash tree: 32 * 6 = 192 bytes |
---|
803 | + # = 450 bytes |
---|
804 | + expected_share_hash_offset = 450 |
---|
805 | + d.addCallback(lambda ignored: |
---|
806 | + self.failUnlessEqual(read("si1", [0],[(expected_share_hash_offset, (32 + 2) * 6)]), |
---|
807 | + {0: [self.share_hash_chain_s]})) |
---|
808 | + |
---|
809 | + # Next, we put what is supposed to be the root hash of |
---|
810 | + # our share hash tree but isn't, along with the flat hash |
---|
811 | + # of all the salts. |
---|
812 | + d.addCallback(lambda ignored: |
---|
813 | + mw.put_root_and_salt_hashes(self.root_hash, self.salt_hash)) |
---|
814 | + # The root hash gets inserted at byte 9 (its position is in the header, |
---|
815 | + # and is fixed). The salt hash is right after it. |
---|
816 | + def _check(ignored): |
---|
817 | + self.failUnlessEqual(read("si1", [0], [(9, 32)]), |
---|
818 | + {0: [self.root_hash]}) |
---|
819 | + self.failUnlessEqual(read("si1", [0], [(41, 32)]), |
---|
820 | + {0: [self.salt_hash]}) |
---|
821 | + d.addCallback(_check) |
---|
822 | + |
---|
823 | + # Next, we put a signature of the header block. |
---|
824 | + d.addCallback(lambda ignored: |
---|
825 | + mw.put_signature(self.signature)) |
---|
826 | + # The signature gets written to: |
---|
827 | + # header + salts + blocks + block and share hash tree = 654 |
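---|
| + # (450, the share hash chain offset, plus (32 + 2) * 6 = 204 bytes |
---|
| + # of share hash chain, as read back above.) |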
---|
828 | + expected_signature_offset = 654 |
---|
829 | + self.failUnlessEqual(len(self.signature), 9) |
---|
830 | + d.addCallback(lambda ignored: |
---|
831 | + self.failUnlessEqual(read("si1", [0], [(expected_signature_offset, 9)]), |
---|
832 | + {0: [self.signature]})) |
---|
833 | + |
---|
834 | + # Next, we put the verification key |
---|
835 | + d.addCallback(lambda ignored: |
---|
836 | + mw.put_verification_key(self.verification_key)) |
---|
837 | + # The verification key gets written to: |
---|
838 | + # 654 + 9 = 663 bytes |
---|
839 | + expected_verification_key_offset = 663 |
---|
840 | + self.failUnlessEqual(len(self.verification_key), 6) |
---|
841 | + d.addCallback(lambda ignored: |
---|
842 | + self.failUnlessEqual(read("si1", [0], [(expected_verification_key_offset, 6)]), |
---|
843 | + {0: [self.verification_key]})) |
---|
844 | + |
---|
845 | + def _check_signable(ignored): |
---|
846 | + # Make sure that the signable is what we think it should be. |
---|
847 | + signable = mw.get_signable() |
---|
848 | + verno, seq, roothash, salthash, k, n, segsize, datalen = \ |
---|
849 | + struct.unpack(">BQ32s32sBBQQ", |
---|
850 | + signable) |
---|
851 | + self.failUnlessEqual(verno, 1) |
---|
852 | + self.failUnlessEqual(seq, 0) |
---|
853 | + self.failUnlessEqual(roothash, self.root_hash) |
---|
854 | + self.failUnlessEqual(salthash, self.salt_hash) |
---|
855 | + self.failUnlessEqual(k, 3) |
---|
856 | + self.failUnlessEqual(n, 10) |
---|
857 | + self.failUnlessEqual(segsize, 6) |
---|
858 | + self.failUnlessEqual(datalen, 36) |
---|
859 | + d.addCallback(_check_signable) |
---|
860 | + # Next, we cause the offset table to be published. |
---|
861 | + d.addCallback(lambda ignored: |
---|
862 | + mw.finish_publishing()) |
---|
863 | + expected_eof_offset = 669 |
---|
864 | + |
---|
865 | + # The offset table starts at byte 91. Happily, we have already |
---|
866 | + # worked out most of these offsets above, but we want to make |
---|
867 | + # sure that the representation on disk agrees with what we've |
---|
868 | + # calculated. |
---|
869 | + # |
---|
870 | + # (we don't have an explicit offset for the AES salts, because |
---|
871 | + # we know that they start right after the header) |
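---|
| + # (so the salts begin at byte 143, and the share data that the |
---|
| + # offset table points at begins at 143 + 16 * 6 = 239) |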
---|
872 | + def _check_offsets(ignored): |
---|
873 | + # Check the version number to make sure that it is correct. |
---|
874 | + expected_version_number = struct.pack(">B", 1) |
---|
875 | + self.failUnlessEqual(read("si1", [0], [(0, 1)]), |
---|
876 | + {0: [expected_version_number]}) |
---|
877 | + # Check the sequence number to make sure that it is correct |
---|
878 | + expected_sequence_number = struct.pack(">Q", 0) |
---|
879 | + self.failUnlessEqual(read("si1", [0], [(1, 8)]), |
---|
880 | + {0: [expected_sequence_number]}) |
---|
881 | + # Check that the encoding parameters (k, N, segement size, data |
---|
882 | + # length) are what they should be. These are 3, 10, 6, 36 |
---|
883 | + expected_k = struct.pack(">B", 3) |
---|
884 | + self.failUnlessEqual(read("si1", [0], [(73, 1)]), |
---|
885 | + {0: [expected_k]}) |
---|
886 | + expected_n = struct.pack(">B", 10) |
---|
887 | + self.failUnlessEqual(read("si1", [0], [(74, 1)]), |
---|
888 | + {0: [expected_n]}) |
---|
889 | + expected_segment_size = struct.pack(">Q", 6) |
---|
890 | + self.failUnlessEqual(read("si1", [0], [(75, 8)]), |
---|
891 | + {0: [expected_segment_size]}) |
---|
892 | + expected_data_length = struct.pack(">Q", 36) |
---|
893 | + self.failUnlessEqual(read("si1", [0], [(83, 8)]), |
---|
894 | + {0: [expected_data_length]}) |
---|
895 | + # 91 4 The offset of the share data |
---|
896 | + expected_offset = struct.pack(">L", 239) |
---|
897 | + self.failUnlessEqual(read("si1", [0], [(91, 4)]), |
---|
898 | + {0: [expected_offset]}) |
---|
899 | + # 95 8 The offset of the encrypted private key |
---|
900 | + expected_offset = struct.pack(">Q", expected_private_key_offset) |
---|
901 | + self.failUnlessEqual(read("si1", [0], [(95, 8)]), |
---|
902 | + {0: [expected_offset]}) |
---|
903 | + # 103 8 The offset of the block hash tree |
---|
904 | + expected_offset = struct.pack(">Q", expected_block_hash_offset) |
---|
905 | + self.failUnlessEqual(read("si1", [0], [(103, 8)]), |
---|
906 | + {0: [expected_offset]}) |
---|
907 | + # 111 8 The offset of the share hash chain |
---|
908 | + expected_offset = struct.pack(">Q", expected_share_hash_offset) |
---|
909 | + self.failUnlessEqual(read("si1", [0], [(111, 8)]), |
---|
910 | + {0: [expected_offset]}) |
---|
911 | + # 119 8 The offset of the signature |
---|
912 | + expected_offset = struct.pack(">Q", expected_signature_offset) |
---|
913 | + self.failUnlessEqual(read("si1", [0], [(119, 8)]), |
---|
914 | + {0: [expected_offset]}) |
---|
915 | + # 127 8 The offset of the verification key |
---|
916 | + expected_offset = struct.pack(">Q", expected_verification_key_offset) |
---|
917 | + self.failUnlessEqual(read("si1", [0], [(127, 8)]), |
---|
918 | + {0: [expected_offset]}) |
---|
919 | + # 135 8 offset of the EOF |
---|
920 | + expected_offset = struct.pack(">Q", expected_eof_offset) |
---|
921 | + self.failUnlessEqual(read("si1", [0], [(135, 8)]), |
---|
922 | + {0: [expected_offset]}) |
---|
923 | + # = 143 bytes in total. |
---|
924 | + d.addCallback(_check_offsets) |
---|
925 | + return d |
---|
926 | + |
---|
927 | + def _make_new_mw(self, si, share, datalength=36): |
---|
928 | + # This is a file of size 36 bytes. Since it has a segment |
---|
929 | + # size of 6, we know that it has six 6-byte segments, which will |
---|
930 | + # be split into blocks of 2 bytes because our FEC k |
---|
931 | + # parameter is 3. |
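---|
| + # (That is, 36 bytes / 6 bytes per segment = 6 segments, and each |
---|
| + # 6-byte segment erasure-codes into 6 / k = 2-byte blocks.) |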
---|
932 | + mw = MDMFSlotWriteProxy(share, self.rref, si, self.secrets, 0, 3, 10, |
---|
933 | + 6, datalength) |
---|
934 | + return mw |
---|
935 | + |
---|
936 | + |
---|
937 | + def test_write_rejected_with_too_many_blocks(self): |
---|
938 | + mw = self._make_new_mw("si0", 0) |
---|
939 | + |
---|
940 | + # Try writing too many blocks. We should not be able to write |
---|
941 | + # more than 6 blocks into each share. |
---|
943 | + d = defer.succeed(None) |
---|
944 | + for i in xrange(6): |
---|
945 | + d.addCallback(lambda ignored, i=i: |
---|
946 | + mw.put_block(self.block, i, self.salt)) |
---|
947 | + d.addCallback(lambda ignored: |
---|
948 | + self.shouldFail(LayoutInvalid, "too many blocks", |
---|
949 | + None, |
---|
950 | + mw.put_block, self.block, 7, self.salt)) |
---|
951 | + return d |
---|
952 | + |
---|
953 | + |
---|
954 | + def test_write_rejected_with_invalid_salt(self): |
---|
955 | + # Try writing an invalid salt. Salts are 16 bytes -- any more or |
---|
956 | + # less should cause an error. |
---|
957 | + mw = self._make_new_mw("si1", 0) |
---|
958 | + bad_salt = "a" * 17 # 17 bytes |
---|
959 | + d = defer.succeed(None) |
---|
960 | + d.addCallback(lambda ignored: |
---|
961 | + self.shouldFail(LayoutInvalid, "test_invalid_salt", |
---|
962 | + None, mw.put_block, self.block, 7, bad_salt)) |
---|
963 | + return d |
---|
964 | + |
---|
965 | + |
---|
966 | + def test_write_rejected_with_invalid_salt_hash(self): |
---|
967 | + # Try writing an invalid salt hash. These should be SHA256d, and |
---|
968 | + # 32 bytes long as a result. |
---|
969 | + mw = self._make_new_mw("si2", 0) |
---|
970 | + invalid_salt_hash = "b" * 31 |
---|
971 | + d = defer.succeed(None) |
---|
972 | + # Before this test can work, we need to put some blocks + salts, |
---|
973 | + # a block hash tree, and a share hash tree. Otherwise, we'll see |
---|
974 | + # failures that match what we are looking for, but are caused by |
---|
975 | + # the constraints imposed on operation ordering. |
---|
976 | + for i in xrange(6): |
---|
977 | + d.addCallback(lambda ignored, i=i: |
---|
978 | + mw.put_block(self.block, i, self.salt)) |
---|
979 | + d.addCallback(lambda ignored: |
---|
980 | + mw.put_encprivkey(self.encprivkey)) |
---|
981 | + d.addCallback(lambda ignored: |
---|
982 | + mw.put_blockhashes(self.block_hash_tree)) |
---|
983 | + d.addCallback(lambda ignored: |
---|
984 | + mw.put_sharehashes(self.share_hash_chain)) |
---|
985 | + d.addCallback(lambda ignored: |
---|
986 | + self.shouldFail(LayoutInvalid, "invalid salt hash", |
---|
987 | + None, mw.put_root_and_salt_hashes, |
---|
988 | + self.root_hash, invalid_salt_hash)) |
---|
989 | + return d |
---|
990 | + |
---|
991 | + |
---|
992 | + def test_write_rejected_with_invalid_root_hash(self): |
---|
993 | + # Try writing an invalid root hash. This should be SHA256d, and |
---|
994 | + # 32 bytes long as a result. |
---|
995 | + mw = self._make_new_mw("si2", 0) |
---|
996 | + # 17 bytes != 32 bytes |
---|
997 | + invalid_root_hash = "a" * 17 |
---|
998 | + d = defer.succeed(None) |
---|
999 | + # Before this test can work, we need to put some blocks + salts, |
---|
1000 | + # a block hash tree, and a share hash tree. Otherwise, we'll see |
---|
1001 | + # failures that match what we are looking for, but are caused by |
---|
1002 | + # the constraints imposed on operation ordering. |
---|
1003 | + for i in xrange(6): |
---|
1004 | + d.addCallback(lambda ignored, i=i: |
---|
1005 | + mw.put_block(self.block, i, self.salt)) |
---|
1006 | + d.addCallback(lambda ignored: |
---|
1007 | + mw.put_encprivkey(self.encprivkey)) |
---|
1008 | + d.addCallback(lambda ignored: |
---|
1009 | + mw.put_blockhashes(self.block_hash_tree)) |
---|
1010 | + d.addCallback(lambda ignored: |
---|
1011 | + mw.put_sharehashes(self.share_hash_chain)) |
---|
1012 | + d.addCallback(lambda ignored: |
---|
1013 | + self.shouldFail(LayoutInvalid, "invalid root hash", |
---|
1014 | + None, mw.put_root_and_salt_hashes, |
---|
1015 | + invalid_root_hash, self.salt_hash)) |
---|
1016 | + return d |
---|
1017 | + |
---|
1018 | + |
---|
1019 | + def test_write_rejected_with_invalid_blocksize(self): |
---|
1020 | + # The blocksize implied by the writer that we get from |
---|
1021 | + # _make_new_mw is 2 bytes -- any more or any less than this |
---|
1022 | + # should cause a failure, unless it is the tail segment, in |
---|
1023 | + # which case it need not fail. |
---|
1024 | + invalid_block = "a" |
---|
1025 | + mw = self._make_new_mw("si3", 0, 33) # implies a tail segment with |
---|
1026 | + # one byte blocks |
---|
1027 | + # 1 byte != 2 bytes |
---|
1028 | + d = defer.succeed(None) |
---|
1029 | + d.addCallback(lambda ignored, invalid_block=invalid_block: |
---|
1030 | + self.shouldFail(LayoutInvalid, "test blocksize too small", |
---|
1031 | + None, mw.put_block, invalid_block, 0, |
---|
1032 | + self.salt)) |
---|
1033 | + invalid_block = invalid_block * 3 |
---|
1034 | + # 3 bytes != 2 bytes |
---|
1035 | + d.addCallback(lambda ignored: |
---|
1036 | + self.shouldFail(LayoutInvalid, "test blocksize too large", |
---|
1037 | + None, |
---|
1038 | + mw.put_block, invalid_block, 0, self.salt)) |
---|
1039 | + for i in xrange(5): |
---|
1040 | + d.addCallback(lambda ignored, i=i: |
---|
1041 | + mw.put_block(self.block, i, self.salt)) |
---|
1042 | + # Try to put an invalid tail segment |
---|
1043 | + d.addCallback(lambda ignored: |
---|
1044 | + self.shouldFail(LayoutInvalid, "test invalid tail segment", |
---|
1045 | + None, |
---|
1046 | + mw.put_block, self.block, 5, self.salt)) |
---|
1047 | + valid_block = "a" |
---|
1048 | + d.addCallback(lambda ignored: |
---|
1049 | + mw.put_block(valid_block, 5, self.salt)) |
---|
1050 | + return d |
---|
1051 | + |
---|
1052 | + |
---|
1053 | + def test_write_enforces_order_constraints(self): |
---|
1054 | + # We require that the MDMFSlotWriteProxy be interacted with in a |
---|
1055 | + # specific way. |
---|
1056 | + # That way is: |
---|
1057 | + # 0: __init__ |
---|
1058 | + # 1: write blocks and salts |
---|
1059 | + # 2: Write the encrypted private key |
---|
1060 | + # 3: Write the block hashes |
---|
1061 | + # 4: Write the share hashes |
---|
1062 | + # 5: Write the root hash and salt hash |
---|
1063 | + # 6: Write the signature and verification key |
---|
1064 | + # 7: Write the file. |
---|
1065 | + # |
---|
1066 | + # Some of these can be performed out-of-order, and some can't. |
---|
1067 | + # The dependencies that I want to test here are: |
---|
1068 | + # - Private key before block hashes |
---|
1069 | + # - share hashes and block hashes before root hash |
---|
1070 | + # - root hash before signature |
---|
1071 | + # - signature before verification key |
---|
1072 | + mw0 = self._make_new_mw("si0", 0) |
---|
1073 | + # Write some shares |
---|
1074 | + d = defer.succeed(None) |
---|
1075 | + for i in xrange(6): |
---|
1076 | + d.addCallback(lambda ignored, i=i: |
---|
1077 | + mw0.put_block(self.block, i, self.salt)) |
---|
1078 | + # Try to write the block hashes before writing the encrypted |
---|
1079 | + # private key |
---|
1080 | + d.addCallback(lambda ignored: |
---|
1081 | + self.shouldFail(LayoutInvalid, "block hashes before key", |
---|
1082 | + None, mw0.put_blockhashes, |
---|
1083 | + self.block_hash_tree)) |
---|
1084 | + |
---|
1085 | + # Write the private key. |
---|
1086 | + d.addCallback(lambda ignored: |
---|
1087 | + mw0.put_encprivkey(self.encprivkey)) |
---|
1088 | + |
---|
1089 | + |
---|
1090 | + # Try to write the share hash chain without writing the block |
---|
1091 | + # hash tree |
---|
1092 | + d.addCallback(lambda ignored: |
---|
1093 | + self.shouldFail(LayoutInvalid, "share hash chain before " |
---|
1094 | + "block hash tree", |
---|
1095 | + None, |
---|
1096 | + mw0.put_sharehashes, self.share_hash_chain)) |
---|
1097 | + |
---|
1098 | + # Try to write the root hash and salt hash without writing either the |
---|
1099 | + # block hashes or the share hashes |
---|
1100 | + d.addCallback(lambda ignored: |
---|
1101 | + self.shouldFail(LayoutInvalid, "root hash before share hashes", |
---|
1102 | + None, |
---|
1103 | + mw0.put_root_and_salt_hashes, |
---|
1104 | + self.root_hash, self.salt_hash)) |
---|
1105 | + |
---|
1106 | + # Now write the block hashes and try again |
---|
1107 | + d.addCallback(lambda ignored: |
---|
1108 | + mw0.put_blockhashes(self.block_hash_tree)) |
---|
1109 | + d.addCallback(lambda ignored: |
---|
1110 | + self.shouldFail(LayoutInvalid, "root hash before share hashes", |
---|
1111 | + None, mw0.put_root_and_salt_hashes, |
---|
1112 | + self.root_hash, self.salt_hash)) |
---|
1113 | + |
---|
1114 | + # We haven't yet put the root hash on the share, so we shouldn't |
---|
1115 | + # be able to sign it. |
---|
1116 | + d.addCallback(lambda ignored: |
---|
1117 | + self.shouldFail(LayoutInvalid, "signature before root hash", |
---|
1118 | + None, mw0.put_signature, self.signature)) |
---|
1119 | + |
---|
1120 | + d.addCallback(lambda ignored: |
---|
1121 | + self.failUnlessRaises(LayoutInvalid, mw0.get_signable)) |
---|
1122 | + |
---|
1123 | + # ..and, since that fails, we also shouldn't be able to put the |
---|
1124 | + # verification key. |
---|
1125 | + d.addCallback(lambda ignored: |
---|
1126 | + self.shouldFail(LayoutInvalid, "key before signature", |
---|
1127 | + None, mw0.put_verification_key, |
---|
1128 | + self.verification_key)) |
---|
1129 | + |
---|
1130 | + # Now write the share hashes and verify that it works. |
---|
1131 | + d.addCallback(lambda ignored: |
---|
1132 | + mw0.put_sharehashes(self.share_hash_chain)) |
---|
1133 | + |
---|
1134 | + # We should still be unable to sign the header |
---|
1135 | + d.addCallback(lambda ignored: |
---|
1136 | + self.shouldFail(LayoutInvalid, "signature before hashes", |
---|
1137 | + None, |
---|
1138 | + mw0.put_signature, self.signature)) |
---|
1139 | + |
---|
1140 | + # We should be able to write the root hash now too |
---|
1141 | + d.addCallback(lambda ignored: |
---|
1142 | + mw0.put_root_and_salt_hashes(self.root_hash, self.salt_hash)) |
---|
1143 | + |
---|
1144 | + # We should still be unable to put the verification key |
---|
1145 | + d.addCallback(lambda ignored: |
---|
1146 | + self.shouldFail(LayoutInvalid, "key before signature", |
---|
1147 | + None, mw0.put_verification_key, |
---|
1148 | + self.verification_key)) |
---|
1149 | + |
---|
1150 | + d.addCallback(lambda ignored: |
---|
1151 | + mw0.put_signature(self.signature)) |
---|
1152 | + |
---|
1153 | + # We shouldn't be able to write the offsets to the remote server |
---|
1154 | + # until the offset table is finished; IOW, until we have written |
---|
1155 | + # the verification key. |
---|
1156 | + d.addCallback(lambda ignored: |
---|
1157 | + self.shouldFail(LayoutInvalid, "offsets before verification key", |
---|
1158 | + None, |
---|
1159 | + mw0.finish_publishing)) |
---|
1160 | + |
---|
1161 | + d.addCallback(lambda ignored: |
---|
1162 | + mw0.put_verification_key(self.verification_key)) |
---|
1163 | + return d |
---|
1164 | + |
---|
1165 | + |
---|
1166 | + def test_end_to_end(self): |
---|
1167 | + mw = self._make_new_mw("si1", 0) |
---|
1168 | + # Write a share using the mutable writer, and make sure that the |
---|
1169 | + # reader knows how to read everything back to us. |
---|
1170 | + d = defer.succeed(None) |
---|
1171 | + for i in xrange(6): |
---|
1172 | + d.addCallback(lambda ignored, i=i: |
---|
1173 | + mw.put_block(self.block, i, self.salt)) |
---|
1174 | + d.addCallback(lambda ignored: |
---|
1175 | + mw.put_encprivkey(self.encprivkey)) |
---|
1176 | + d.addCallback(lambda ignored: |
---|
1177 | + mw.put_blockhashes(self.block_hash_tree)) |
---|
1178 | + d.addCallback(lambda ignored: |
---|
1179 | + mw.put_sharehashes(self.share_hash_chain)) |
---|
1180 | + d.addCallback(lambda ignored: |
---|
1181 | + mw.put_root_and_salt_hashes(self.root_hash, self.salt_hash)) |
---|
1182 | + d.addCallback(lambda ignored: |
---|
1183 | + mw.put_signature(self.signature)) |
---|
1184 | + d.addCallback(lambda ignored: |
---|
1185 | + mw.put_verification_key(self.verification_key)) |
---|
1186 | + d.addCallback(lambda ignored: |
---|
1187 | + mw.finish_publishing()) |
---|
1188 | + |
---|
1189 | + mr = MDMFSlotReadProxy(self.rref, self.secrets, "si1", 0) |
---|
1190 | + def _check_block_and_salt((block, salt)): |
---|
1191 | + self.failUnlessEqual(block, self.block) |
---|
1192 | + self.failUnlessEqual(salt, self.salt) |
---|
1193 | + |
---|
1194 | + for i in xrange(6): |
---|
1195 | + d.addCallback(lambda ignored, i=i: |
---|
1196 | + mr.get_block_and_salt(i)) |
---|
1197 | + d.addCallback(_check_block_and_salt) |
---|
1198 | + |
---|
1199 | + d.addCallback(lambda ignored: |
---|
1200 | + mr.get_encprivkey()) |
---|
1201 | + d.addCallback(lambda encprivkey: |
---|
1202 | + self.failUnlessEqual(self.encprivkey, encprivkey)) |
---|
1203 | + |
---|
1204 | + d.addCallback(lambda ignored: |
---|
1205 | + mr.get_blockhashes()) |
---|
1206 | + d.addCallback(lambda blockhashes: |
---|
1207 | + self.failUnlessEqual(self.block_hash_tree, blockhashes)) |
---|
1208 | + |
---|
1209 | + d.addCallback(lambda ignored: |
---|
1210 | + mr.get_sharehashes()) |
---|
1211 | + d.addCallback(lambda sharehashes: |
---|
1212 | + self.failUnlessEqual(self.share_hash_chain, sharehashes)) |
---|
1213 | + |
---|
1214 | + d.addCallback(lambda ignored: |
---|
1215 | + mr.get_signature()) |
---|
1216 | + d.addCallback(lambda signature: |
---|
1217 | + self.failUnlessEqual(signature, self.signature)) |
---|
1218 | + |
---|
1219 | + d.addCallback(lambda ignored: |
---|
1220 | + mr.get_verification_key()) |
---|
1221 | + d.addCallback(lambda verification_key: |
---|
1222 | + self.failUnlessEqual(verification_key, self.verification_key)) |
---|
1223 | + |
---|
1224 | + d.addCallback(lambda ignored: |
---|
1225 | + mr.get_seqnum()) |
---|
1226 | + d.addCallback(lambda seqnum: |
---|
1227 | + self.failUnlessEqual(seqnum, 0)) |
---|
1228 | + |
---|
1229 | + d.addCallback(lambda ignored: |
---|
1230 | + mr.get_root_hash()) |
---|
1231 | + d.addCallback(lambda root_hash: |
---|
1232 | + self.failUnlessEqual(self.root_hash, root_hash)) |
---|
1233 | + |
---|
1234 | + d.addCallback(lambda ignored: |
---|
1235 | + mr.get_salt_hash()) |
---|
1236 | + d.addCallback(lambda salt_hash: |
---|
1237 | + self.failUnlessEqual(self.salt_hash, salt_hash)) |
---|
1238 | + |
---|
1239 | + d.addCallback(lambda ignored: |
---|
1240 | + mr.get_encoding_parameters()) |
---|
1241 | + def _check_encoding_parameters((k, n, segsize, datalen)): |
---|
1242 | + self.failUnlessEqual(k, 3) |
---|
1243 | + self.failUnlessEqual(n, 10) |
---|
1244 | + self.failUnlessEqual(segsize, 6) |
---|
1245 | + self.failUnlessEqual(datalen, 36) |
---|
1246 | + d.addCallback(_check_encoding_parameters) |
---|
1247 | + |
---|
1248 | + d.addCallback(lambda ignored: |
---|
1249 | + mr.get_checkstring()) |
---|
1250 | + d.addCallback(lambda checkstring: |
---|
1251 | + self.failUnlessEqual(checkstring, mw.get_checkstring())) |
---|
1252 | + return d |
---|
1253 | + |
---|
1254 | + |
---|
1255 | class Stats(unittest.TestCase): |
---|
1256 | |
---|
1257 | def setUp(self): |
---|
1258 | } |
---|
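
For reference, here is a minimal standalone sketch (not part of either patch) of the 73-byte checkstring that test_end_to_end compares against mw.get_checkstring(). The MDMFCHECKSTRING format string is taken from the layout patch below; the hash values here are hypothetical placeholders:

    import struct

    MDMFCHECKSTRING = ">BQ32s32s"   # version, seqnum, root hash, salt hash
    root_hash = "R" * 32            # hypothetical placeholder values
    salt_hash = "S" * 32
    checkstring = struct.pack(MDMFCHECKSTRING, 1, 0, root_hash, salt_hash)
    assert len(checkstring) == 73   # the first 73 bytes of an MDMF share

The writer reads this prefix back on every write (see _readv in the next patch), which is how uncoordinated writes are detected.
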
1259 | [Add objects for MDMF shares in support of a new segmented uploader |
---|
1260 | Kevan Carstensen <kevan@isnotajoke.com>**20100531231009 |
---|
1261 | Ignore-this: 7e811139c2cd20df744f0ed0c3cb4ce8 |
---|
1262 | ] { |
---|
1263 | hunk ./src/allmydata/interfaces.py 7 |
---|
1264 | ChoiceOf, IntegerConstraint, Any, RemoteInterface, Referenceable |
---|
1265 | |
---|
1266 | HASH_SIZE=32 |
---|
1267 | +SALT_SIZE=16 |
---|
1268 | |
---|
1269 | Hash = StringConstraint(maxLength=HASH_SIZE, |
---|
1270 | minLength=HASH_SIZE)# binary format 32-byte SHA256 hash |
---|
1271 | hunk ./src/allmydata/mutable/layout.py 4 |
---|
1272 | |
---|
1273 | import struct |
---|
1274 | from common import NeedMoreDataError, UnknownVersionError |
---|
1275 | +from twisted.python import failure |
---|
1276 | +from allmydata.interfaces import HASH_SIZE, SALT_SIZE |
---|
1277 | +from allmydata.util import mathutil |
---|
1278 | +from twisted.internet import defer |
---|
1279 | + |
---|
1280 | +# These strings describe the format of the packed structs they help process |
---|
1281 | +# Here's what they mean: |
---|
1282 | +# |
---|
1283 | +# PREFIX: |
---|
1284 | +# >: Big-endian byte order; the most significant byte is first (leftmost). |
---|
1285 | +# B: The version information; an 8 bit version identifier. Stored as |
---|
1286 | +# an unsigned char. This is currently 0; our modifications
---|
1287 | +# will turn it into 1.
---|
1288 | +# Q: The sequence number; this is sort of like a revision history for |
---|
1289 | +# mutable files; they start at 1 and increase as they are changed after |
---|
1290 | +# being uploaded. Stored as an unsigned long long, which is 8 bytes in |
---|
1291 | +# length. |
---|
1292 | +# 32s: The root hash of the share hash tree. We use sha-256d, so we use 32 |
---|
1293 | +# characters = 32 bytes to store the value. |
---|
1294 | +# 16s: The salt for the readkey. This is a 16-byte random value, stored as |
---|
1295 | +# 16 characters. |
---|
1296 | +# |
---|
1297 | +# SIGNED_PREFIX additions, things that are covered by the signature: |
---|
1298 | +# B: The "k" encoding parameter. We store this as an 8-bit character, |
---|
1299 | +# which is convenient because our erasure coding scheme cannot |
---|
1300 | +# encode if you ask for more than 255 pieces. |
---|
1301 | +# B: The "N" encoding parameter. Stored as an 8-bit character for the |
---|
1302 | +# same reasons as above. |
---|
1303 | +# Q: The segment size of the uploaded file. This will essentially be the |
---|
1304 | +# length of the file in SDMF. An unsigned long long, so we can store |
---|
1305 | +# files of quite large size. |
---|
1306 | +# Q: The data length of the uploaded file. Modulo padding, this will be |
---|
1307 | +# the same as the segment size field. Like the segment size field, it is
---|
1308 | +# an unsigned long long and can be quite large. |
---|
1309 | +# |
---|
1310 | +# HEADER additions: |
---|
1311 | +# L: The offset of the signature of this share. An unsigned long.
---|
1312 | +# L: The offset of the share hash chain. An unsigned long. |
---|
1313 | +# L: The offset of the block hash tree. An unsigned long. |
---|
1314 | +# L: The offset of the share data. An unsigned long. |
---|
1315 | +# Q: The offset of the encrypted private key. An unsigned long long, to |
---|
1316 | +# account for the possibility of a lot of share data. |
---|
1317 | +# Q: The offset of the EOF. An unsigned long long, to account for the |
---|
1318 | +# possibility of a lot of share data. |
---|
1319 | +# |
---|
1320 | +# After all of these, we have the following: |
---|
1321 | +# - The verification key: Occupies the space between the end of the header |
---|
1322 | +# and the start of the signature (i.e. data[HEADER_LENGTH:o['signature']]).
---|
1323 | +# - The signature, which goes from the signature offset to the share hash |
---|
1324 | +# chain offset. |
---|
1325 | +# - The share hash chain, which goes from the share hash chain offset to |
---|
1326 | +# the block hash tree offset. |
---|
1327 | +# - The share data, which goes from the share data offset to the encrypted |
---|
1328 | +# private key offset. |
---|
1329 | +# - The encrypted private key, which goes from its offset until the end of the file.
---|
1330 | +# |
---|
1331 | +# The block hash tree in this encoding has only one hash in it, so the offset of
---|
1332 | +# the share data will be 32 bytes more than the offset of the block hash tree.
---|
1333 | +# Given this, we may need to check to see how many bytes a reasonably sized |
---|
1334 | +# block hash tree will take up. |
---|
1335 | |
---|
1336 | PREFIX = ">BQ32s16s" # each version has a different prefix |
---|
1337 | SIGNED_PREFIX = ">BQ32s16s BBQQ" # this is covered by the signature |
---|
1338 | hunk ./src/allmydata/mutable/layout.py 264 |
---|
1339 | encprivkey]) |
---|
1340 | return final_share |
---|
1341 | |
---|
1342 | +def pack_prefix(seqnum, root_hash, IV, |
---|
1343 | + required_shares, total_shares, |
---|
1344 | + segment_size, data_length): |
---|
1345 | + prefix = struct.pack(SIGNED_PREFIX, |
---|
1346 | + 0, # version, |
---|
1347 | + seqnum, |
---|
1348 | + root_hash, |
---|
1349 | + IV, |
---|
1350 | + required_shares, |
---|
1351 | + total_shares, |
---|
1352 | + segment_size, |
---|
1353 | + data_length, |
---|
1354 | + ) |
---|
1355 | + return prefix |
---|
1356 | + |
---|
1357 | + |
---|
1358 | +MDMFHEADER = ">BQ32s32sBBQQ LQQQQQQ" |
---|
1359 | +MDMFHEADERSIZE = struct.calcsize(MDMFHEADER) |
---|
1360 | +MDMFCHECKSTRING = ">BQ32s32s" |
---|
1361 | +MDMFSIGNABLEHEADER = ">BQ32s32sBBQQ" |
---|
1362 | +MDMFOFFSETS = ">LQQQQQQ" |
---|
1363 | + |
---|
1364 | +class MDMFSlotWriteProxy: |
---|
1365 | + #implements(IMutableSlotWriter) TODO |
---|
1366 | + |
---|
1367 | + """ |
---|
1368 | + I represent a remote write slot for an MDMF mutable file. |
---|
1369 | + |
---|
1370 | + I abstract away from my caller the details of block and salt |
---|
1371 | + management, and the implementation of the on-disk format for MDMF |
---|
1372 | + shares. |
---|
1373 | + """ |
---|
1374 | + |
---|
1375 | + # Expected layout, MDMF: |
---|
1376 | + # offset: size: name: |
---|
1377 | + #-- signed part -- |
---|
1378 | + # 0 1 version number (01) |
---|
1379 | + # 1 8 sequence number |
---|
1380 | + # 9 32 share tree root hash |
---|
1381 | + # 41 32 concatenated salts hash |
---|
1382 | + # 73 1 The "k" encoding parameter |
---|
1383 | + # 74 1 The "N" encoding parameter |
---|
1384 | + # 75 8 The segment size of the uploaded file |
---|
1385 | + # 83 8 The data length of the uploaded file |
---|
1386 | + #-- end signed part -- |
---|
1387 | + # 91 4 The offset of the share data |
---|
1388 | + # 95 8 The offset of the encrypted private key |
---|
1389 | + # 103 8 The offset of the block hash tree |
---|
1390 | + # 111 8 The offset of the share hash chain
---|
1391 | + # 119 8 The offset of the signature |
---|
1392 | + # 127 8 The offset of the verification key |
---|
1393 | + # 135 8 offset of the EOF |
---|
1394 | + # |
---|
1395 | + # followed by salts, share data, the encrypted private key, the |
---|
1396 | + # block hash tree, the share hash chain, a signature over the first |
---|
1397 | + # eight fields, and a verification key. |
---|
1398 | + # |
---|
1399 | + # The checkstring is the first four fields -- the version number, |
---|
1400 | + # sequence number, root hash and salt hash. This is consistent in |
---|
1401 | + # meaning to what we have with SDMF files, except now instead of |
---|
1402 | + # using the literal salt, we use a value derived from all of the |
---|
1403 | + # salts. |
---|
1404 | + # |
---|
1405 | + # The ordering of the offsets is different to reflect the dependencies |
---|
1406 | + # that we'll run into with an MDMF file. The expected write flow is |
---|
1407 | + # something like this: |
---|
1408 | + # |
---|
1409 | + # 0: Initialize with the sequence number, encoding |
---|
1410 | + # parameters and data length. From this, we can deduce the |
---|
1411 | + # number of segments, and from that we can deduce the size of |
---|
1412 | + # the AES salt field, telling us where to write AES salts, and |
---|
1413 | + # where to write share data. We can also figure out where the |
---|
1414 | + # encrypted private key should go, because we can figure out |
---|
1415 | + # how big the share data will be. |
---|
1416 | + # |
---|
1417 | + # 1: Encrypt, encode, and upload the file in chunks. Do something |
---|
1418 | + # like |
---|
1419 | + # |
---|
1420 | + # put_block(data, segnum, salt) |
---|
1421 | + # |
---|
1422 | + # to write a block and a salt to the disk. We can do both of |
---|
1423 | + # these operations now because we have enough of the offsets to |
---|
1424 | + # know where to put them. |
---|
1425 | + # |
---|
1426 | + # 2: Put the encrypted private key. Use: |
---|
1427 | + # |
---|
1428 | + # put_encprivkey(encprivkey) |
---|
1429 | + # |
---|
1430 | + # Now that we know the length of the private key, we can fill |
---|
1431 | + # in the offset for the block hash tree. |
---|
1432 | + # |
---|
1433 | + # 3: We're now in a position to upload the block hash tree for |
---|
1434 | + # a share. Put that using something like: |
---|
1435 | + # |
---|
1436 | + # put_blockhashes(block_hash_tree) |
---|
1437 | + # |
---|
1438 | + # Note that block_hash_tree is a list of hashes -- we'll take |
---|
1439 | + # care of the details of serializing that appropriately. When |
---|
1440 | + # we get the block hash tree, we are also in a position to |
---|
1441 | + # calculate the offset for the share hash chain, and fill that |
---|
1442 | + # into the offsets table. |
---|
1443 | + # |
---|
1444 | + # 4: We're now in a position to upload the share hash chain for |
---|
1445 | + # a share. Do that with something like: |
---|
1446 | + # |
---|
1447 | + # put_sharehashes(share_hash_chain) |
---|
1448 | + # |
---|
1449 | + # share_hash_chain should be a dictionary mapping shnums to |
---|
1450 | + # 32-byte hashes -- the wrapper handles serialization. |
---|
1451 | + # We'll know where to put the signature at this point, also, |
---|
1452 | + # but, for various reasons, will not allow clients to do that |
---|
1453 | + # until after they've put the flat salt hash and the root hash |
---|
1454 | + # in the next step. |
---|
1455 | + # |
---|
1456 | + # 5: Put the root hash and the flat salt hash. Use: |
---|
1457 | + # |
---|
1458 | + # put_root_and_salt_hashes(root_hash, salt_hash) |
---|
1459 | + # |
---|
1460 | + # These must both be 32-byte values. Since they have fixed |
---|
1461 | + # offsets in the header, we could conceivably put them whenever |
---|
1462 | + # we want to, but it makes sense enough to put them only after |
---|
1463 | + # putting the share hash chain, since having a root hash |
---|
1464 | + # implies that we have a share hash chain. |
---|
1465 | + # |
---|
1466 | + # After this step, callers can call my get_signable method, |
---|
1467 | + # which returns a packed representation of the data that they |
---|
1468 | + # need to sign for the signature field, which is the next one |
---|
1469 | + # to be placed. |
---|
1470 | + # |
---|
1471 | + # 6: With the root hash put, we can now sign the header. Use:
---|
1472 | + # |
---|
1473 | + # put_signature(signature) |
---|
1474 | + # |
---|
1475 | + # 7: Add the verification key, and finish. Do:
---|
1476 | + # |
---|
1477 | + # put_verification_key(key) |
---|
1478 | + # |
---|
1479 | + # and |
---|
1480 | + # |
---|
1481 | + # finish_publishing()
---|
1482 | + # |
---|
1483 | + # Checkstring management: |
---|
1484 | + # |
---|
1485 | + # To write to a mutable slot, we have to provide test vectors to ensure |
---|
1486 | + # that we are writing to the same data that we think we are. These |
---|
1487 | + # vectors allow us to detect uncoordinated writes; that is, writes |
---|
1488 | + # where both we and some other shareholder are writing to the |
---|
1489 | + # mutable slot, and to report those back to the parts of the program |
---|
1490 | + # doing the writing. |
---|
1491 | + # |
---|
1492 | + # With SDMF, this was easy -- all of the share data was written in |
---|
1493 | + # one go, so it was easy to detect uncoordinated writes, and we only |
---|
1494 | + # had to do it once. With MDMF, not all of the file is written at |
---|
1495 | + # once. |
---|
1496 | + # |
---|
1497 | + # If a share is new, we write out as much of the header as we can |
---|
1498 | + # before writing out anything else. This gives other writers a |
---|
1499 | + # canary that they can use to detect uncoordinated writes, and, if |
---|
1500 | + # they do the same thing, gives us the same canary. We then update
---|
1501 | + # the share. We won't be able to write out two fields of the header |
---|
1502 | + # -- the share tree hash and the salt hash -- until we finish |
---|
1503 | + # writing out the share. We only require the writer to provide the |
---|
1504 | + # initial checkstring, and keep track of what it should be after |
---|
1505 | + # updates ourselves. |
---|
1506 | + # |
---|
1507 | + # If we haven't written anything yet, then on the first write (which |
---|
1508 | + # will probably be a block + salt of a share), we'll also write out |
---|
1509 | + # the header. On subsequent passes, we'll expect to see the header. |
---|
1510 | + # This changes in two places: |
---|
1511 | + # |
---|
1512 | + # - When we write out the salt hash |
---|
1513 | + # - When we write out the root of the share hash tree |
---|
1514 | + # |
---|
1515 | + # since these values will change the header. It is possible that we |
---|
1516 | + # can just make those be written in one operation to minimize |
---|
1517 | + # disruption. |
---|
1518 | + def __init__(self, |
---|
1519 | + shnum, |
---|
1520 | + rref, # a remote reference to a storage server |
---|
1521 | + storage_index, |
---|
1522 | + secrets, # (write_enabler, renew_secret, cancel_secret) |
---|
1523 | + seqnum, # the sequence number of the mutable file |
---|
1524 | + required_shares, |
---|
1525 | + total_shares, |
---|
1526 | + segment_size, |
---|
1527 | + data_length): # the length of the original file |
---|
1528 | + self._shnum = shnum |
---|
1529 | + self._rref = rref |
---|
1530 | + self._storage_index = storage_index |
---|
1531 | + self._seqnum = seqnum |
---|
1532 | + self._required_shares = required_shares |
---|
1533 | + assert self._shnum >= 0 and self._shnum < total_shares |
---|
1534 | + self._total_shares = total_shares |
---|
1535 | + # We build up the offset table as we write things. It is the |
---|
1536 | + # last thing we write to the remote server. |
---|
1537 | + self._offsets = {} |
---|
1538 | + self._testvs = [] |
---|
1539 | + self._secrets = secrets |
---|
1540 | + # The segment size needs to be a multiple of the k parameter -- |
---|
1541 | + # any padding should have been carried out by the publisher |
---|
1542 | + # already. |
---|
1543 | + assert segment_size % required_shares == 0 |
---|
1544 | + self._segment_size = segment_size |
---|
1545 | + self._data_length = data_length |
---|
1546 | + |
---|
1547 | + # These are set later -- we define them here so that we can |
---|
1548 | + # check for their existence easily |
---|
1549 | + self._root_hash = None |
---|
1550 | + self._salt_hash = None |
---|
1551 | + |
---|
1552 | + # We haven't yet written anything to the remote bucket. By |
---|
1553 | + # setting this, we tell the _write method as much. The write |
---|
1554 | + # method will then know that it also needs to add a write vector |
---|
1555 | + # for the checkstring (or what we have of it) to the first write |
---|
1556 | + # request. We'll then record that value for future use. If |
---|
1557 | + # we're expecting something to be there already, we need to call |
---|
1558 | + # set_checkstring before we write anything to tell the first |
---|
1559 | + # write about that. |
---|
1560 | + self._written = False |
---|
1561 | + |
---|
1562 | + # When writing data to the storage servers, we get a read vector |
---|
1563 | + # for free. We'll read the checkstring, which will help us |
---|
1564 | + # figure out what's gone wrong if a write fails. |
---|
1565 | + self._readv = [(0, struct.calcsize(MDMFCHECKSTRING))] |
---|
1566 | + |
---|
1567 | + # We calculate the number of segments because it tells us |
---|
1568 | + # where the salt part of the file ends/share segment begins, |
---|
1569 | + # and also because it provides a useful amount of bounds checking. |
---|
1570 | + self._num_segments = mathutil.div_ceil(self._data_length, |
---|
1571 | + self._segment_size) |
---|
1572 | + self._block_size = self._segment_size / self._required_shares |
---|
1573 | + # We also calculate the share size, to help us with block |
---|
1574 | + # constraints later. |
---|
1575 | + tail_size = self._data_length % self._segment_size |
---|
1576 | + if not tail_size: |
---|
1577 | + self._tail_block_size = self._block_size |
---|
1578 | + else: |
---|
1579 | + self._tail_block_size = mathutil.next_multiple(tail_size, |
---|
1580 | + self._required_shares) |
---|
1581 | + self._tail_block_size /= self._required_shares |
---|
1582 | + |
---|
1583 | + # We already know where the AES salts start; right after the end |
---|
1584 | + # of the header (which is defined as the signable part + the offsets) |
---|
1585 | + # We need to calculate where the share data starts, since we're |
---|
1586 | + # responsible (after this method) for being able to write it. |
---|
1587 | + self._offsets['share-data'] = MDMFHEADERSIZE |
---|
1588 | + self._offsets['share-data'] += self._num_segments * SALT_SIZE |
---|
1589 | + # We can also calculate where the encrypted private key begins |
---|
1590 | + # from what we now know.
---|
1591 | + self._offsets['enc_privkey'] = self._offsets['share-data'] |
---|
1592 | + self._offsets['enc_privkey'] += self._block_size * self._num_segments |
---|
1593 | + # We'll wait for the rest. Callers can now call my "put_block" and |
---|
1594 | + # "set_checkstring" methods. |
---|
1595 | + |
---|
1596 | + |
---|
1597 | + def set_checkstring(self, checkstring): |
---|
1598 | + """ |
---|
1599 | + Set the checkstring that I will use when writing to the remote server.
---|
1600 | + |
---|
1601 | + By default, I assume that I am writing new shares to the grid. |
---|
1602 | + If you don't explicitly set your own checkstring, I will use
---|
1603 | + one that requires that the remote share not exist. |
---|
1604 | + """ |
---|
1605 | + # You're allowed to overwrite checkstrings with this method; |
---|
1606 | + # I assume that users know what they are doing when they call |
---|
1607 | + # it. |
---|
1608 | + if checkstring == "": |
---|
1609 | + # We special-case this, since len("") = 0, but we need |
---|
1610 | + # length of 1 for the case of an empty share to work on the |
---|
1611 | + # storage server, which is what a checkstring that is the |
---|
1612 | + # empty string means. |
---|
1613 | + self._testvs = [] |
---|
1614 | + else: |
---|
1615 | + self._testvs = [] |
---|
1616 | + self._testvs.append((0, len(checkstring), "eq", checkstring)) |
---|
1617 | + |
---|
1618 | + |
---|
1619 | + def get_checkstring(self): |
---|
1620 | + """ |
---|
1621 | + I return a representation of what the checkstring for this
---|
1622 | + share will look like on the server.
---|
1623 | + """ |
---|
1624 | + if self._root_hash: |
---|
1625 | + roothash = self._root_hash |
---|
1626 | + else: |
---|
1627 | + roothash = "\x00" * 32 |
---|
1628 | + if self._salt_hash: |
---|
1629 | + salthash = self._salt_hash |
---|
1630 | + else: |
---|
1631 | + salthash = "\x00" * 32 |
---|
1632 | + checkstring = struct.pack(MDMFCHECKSTRING, |
---|
1633 | + 1, |
---|
1634 | + self._seqnum, |
---|
1635 | + roothash, |
---|
1636 | + salthash) |
---|
1637 | + return checkstring |
---|
1638 | + |
---|
1639 | + |
---|
1640 | + def put_block(self, data, segnum, salt): |
---|
1641 | + """ |
---|
1642 | + Put the encrypted-and-encoded data segment in the slot, along |
---|
1643 | + with the salt. |
---|
1644 | + """ |
---|
1645 | + if segnum >= self._num_segments: |
---|
1646 | + raise LayoutInvalid("I won't overwrite the private key") |
---|
1647 | + if len(salt) != SALT_SIZE: |
---|
1648 | + raise LayoutInvalid("I was given a salt of size %d, but " |
---|
1649 | + "I wanted a salt of size %d") |
---|
1650 | + if segnum + 1 == self._num_segments: |
---|
1651 | + if len(data) != self._tail_block_size: |
---|
1652 | + raise LayoutInvalid("I was given the wrong size block to write") |
---|
1653 | + elif len(data) != self._block_size: |
---|
1654 | + raise LayoutInvalid("I was given the wrong size block to write") |
---|
1655 | + |
---|
1656 | + # We want to write at offsets['share-data'] + segnum * block_size. |
---|
1657 | + assert self._offsets |
---|
1658 | + assert self._offsets['share-data'] |
---|
1659 | + |
---|
1660 | + offset = self._offsets['share-data'] + segnum * self._block_size |
---|
1661 | + datavs = [tuple([offset, data])] |
---|
1662 | + # We also have to write the salt. This is at: |
---|
1663 | + salt_offset = MDMFHEADERSIZE + SALT_SIZE * segnum |
---|
1664 | + datavs.append(tuple([salt_offset, salt])) |
---|
1665 | + return self._write(datavs) |
---|
1666 | + |
---|
1667 | + |
---|
1668 | + def put_encprivkey(self, encprivkey): |
---|
1669 | + """ |
---|
1670 | + Put the encrypted private key in the remote slot. |
---|
1671 | + """ |
---|
1672 | + assert self._offsets |
---|
1673 | + assert self._offsets['enc_privkey'] |
---|
1674 | + # You shouldn't re-write the encprivkey after the block hash |
---|
1675 | + # tree is written, since that could cause the private key to run |
---|
1676 | + # into the block hash tree. Before it writes the block hash |
---|
1677 | + # tree, the block hash tree writing method writes the offset of |
---|
1678 | + # the share hash chain. So that's a good indicator of
---|
1679 | + # whether or not the block hash tree has been written. |
---|
1680 | + if "share_hash_chain" in self._offsets: |
---|
1681 | + raise LayoutInvalid("You must write this before the block hash tree") |
---|
1682 | + |
---|
1683 | + self._offsets['block_hash_tree'] = self._offsets['enc_privkey'] + len(encprivkey) |
---|
1684 | + datavs = [(tuple([self._offsets['enc_privkey'], encprivkey]))] |
---|
1685 | + def _on_failure(): |
---|
1686 | + del(self._offsets['block_hash_tree']) |
---|
1687 | + return self._write(datavs, on_failure=_on_failure) |
---|
1688 | + |
---|
1689 | + |
---|
1690 | + def put_blockhashes(self, blockhashes): |
---|
1691 | + """ |
---|
1692 | + Put the block hash tree in the remote slot. |
---|
1693 | + |
---|
1694 | + The encrypted private key must be put before the block hash |
---|
1695 | + tree, since we need to know how large it is to know where the |
---|
1696 | + block hash tree should go. The block hash tree must be put |
---|
1697 | + before the share hash chain, since its size determines the |
---|
1698 | + offset of the share hash chain. |
---|
1699 | + """ |
---|
1700 | + assert self._offsets |
---|
1701 | + assert isinstance(blockhashes, list) |
---|
1702 | + if "block_hash_tree" not in self._offsets: |
---|
1703 | + raise LayoutInvalid("You must put the encrypted private key " |
---|
1704 | + "before you put the block hash tree") |
---|
1705 | + # If written, the share hash chain causes the signature offset |
---|
1706 | + # to be defined. |
---|
1707 | + if "signature" in self._offsets: |
---|
1708 | + raise LayoutInvalid("You must put the block hash tree before " |
---|
1709 | + "you put the share hash chain") |
---|
1710 | + blockhashes_s = "".join(blockhashes) |
---|
1711 | + self._offsets['share_hash_chain'] = self._offsets['block_hash_tree'] + len(blockhashes_s) |
---|
1712 | + datavs = [] |
---|
1713 | + datavs.append(tuple([self._offsets['block_hash_tree'], blockhashes_s])) |
---|
1714 | + def _on_failure(): |
---|
1715 | + del(self._offsets['share_hash_chain']) |
---|
1716 | + return self._write(datavs, on_failure=_on_failure) |
---|
1717 | + |
---|
1718 | + |
---|
1719 | + def put_sharehashes(self, sharehashes): |
---|
1720 | + """ |
---|
1721 | + Put the share hash chain in the remote slot. |
---|
1722 | + |
---|
1723 | + The block hash tree must be put before the share hash chain, |
---|
1724 | + since we need to know where the block hash tree ends before we |
---|
1725 | + can know where the share hash chain starts. The share hash chain |
---|
1726 | + must be put before the signature, since the length of the packed |
---|
1727 | + share hash chain determines the offset of the signature. |
---|
1728 | + """ |
---|
1729 | + assert isinstance(sharehashes, dict) |
---|
1730 | + if "share_hash_chain" not in self._offsets: |
---|
1731 | + raise LayoutInvalid("You need to put the block hashes before " |
---|
1732 | + "you can put the share hash chain") |
---|
1733 | + # The signature comes after the share hash chain. If the |
---|
1734 | + # signature has already been written, we must not write another |
---|
1735 | + # share hash chain. The signature writes the verification key |
---|
1736 | + # offset when it gets sent to the remote server, so we look for |
---|
1737 | + # that. |
---|
1738 | + if "verification_key" in self._offsets: |
---|
1739 | + raise LayoutInvalid("You must write the share hash chain " |
---|
1740 | + "before you write the signature") |
---|
1741 | + datavs = [] |
---|
1742 | + sharehashes_s = "".join([struct.pack(">H32s", i, sharehashes[i]) |
---|
1743 | + for i in sorted(sharehashes.keys())]) |
---|
1744 | + self._offsets['signature'] = self._offsets['share_hash_chain'] + len(sharehashes_s) |
---|
1745 | + datavs.append(tuple([self._offsets['share_hash_chain'], sharehashes_s])) |
---|
1746 | + def _on_failure(): |
---|
1747 | + del(self._offsets['signature']) |
---|
1748 | + return self._write(datavs, on_failure=_on_failure) |
---|
1749 | + |
---|
1750 | + |
---|
1751 | + def put_root_and_salt_hashes(self, roothash, salthash): |
---|
1752 | + """ |
---|
1753 | + Put the root hash (the root of the share hash tree) in the |
---|
1754 | + remote slot. |
---|
1755 | + """ |
---|
1756 | + # It does not make sense to be able to put the root and salt |
---|
1757 | + # hashes without first putting the share hashes, since you need |
---|
1758 | + # the share hashes to generate the root hash. |
---|
1759 | + # |
---|
1760 | + # Signature is defined by the routine that places the share hash |
---|
1761 | + # chain, so it's a good thing to look for in finding out whether |
---|
1762 | + # or not the share hash chain exists on the remote server. |
---|
1763 | + if "signature" not in self._offsets: |
---|
1764 | + raise LayoutInvalid("You need to put the share hash chain " |
---|
1765 | + "before you can put the root share hash") |
---|
1766 | + if len(roothash) != HASH_SIZE or len(salthash) != HASH_SIZE: |
---|
1767 | + raise LayoutInvalid("hashes and salts must be exactly %d bytes" |
---|
1768 | + % HASH_SIZE) |
---|
1769 | + datavs = [] |
---|
1770 | + self._root_hash = roothash |
---|
1771 | + self._salt_hash = salthash |
---|
1772 | + checkstring = self.get_checkstring() |
---|
1773 | + datavs.append(tuple([0, checkstring])) |
---|
1774 | + # This write, if successful, changes the checkstring, so we need |
---|
1775 | + # to update our internal checkstring to be consistent with the |
---|
1776 | + # one on the server. |
---|
1777 | + def _on_success(): |
---|
1778 | + self._testvs = [(0, len(checkstring), "eq", checkstring)] |
---|
1779 | + def _on_failure(): |
---|
1780 | + self._root_hash = None |
---|
1781 | + self._salt_hash = None |
---|
1782 | + return self._write(datavs, |
---|
1783 | + on_success=_on_success, |
---|
1784 | + on_failure=_on_failure) |
---|
1785 | + |
---|
1786 | + |
---|
1787 | + def get_signable(self): |
---|
1788 | + """ |
---|
1789 | + Get the first eight fields of the mutable file; the parts that |
---|
1790 | + are signed. |
---|
1791 | + """ |
---|
1792 | + if not self._root_hash or not self._salt_hash: |
---|
1793 | + raise LayoutInvalid("You need to set the root hash and the " |
---|
1794 | + "salt hash before getting something to " |
---|
1795 | + "sign") |
---|
1796 | + return struct.pack(MDMFSIGNABLEHEADER, |
---|
1797 | + 1, |
---|
1798 | + self._seqnum, |
---|
1799 | + self._root_hash, |
---|
1800 | + self._salt_hash, |
---|
1801 | + self._required_shares, |
---|
1802 | + self._total_shares, |
---|
1803 | + self._segment_size, |
---|
1804 | + self._data_length) |
---|
1805 | + |
---|
1806 | + |
---|
1807 | + def put_signature(self, signature): |
---|
1808 | + """ |
---|
1809 | + Put the signature field to the remote slot. |
---|
1810 | + |
---|
1811 | + I require that the root hash and share hash chain have been put |
---|
1812 | + to the grid before I will write the signature to the grid. |
---|
1813 | + """ |
---|
1814 | + if "signature" not in self._offsets: |
---|
1815 | + raise LayoutInvalid("You must put the share hash chain " |
---|
1816 | + # It does not make sense to put a signature without first |
---|
1817 | + # putting the root hash and the salt hash (since otherwise |
---|
1818 | + # the signature would be incomplete), so we don't allow that. |
---|
1819 | + "before putting the signature") |
---|
1820 | + if not self._root_hash: |
---|
1821 | + raise LayoutInvalid("You must complete the signed prefix " |
---|
1822 | + "before computing a signature") |
---|
1823 | + # If we put the signature after we put the verification key, we |
---|
1824 | + # could end up running into the verification key, and will |
---|
1825 | + # probably screw up the offsets as well. So we don't allow that. |
---|
1826 | + # The method that writes the verification key defines the EOF |
---|
1827 | + # offset before writing the verification key, so look for that. |
---|
1828 | + if "EOF" in self._offsets: |
---|
1829 | + raise LayoutInvalid("You must write the signature before the verification key") |
---|
1830 | + |
---|
1831 | + self._offsets['verification_key'] = self._offsets['signature'] + len(signature) |
---|
1832 | + datavs = [] |
---|
1833 | + datavs.append(tuple([self._offsets['signature'], signature])) |
---|
1834 | + def _on_failure(): |
---|
1835 | + del(self._offsets['verification_key']) |
---|
1836 | + return self._write(datavs, on_failure=_on_failure) |
---|
1837 | + |
---|
1838 | + |
---|
1839 | + def put_verification_key(self, verification_key): |
---|
1840 | + """ |
---|
1841 | + Put the verification key into the remote slot. |
---|
1842 | + |
---|
1843 | + I require that the signature have been written to the storage |
---|
1844 | + server before I allow the verification key to be written to the |
---|
1845 | + remote server. |
---|
1846 | + """ |
---|
1847 | + if "verification_key" not in self._offsets: |
---|
1848 | + raise LayoutInvalid("You must put the signature before you " |
---|
1849 | + "can put the verification key") |
---|
1850 | + self._offsets['EOF'] = self._offsets['verification_key'] + len(verification_key) |
---|
1851 | + datavs = [] |
---|
1852 | + datavs.append(tuple([self._offsets['verification_key'], verification_key])) |
---|
1853 | + def _on_failure(): |
---|
1854 | + del(self._offsets['EOF']) |
---|
1855 | + return self._write(datavs, on_failure=_on_failure) |
---|
1856 | + |
---|
1857 | + |
---|
1858 | + def finish_publishing(self): |
---|
1859 | + """ |
---|
1860 | + Write the offset table and encoding parameters to the remote |
---|
1861 | + slot, since those are the only things we have yet to publish at this
---|
1862 | + point. |
---|
1863 | + """ |
---|
1864 | + if "EOF" not in self._offsets: |
---|
1865 | + raise LayoutInvalid("You must put the verification key before " |
---|
1866 | + "you can publish the offsets") |
---|
1867 | + offsets_offset = struct.calcsize(MDMFSIGNABLEHEADER) |
---|
1868 | + offsets = struct.pack(MDMFOFFSETS, |
---|
1869 | + self._offsets['share-data'], |
---|
1870 | + self._offsets['enc_privkey'], |
---|
1871 | + self._offsets['block_hash_tree'], |
---|
1872 | + self._offsets['share_hash_chain'], |
---|
1873 | + self._offsets['signature'], |
---|
1874 | + self._offsets['verification_key'], |
---|
1875 | + self._offsets['EOF']) |
---|
1876 | + datavs = [] |
---|
1877 | + datavs.append(tuple([offsets_offset, offsets])) |
---|
1878 | + encoding_parameters_offset = struct.calcsize(MDMFCHECKSTRING) |
---|
1879 | + params = struct.pack(">BBQQ", |
---|
1880 | + self._required_shares, |
---|
1881 | + self._total_shares, |
---|
1882 | + self._segment_size,
---|
1883 | + self._data_length) |
---|
1884 | + datavs.append(tuple([encoding_parameters_offset, params])) |
---|
1885 | + return self._write(datavs) |
---|
1886 | + |
---|
1887 | + |
---|
1888 | + def _write(self, datavs, on_failure=None, on_success=None): |
---|
1889 | + """I write the data vectors in datavs to the remote slot.""" |
---|
1890 | + tw_vectors = {} |
---|
1891 | + new_share = False |
---|
1892 | + if not self._testvs: |
---|
1893 | + self._testvs = [] |
---|
1894 | + self._testvs.append(tuple([0, 1, "eq", ""])) |
---|
1895 | + new_share = True |
---|
1896 | + if not self._written: |
---|
1897 | + # Write a new checkstring to the share when we write it, so |
---|
1898 | + # that we have something to check later. |
---|
1899 | + new_checkstring = self.get_checkstring() |
---|
1900 | + datavs.append((0, new_checkstring)) |
---|
1901 | + def _first_write(): |
---|
1902 | + self._written = True |
---|
1903 | + self._testvs = [(0, len(new_checkstring), "eq", new_checkstring)] |
---|
1904 | + on_success = _first_write |
---|
1905 | + tw_vectors[self._shnum] = (self._testvs, datavs, None) |
---|
1906 | + d = self._rref.callRemote("slot_testv_and_readv_and_writev", |
---|
1907 | + self._storage_index, |
---|
1908 | + self._secrets, |
---|
1909 | + tw_vectors, |
---|
1910 | + self._readv) |
---|
1911 | + def _result(results): |
---|
1912 | + if isinstance(results, failure.Failure) or not results[0]: |
---|
1913 | + # Do nothing; the write was unsuccessful. |
---|
1914 | + if on_failure: |
---|
1915 | + on_failure() |
---|
1916 | + else: |
---|
1917 | + if on_success: |
---|
1918 | + on_success() |
---|
1919 | + return results |
---|
1920 | + d.addCallback(_result) |
---|
1921 | + return d |
---|
1922 | + |
---|
1923 | + |
---|
1924 | +class MDMFSlotReadProxy: |
---|
1925 | + """ |
---|
1926 | + I read from a mutable slot filled with data written in the MDMF data |
---|
1927 | + format (which is described above). |
---|
1928 | + """ |
---|
1929 | + def __init__(self, |
---|
1930 | + rref, |
---|
1931 | + secrets, |
---|
1932 | + storage_index, |
---|
1933 | + shnum): |
---|
1934 | + # Start the initialization process. |
---|
1935 | + self._rref = rref |
---|
1936 | + self._storage_index = storage_index |
---|
1937 | + self._shnum = shnum |
---|
1938 | + self._secrets = secrets |
---|
1939 | + |
---|
1940 | + # Before doing anything, the reader is probably going to want to |
---|
1941 | + # verify that the signature is correct. To do that, they'll need |
---|
1942 | + # the verification key, and the signature. To get those, we'll |
---|
1943 | + # need the offset table. So fetch the offset table on the |
---|
1944 | + # assumption that that will be the first thing that a reader is |
---|
1945 | + # going to do. |
---|
1946 | + d = self._fetch_offsets() |
---|
1947 | + |
---|
1948 | + # The fact that these encoding parameters are None tells us |
---|
1949 | + # that we haven't yet fetched them from the remote share, so we |
---|
1950 | + # should. We could just not set them, but the checks will be |
---|
1951 | + # easier to read if we don't have to use hasattr. |
---|
1952 | + self._version_number = None |
---|
1953 | + self._sequence_number = None |
---|
1954 | + self._root_hash = None |
---|
1955 | + self._salt_hash = None |
---|
1956 | + self._required_shares = None |
---|
1957 | + self._total_shares = None |
---|
1958 | + self._segment_size = None |
---|
1959 | + self._data_length = None |
---|
1960 | + |
---|
1961 | + |
---|
1962 | + def _fetch_offsets(self): |
---|
1963 | + """ |
---|
1964 | + I fetch the offset table from the remote slot. |
---|
1965 | + """ |
---|
1966 | + # The offset table starts at 91 |
---|
1967 | + readv = (91, struct.calcsize(MDMFOFFSETS)) |
---|
1968 | + readvs = [readv] |
---|
1969 | + d = self._read(readvs) |
---|
1970 | + def _set_offsets(data): |
---|
1971 | + assert self._shnum in data |
---|
1972 | + offsets = data[self._shnum][0] |
---|
1973 | + |
---|
1974 | + (share_data, |
---|
1975 | + encprivkey, |
---|
1976 | + blockhashes, |
---|
1977 | + sharehashes, |
---|
1978 | + signature, |
---|
1979 | + verification_key, |
---|
1980 | + eof) = struct.unpack(MDMFOFFSETS, offsets) |
---|
1981 | + self._offsets = {} |
---|
1982 | + self._offsets['share_data'] = share_data |
---|
1983 | + self._offsets['enc_privkey'] = encprivkey |
---|
1984 | + self._offsets['block_hash_tree'] = blockhashes |
---|
1985 | + self._offsets['share_hash_chain'] = sharehashes |
---|
1986 | + self._offsets['signature'] = signature |
---|
1987 | + self._offsets['verification_key'] = verification_key |
---|
1988 | + self._offsets['EOF'] = eof |
---|
1989 | + d.addCallback(_set_offsets) |
---|
1990 | + return d |
---|
1991 | + |
---|
1992 | + |
---|
1993 | + def _fetch_header_without_offsets(self): |
---|
1994 | + """ |
---|
1995 | + I fetch the part of the header that isn't the offsets. |
---|
1996 | + |
---|
1997 | + I am called after the reader has verified that the signature is |
---|
1998 | + correct -- at that point, the reader will want to retrieve the |
---|
1999 | + rest of the file, and will need those parameters. |
---|
2000 | + """ |
---|
2001 | + readvs = [(0, 91)] |
---|
2002 | + d = self._read(readvs) |
---|
2003 | + def _set_encoding_parameters(data): |
---|
2004 | + assert self._shnum in data |
---|
2005 | + encoding_parameters = data[self._shnum][0] |
---|
2006 | + (verno, |
---|
2007 | + seqnum, |
---|
2008 | + root_hash, |
---|
2009 | + salt_hash, |
---|
2010 | + k, |
---|
2011 | + n, |
---|
2012 | + segsize, |
---|
2013 | + datalen) = struct.unpack(MDMFSIGNABLEHEADER, |
---|
2014 | + encoding_parameters) |
---|
2015 | + |
---|
2016 | + self._version_number = verno |
---|
2017 | + self._sequence_number = seqnum |
---|
2018 | + self._root_hash = root_hash |
---|
2019 | + self._salt_hash = salt_hash |
---|
2020 | + self._required_shares = k |
---|
2021 | + self._total_shares = n |
---|
2022 | + self._segment_size = segsize |
---|
2023 | + self._data_length = datalen |
---|
2024 | + |
---|
2025 | + self._num_segments = mathutil.div_ceil(self._data_length, |
---|
2026 | + self._segment_size) |
---|
2027 | + self._block_size = self._segment_size / self._required_shares |
---|
2028 | + tail_size = self._data_length % self._segment_size |
---|
2029 | + if not tail_size: |
---|
2030 | + self._tail_block_size = self._block_size |
---|
2031 | + else: |
---|
2032 | + self._tail_block_size = mathutil.next_multiple(tail_size, |
---|
2033 | + self._required_shares) |
---|
2034 | + self._tail_block_size /= self._required_shares |
---|
2035 | + |
---|
2036 | + d.addCallback(_set_encoding_parameters) |
---|
2037 | + return d |
---|
2038 | + |
---|
2039 | + |
---|
2040 | + def get_block_and_salt(self, segnum): |
---|
2041 | + """ |
---|
2042 | + I return (block, salt), where block is the block data and |
---|
2043 | + salt is the salt used to encrypt that segment. |
---|
2044 | + """ |
---|
2045 | + assert self._offsets, "I need offsets to get share data" |
---|
2046 | + |
---|
2047 | + if not self._segment_size: |
---|
2048 | + d = self._fetch_header_without_offsets() |
---|
2049 | + else: |
---|
2050 | + d = defer.succeed(None) |
---|
2051 | + base_salt_offset = struct.calcsize(MDMFHEADER) |
---|
2052 | + base_share_offset = self._offsets['share_data'] |
---|
2053 | + salt_offset = base_salt_offset + SALT_SIZE * segnum |
---|
2054 | + |
---|
2055 | + def _calculate_share_offset(ignored): |
---|
2056 | + if segnum + 1 > self._num_segments: |
---|
2057 | + raise LayoutInvalid("Not a valid segment number") |
---|
2058 | + |
---|
2059 | + share_offset = base_share_offset + self._block_size * segnum |
---|
2060 | + if segnum + 1 == self._num_segments: |
---|
2061 | + data = self._tail_block_size |
---|
2062 | + else: |
---|
2063 | + data = self._block_size |
---|
2064 | + readvs = [(salt_offset, SALT_SIZE), (share_offset, data)] |
---|
2065 | + return readvs |
---|
2066 | + |
---|
2067 | + d.addCallback(_calculate_share_offset) |
---|
2068 | + d.addCallback(lambda readvs: |
---|
2069 | + self._read(readvs)) |
---|
2070 | + def _process_results(results): |
---|
2071 | + assert self._shnum in results |
---|
2072 | + salt, data = results[self._shnum] |
---|
2073 | + return data, salt |
---|
2074 | + d.addCallback(_process_results) |
---|
2075 | + return d |
---|
2076 | + |
---|
2077 | + |
---|
2078 | + def get_blockhashes(self): |
---|
2079 | + """ |
---|
2080 | + I return the block hash tree |
---|
2081 | + """ |
---|
2082 | + # TODO: Return only the parts of the block hash tree necessary |
---|
2083 | + # to validate the blocknum provided? |
---|
2084 | + assert self._offsets |
---|
2085 | + |
---|
2086 | + blockhashes_offset = self._offsets['block_hash_tree'] |
---|
2087 | + blockhashes_length = self._offsets['share_hash_chain'] - blockhashes_offset |
---|
2088 | + readvs = [(blockhashes_offset, blockhashes_length)] |
---|
2089 | + d = self._read(readvs) |
---|
2090 | + def _build_block_hash_tree(results): |
---|
2091 | + assert self._shnum in results |
---|
2092 | + |
---|
2093 | + rawhashes = results[self._shnum][0] |
---|
2094 | + results = [rawhashes[i:i+HASH_SIZE] |
---|
2095 | + for i in range(0, len(rawhashes), HASH_SIZE)] |
---|
2096 | + return results |
---|
2097 | + d.addCallback(_build_block_hash_tree) |
---|
2098 | + return d |
---|
2099 | + |
---|
2100 | + |
---|
2101 | + def get_sharehashes(self): |
---|
2102 | + """ |
---|
2103 | + I return the part of the share hash chain placed to validate |
---|
2104 | + this share. |
---|
2105 | + """ |
---|
2106 | + assert self._offsets |
---|
2107 | + |
---|
2108 | + sharehashes_offset = self._offsets['share_hash_chain'] |
---|
2109 | + sharehashes_length = self._offsets['signature'] - sharehashes_offset |
---|
2110 | + |
---|
2111 | + readvs = [(sharehashes_offset, sharehashes_length)] |
---|
2112 | + d = self._read(readvs) |
---|
2113 | + def _build_share_hash_chain(results): |
---|
2114 | + assert self._shnum in results |
---|
2115 | + |
---|
2116 | + sharehashes = results[self._shnum][0] |
---|
2117 | + results = [sharehashes[i:i+(HASH_SIZE + 2)] |
---|
2118 | + for i in range(0, len(sharehashes), HASH_SIZE + 2)] |
---|
2119 | + results = dict([struct.unpack(">H32s", data) |
---|
2120 | + for data in results]) |
---|
2121 | + return results |
---|
2122 | + d.addCallback(_build_share_hash_chain) |
---|
2123 | + return d |
---|
2124 | + |
---|
2125 | + |
---|
2126 | + def get_encprivkey(self): |
---|
2127 | + """ |
---|
2128 | + I return the encrypted private key. |
---|
2129 | + """ |
---|
2130 | + assert self._offsets |
---|
2131 | + |
---|
2132 | + privkey_offset = self._offsets['enc_privkey'] |
---|
2133 | + privkey_length = self._offsets['block_hash_tree'] - privkey_offset |
---|
2134 | + readvs = [(privkey_offset, privkey_length)] |
---|
2135 | + d = self._read(readvs) |
---|
2136 | + def _process_results(results): |
---|
2137 | + assert self._shnum in results |
---|
2138 | + privkey = results[self._shnum][0] |
---|
2139 | + return privkey |
---|
2140 | + d.addCallback(_process_results) |
---|
2141 | + return d |
---|
2142 | + |
---|
2143 | + |
---|
2144 | + def get_signature(self): |
---|
2145 | + """ |
---|
2146 | + I return the signature of my share. |
---|
2147 | + """ |
---|
2148 | + assert self._offsets |
---|
2149 | + |
---|
2150 | + signature_offset = self._offsets['signature'] |
---|
2151 | + signature_length = self._offsets['verification_key'] - signature_offset |
---|
2152 | + readvs = [(signature_offset, signature_length)] |
---|
2153 | + d = self._read(readvs) |
---|
2154 | + def _process_results(results): |
---|
2155 | + assert self._shnum in results |
---|
2156 | + signature = results[self._shnum][0] |
---|
2157 | + return signature |
---|
2158 | + d.addCallback(_process_results) |
---|
2159 | + return d |
---|
2160 | + |
---|
2161 | + |
---|
2162 | + def get_verification_key(self): |
---|
2163 | + """ |
---|
2164 | + I return the verification key. |
---|
2165 | + """ |
---|
2166 | + assert self._offsets |
---|
2167 | + vk_offset = self._offsets['verification_key'] |
---|
2168 | + vk_length = self._offsets['EOF'] - vk_offset |
---|
2169 | + readvs = [(vk_offset, vk_length)] |
---|
2170 | + d = self._read(readvs) |
---|
2171 | + def _process_results(results): |
---|
2172 | + assert self._shnum in results |
---|
2173 | + verification_key = results[self._shnum][0] |
---|
2174 | + return verification_key |
---|
2175 | + d.addCallback(_process_results) |
---|
2176 | + return d |
---|
2177 | + |
---|
2178 | + |
---|
2179 | + def get_encoding_parameters(self): |
---|
2180 | + """ |
---|
2181 | + I return (k, n, segsize, datalen) |
---|
2182 | + """ |
---|
2183 | + if not self._required_shares: |
---|
2184 | + d = self._fetch_header_without_offsets() |
---|
2185 | + else: |
---|
2186 | + d = defer.succeed(None) |
---|
2187 | + d.addCallback(lambda ignored: |
---|
2188 | + (self._required_shares, |
---|
2189 | + self._total_shares, |
---|
2190 | + self._segment_size, |
---|
2191 | + self._data_length)) |
---|
2192 | + return d |
---|
2193 | + |
---|
2194 | + |
---|
2195 | + def get_seqnum(self): |
---|
2196 | + """ |
---|
2197 | + I return the sequence number for this share. |
---|
2198 | + """ |
---|
2199 | + if self._sequence_number == None: |
---|
2200 | + d = self._fetch_header_without_offsets() |
---|
2201 | + else: |
---|
2202 | + d = defer.succeed(None) |
---|
2203 | + d.addCallback(lambda ignored: |
---|
2204 | + self._sequence_number) |
---|
2205 | + return d |
---|
2206 | + |
---|
2207 | + |
---|
2208 | + def get_root_hash(self): |
---|
2209 | + """ |
---|
2210 | + I return the root of the share hash tree
---|
2211 | + """ |
---|
2212 | + if not self._root_hash: |
---|
2213 | + d = self._fetch_header_without_offsets() |
---|
2214 | + else: |
---|
2215 | + d = defer.succeed(None) |
---|
2216 | + d.addCallback(lambda ignored: self._root_hash) |
---|
2217 | + return d |
---|
2218 | + |
---|
2219 | + |
---|
2220 | + def get_salt_hash(self): |
---|
2221 | + """ |
---|
2222 | + I return the flat salt hash |
---|
2223 | + """ |
---|
2224 | + if not self._salt_hash: |
---|
2225 | + d = self._fetch_header_without_offsets() |
---|
2226 | + else: |
---|
2227 | + d = defer.succeed(None) |
---|
2228 | + d.addCallback(lambda ignored: self._salt_hash) |
---|
2229 | + return d |
---|
2230 | + |
---|
2231 | + |
---|
2232 | + def get_checkstring(self): |
---|
2233 | + """ |
---|
2234 | + I return the packed representation of the following: |
---|
2235 | + |
---|
2236 | + - version number |
---|
2237 | + - sequence number |
---|
2238 | + - root hash |
---|
2239 | + - salt hash |
---|
2240 | + |
---|
2241 | + which my users use as a checkstring to detect other writers. |
---|
2242 | + """ |
---|
2243 | + if self._version_number == None: |
---|
2244 | + d = self._fetch_header_without_offsets() |
---|
2245 | + else: |
---|
2246 | + d = defer.succeed(None) |
---|
2247 | + def _build_checkstring(ignored): |
---|
2248 | + checkstring = struct.pack(MDMFCHECKSTRING, |
---|
2249 | + self._version_number, |
---|
2250 | + self._sequence_number, |
---|
2251 | + self._root_hash, |
---|
2252 | + self._salt_hash) |
---|
2253 | + return checkstring |
---|
2254 | + d.addCallback(_build_checkstring) |
---|
2255 | + return d |
---|
2256 | + |
---|
2257 | + |
---|
2258 | + def _read(self, readvs): |
---|
2259 | + d = self._rref.callRemote("slot_readv", |
---|
2260 | + self._storage_index, |
---|
2261 | + [self._shnum], |
---|
2262 | + readvs) |
---|
2263 | + return d |
---|
2264 | + |
---|
2265 | + |
---|
2266 | +class LayoutInvalid(Exception): |
---|
2267 | + """ |
---|
2268 | + This isn't a valid MDMF mutable file |
---|
2269 | + """ |
---|
2270 | } |
---|
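
For reference, here is a minimal standalone sketch (not part of the patch) of how MDMFSlotWriteProxy.__init__ derives its initial offsets from the header constants above. The helper name is illustrative, and the example uses the same encoding parameters as test_end_to_end (k=3, segsize=6, datalen=36):

    import struct

    MDMFHEADER = ">BQ32s32sBBQQ LQQQQQQ"   # signable part + offset table (143 bytes)
    SALT_SIZE = 16

    def initial_offsets(segment_size, data_length, required_shares):
        # One 16-byte salt per segment sits right after the header,
        # followed by the share blocks, then the encrypted private key.
        header_size = struct.calcsize(MDMFHEADER)        # 143
        num_segments = -(-data_length // segment_size)   # ceiling division
        block_size = segment_size // required_shares
        share_data = header_size + num_segments * SALT_SIZE
        enc_privkey = share_data + block_size * num_segments
        return {"share-data": share_data, "enc_privkey": enc_privkey}

    offsets = initial_offsets(6, 36, 3)
    assert offsets["share-data"] == 239 and offsets["enc_privkey"] == 251

The remaining offsets (block hash tree, share hash chain, signature, verification key, EOF) cannot be computed up front; each put_* method fills in the next one as it learns the length of what it just wrote, which is what enforces the write ordering exercised by the tests above.
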
2271 | |
---|
2272 | Context: |
---|
2273 | |
---|
2274 | [Suppress deprecation warning for twisted.web.error.NoResource when using Twisted >= 9.0.0. |
---|
2275 | david-sarah@jacaranda.org**20100516205625 |
---|
2276 | Ignore-this: 2361a3023cd3db86bde5e1af759ed01 |
---|
2277 | ] |
---|
2278 | [docs: CREDITS for Jeremy Visser |
---|
2279 | zooko@zooko.com**20100524081829 |
---|
2280 | Ignore-this: d7c1465fd8d4e25b8d46d38a1793465b |
---|
2281 | ] |
---|
2282 | [test: show stdout and stderr in case of non-zero exit code from "tahoe" command |
---|
2283 | zooko@zooko.com**20100524073348 |
---|
2284 | Ignore-this: 695e81cd6683f4520229d108846cd551 |
---|
2285 | ] |
---|
2286 | [setup: upgrade bundled zetuptoolz to zetuptoolz-0.6c15dev and make it unpacked and directly loaded by setup.py |
---|
2287 | zooko@zooko.com**20100523205228 |
---|
2288 | Ignore-this: 24fb32aaee3904115a93d1762f132c7 |
---|
2289 | Also fix the relevant "make clean" target behavior. |
---|
2290 | ] |
---|
2291 | [setup: remove bundled zipfile egg of setuptools |
---|
2292 | zooko@zooko.com**20100523205120 |
---|
2293 | Ignore-this: c68b5f2635bb93d1c1fa7b613a026f9e |
---|
2294 | We're about to replace it with bundled unpacked source code of setuptools, which is much nicer for debugging and evolving under revision control. |
---|
2295 | ] |
---|
2296 | [setup: remove bundled copy of setuptools_trial-0.5.2.tar |
---|
2297 | zooko@zooko.com**20100522221539 |
---|
2298 | Ignore-this: 140f90eb8fb751a509029c4b24afe647 |
---|
2299 | Hopefully it will get installed automatically as needed and we won't bundle it anymore. |
---|
2300 | ] |
---|
2301 | [setup: remove bundled setuptools_darcs-1.2.8.tar |
---|
2302 | zooko@zooko.com**20100522015333 |
---|
2303 | Ignore-this: 378b1964b513ae7fe22bae2d3478285d |
---|
2304 | This version of setuptools_darcs had a bug when used on Windows which has been fixed in setuptools_darcs-1.2.9. Hopefully we will not need to bundle a copy of setuptools_darcs-1.2.9 in with Tahoe-LAFS and can instead rely on it to be downloaded from PyPI or bundled in the "tahoe deps" separate tarball. |
---|
2305 | ] |
---|
2306 | [tests: fix pyflakes warnings in bench_dirnode.py |
---|
2307 | zooko@zooko.com**20100521202511 |
---|
2308 | Ignore-this: f23d55b4ed05e52865032c65a15753c4 |
---|
2309 | ] |
---|
2310 | [setup: if the string '--reporter=bwverbose-coverage' appears on sys.argv then you need trialcoverage |
---|
2311 | zooko@zooko.com**20100521122226 |
---|
2312 | Ignore-this: e760c45dcfb5a43c1dc1e8a27346bdc2 |
---|
2313 | ] |
---|
2314 | [tests: don't let bench_dirnode.py do stuff and have side-effects at import time (unless __name__ == '__main__') |
---|
2315 | zooko@zooko.com**20100521122052 |
---|
2316 | Ignore-this: 96144a412250d9bbb5fccbf83b8753b8 |
---|
2317 | ] |
---|
2318 | [tests: increase timeout to give François's ARM buildslave a chance to complete the tests |
---|
2319 | zooko@zooko.com**20100520134526 |
---|
2320 | Ignore-this: 3dd399fdc8b91149c82b52f955b50833 |
---|
2321 | ] |
---|
2322 | [run_trial.darcspath |
---|
2323 | freestorm77@gmail.com**20100510232829 |
---|
2324 | Ignore-this: 5ebb4df74e9ea8a4bdb22b65373d1ff2 |
---|
2325 | ] |
---|
2326 | [docs: line-wrap README.txt |
---|
2327 | zooko@zooko.com**20100518174240 |
---|
2328 | Ignore-this: 670a02d360df7de51ebdcf4fae752577 |
---|
2329 | ] |
---|
2330 | [Hush pyflakes warnings |
---|
2331 | Kevan Carstensen <kevan@isnotajoke.com>**20100515184344 |
---|
2332 | Ignore-this: fd602c3bba115057770715c36a87b400 |
---|
2333 | ] |
---|
2334 | [setup: new improved misc/show-tool-versions.py |
---|
2335 | zooko@zooko.com**20100516050122 |
---|
2336 | Ignore-this: ce9b1de1b35b07d733e6cf823b66335a |
---|
2337 | ] |
---|
2338 | [Improve code coverage of the Tahoe2PeerSelector tests. |
---|
2339 | Kevan Carstensen <kevan@isnotajoke.com>**20100515032913 |
---|
2340 | Ignore-this: 793151b63ffa65fdae6915db22d9924a |
---|
2341 | ] |
---|
2342 | [Remove a comment that no longer makes sense. |
---|
2343 | Kevan Carstensen <kevan@isnotajoke.com>**20100514203516 |
---|
2344 | Ignore-this: 956983c7e7c7e4477215494dfce8f058 |
---|
2345 | ] |
---|
2346 | [docs: update docs/architecture.txt to more fully and correctly explain the upload procedure |
---|
2347 | zooko@zooko.com**20100514043458 |
---|
2348 | Ignore-this: 538b6ea256a49fed837500342092efa3 |
---|
2349 | ] |
---|
[Fix up the behavior of #778, per reviewers' comments
Kevan Carstensen <kevan@isnotajoke.com>**20100514004917
Ignore-this: 9c20b60716125278b5456e8feb396bff

- Make some important utility functions clearer and more thoroughly
documented.
- Assert in upload.servers_of_happiness that the buckets attributes
of PeerTrackers passed to it are mutually disjoint.
- Get rid of some silly non-Pythonisms that I didn't see when I first
wrote these patches.
- Make sure that should_add_server returns true when queried about a
shnum that it doesn't know about yet.
- Change Tahoe2PeerSelector.preexisting_shares to map a shareid to a set
of peerids, alter dependencies to deal with that.
- Remove upload.should_add_servers, because it is no longer necessary
- Move upload.shares_of_happiness and upload.shares_by_server to a utility
file.
- Change some points in Tahoe2PeerSelector.
- Compute servers_of_happiness using a bipartite matching algorithm that
we know is optimal instead of an ad-hoc greedy algorithm that isn't
(an illustrative sketch follows this entry).
- Change servers_of_happiness to just take a sharemap as an argument,
change its callers to merge existing_shares and used_peers before
calling it.
- Change an error message in the encoder to be more appropriate for
servers of happiness.
- Clarify the wording of an error message in immutable/upload.py
- Refactor a happiness failure message to happinessutil.py, and make
immutable/upload.py and immutable/encode.py use it.
- Move the word "only" as far to the right as possible in failure
messages.
- Use a better definition of progress during peer selection.
- Do read-only peer share detection queries in parallel, not sequentially.
- Clean up logging semantics; print the query statistics whenever an
upload is unsuccessful, not just in one case.

]
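The bipartite-matching bullet in the entry above deserves a short illustration.
The sketch below is not the code from happinessutil.py; the function name, the
shape of the sharemap argument (share number -> set of server ids), and the
example data are assumptions made for this note. The idea is to build a
bipartite graph with servers on one side and shares on the other, add an edge
wherever a server holds (or will hold) a share, and report the size of a
maximum matching.

def servers_of_happiness_sketch(sharemap):
    """Return the size of a maximum share<->server matching.

    sharemap maps a share number to the set of server ids holding it.
    Illustrative sketch only, not the Tahoe implementation.
    """
    share_to_server = {}  # matched share -> server
    server_to_share = {}  # matched server -> share

    def try_assign(shnum, visited):
        # Kuhn's augmenting-path step: find a server for shnum, evicting
        # and re-matching another share along the way if necessary.
        for server in sharemap.get(shnum, set()):
            if server in visited:
                continue
            visited.add(server)
            evicted = server_to_share.get(server)
            if evicted is None or try_assign(evicted, visited):
                server_to_share[server] = shnum
                share_to_server[shnum] = server
                return True
        return False

    for shnum in sharemap:
        try_assign(shnum, set())
    return len(share_to_server)

# Shares 0 and 2 live on different servers and share 1 is on both, so two
# distinct servers can each be credited with a share: happiness is 2.
print(servers_of_happiness_sketch({0: set(["A"]), 1: set(["A", "B"]), 2: set(["B"])}))

A maximum matching is what makes the count tight: a greedy pass can under-count
when an early share claims the only server a later share could have used.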
[Alter the error message when an upload fails, per some comments in #778.
Kevan Carstensen <kevan@isnotajoke.com>**20091230210344
Ignore-this: ba97422b2f9737c46abeb828727beb1

When I first implemented #778, I just altered the error messages to refer to
servers where they referred to shares. The resulting error messages weren't
very good. These are a bit better.
]
[Change "UploadHappinessError" to "UploadUnhappinessError"
Kevan Carstensen <kevan@isnotajoke.com>**20091205043037
Ignore-this: 236b64ab19836854af4993bb5c1b221a
]
[Alter the error message returned when peer selection fails
Kevan Carstensen <kevan@isnotajoke.com>**20091123002405
Ignore-this: b2a7dc163edcab8d9613bfd6907e5166

The Tahoe2PeerSelector returned either NoSharesError or NotEnoughSharesError
for a variety of error conditions that weren't informatively described by them.
This patch creates a new error, UploadHappinessError, replaces uses of
NoSharesError and NotEnoughSharesError with it, and alters the error message
raised with the errors to be more in line with the new servers_of_happiness
behavior. See ticket #834 for more information.
]
[Eliminate overcounting of servers_of_happiness in Tahoe2PeerSelector; also reorganize some things.
Kevan Carstensen <kevan@isnotajoke.com>**20091118014542
Ignore-this: a6cb032cbff74f4f9d4238faebd99868
]
[Change stray "shares_of_happiness" to "servers_of_happiness"
Kevan Carstensen <kevan@isnotajoke.com>**20091116212459
Ignore-this: 1c971ba8c3c4d2e7ba9f020577b28b73
]
[Alter Tahoe2PeerSelector to make sure that it recognizes existing shares on readonly servers, fixing an issue in #778
Kevan Carstensen <kevan@isnotajoke.com>**20091116192805
Ignore-this: 15289f4d709e03851ed0587b286fd955
]
[Alter 'immutable/encode.py' and 'immutable/upload.py' to use servers_of_happiness instead of shares_of_happiness.
Kevan Carstensen <kevan@isnotajoke.com>**20091104111222
Ignore-this: abb3283314820a8bbf9b5d0cbfbb57c8
]
[Alter the signature of set_shareholders in IEncoder to add a 'servermap' parameter, which gives IEncoders enough information to perform a sane check for servers_of_happiness.
Kevan Carstensen <kevan@isnotajoke.com>**20091104033241
Ignore-this: b3a6649a8ac66431beca1026a31fed94
]
[Alter CiphertextDownloader to work with servers_of_happiness
Kevan Carstensen <kevan@isnotajoke.com>**20090924041932
Ignore-this: e81edccf0308c2d3bedbc4cf217da197
]
[Revisions of the #778 tests, per reviewers' comments
Kevan Carstensen <kevan@isnotajoke.com>**20100514012542
Ignore-this: 735bbc7f663dce633caeb3b66a53cf6e

- Fix comments and confusing naming.
- Add tests for the new error messages suggested by David-Sarah
and Zooko.
- Alter existing tests for new error messages.
- Make sure that the tests continue to work with the trunk.
- Add a test for a mutual disjointness assertion that I added to
upload.servers_of_happiness.
- Fix the comments to correctly reflect read-onlyness
- Add a test for an edge case in should_add_server
- Add an assertion to make sure that share redistribution works as it
should
- Alter tests to work with revised servers_of_happiness semantics
- Remove tests for should_add_server, since that function no longer exists.
- Alter tests to know about merge_peers, and to use it before calling
servers_of_happiness.
- Add tests for merge_peers.
- Add Zooko's puzzles to the tests.
- Edit encoding tests to expect the new kind of failure message.
- Edit tests to expect error messages with the word "only" moved as far
to the right as possible.
- Extended and cleaned up some helper functions.
- Changed some tests to call more appropriate helper functions.
- Added a test for the failing redistribution algorithm
- Added a test for the progress message
- Added a test for the upper bound on readonly peer share discovery.

]
[Alter various unit tests to work with the new happy behavior
Kevan Carstensen <kevan@isnotajoke.com>**20100107181325
Ignore-this: 132032bbf865e63a079f869b663be34a
]
[Replace "UploadHappinessError" with "UploadUnhappinessError" in tests.
Kevan Carstensen <kevan@isnotajoke.com>**20091205043453
Ignore-this: 83f4bc50c697d21b5f4e2a4cd91862ca
]
[Add tests for the behavior described in #834.
Kevan Carstensen <kevan@isnotajoke.com>**20091123012008
Ignore-this: d8e0aa0f3f7965ce9b5cea843c6d6f9f
]
[Re-work 'test_upload.py' to be more readable; add more tests for #778
Kevan Carstensen <kevan@isnotajoke.com>**20091116192334
Ignore-this: 7e8565f92fe51dece5ae28daf442d659
]
[Test Tahoe2PeerSelector to make sure that it recognizes existing shares on readonly servers
Kevan Carstensen <kevan@isnotajoke.com>**20091109003735
Ignore-this: 12f9b4cff5752fca7ed32a6ebcff6446
]
[Add more tests for comment:53 in ticket #778
Kevan Carstensen <kevan@isnotajoke.com>**20091104112849
Ignore-this: 3bb2edd299a944cc9586e14d5d83ec8c
]
[Add a test for upload.shares_by_server
Kevan Carstensen <kevan@isnotajoke.com>**20091104111324
Ignore-this: f9802e82d6982a93e00f92e0b276f018
]
[Minor tweak to an existing test -- make the first server read-write, instead of read-only
Kevan Carstensen <kevan@isnotajoke.com>**20091104034232
Ignore-this: a951a46c93f7f58dd44d93d8623b2aee
]
[Alter tests to use the new form of set_shareholders
Kevan Carstensen <kevan@isnotajoke.com>**20091104033602
Ignore-this: 3deac11fc831618d11441317463ef830
]
[Refactor some behavior into a mixin, and add tests for the behavior described in #778
"Kevan Carstensen" <kevan@isnotajoke.com>**20091030091908
Ignore-this: a6f9797057ca135579b249af3b2b66ac
]
[Alter NoNetworkGrid to allow the creation of readonly servers for testing purposes.
Kevan Carstensen <kevan@isnotajoke.com>**20091018013013
Ignore-this: e12cd7c4ddeb65305c5a7e08df57c754
]
[Update 'docs/architecture.txt' to reflect readonly share discovery
kevan@isnotajoke.com**20100514003852
Ignore-this: 7ead71b34df3b1ecfdcfd3cb2882e4f9
]
[Alter the wording in docs/architecture.txt to more accurately describe the servers_of_happiness behavior.
Kevan Carstensen <kevan@isnotajoke.com>**20100428002455
Ignore-this: 6eff7fa756858a1c6f73728d989544cc
]
[Alter wording in 'interfaces.py' to be correct wrt #778
"Kevan Carstensen" <kevan@isnotajoke.com>**20091205034005
Ignore-this: c9913c700ac14e7a63569458b06980e0
]
[Update 'docs/configuration.txt' to reflect the servers_of_happiness behavior.
Kevan Carstensen <kevan@isnotajoke.com>**20091205033813
Ignore-this: 5e1cb171f8239bfb5b565d73c75ac2b8
]
[Clarify quickstart instructions for installing pywin32
david-sarah@jacaranda.org**20100511180300
Ignore-this: d4668359673600d2acbc7cd8dd44b93c
]
[web: add a simple test that you can load directory.xhtml
zooko@zooko.com**20100510063729
Ignore-this: e49b25fa3c67b3c7a56c8b1ae01bb463
]
[setup: fix typos in misc/show-tool-versions.py
zooko@zooko.com**20100510063615
Ignore-this: 2181b1303a0e288e7a9ebd4c4855628
]
[setup: show code-coverage tool versions in show-tools-versions.py
zooko@zooko.com**20100510062955
Ignore-this: 4b4c68eb3780b762c8dbbd22b39df7cf
]
[docs: update README, mv it to README.txt, update setup.py
zooko@zooko.com**20100504094340
Ignore-this: 40e28ca36c299ea1fd12d3b91e5b421c
]
[Dependency on Windmill test framework is not needed yet.
david-sarah@jacaranda.org**20100504161043
Ignore-this: be088712bec650d4ef24766c0026ebc8
]
[tests: pass z to tar so that BSD tar will know to ungzip
zooko@zooko.com**20100504090628
Ignore-this: 1339e493f255e8fc0b01b70478f23a09
]
[setup: update comments and URLs in setup.cfg
zooko@zooko.com**20100504061653
Ignore-this: f97692807c74bcab56d33100c899f829
]
[setup: reorder and extend the show-tool-versions script, the better to glean information about our new buildslaves
zooko@zooko.com**20100504045643
Ignore-this: 836084b56b8d4ee8f1de1f4efb706d36
]
[CLI: Support for https url in option --node-url
Francois Deppierraz <francois@ctrlaltdel.ch>**20100430185609
Ignore-this: 1717176b4d27c877e6bc67a944d9bf34

This patch modifies the regular expression used to validate the '--node-url'
parameter. Support for accessing a Tahoe gateway over HTTPS was already
present, thanks to Python's urllib. (An illustrative sketch follows this
entry.)

]
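The exact pattern used by the Tahoe CLI is not reproduced in this bundle, so
the sketch below only illustrates the shape of the change with an assumed
pattern and a hypothetical constant name (NODE_URL_RE): accept both the http
and https schemes when checking the value of --node-url.

import re

# Hypothetical pattern for illustration only; the real CLI regex may differ.
NODE_URL_RE = re.compile(r"^https?://[^/]+(/.*)?$")

for url in ["http://127.0.0.1:3456", "https://gateway.example.org/"]:
    assert NODE_URL_RE.match(url), "rejected %s" % url
print("both http and https node URLs accepted")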
[backupdb.did_create_directory: use REPLACE INTO, not INSERT INTO + ignore error
Brian Warner <warner@lothar.com>**20100428050803
Ignore-this: 1fca7b8f364a21ae413be8767161e32f

This handles the case where we upload a new tahoe directory for a
previously-processed local directory, possibly creating a new dircap (if the
metadata had changed). Now we replace the old dirhash->dircap record. The
previous behavior left the old record in place (with the old dircap and
timestamps), so we'd never stop creating new directories and never converge
on a null backup. (A sketch of the idea follows this entry.)
]
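As a minimal sketch of the SQL idea (not the actual backupdb.py code; the
table and column names below are assumptions), REPLACE INTO behaves like an
upsert keyed on the primary key, so re-uploading a previously-processed
directory overwrites the stale dirhash->dircap row instead of leaving it in
place:

import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE directories"
           " (dirhash TEXT PRIMARY KEY, dircap TEXT, last_uploaded INTEGER)")

def did_create_directory_sketch(dircap, dirhash, now):
    # REPLACE INTO deletes any existing row with this dirhash before
    # inserting, so the record converges on the newest dircap.
    db.execute("REPLACE INTO directories (dirhash, dircap, last_uploaded)"
               " VALUES (?, ?, ?)", (dirhash, dircap, now))
    db.commit()

did_create_directory_sketch("URI:DIR2:aaaa", "dirhash-1", 1)
did_create_directory_sketch("URI:DIR2:bbbb", "dirhash-1", 2)  # replaces the old row
print(db.execute("SELECT dircap FROM directories").fetchall())  # one row, newest dircap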
["tahoe webopen": add --info flag, to get ?t=info
Brian Warner <warner@lothar.com>**20100424233003
Ignore-this: 126b0bb6db340fabacb623d295eb45fa

Also fix some trailing whitespace.
]
[docs: install.html http-equiv refresh to quickstart.html
zooko@zooko.com**20100421165708
Ignore-this: 52b4b619f9dde5886ae2cd7f1f3b734b
]
[docs: install.html -> quickstart.html
zooko@zooko.com**20100421155757
Ignore-this: 6084e203909306bed93efb09d0e6181d
It is not called "installing" because that implies that it is going to change the configuration of your operating system. It is not called "building" because that implies that you need developer tools like a compiler. Also I added a stern warning against looking at the "InstallDetails" wiki page, which I have renamed to "AdvancedInstall".
]
[Fix another typo in tahoe_storagespace munin plugin
david-sarah@jacaranda.org**20100416220935
Ignore-this: ad1f7aa66b554174f91dfb2b7a3ea5f3
]
[Add dependency on windmill >= 1.3
david-sarah@jacaranda.org**20100416190404
Ignore-this: 4437a7a464e92d6c9012926b18676211
]
[licensing: phrase the OpenSSL-exemption in the vocabulary of copyright instead of computer technology, and replicate the exemption from the GPL to the TGPPL
zooko@zooko.com**20100414232521
Ignore-this: a5494b2f582a295544c6cad3f245e91
]
[munin-tahoe_storagespace
freestorm77@gmail.com**20100221203626
Ignore-this: 14d6d6a587afe1f8883152bf2e46b4aa

Plugin configuration rename

]
[setup: add licensing declaration for setuptools (noticed by the FSF compliance folks)
zooko@zooko.com**20100309184415
Ignore-this: 2dfa7d812d65fec7c72ddbf0de609ccb
]
[setup: fix error in licensing declaration from Shawn Willden, as noted by the FSF compliance division
zooko@zooko.com**20100309163736
Ignore-this: c0623d27e469799d86cabf67921a13f8
]
[CREDITS to Jacob Appelbaum
zooko@zooko.com**20100304015616
Ignore-this: 70db493abbc23968fcc8db93f386ea54
]
[desert-island-build-with-proper-versions
jacob@appelbaum.net**20100304013858]
[docs: a few small edits to try to guide newcomers through the docs
zooko@zooko.com**20100303231902
Ignore-this: a6aab44f5bf5ad97ea73e6976bc4042d
These edits were suggested by my watching over Jake Appelbaum's shoulder as he completely ignored/skipped/missed install.html and also as he decided that debian.txt wouldn't help him with basic installation. Then I threw in a few docs edits that have been sitting around in my sandbox asking to be committed for months.
]
[TAG allmydata-tahoe-1.6.1
david-sarah@jacaranda.org**20100228062314
Ignore-this: eb5f03ada8ea953ee7780e7fe068539
]
Patch bundle hash:
ab650c46d18c7e17e294bf10dd0a86677010cd12