2 patches for repository zooko@tahoe-lafs.org:/home/source/darcs/tahoe-lafs/ticket999-S3-backend:

Thu Sep 29 23:46:28 MDT 2011 zooko@zooko.com
* debugprint the values of blocks and hashes thereof; make the test data and the seg size small in order to make the debugprints easy to look at

Thu Sep 29 23:59:43 MDT 2011 zooko@zooko.com
* make randomness of salts explicit in method arguments
This is an experiment, and so far it is not going well. The idea is: don't let code call os.urandom() to get new random strings, but instead let the code receive a random seed as one of its arguments. The main reason to do this is to increase testability by making things repeatable. There may also be other benefits. However, the drawback is that you have to pass this "randseed" argument through many different levels of the call stack, and at each level a mistake that causes a randseed to be re-used could lead to a failure of confidentiality. It hardly seems worth it.
However, since I'm currently trying to understand a failure of a complex test in test_mutable, I'm continuing to use this patch for now in an attempt to reduce non-repeatability between different test runs and between different variants of the code.
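
For illustration only (this note and sketch are not part of the patches below): a caller at the top of the stack might supply the 32-byte randseed that these patches thread through NodeMaker, MutableFileNode and Publish roughly as follows. RandomObj mirrors the helper added in src/allmydata/util/randutil.py; drawing the seed from os.urandom() and the 16-byte per-segment salt are assumptions based on the code being replaced, not something the patches themselves specify.

    import os, random

    class RandomObj(random.Random):
        # same helper as the new allmydata.util.randutil.RandomObj:
        # once seeded, it deterministically produces n-byte strings
        def randstr(self, n):
            return ''.join(map(chr, map(self.randrange, [0]*n, [256]*n)))

    randseed = os.urandom(32)   # assumed source; must be unique for every call,
                                # since reusing a randseed risks a confidentiality failure
    rando = RandomObj(randseed)
    salt = rando.randstr(16)    # e.g. one per-segment salt (the old code called os.urandom(16))
    assert isinstance(randseed, str) and len(randseed) == 32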

New patches:

[debugprint the values of blocks and hashes thereof; make the test data and the seg size small in order to make the debugprints easy to look at
zooko@zooko.com**20110930054628
Ignore-this: bcfedc06aeedb090dfb02440f6e6c3bc
] {
hunk ./src/allmydata/mutable/publish.py 28
SDMFSlotWriteProxy

KiB = 1024
-DEFAULT_MAX_SEGMENT_SIZE = 128 * KiB
+DEFAULT_MAX_SEGMENT_SIZE = 64
PUSHING_BLOCKS_STATE = 0
PUSHING_EVERYTHING_ELSE_STATE = 1
DONE_STATE = 2
hunk ./src/allmydata/mutable/publish.py 766
hashed = sharedata
block_hash = hashutil.block_hash(hashed)
self.blockhashes[shareid][segnum] = block_hash
+ log.msg("yyy 0 shareid: %s, segnum: %s, blockhash: %s, sharedata: %s, salt: %s" % (shareid, segnum, base32.b2a(block_hash), base32.b2a(sharedata), base32.b2a(salt),))
# find the writer for this share
writer = self.writers[shareid]
writer.put_block(sharedata, segnum, salt)
hunk ./src/allmydata/mutable/retrieve.py 771
sharehashes[1].keys())
bht = self._block_hash_trees[reader.shnum]

+ for bhk, bhv in blockhashes.iteritems():
+ log.msg("xxx 0 blockhash: %s %s" % (bhk, base32.b2a(bhv),))
+
if bht.needed_hashes(segnum, include_leaf=True):
try:
bht.set_hashes(blockhashes)
hunk ./src/allmydata/test/test_mutable.py 2944
self.set_up_grid()
self.c = self.g.clients[0]
self.nm = self.c.nodemaker
- self.data = "test data" * 100000 # about 900 KiB; MDMF
+ self.data = "test data" * 32 # about 288 B; MDMF
self.small_data = "test data" * 10 # about 90 B; SDMF


hunk ./src/allmydata/test/test_mutable.py 3374
self.set_up_grid()
self.c = self.g.clients[0]
self.nm = self.c.nodemaker
- self.data = "testdata " * 100000 # about 900 KiB; MDMF
+ self.data = "testdata " * 30 # about 270 B; MDMF
self.small_data = "test data" * 10 # about 90 B; SDMF


}
[make randomness of salts explicit in method arguments
zooko@zooko.com**20110930055943
Ignore-this: ad9634d250a2fe72abbaa5f96d0a5c9
This is an experiment, and so far it is not going well. The idea is: don't let code call os.urandom() to get new random strings, but instead let the code receive a random seed as one of its arguments. The main reason to do this is to increase testability by making things repeatable. There may also be other benefits. However, the drawback is that you have to pass this "randseed" argument through many different levels of the call stack, and at each level a mistake that causes a randseed to be re-used could lead to a failure of confidentiality. It hardly seems worth it.
However, since I'm currently trying to understand a failure of a complex test in test_mutable, I'm continuing to use this patch for now in an attempt to reduce non-repeatability between different test runs and between different variants of the code.
] {
hunk ./src/allmydata/mutable/filenode.py 134

return self

- def create_with_keys(self, (pubkey, privkey), contents,
+ def create_with_keys(self, (pubkey, privkey), contents, randseed,
version=SDMF_VERSION):
"""Call this to create a brand-new mutable file. It will create the
shares, find homes for them, and upload the initial contents (created
hunk ./src/allmydata/mutable/filenode.py 141
with the same rules as IClient.create_mutable_file() ). Returns a
Deferred that fires (with the MutableFileNode instance you should
use) when it completes.
+
+ @param randseed is required to be a unique value every time you
+ invoke this method. Using a repeated value could lead to a
+ failure of confidentiality.
"""
hunk ./src/allmydata/mutable/filenode.py 146
+ precondition(isinstance(randseed, str), randseed)
+ precondition(len(randseed) == 32, randseed)
self._pubkey, self._privkey = pubkey, privkey
pubkey_s = self._pubkey.serialize()
privkey_s = self._privkey.serialize()
hunk ./src/allmydata/mutable/filenode.py 163
self._readkey = self._uri.readkey
self._storage_index = self._uri.storage_index
initial_contents = self._get_initial_contents(contents)
- return self._upload(initial_contents, None)
+ return self._upload(initial_contents, None, randseed)

def _get_initial_contents(self, contents):
if contents is None:
hunk ./src/allmydata/mutable/filenode.py 688
return d


- def _upload(self, new_contents, servermap):
+ def _upload(self, new_contents, servermap, randseed):
"""
A MutableFileNode still has to have some way of getting
published initially, which is what I am here for. After that,
hunk ./src/allmydata/mutable/filenode.py 694
all publishing, updating, modifying and so on happens through
MutableFileVersions.
+
+ @param randseed is required to be a unique value every time you
+ invoke this method. Using a repeated value could lead to a
+ failure of confidentiality.
"""
assert self._pubkey, "update_servermap must be called before publish"

hunk ./src/allmydata/mutable/filenode.py 703
# Define IPublishInvoker with a set_downloader_hints method?
# Then have the publisher call that method when it's done publishing?
- p = Publish(self, self._storage_broker, servermap)
+ p = Publish(self, self._storage_broker, servermap, randseed)
if self._history:
self._history.notify_publish(p.get_status(),
new_contents.get_size())
hunk ./src/allmydata/mutable/filenode.py 1023
self._most_recent_size = size
return res

- def update(self, data, offset):
+ def update(self, data, offset, randseed):
"""
Do an update of this mutable file version by inserting data at
offset within the file. If offset is the EOF, this is an append
hunk ./src/allmydata/mutable/filenode.py 1036
O(data.get_size()) memory/bandwidth/CPU to perform the update.
Otherwise, it must download, re-encode, and upload the entire
file again, which will use O(filesize) resources.
+
+ @param randseed is required to be a unique value every time you call
+ this method. Using a repeated value could lead to a critical
+ failure of confidentiality.
"""
hunk ./src/allmydata/mutable/filenode.py 1041
- return self._do_serialized(self._update, data, offset)
+ precondition(isinstance(randseed, str), randseed)
+ precondition(len(randseed) == 32, randseed)
+ return self._do_serialized(self._update, data, offset, randseed)

hunk ./src/allmydata/mutable/filenode.py 1045
- def _update(self, data, offset):
+ def _update(self, data, offset, randseed):
"""
I update the mutable file version represented by this particular
IMutableVersion by inserting the data in data at the offset
hunk ./src/allmydata/mutable/filenode.py 1051
offset. I return a Deferred that fires when this has been
completed.
+
+ @param randseed is required to be a unique value every time you call
+ this method. Using a repeated value could lead to a critical
+ failure of confidentiality.
"""
hunk ./src/allmydata/mutable/filenode.py 1056
+ precondition(isinstance(randseed, str), randseed)
+ precondition(len(randseed) == 32, randseed)
new_size = data.get_size() + offset
old_size = self.get_size()
segment_size = self._version[3]
hunk ./src/allmydata/mutable/filenode.py 1077
log.msg("updating in place")
d = self._do_update_update(data, offset)
d.addCallback(self._decode_and_decrypt_segments, data, offset)
- d.addCallback(self._build_uploadable_and_finish, data, offset)
+ d.addCallback(self._build_uploadable_and_finish, data, offset, randseed)
return d

def _do_modify_update(self, data, offset):
hunk ./src/allmydata/mutable/filenode.py 1170
d3 = defer.succeed(blockhashes)
return deferredutil.gatherResults([d1, d2, d3])

- def _build_uploadable_and_finish(self, segments_and_bht, data, offset):
+ def _build_uploadable_and_finish(self, segments_and_bht, data, offset, randseed):
"""
After the process has the plaintext segments, I build the
TransformingUploadable that the publisher will eventually
hunk ./src/allmydata/mutable/filenode.py 1177
re-upload to the grid. I then invoke the publisher with that
uploadable, and return a Deferred when the publish operation has
completed without issue.
+
+ @param randseed is required to be a unique value every time you
+ invoke this method. Using a repeated value could lead to a
+ failure of confidentiality.
"""
hunk ./src/allmydata/mutable/filenode.py 1182
+ precondition(isinstance(randseed, str), randseed)
+ precondition(len(randseed) == 32, randseed)
u = TransformingUploadable(data, offset,
self._version[3],
segments_and_bht[0],
hunk ./src/allmydata/mutable/filenode.py 1188
segments_and_bht[1])
- p = Publish(self._node, self._storage_broker, self._servermap)
+ p = Publish(self._node, self._storage_broker, self._servermap, randseed)
return p.update(u, offset, segments_and_bht[2], self._version)

def _update_servermap(self, mode=MODE_WRITE, update_range=None):
hunk ./src/allmydata/mutable/publish.py 11
from twisted.python import failure
from allmydata.interfaces import IPublishStatus, SDMF_VERSION, MDMF_VERSION, \
IMutableUploadable
-from allmydata.util import base32, hashutil, mathutil, idlib, log
+from allmydata.util import base32, hashutil, mathutil, idlib, log, randutil
from allmydata.util.dictutil import DictOfSets
hunk ./src/allmydata/mutable/publish.py 13
+from allmydata.util.assertutil import precondition
from allmydata import hashtree, codec
from allmydata.storage.server import si_b2a
from pycryptopp.cipher.aes import AES
hunk ./src/allmydata/mutable/publish.py 110
the current state of the world.

To make the initial publish, set servermap to None.
+
+ @param randseed is required to be a unique value every time you construct
+ a Publish instance; using a repeated value could lead to a critical
+ failure of confidentiality
"""

hunk ./src/allmydata/mutable/publish.py 116
- def __init__(self, filenode, storage_broker, servermap):
+ def __init__(self, filenode, storage_broker, servermap, randseed):
+ precondition(isinstance(randseed, str), randseed)
+ precondition(len(randseed) == 32, randseed)
self._node = filenode
self._storage_broker = storage_broker
self._servermap = servermap
hunk ./src/allmydata/mutable/publish.py 122
+ self._rando = randutil.RandomObj(randseed)
self._storage_index = self._node.get_storage_index()
self._log_prefix = prefix = si_b2a(self._storage_index)[:5]
num = self.log("Publish(%s): starting" % prefix, parent=None)
hunk ./src/allmydata/mutable/publish.py 651
# return a deferred so that we don't block execution when this
# is first called in the upload method.
if self._state == PUSHING_BLOCKS_STATE:
- return self.push_segment(self._current_segment)
+ return self.push_segment()

elif self._state == PUSHING_EVERYTHING_ELSE_STATE:
return self.push_everything_else()
hunk ./src/allmydata/mutable/publish.py 661
return self._done()


- def push_segment(self, segnum):
+ def push_segment(self):
if self.num_segments == 0 and self._version == SDMF_VERSION:
self._add_dummy_salts()

hunk ./src/allmydata/mutable/publish.py 665
- if segnum > self.end_segment:
+ if self._current_segment > self.end_segment:
# We don't have any more segments to push.
self._state = PUSHING_EVERYTHING_ELSE_STATE
return self._push()
hunk ./src/allmydata/mutable/publish.py 670

- d = self._encode_segment(segnum)
- d.addCallback(self._push_segment, segnum)
+ salt = self._rando.randstr(hashutil.IVLEN)
+
+ d = self._encode_segment(self._current_segment, salt)
+ d.addCallback(self._push_segment, self._current_segment)
def _increment_segnum(ign):
self._current_segment += 1
hunk ./src/allmydata/mutable/publish.py 676
+
# XXX: I don't think we need to do addBoth here -- any errBacks
# should be handled within push_segment.
d.addCallback(_increment_segnum)
hunk ./src/allmydata/mutable/publish.py 699
won't make sense. This method adds a dummy salt to each of our
SDMF writers so that they can write the signature later.
"""
- salt = os.urandom(16)
assert self._version == SDMF_VERSION

for writer in self.writers.itervalues():
hunk ./src/allmydata/mutable/publish.py 702
- writer.put_salt(salt)
+ writer.put_salt('\x00'*hashutil.IVLEN)


hunk ./src/allmydata/mutable/publish.py 705
- def _encode_segment(self, segnum):
+ def _encode_segment(self, segnum, salt):
"""
I encrypt and encode the segment segnum.
"""
hunk ./src/allmydata/mutable/publish.py 724

assert len(data) == segsize, len(data)

- salt = os.urandom(16)
-
key = hashutil.ssk_readkey_data_hash(salt, self.readkey)
self._status.set_status("Encrypting")
enc = AES(key)
replace ./src/allmydata/mutable/publish.py [A-Za-z_0-9] IVLEN SALTLEN
hunk ./src/allmydata/mutable/retrieve.py 11
from foolscap.api import eventually, fireEventually
from allmydata.interfaces import IRetrieveStatus, NotEnoughSharesError, \
DownloadStopped, MDMF_VERSION, SDMF_VERSION
-from allmydata.util import hashutil, log, mathutil
+from allmydata.util import base32, hashutil, log, mathutil
from allmydata.util.dictutil import DictOfSets
from allmydata import hashtree, codec
from allmydata.storage.server import si_b2a
hunk ./src/allmydata/nodemaker.py 112
return self._create_dirnode(filenode)
return None

- def create_mutable_file(self, contents=None, keysize=None,
+ def create_mutable_file(self, randseed, contents=None, keysize=None,
version=SDMF_VERSION):
hunk ./src/allmydata/nodemaker.py 114
+ """
+ @param randseed is required to be a unique value every time you call
+ this method. Using a repeated value could lead to a critical
+ failure of confidentiality.
+ """
+ precondition(isinstance(randseed, str), randseed)
+ precondition(len(randseed) == 32, randseed)
n = MutableFileNode(self.storage_broker, self.secret_holder,
self.default_encoding_parameters, self.history)
d = self.key_generator.generate(keysize)
hunk ./src/allmydata/nodemaker.py 124
- d.addCallback(n.create_with_keys, contents, version=version)
+ d.addCallback(n.create_with_keys, contents, randseed=randseed, version=version)
d.addCallback(lambda res: n)
return d

hunk ./src/allmydata/test/test_mutable.py 8
from twisted.internet import defer, reactor
from allmydata import uri, client
from allmydata.nodemaker import NodeMaker
-from allmydata.util import base32, consumer, fileutil, mathutil
+from allmydata.util import base32, consumer, fileutil, mathutil, randutil
from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
ssk_pubkey_fingerprint_hash
from allmydata.util.consumer import MemoryConsumer
hunk ./src/allmydata/test/test_mutable.py 3394
d.addCallback(_then2)
return d

- def do_upload_mdmf(self):
- d = self.nm.create_mutable_file(MutableData(self.data),
+ def do_upload_mdmf(self, randseed):
+ rando = randutil.RandomObj(randseed)
+ d = self.nm.create_mutable_file(rando.randstr(32), MutableData(self.data),
version=MDMF_VERSION)
def _then(n):
assert isinstance(n, MutableFileNode)
hunk ./src/allmydata/test/test_mutable.py 3404
# Make MDMF node that has 255 shares.
self.nm.default_encoding_parameters['n'] = 255
self.nm.default_encoding_parameters['k'] = 127
- return self.nm.create_mutable_file(MutableData(self.data),
+ return self.nm.create_mutable_file(rando.randstr(32), MutableData(self.data),
version=MDMF_VERSION)
d.addCallback(_then)
def _then2(n):
hunk ./src/allmydata/test/test_mutable.py 3413
d.addCallback(_then2)
return d

- def _test_replace(self, offset, new_data):
+ def _test_replace(self, offset, new_data, randseed):
expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
hunk ./src/allmydata/test/test_mutable.py 3415
- d0 = self.do_upload_mdmf()
+ d0 = self.do_upload_mdmf(randseed)
def _run(ign):
d = defer.succeed(None)
for node in (self.mdmf_node, self.mdmf_max_shares_node):
hunk ./src/allmydata/test/test_mutable.py 3421
d.addCallback(lambda ign: node.get_best_mutable_version())
d.addCallback(lambda mv:
- mv.update(MutableData(new_data), offset))
+ mv.update(MutableData(new_data), offset, randseed))
# close around node.
d.addCallback(lambda ignored, node=node:
node.download_best_version())
hunk ./src/allmydata/test/test_mutable.py 3439
def test_append(self):
# We should be able to append data to a mutable file and get
# what we expect.
- return self._test_replace(len(self.data), "appended")
+ return self._test_replace(len(self.data), "appended", randseed='test_append000000000000000000000')

def test_replace_middle(self):
# We should be able to replace data in the middle of a mutable
replace ./src/allmydata/util/hashutil.py [A-Za-z_0-9] IVLEN SALTLEN
addfile ./src/allmydata/util/randutil.py
hunk ./src/allmydata/util/randutil.py 1
+import random
+
+class RandomObj(random.Random):
+ def randstr(self, n):
+ return ''.join(map(chr, map(self.randrange, [0]*n, [256]*n)))
}

Context:

[free up the buffer used to hold data while it is being written to ImmutableS3ShareForWriting
zooko@zooko.com**20110930060238
Ignore-this: 603b2c8bb1f4656bdde5876ac95aa5c9
]
[FIX THE BUG!
zooko@zooko.com**20110930032140
Ignore-this: fd32c4ac3054ae6fc2b9433f113b2fd6
]
[fix another bug in ImmutableShareS3ForWriting
zooko@zooko.com**20110930025701
Ignore-this: 6ad7bd17111b12d96991172fbe04d76
]
[really fix the bug in ImmutableS3ShareForWriting
zooko@zooko.com**20110930023501
Ignore-this: 36a7804433cab667566d119af7223425
]
[Add dummy lease methods to immutable S3 share objects. refs #999
david-sarah@jacaranda.org**20110930021703
Ignore-this: 7c21f140020edd64027c71be0f32c2b2
]
[test_storage.py: Server class uses ShouldFailMixin. refs #999
david-sarah@jacaranda.org**20110930001349
Ignore-this: 4cf1ef21bbf85d7fe52ab660f59ff237
]
[mock_s3.py: fix bug in MockS3Error constructor. refs #999
david-sarah@jacaranda.org**20110930001326
Ignore-this: 4d0ebd9120fc8e99b15924c671cd0927
]
[fix bug in ImmutableS3ShareForWriting
zooko@zooko.com**20110930020535
Ignore-this: f7f63d2fc2086903a195cc000f306b88
]
[return res
zooko@zooko.com**20110930000446
Ignore-this: 6f73b3e389612c73c6590007229ad8e
]
[s3_bucket.py: fix an incorrect argument signature for list_objects. refs #999
david-sarah@jacaranda.org**20110929235646
Ignore-this: f02e3a23f28fadef71c70fd0b1592ba6
]
[Make sure that the statedir is created before trying to use it. refs #999
david-sarah@jacaranda.org**20110929234845
Ignore-this: b5f0529b1f2a5b5250c2ee2091cbe24b
]
[test/mock_s3.py: fix a typo. refs #999
david-sarah@jacaranda.org**20110929234808
Ignore-this: ccdff591f9b301f7f486454a4366c2b3
]
[test_storage.py: only run test_large_share on the disk backend. (It will wedge your machine if run on the S3 backend with MockS3Bucket.) refs #999
david-sarah@jacaranda.org**20110929234725
Ignore-this: ffa7c08458ee0159455b6f1cd1c3ff48
]
[fix doc to say that secret access key goes into private/s3secret
zooko@zooko.com**20110930000256
Ignore-this: c054ff78041a05b3177b3c1b3e9d4ae7
]
[Fixes to S3 config parsing, with tests. refs #999
david-sarah@jacaranda.org**20110929225014
Ignore-this: 19aa5a3e9575b0c2f77b19fe1bcbafcb
]
[Add missing src/allmydata/test/mock_s3.py (mock implementation of an S3 bucket). refs #999
david-sarah@jacaranda.org**20110929212229
Ignore-this: a1433555d4bb0b8b36fb80feb122187b
]
[Make the s3.region option case-insensitive (txaws expects uppercase). refs #999
david-sarah@jacaranda.org**20110929211606
Ignore-this: def83d3fa368c315573e5f1bad5ee7f9
]
[Fix missing add_lease method on ImmutableS3ShareForWriting. refs #999
david-sarah@jacaranda.org**20110929211524
Ignore-this: 832f0d94f912b17006b0dbaab94846b6
]
[Add missing src/allmydata/storage/backends/s3/s3_bucket.py. refs #999
david-sarah@jacaranda.org**20110929211416
Ignore-this: aa783c5d7c32af172b5c5a3d62c3faf2
]
[scripts/debug.py: repair stale code, and use the get_disk_share function defined by disk_backend instead of duplicating it. refs #999
david-sarah@jacaranda.org**20110929211252
Ignore-this: 5dda548e8703e35f0c103467346627ef
]
[Fix a bug in the new config parsing code when reserved_space is not present for a disk backend. refs #999
david-sarah@jacaranda.org**20110929211106
Ignore-this: b05bd3c4ff7d90b5ecb1e6a54717b735
]
[test_storage.py: Avoid using the same working directory for different test classes. refs #999
david-sarah@jacaranda.org**20110929210954
Ignore-this: 3a01048e941c61c603eec603d064bebb
]
[More asycification of tests. refs #999
david-sarah@jacaranda.org**20110929210727
Ignore-this: 87690a62f89a07e63b859c24948d262d
]
[Fix a bug in disk_backend.py. refs #999
david-sarah@jacaranda.org**20110929182511
Ignore-this: 4f9a62adf03fc3221e46b54f7a4a960b
]
[docs/backends/S3.rst: add s3.region option. Also minor changes to configuration.rst. refs #999
david-sarah@jacaranda.org**20110929182442
Ignore-this: 2992ead5f8d9357a0d9b912b1e0bd932
]
[Updates to test_backends.py. refs #999
david-sarah@jacaranda.org**20110929182016
Ignore-this: 3bac19179308e6f27e54c45c7cad4dc6
]
[Implement selection of backends from tahoe.cfg options. Also remove the discard_storage parameter from the disk backend. refs #999
david-sarah@jacaranda.org**20110929181754
Ignore-this: c7f78e7db98326723033f44e56858683
]
[test_storage.py: fix an incorrect argument in construction of S3Backend. refs #999
david-sarah@jacaranda.org**20110929081331
Ignore-this: 33ad68e0d3a15e3fa1dda90df1b8365c
]
[Move the implementation of lease methods to disk_backend.py, and add stub implementations in s3_backend.py that raise NotImplementedError. Fix the lease methods in the disk backend to be synchronous. Also make sure that get_shares() returns a Deferred list sorted by shnum. refs #999
david-sarah@jacaranda.org**20110929081132
Ignore-this: 32cbad21c7236360e2e8e84a07f88597
]
[Make the make_bucket_writer method synchronous. refs #999
david-sarah@jacaranda.org**20110929080712
Ignore-this: 1de299e791baf1cf1e2a8d4b593e8ba1
]
[Add get_s3_share function in place of S3ShareSet._load_shares. refs #999
david-sarah@jacaranda.org**20110929080530
Ignore-this: f99665979612e42ecefa293bda0db5de
]
[Complete the splitting of the immutable IStoredShare interface into IShareForReading and IShareForWriting. Also remove the 'load' method from shares, and other minor interface changes. refs #999
david-sarah@jacaranda.org**20110929075544
Ignore-this: 8c923051869cf162d9840770b4a08573
]
[split Immutable S3 Share into for-reading and for-writing classes, remove unused (as far as I can tell) methods, use cStringIO for buffering the writes
zooko@zooko.com**20110929055038
Ignore-this: 82d8c4488a8548936285a975ef5a1559
TODO: define the interfaces that the new classes claim to implement
]
[Comment out an assertion that was causing all mutable tests to fail. THIS IS PROBABLY WRONG. refs #999
david-sarah@jacaranda.org**20110929041110
Ignore-this: 1e402d51ec021405b191757a37b35a94
]
[Fix some incorrect or incomplete asyncifications. refs #999
david-sarah@jacaranda.org**20110929040800
Ignore-this: ed70e9af2190217c84fd2e8c41de4c7e
]
[Add some debugging assertions that share objects are not Deferred. refs #999
david-sarah@jacaranda.org**20110929040657
Ignore-this: 5c7f56a146f5a3c353c6fe5b090a7dc5
]
[scripts/debug.py: take account of some API changes. refs #999
david-sarah@jacaranda.org**20110929040539
Ignore-this: 933c3d44b993c041105038c7d4514386
]
[Make get_sharesets_for_prefix synchronous for the time being (returning a Deferred breaks crawlers). refs #999
david-sarah@jacaranda.org**20110929040136
Ignore-this: e94b93d4f3f6173d9de80c4121b68748
]
[More asyncification of tests. refs #999
david-sarah@jacaranda.org**20110929035644
Ignore-this: 28b650a9ef593b3fd7524f6cb562ad71
]
[no_network.py: add some assertions that the things we wrap using LocalWrapper are not Deferred (which is not supported and causes hard-to-debug failures). refs #999
david-sarah@jacaranda.org**20110929035537
Ignore-this: fd103fbbb54fbbc17b9517c78313120e
]
[Add some debugging code (switched off) to no_network.py. When switched on (PRINT_TRACEBACKS = True), this prints the stack trace associated with the caller of a remote method, mitigating the problem that the traceback normally gets lost at that point. TODO: think of a better way to preserve the traceback that can be enabled by default. refs #999
david-sarah@jacaranda.org**20110929035341
Ignore-this: 2a593ec3ee450719b241ea8d60a0f320
]
[Use factory functions to create share objects rather than their constructors, to allow the factory to return a Deferred. Also change some methods on IShareSet and IStoredShare to return Deferreds. Refactor some constants associated with mutable shares. refs #999
david-sarah@jacaranda.org**20110928052324
Ignore-this: bce0ac02f475bcf31b0e3b340cd91198
]
[Work in progress for asyncifying the backend interface (necessary to call txaws methods that return Deferreds). This is incomplete so lots of tests fail. refs #999
david-sarah@jacaranda.org**20110927073903
Ignore-this: ebdc6c06c3baa9460af128ec8f5b418b
]
[mutable/publish.py: don't crash if there are no writers in _report_verinfo. refs #999
david-sarah@jacaranda.org**20110928014126
Ignore-this: 9999c82bb3057f755a6e86baeafb8a39
]
[scripts/debug.py: fix incorrect arguments to dump_immutable_share. refs #999
david-sarah@jacaranda.org**20110928014049
Ignore-this: 1078ee3f06a2f36b29e0cf694d2851cd
]
[test_system.py: more debug output for a failing check in test_filesystem. refs #999
david-sarah@jacaranda.org**20110928014019
Ignore-this: e8bb77b8f7db12db7cd69efb6e0ed130
]
[test_system.py: incorrect arguments were being passed to the constructor for MutableDiskShare. refs #999
david-sarah@jacaranda.org**20110928013857
Ignore-this: e9719f74e7e073e37537f9a71614b8a0
]
[Undo an incompatible change to RIStorageServer. refs #999
david-sarah@jacaranda.org**20110928013729
Ignore-this: bea4c0f6cb71202fab942cd846eab693
]
[mutable/publish.py: resolve conflicting patches. refs #999
david-sarah@jacaranda.org**20110927073530
Ignore-this: 6154a113723dc93148151288bd032439
]
[test_storage.py: fix test_no_st_blocks. refs #999
david-sarah@jacaranda.org**20110927072848
Ignore-this: 5f12b784920f87d09c97c676d0afa6f8
]
[Cleanups to S3 backend (not including Deferred changes). refs #999
david-sarah@jacaranda.org**20110927071855
Ignore-this: f0dca788190d92b1edb1ee1498fb34dc
]
[Cleanups to disk backend. refs #999
david-sarah@jacaranda.org**20110927071544
Ignore-this: e9d3fd0e85aaf301c04342fffdc8f26
]
[test_storage.py: fix test_status_bad_disk_stats. refs #999
david-sarah@jacaranda.org**20110927071403
Ignore-this: 6108fee69a60962be2df2ad11b483a11
]
[util/deferredutil.py: add some utilities for asynchronous iteration. refs #999
david-sarah@jacaranda.org**20110927070947
Ignore-this: ac4946c1e5779ea64b85a1a420d34c9e
]
[Add 'has-immutable-readv' to server version information. refs #999
david-sarah@jacaranda.org**20110923220935
Ignore-this: c3c4358f2ab8ac503f99c968ace8efcf
]
[Minor cleanup to disk backend. refs #999
david-sarah@jacaranda.org**20110923205510
Ignore-this: 79f92d7c2edb14cfedb167247c3f0d08
]
[Update the S3 backend. refs #999
david-sarah@jacaranda.org**20110923205345
Ignore-this: 5ca623a17e09ddad4cab2f51b49aec0a
]
[Update the null backend to take into account interface changes. Also, it now records which shares are present, but not their contents. refs #999
david-sarah@jacaranda.org**20110923205219
Ignore-this: 42a23d7e253255003dc63facea783251
]
[Make EmptyShare.check_testv a simple function. refs #999
david-sarah@jacaranda.org**20110923204945
Ignore-this: d0132c085f40c39815fa920b77fc39ab
]
[The cancel secret needs to be unique, even if it isn't explicitly provided. refs #999
david-sarah@jacaranda.org**20110923204914
Ignore-this: 6c44bb908dd4c0cdc59506b2d87a47b0
]
[Implement readv for immutable shares. refs #999
david-sarah@jacaranda.org**20110923204611
Ignore-this: 24f14b663051169d66293020e40c5a05
]
[Remove redundant si_s argument from check_write_enabler. refs #999
david-sarah@jacaranda.org**20110923204425
Ignore-this: 25be760118dbce2eb661137f7d46dd20
]
[interfaces.py: add fill_in_space_stats method to IStorageBackend. refs #999
david-sarah@jacaranda.org**20110923203723
Ignore-this: 59371c150532055939794fed6c77dcb6
]
[Add incomplete S3 backend. refs #999
david-sarah@jacaranda.org**20110923041314
Ignore-this: b48df65699e3926dcbb87b5f755cdbf1
]
[Move advise_corrupt_share to allmydata/storage/backends/base.py, since it will be common to the disk and S3 backends. refs #999
david-sarah@jacaranda.org**20110923041115
Ignore-this: 782b49f243bd98fcb6c249f8e40fd9f
]
[A few comment cleanups. refs #999
david-sarah@jacaranda.org**20110923041003
Ignore-this: f574b4a3954b6946016646011ad15edf
]
[mutable/publish.py: elements should not be removed from a dictionary while it is being iterated over. refs #393
david-sarah@jacaranda.org**20110923040825
Ignore-this: 135da94bd344db6ccd59a576b54901c1
]
[Blank line cleanups.
david-sarah@jacaranda.org**20110923012044
Ignore-this: 8e1c4ecb5b0c65673af35872876a8591
]
[Reinstate the cancel_lease methods of ImmutableDiskShare and MutableDiskShare, since they are needed for lease expiry. refs #999
david-sarah@jacaranda.org**20110922183323
Ignore-this: a11fb0dd0078ff627cb727fc769ec848
]
[Fix most of the crawler tests. refs #999
david-sarah@jacaranda.org**20110922183008
Ignore-this: 116c0848008f3989ba78d87c07ec783c
]
[Fix some more test failures. refs #999
david-sarah@jacaranda.org**20110922045451
Ignore-this: b726193cbd03a7c3d343f6e4a0f33ee7
]
[uri.py: resolve a conflict between trunk and the pluggable-backends patches. refs #999
david-sarah@jacaranda.org**20110921222038
Ignore-this: ffeeab60d8e71a6a29a002d024d76fcf
]
[Fix more shallow bugs, mainly FilePathification. Also, remove the max_space_per_bucket parameter from BucketWriter since it can be obtained from the _max_size attribute of the share (via a new get_allocated_size() accessor). refs #999
david-sarah@jacaranda.org**20110921221421
Ignore-this: 600e3ccef8533aa43442fa576c7d88cf
]
[More fixes to tests needed for pluggable backends. refs #999
david-sarah@jacaranda.org**20110921184649
Ignore-this: 9be0d3a98e350fd4e17a07d2c00bb4ca
]
[docs/backends/S3.rst, disk.rst: describe type of space settings as 'quantity of space', not 'str'. refs #999
david-sarah@jacaranda.org**20110921031705
Ignore-this: a74ed8e01b0a1ab5f07a1487d7bf138
]
[docs/backends/S3.rst: remove Issues section. refs #999
david-sarah@jacaranda.org**20110921031625
Ignore-this: c83d8f52b790bc32488869e6ee1df8c2
]
[Fix some incorrect attribute accesses. refs #999
david-sarah@jacaranda.org**20110921031207
Ignore-this: f1ea4c3ea191f6d4b719afaebd2b2bcd
]
[docs/backends: document the configuration options for the pluggable backends scheme. refs #999
david-sarah@jacaranda.org**20110920171737
Ignore-this: 5947e864682a43cb04e557334cda7c19
]
[Work-in-progress, includes fix to bug involving BucketWriter. refs #999
david-sarah@jacaranda.org**20110920033803
Ignore-this: 64e9e019421454e4d08141d10b6e4eed
]
[Pluggable backends -- all other changes. refs #999
david-sarah@jacaranda.org**20110919233256
Ignore-this: 1a77b6b5d178b32a9b914b699ba7e957
]
[Pluggable backends -- new and moved files, changes to moved files. refs #999
david-sarah@jacaranda.org**20110919232926
Ignore-this: ec5d2d1362a092d919e84327d3092424
]
[interfaces.py: 'which -> that' grammar cleanup.
david-sarah@jacaranda.org**20110825003217
Ignore-this: a3e15f3676de1b346ad78aabdfb8cac6
]
[test/test_runner.py: BinTahoe.test_path has rare nondeterministic failures; this patch probably fixes a problem where the actual cause of failure is masked by a string conversion error.
david-sarah@jacaranda.org**20110927225336
Ignore-this: 6f1ad68004194cc9cea55ace3745e4af
]
[docs/configuration.rst: add section about the types of node, and clarify when setting web.port enables web-API service. fixes #1444
zooko@zooko.com**20110926203801
Ignore-this: ab94d470c68e720101a7ff3c207a719e
]
[TAG allmydata-tahoe-1.9.0a2
warner@lothar.com**20110925234811
Ignore-this: e9649c58f9c9017a7d55008938dba64f
]
Patch bundle hash:
8a27d8a395a489260241d90e0f70e4b38e6b15cd