1 | |
---|
2 | import time |
---|
3 | from twisted.trial import unittest |
---|
4 | from twisted.internet import defer |
---|
5 | from allmydata import uri, dirnode |
---|
6 | from allmydata.client import Client |
---|
7 | from allmydata.immutable import upload |
---|
8 | from allmydata.interfaces import IFileNode, \ |
---|
9 | ExistingChildError, NoSuchChildError, \ |
---|
10 | IDeepCheckResults, IDeepCheckAndRepairResults, CannotPackUnknownNodeError |
---|
11 | from allmydata.mutable.filenode import MutableFileNode |
---|
12 | from allmydata.mutable.common import UncoordinatedWriteError |
---|
13 | from allmydata.util import hashutil, base32 |
---|
14 | from allmydata.monitor import Monitor |
---|
15 | from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \ |
---|
16 | ErrorMixin |
---|
17 | from allmydata.test.no_network import GridTestMixin |
---|
18 | from allmydata.unknown import UnknownNode |
---|
19 | from allmydata.nodemaker import NodeMaker |
---|
20 | from base64 import b32decode |
---|
21 | import common_util as testutil |
---|
22 | |
---|
23 | class Dirnode(GridTestMixin, unittest.TestCase, |
---|
24 | testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin): |
---|
25 | timeout = 240 # It takes longer than 120 seconds on Francois's arm box. |
---|
26 | |
---|
27 | def test_basic(self): |
---|
28 | self.basedir = "dirnode/Dirnode/test_basic" |
---|
29 | self.set_up_grid() |
---|
30 | c = self.g.clients[0] |
---|
31 | d = c.create_dirnode() |
---|
32 | def _done(res): |
---|
33 | self.failUnless(isinstance(res, dirnode.DirectoryNode)) |
---|
34 | rep = str(res) |
---|
35 | self.failUnless("RW" in rep) |
---|
36 | d.addCallback(_done) |
---|
37 | return d |
---|
38 | |
---|
39 | def test_initial_children(self): |
---|
40 | self.basedir = "dirnode/Dirnode/test_initial_children" |
---|
41 | self.set_up_grid() |
---|
42 | c = self.g.clients[0] |
---|
43 | nm = c.nodemaker |
---|
44 | setup_py_uri = "URI:CHK:n7r3m6wmomelk4sep3kw5cvduq:os7ijw5c3maek7pg65e5254k2fzjflavtpejjyhshpsxuqzhcwwq:3:20:14861" |
---|
45 | one_uri = "URI:LIT:n5xgk" # LIT for "one" |
---|
46 | kids = {u"one": (nm.create_from_cap(one_uri), {}), |
---|
47 | u"two": (nm.create_from_cap(setup_py_uri), |
---|
48 | {"metakey": "metavalue"}), |
---|
49 | } |
---|
50 | d = c.create_dirnode(kids) |
---|
51 | def _created(dn): |
---|
52 | self.failUnless(isinstance(dn, dirnode.DirectoryNode)) |
---|
53 | rep = str(dn) |
---|
54 | self.failUnless("RW" in rep) |
---|
55 | return dn.list() |
---|
56 | d.addCallback(_created) |
---|
57 | def _check_kids(children): |
---|
58 | self.failUnlessEqual(sorted(children.keys()), [u"one", u"two"]) |
---|
59 | one_node, one_metadata = children[u"one"] |
---|
60 | two_node, two_metadata = children[u"two"] |
---|
61 | self.failUnlessEqual(one_node.get_size(), 3) |
---|
62 | self.failUnlessEqual(two_node.get_size(), 14861) |
---|
63 | self.failUnless(isinstance(one_metadata, dict), one_metadata) |
---|
64 | self.failUnlessEqual(two_metadata["metakey"], "metavalue") |
---|
65 | d.addCallback(_check_kids) |
---|
66 | d.addCallback(lambda ign: nm.create_new_mutable_directory(kids)) |
---|
67 | d.addCallback(lambda dn: dn.list()) |
---|
68 | d.addCallback(_check_kids) |
---|
69 | future_writecap = "x-tahoe-crazy://I_am_from_the_future." |
---|
70 | future_readcap = "x-tahoe-crazy-readonly://I_am_from_the_future." |
---|
71 | future_node = UnknownNode(future_writecap, future_readcap) |
---|
72 | bad_kids1 = {u"one": (future_node, {})} |
---|
73 | d.addCallback(lambda ign: |
---|
74 | self.shouldFail(AssertionError, "bad_kids1", |
---|
75 | "does not accept UnknownNode", |
---|
76 | nm.create_new_mutable_directory, |
---|
77 | bad_kids1)) |
---|
78 | bad_kids2 = {u"one": (nm.create_from_cap(one_uri), None)} |
---|
79 | d.addCallback(lambda ign: |
---|
80 | self.shouldFail(AssertionError, "bad_kids2", |
---|
81 | "requires metadata to be a dict", |
---|
82 | nm.create_new_mutable_directory, |
---|
83 | bad_kids2)) |
---|
84 | return d |
---|
85 | |
---|
86 | def test_check(self): |
---|
87 | self.basedir = "dirnode/Dirnode/test_check" |
---|
88 | self.set_up_grid() |
---|
89 | c = self.g.clients[0] |
---|
90 | d = c.create_dirnode() |
---|
91 | d.addCallback(lambda dn: dn.check(Monitor())) |
---|
92 | def _done(res): |
---|
93 | self.failUnless(res.is_healthy()) |
---|
94 | d.addCallback(_done) |
---|
95 | return d |
---|
96 | |
---|
97 | def _test_deepcheck_create(self): |
---|
98 | # create a small tree with a loop, and some non-directories |
---|
99 | # root/ |
---|
100 | # root/subdir/ |
---|
101 | # root/subdir/file1 |
---|
102 | # root/subdir/link -> root |
---|
103 | # root/rodir |
---|
104 | c = self.g.clients[0] |
---|
105 | d = c.create_dirnode() |
---|
106 | def _created_root(rootnode): |
---|
107 | self._rootnode = rootnode |
---|
108 | return rootnode.create_subdirectory(u"subdir") |
---|
109 | d.addCallback(_created_root) |
---|
110 | def _created_subdir(subdir): |
---|
111 | self._subdir = subdir |
---|
112 | d = subdir.add_file(u"file1", upload.Data("data"*100, None)) |
---|
113 | d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode)) |
---|
114 | d.addCallback(lambda res: c.create_dirnode()) |
---|
115 | d.addCallback(lambda dn: |
---|
116 | self._rootnode.set_uri(u"rodir", |
---|
117 | dn.get_uri(), |
---|
118 | dn.get_readonly_uri())) |
---|
119 | return d |
---|
120 | d.addCallback(_created_subdir) |
---|
121 | def _done(res): |
---|
122 | return self._rootnode |
---|
123 | d.addCallback(_done) |
---|
124 | return d |
---|
125 | |
---|
126 | def test_deepcheck(self): |
---|
127 | self.basedir = "dirnode/Dirnode/test_deepcheck" |
---|
128 | self.set_up_grid() |
---|
129 | d = self._test_deepcheck_create() |
---|
130 | d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done()) |
---|
131 | def _check_results(r): |
---|
132 | self.failUnless(IDeepCheckResults.providedBy(r)) |
---|
133 | c = r.get_counters() |
---|
134 | self.failUnlessEqual(c, |
---|
135 | {"count-objects-checked": 4, |
---|
136 | "count-objects-healthy": 4, |
---|
137 | "count-objects-unhealthy": 0, |
---|
138 | "count-objects-unrecoverable": 0, |
---|
139 | "count-corrupt-shares": 0, |
---|
140 | }) |
---|
141 | self.failIf(r.get_corrupt_shares()) |
---|
142 | self.failUnlessEqual(len(r.get_all_results()), 4) |
---|
143 | d.addCallback(_check_results) |
---|
144 | return d |
---|
145 | |
---|
146 | def test_deepcheck_and_repair(self): |
---|
147 | self.basedir = "dirnode/Dirnode/test_deepcheck_and_repair" |
---|
148 | self.set_up_grid() |
---|
149 | d = self._test_deepcheck_create() |
---|
150 | d.addCallback(lambda rootnode: |
---|
151 | rootnode.start_deep_check_and_repair().when_done()) |
---|
152 | def _check_results(r): |
---|
153 | self.failUnless(IDeepCheckAndRepairResults.providedBy(r)) |
---|
154 | c = r.get_counters() |
---|
155 | self.failUnlessEqual(c, |
---|
156 | {"count-objects-checked": 4, |
---|
157 | "count-objects-healthy-pre-repair": 4, |
---|
158 | "count-objects-unhealthy-pre-repair": 0, |
---|
159 | "count-objects-unrecoverable-pre-repair": 0, |
---|
160 | "count-corrupt-shares-pre-repair": 0, |
---|
161 | "count-objects-healthy-post-repair": 4, |
---|
162 | "count-objects-unhealthy-post-repair": 0, |
---|
163 | "count-objects-unrecoverable-post-repair": 0, |
---|
164 | "count-corrupt-shares-post-repair": 0, |
---|
165 | "count-repairs-attempted": 0, |
---|
166 | "count-repairs-successful": 0, |
---|
167 | "count-repairs-unsuccessful": 0, |
---|
168 | }) |
---|
169 | self.failIf(r.get_corrupt_shares()) |
---|
170 | self.failIf(r.get_remaining_corrupt_shares()) |
---|
171 | self.failUnlessEqual(len(r.get_all_results()), 4) |
---|
172 | d.addCallback(_check_results) |
---|
173 | return d |
---|
174 | |
---|
175 | def _mark_file_bad(self, rootnode): |
---|
176 | si = rootnode.get_storage_index() |
---|
177 | self.delete_shares_numbered(rootnode.get_uri(), [0]) |
---|
178 | return rootnode |
---|
179 | |
---|
180 | def test_deepcheck_problems(self): |
---|
181 | self.basedir = "dirnode/Dirnode/test_deepcheck_problems" |
---|
182 | self.set_up_grid() |
---|
183 | d = self._test_deepcheck_create() |
---|
184 | d.addCallback(lambda rootnode: self._mark_file_bad(rootnode)) |
---|
185 | d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done()) |
---|
186 | def _check_results(r): |
---|
187 | c = r.get_counters() |
---|
188 | self.failUnlessEqual(c, |
---|
189 | {"count-objects-checked": 4, |
---|
190 | "count-objects-healthy": 3, |
---|
191 | "count-objects-unhealthy": 1, |
---|
192 | "count-objects-unrecoverable": 0, |
---|
193 | "count-corrupt-shares": 0, |
---|
194 | }) |
---|
195 | #self.failUnlessEqual(len(r.get_problems()), 1) # TODO |
---|
196 | d.addCallback(_check_results) |
---|
197 | return d |
---|
198 | |
---|
199 | def test_readonly(self): |
---|
200 | self.basedir = "dirnode/Dirnode/test_readonly" |
---|
201 | self.set_up_grid() |
---|
202 | c = self.g.clients[0] |
---|
203 | nm = c.nodemaker |
---|
204 | filecap = make_chk_file_uri(1234) |
---|
205 | filenode = nm.create_from_cap(filecap) |
---|
206 | uploadable = upload.Data("some data", convergence="some convergence string") |
---|
207 | |
---|
208 | d = c.create_dirnode() |
---|
209 | def _created(rw_dn): |
---|
210 | d2 = rw_dn.set_uri(u"child", filecap, filecap) |
---|
211 | d2.addCallback(lambda res: rw_dn) |
---|
212 | return d2 |
---|
213 | d.addCallback(_created) |
---|
214 | |
---|
215 | def _ready(rw_dn): |
---|
216 | ro_uri = rw_dn.get_readonly_uri() |
---|
217 | ro_dn = c.create_node_from_uri(ro_uri) |
---|
218 | self.failUnless(ro_dn.is_readonly()) |
---|
219 | self.failUnless(ro_dn.is_mutable()) |
---|
220 | |
---|
221 | self.shouldFail(dirnode.NotMutableError, "set_uri ro", None, |
---|
222 | ro_dn.set_uri, u"newchild", filecap, filecap) |
---|
223 | self.shouldFail(dirnode.NotMutableError, "set_uri ro", None, |
---|
224 | ro_dn.set_node, u"newchild", filenode) |
---|
225 | self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None, |
---|
226 | ro_dn.set_nodes, { u"newchild": (filenode, None) }) |
---|
227 | self.shouldFail(dirnode.NotMutableError, "set_uri ro", None, |
---|
228 | ro_dn.add_file, u"newchild", uploadable) |
---|
229 | self.shouldFail(dirnode.NotMutableError, "set_uri ro", None, |
---|
230 | ro_dn.delete, u"child") |
---|
231 | self.shouldFail(dirnode.NotMutableError, "set_uri ro", None, |
---|
232 | ro_dn.create_subdirectory, u"newchild") |
---|
233 | self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None, |
---|
234 | ro_dn.set_metadata_for, u"child", {}) |
---|
235 | self.shouldFail(dirnode.NotMutableError, "set_uri ro", None, |
---|
236 | ro_dn.move_child_to, u"child", rw_dn) |
---|
237 | self.shouldFail(dirnode.NotMutableError, "set_uri ro", None, |
---|
238 | rw_dn.move_child_to, u"child", ro_dn) |
---|
239 | return ro_dn.list() |
---|
240 | d.addCallback(_ready) |
---|
241 | def _listed(children): |
---|
242 | self.failUnless(u"child" in children) |
---|
243 | d.addCallback(_listed) |
---|
244 | return d |
---|
245 | |
---|
246 | def failUnlessGreaterThan(self, a, b): |
---|
247 | self.failUnless(a > b, "%r should be > %r" % (a, b)) |
---|
248 | |
---|
249 | def failUnlessGreaterOrEqualThan(self, a, b): |
---|
250 | self.failUnless(a >= b, "%r should be >= %r" % (a, b)) |
---|
251 | |
---|
252 | def test_create(self): |
---|
253 | self.basedir = "dirnode/Dirnode/test_create" |
---|
254 | self.set_up_grid() |
---|
255 | c = self.g.clients[0] |
---|
256 | |
---|
257 | self.expected_manifest = [] |
---|
258 | self.expected_verifycaps = set() |
---|
259 | self.expected_storage_indexes = set() |
---|
260 | |
---|
261 | d = c.create_dirnode() |
---|
262 | def _then(n): |
---|
263 | # / |
---|
264 | self.rootnode = n |
---|
265 | self.failUnless(n.is_mutable()) |
---|
266 | u = n.get_uri() |
---|
267 | self.failUnless(u) |
---|
268 | self.failUnless(u.startswith("URI:DIR2:"), u) |
---|
269 | u_ro = n.get_readonly_uri() |
---|
270 | self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro) |
---|
271 | u_v = n.get_verify_cap().to_string() |
---|
272 | self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v) |
---|
273 | u_r = n.get_repair_cap().to_string() |
---|
274 | self.failUnlessEqual(u_r, u) |
---|
275 | self.expected_manifest.append( ((), u) ) |
---|
276 | self.expected_verifycaps.add(u_v) |
---|
277 | si = n.get_storage_index() |
---|
278 | self.expected_storage_indexes.add(base32.b2a(si)) |
---|
279 | expected_si = n._uri._filenode_uri.storage_index |
---|
280 | self.failUnlessEqual(si, expected_si) |
---|
281 | |
---|
282 | d = n.list() |
---|
283 | d.addCallback(lambda res: self.failUnlessEqual(res, {})) |
---|
284 | d.addCallback(lambda res: n.has_child(u"missing")) |
---|
285 | d.addCallback(lambda res: self.failIf(res)) |
---|
286 | |
---|
287 | fake_file_uri = make_mutable_file_uri() |
---|
288 | other_file_uri = make_mutable_file_uri() |
---|
289 | m = c.nodemaker.create_from_cap(fake_file_uri) |
---|
290 | ffu_v = m.get_verify_cap().to_string() |
---|
291 | self.expected_manifest.append( ((u"child",) , m.get_uri()) ) |
---|
292 | self.expected_verifycaps.add(ffu_v) |
---|
293 | self.expected_storage_indexes.add(base32.b2a(m.get_storage_index())) |
---|
294 | d.addCallback(lambda res: n.set_uri(u"child", |
---|
295 | fake_file_uri, fake_file_uri)) |
---|
296 | d.addCallback(lambda res: |
---|
297 | self.shouldFail(ExistingChildError, "set_uri-no", |
---|
298 | "child 'child' already exists", |
---|
299 | n.set_uri, u"child", |
---|
300 | other_file_uri, other_file_uri, |
---|
301 | overwrite=False)) |
---|
302 | # / |
---|
303 | # /child = mutable |
---|
304 | |
---|
305 | d.addCallback(lambda res: n.create_subdirectory(u"subdir")) |
---|
306 | |
---|
307 | # / |
---|
308 | # /child = mutable |
---|
309 | # /subdir = directory |
---|
310 | def _created(subdir): |
---|
311 | self.failUnless(isinstance(subdir, dirnode.DirectoryNode)) |
---|
312 | self.subdir = subdir |
---|
313 | new_v = subdir.get_verify_cap().to_string() |
---|
314 | assert isinstance(new_v, str) |
---|
315 | self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) ) |
---|
316 | self.expected_verifycaps.add(new_v) |
---|
317 | si = subdir.get_storage_index() |
---|
318 | self.expected_storage_indexes.add(base32.b2a(si)) |
---|
319 | d.addCallback(_created) |
---|
320 | |
---|
321 | d.addCallback(lambda res: |
---|
322 | self.shouldFail(ExistingChildError, "mkdir-no", |
---|
323 | "child 'subdir' already exists", |
---|
324 | n.create_subdirectory, u"subdir", |
---|
325 | overwrite=False)) |
---|
326 | |
---|
327 | d.addCallback(lambda res: n.list()) |
---|
328 | d.addCallback(lambda children: |
---|
329 | self.failUnlessEqual(sorted(children.keys()), |
---|
330 | sorted([u"child", u"subdir"]))) |
---|
331 | |
---|
332 | d.addCallback(lambda res: n.start_deep_stats().when_done()) |
---|
333 | def _check_deepstats(stats): |
---|
334 | self.failUnless(isinstance(stats, dict)) |
---|
335 | expected = {"count-immutable-files": 0, |
---|
336 | "count-mutable-files": 1, |
---|
337 | "count-literal-files": 0, |
---|
338 | "count-files": 1, |
---|
339 | "count-directories": 2, |
---|
340 | "size-immutable-files": 0, |
---|
341 | "size-literal-files": 0, |
---|
342 | #"size-directories": 616, # varies |
---|
343 | #"largest-directory": 616, |
---|
344 | "largest-directory-children": 2, |
---|
345 | "largest-immutable-file": 0, |
---|
346 | } |
---|
347 | for k,v in expected.iteritems(): |
---|
348 | self.failUnlessEqual(stats[k], v, |
---|
349 | "stats[%s] was %s, not %s" % |
---|
350 | (k, stats[k], v)) |
---|
351 | self.failUnless(stats["size-directories"] > 500, |
---|
352 | stats["size-directories"]) |
---|
353 | self.failUnless(stats["largest-directory"] > 500, |
---|
354 | stats["largest-directory"]) |
---|
355 | self.failUnlessEqual(stats["size-files-histogram"], []) |
---|
356 | d.addCallback(_check_deepstats) |
---|
357 | |
---|
358 | d.addCallback(lambda res: n.build_manifest().when_done()) |
---|
359 | def _check_manifest(res): |
---|
360 | manifest = res["manifest"] |
---|
361 | self.failUnlessEqual(sorted(manifest), |
---|
362 | sorted(self.expected_manifest)) |
---|
363 | stats = res["stats"] |
---|
364 | _check_deepstats(stats) |
---|
365 | self.failUnlessEqual(self.expected_verifycaps, |
---|
366 | res["verifycaps"]) |
---|
367 | self.failUnlessEqual(self.expected_storage_indexes, |
---|
368 | res["storage-index"]) |
---|
369 | d.addCallback(_check_manifest) |
---|
370 | |
---|
371 | def _add_subsubdir(res): |
---|
372 | return self.subdir.create_subdirectory(u"subsubdir") |
---|
373 | d.addCallback(_add_subsubdir) |
---|
374 | # / |
---|
375 | # /child = mutable |
---|
376 | # /subdir = directory |
---|
377 | # /subdir/subsubdir = directory |
---|
378 | d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir")) |
---|
379 | d.addCallback(lambda subsubdir: |
---|
380 | self.failUnless(isinstance(subsubdir, |
---|
381 | dirnode.DirectoryNode))) |
---|
382 | d.addCallback(lambda res: n.get_child_at_path(u"")) |
---|
383 | d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(), |
---|
384 | n.get_uri())) |
---|
385 | |
---|
386 | d.addCallback(lambda res: n.get_metadata_for(u"child")) |
---|
387 | d.addCallback(lambda metadata: |
---|
388 | self.failUnlessEqual(set(metadata.keys()), |
---|
389 | set(["tahoe", "ctime", "mtime"]))) |
---|
390 | |
---|
391 | d.addCallback(lambda res: |
---|
392 | self.shouldFail(NoSuchChildError, "gcamap-no", |
---|
393 | "nope", |
---|
394 | n.get_child_and_metadata_at_path, |
---|
395 | u"subdir/nope")) |
---|
396 | d.addCallback(lambda res: |
---|
397 | n.get_child_and_metadata_at_path(u"")) |
---|
398 | def _check_child_and_metadata1(res): |
---|
399 | child, metadata = res |
---|
400 | self.failUnless(isinstance(child, dirnode.DirectoryNode)) |
---|
401 | # edge-metadata needs at least one path segment |
---|
402 | self.failUnlessEqual(sorted(metadata.keys()), []) |
---|
403 | d.addCallback(_check_child_and_metadata1) |
---|
404 | d.addCallback(lambda res: |
---|
405 | n.get_child_and_metadata_at_path(u"child")) |
---|
406 | |
---|
407 | def _check_child_and_metadata2(res): |
---|
408 | child, metadata = res |
---|
409 | self.failUnlessEqual(child.get_uri(), |
---|
410 | fake_file_uri) |
---|
411 | self.failUnlessEqual(set(metadata.keys()), |
---|
412 | set(["tahoe", "ctime", "mtime"])) |
---|
413 | d.addCallback(_check_child_and_metadata2) |
---|
414 | |
---|
415 | d.addCallback(lambda res: |
---|
416 | n.get_child_and_metadata_at_path(u"subdir/subsubdir")) |
---|
417 | def _check_child_and_metadata3(res): |
---|
418 | child, metadata = res |
---|
419 | self.failUnless(isinstance(child, dirnode.DirectoryNode)) |
---|
420 | self.failUnlessEqual(set(metadata.keys()), |
---|
421 | set(["tahoe", "ctime", "mtime"])) |
---|
422 | d.addCallback(_check_child_and_metadata3) |
---|
423 | |
---|
424 | # set_uri + metadata |
---|
425 | # it should be possible to add a child without any metadata |
---|
426 | d.addCallback(lambda res: n.set_uri(u"c2", |
---|
427 | fake_file_uri, fake_file_uri, |
---|
428 | {})) |
---|
429 | d.addCallback(lambda res: n.get_metadata_for(u"c2")) |
---|
430 | d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe'])) |
---|
431 | |
---|
432 | # You can't override the link timestamps. |
---|
433 | d.addCallback(lambda res: n.set_uri(u"c2", |
---|
434 | fake_file_uri, fake_file_uri, |
---|
435 | { 'tahoe': {'linkcrtime': "bogus"}})) |
---|
436 | d.addCallback(lambda res: n.get_metadata_for(u"c2")) |
---|
437 | def _has_good_linkcrtime(metadata): |
---|
438 | self.failUnless(metadata.has_key('tahoe')) |
---|
439 | self.failUnless(metadata['tahoe'].has_key('linkcrtime')) |
---|
440 | self.failIfEqual(metadata['tahoe']['linkcrtime'], 'bogus') |
---|
441 | d.addCallback(_has_good_linkcrtime) |
---|
442 | |
---|
443 | # if we don't set any defaults, the child should get timestamps |
---|
444 | d.addCallback(lambda res: n.set_uri(u"c3", |
---|
445 | fake_file_uri, fake_file_uri)) |
---|
446 | d.addCallback(lambda res: n.get_metadata_for(u"c3")) |
---|
447 | d.addCallback(lambda metadata: |
---|
448 | self.failUnlessEqual(set(metadata.keys()), |
---|
449 | set(["tahoe", "ctime", "mtime"]))) |
---|
450 | |
---|
451 | # or we can add specific metadata at set_uri() time, which |
---|
452 | # overrides the timestamps |
---|
453 | d.addCallback(lambda res: n.set_uri(u"c4", |
---|
454 | fake_file_uri, fake_file_uri, |
---|
455 | {"key": "value"})) |
---|
456 | d.addCallback(lambda res: n.get_metadata_for(u"c4")) |
---|
457 | d.addCallback(lambda metadata: |
---|
458 | self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and |
---|
459 | (metadata['key'] == "value"), metadata)) |
---|
460 | |
---|
461 | d.addCallback(lambda res: n.delete(u"c2")) |
---|
462 | d.addCallback(lambda res: n.delete(u"c3")) |
---|
463 | d.addCallback(lambda res: n.delete(u"c4")) |
---|
464 | |
---|
465 | # set_node + metadata |
---|
466 | # it should be possible to add a child without any metadata |
---|
467 | d.addCallback(lambda res: n.set_node(u"d2", n, {})) |
---|
468 | d.addCallback(lambda res: c.create_dirnode()) |
---|
469 | d.addCallback(lambda n2: |
---|
470 | self.shouldFail(ExistingChildError, "set_node-no", |
---|
471 | "child 'd2' already exists", |
---|
472 | n.set_node, u"d2", n2, |
---|
473 | overwrite=False)) |
---|
474 | d.addCallback(lambda res: n.get_metadata_for(u"d2")) |
---|
475 | d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe'])) |
---|
476 | |
---|
477 | # if we don't set any defaults, the child should get timestamps |
---|
478 | d.addCallback(lambda res: n.set_node(u"d3", n)) |
---|
479 | d.addCallback(lambda res: n.get_metadata_for(u"d3")) |
---|
480 | d.addCallback(lambda metadata: |
---|
481 | self.failUnlessEqual(set(metadata.keys()), |
---|
482 | set(["tahoe", "ctime", "mtime"]))) |
---|
483 | |
---|
484 | # or we can add specific metadata at set_node() time, which |
---|
485 | # overrides the timestamps |
---|
486 | d.addCallback(lambda res: n.set_node(u"d4", n, |
---|
487 | {"key": "value"})) |
---|
488 | d.addCallback(lambda res: n.get_metadata_for(u"d4")) |
---|
489 | d.addCallback(lambda metadata: |
---|
490 | self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and |
---|
491 | (metadata['key'] == "value"), metadata)) |
---|
492 | |
---|
493 | d.addCallback(lambda res: n.delete(u"d2")) |
---|
494 | d.addCallback(lambda res: n.delete(u"d3")) |
---|
495 | d.addCallback(lambda res: n.delete(u"d4")) |
---|
496 | |
---|
497 | # metadata through set_children() |
---|
498 | d.addCallback(lambda res: |
---|
499 | n.set_children({ |
---|
500 | u"e1": (fake_file_uri, fake_file_uri), |
---|
501 | u"e2": (fake_file_uri, fake_file_uri, {}), |
---|
502 | u"e3": (fake_file_uri, fake_file_uri, |
---|
503 | {"key": "value"}), |
---|
504 | })) |
---|
505 | d.addCallback(lambda n2: self.failUnlessIdentical(n2, n)) |
---|
506 | d.addCallback(lambda res: |
---|
507 | self.shouldFail(ExistingChildError, "set_children-no", |
---|
508 | "child 'e1' already exists", |
---|
509 | n.set_children, |
---|
510 | { u"e1": (other_file_uri, |
---|
511 | other_file_uri), |
---|
512 | u"new": (other_file_uri, |
---|
513 | other_file_uri), |
---|
514 | }, |
---|
515 | overwrite=False)) |
---|
516 | # and 'new' should not have been created |
---|
517 | d.addCallback(lambda res: n.list()) |
---|
518 | d.addCallback(lambda children: self.failIf(u"new" in children)) |
---|
519 | d.addCallback(lambda res: n.get_metadata_for(u"e1")) |
---|
520 | d.addCallback(lambda metadata: |
---|
521 | self.failUnlessEqual(set(metadata.keys()), |
---|
522 | set(["tahoe", "ctime", "mtime"]))) |
---|
523 | d.addCallback(lambda res: n.get_metadata_for(u"e2")) |
---|
524 | d.addCallback(lambda metadata: |
---|
525 | self.failUnlessEqual(set(metadata.keys()), set(['tahoe']))) |
---|
526 | d.addCallback(lambda res: n.get_metadata_for(u"e3")) |
---|
527 | d.addCallback(lambda metadata: |
---|
528 | self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) |
---|
529 | and (metadata['key'] == "value"), metadata)) |
---|
530 | |
---|
531 | d.addCallback(lambda res: n.delete(u"e1")) |
---|
532 | d.addCallback(lambda res: n.delete(u"e2")) |
---|
533 | d.addCallback(lambda res: n.delete(u"e3")) |
---|
534 | |
---|
535 | # metadata through set_nodes() |
---|
536 | d.addCallback(lambda res: |
---|
537 | n.set_nodes({ u"f1": (n, None), |
---|
538 | u"f2": (n, {}), |
---|
539 | u"f3": (n, {"key": "value"}), |
---|
540 | })) |
---|
541 | d.addCallback(lambda n2: self.failUnlessIdentical(n2, n)) |
---|
542 | d.addCallback(lambda res: |
---|
543 | self.shouldFail(ExistingChildError, "set_nodes-no", |
---|
544 | "child 'f1' already exists", |
---|
545 | n.set_nodes, { u"f1": (n, None), |
---|
546 | u"new": (n, None), }, |
---|
547 | overwrite=False)) |
---|
548 | # and 'new' should not have been created |
---|
549 | d.addCallback(lambda res: n.list()) |
---|
550 | d.addCallback(lambda children: self.failIf(u"new" in children)) |
---|
551 | d.addCallback(lambda res: n.get_metadata_for(u"f1")) |
---|
552 | d.addCallback(lambda metadata: |
---|
553 | self.failUnlessEqual(set(metadata.keys()), |
---|
554 | set(["tahoe", "ctime", "mtime"]))) |
---|
555 | d.addCallback(lambda res: n.get_metadata_for(u"f2")) |
---|
556 | d.addCallback( |
---|
557 | lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(['tahoe']))) |
---|
558 | d.addCallback(lambda res: n.get_metadata_for(u"f3")) |
---|
559 | d.addCallback(lambda metadata: |
---|
560 | self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and |
---|
561 | (metadata['key'] == "value"), metadata)) |
---|
562 | |
---|
563 | d.addCallback(lambda res: n.delete(u"f1")) |
---|
564 | d.addCallback(lambda res: n.delete(u"f2")) |
---|
565 | d.addCallback(lambda res: n.delete(u"f3")) |
---|
566 | |
---|
567 | |
---|
568 | d.addCallback(lambda res: |
---|
569 | n.set_metadata_for(u"child", |
---|
570 | {"tags": ["web2.0-compatible"]})) |
---|
571 | d.addCallback(lambda n1: n1.get_metadata_for(u"child")) |
---|
572 | d.addCallback(lambda metadata: |
---|
573 | self.failUnlessEqual(metadata, |
---|
574 | {"tags": ["web2.0-compatible"]})) |
---|
575 | |
---|
576 | def _start(res): |
---|
577 | self._start_timestamp = time.time() |
---|
578 | d.addCallback(_start) |
---|
579 | # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all |
---|
580 | # floats to hundredeths (it uses str(num) instead of repr(num)). |
---|
581 | # simplejson-1.7.3 does not have this bug. To prevent this bug |
---|
582 | # from causing the test to fail, stall for more than a few |
---|
583 | # hundrededths of a second. |
---|
584 | d.addCallback(self.stall, 0.1) |
---|
585 | d.addCallback(lambda res: n.add_file(u"timestamps", |
---|
586 | upload.Data("stamp me", convergence="some convergence string"))) |
---|
587 | d.addCallback(self.stall, 0.1) |
---|
588 | def _stop(res): |
---|
589 | self._stop_timestamp = time.time() |
---|
590 | d.addCallback(_stop) |
---|
591 | |
---|
592 | d.addCallback(lambda res: n.get_metadata_for(u"timestamps")) |
---|
593 | def _check_timestamp1(metadata): |
---|
594 | self.failUnless("ctime" in metadata) |
---|
595 | self.failUnless("mtime" in metadata) |
---|
596 | self.failUnlessGreaterOrEqualThan(metadata["ctime"], |
---|
597 | self._start_timestamp) |
---|
598 | self.failUnlessGreaterOrEqualThan(self._stop_timestamp, |
---|
599 | metadata["ctime"]) |
---|
600 | self.failUnlessGreaterOrEqualThan(metadata["mtime"], |
---|
601 | self._start_timestamp) |
---|
602 | self.failUnlessGreaterOrEqualThan(self._stop_timestamp, |
---|
603 | metadata["mtime"]) |
---|
604 | # Our current timestamp rules say that replacing an existing |
---|
605 | # child should preserve the 'ctime' but update the mtime |
---|
606 | self._old_ctime = metadata["ctime"] |
---|
607 | self._old_mtime = metadata["mtime"] |
---|
608 | d.addCallback(_check_timestamp1) |
---|
609 | d.addCallback(self.stall, 2.0) # accomodate low-res timestamps |
---|
610 | d.addCallback(lambda res: n.set_node(u"timestamps", n)) |
---|
611 | d.addCallback(lambda res: n.get_metadata_for(u"timestamps")) |
---|
612 | def _check_timestamp2(metadata): |
---|
613 | self.failUnlessEqual(metadata["ctime"], self._old_ctime, |
---|
614 | "%s != %s" % (metadata["ctime"], |
---|
615 | self._old_ctime)) |
---|
616 | self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime) |
---|
617 | return n.delete(u"timestamps") |
---|
618 | d.addCallback(_check_timestamp2) |
---|
619 | |
---|
620 | # also make sure we can add/update timestamps on a |
---|
621 | # previously-existing child that didn't have any, since there are |
---|
622 | # a lot of 0.7.0-generated edges around out there |
---|
623 | d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {})) |
---|
624 | d.addCallback(lambda res: n.set_node(u"no_timestamps", n)) |
---|
625 | d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps")) |
---|
626 | d.addCallback(lambda metadata: |
---|
627 | self.failUnlessEqual(set(metadata.keys()), |
---|
628 | set(["tahoe", "ctime", "mtime"]))) |
---|
629 | d.addCallback(lambda res: n.delete(u"no_timestamps")) |
---|
630 | |
---|
631 | d.addCallback(lambda res: n.delete(u"subdir")) |
---|
632 | d.addCallback(lambda old_child: |
---|
633 | self.failUnlessEqual(old_child.get_uri(), |
---|
634 | self.subdir.get_uri())) |
---|
635 | |
---|
636 | d.addCallback(lambda res: n.list()) |
---|
637 | d.addCallback(lambda children: |
---|
638 | self.failUnlessEqual(sorted(children.keys()), |
---|
639 | sorted([u"child"]))) |
---|
640 | |
---|
641 | uploadable1 = upload.Data("some data", convergence="converge") |
---|
642 | d.addCallback(lambda res: n.add_file(u"newfile", uploadable1)) |
---|
643 | d.addCallback(lambda newnode: |
---|
644 | self.failUnless(IFileNode.providedBy(newnode))) |
---|
645 | uploadable2 = upload.Data("some data", convergence="stuff") |
---|
646 | d.addCallback(lambda res: |
---|
647 | self.shouldFail(ExistingChildError, "add_file-no", |
---|
648 | "child 'newfile' already exists", |
---|
649 | n.add_file, u"newfile", |
---|
650 | uploadable2, |
---|
651 | overwrite=False)) |
---|
652 | d.addCallback(lambda res: n.list()) |
---|
653 | d.addCallback(lambda children: |
---|
654 | self.failUnlessEqual(sorted(children.keys()), |
---|
655 | sorted([u"child", u"newfile"]))) |
---|
656 | d.addCallback(lambda res: n.get_metadata_for(u"newfile")) |
---|
657 | d.addCallback(lambda metadata: |
---|
658 | self.failUnlessEqual(set(metadata.keys()), |
---|
659 | set(["tahoe", "ctime", "mtime"]))) |
---|
660 | |
---|
661 | uploadable3 = upload.Data("some data", convergence="converge") |
---|
662 | d.addCallback(lambda res: n.add_file(u"newfile-metadata", |
---|
663 | uploadable3, |
---|
664 | {"key": "value"})) |
---|
665 | d.addCallback(lambda newnode: |
---|
666 | self.failUnless(IFileNode.providedBy(newnode))) |
---|
667 | d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata")) |
---|
668 | d.addCallback(lambda metadata: |
---|
669 | self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and |
---|
670 | (metadata['key'] == "value"), metadata)) |
---|
671 | d.addCallback(lambda res: n.delete(u"newfile-metadata")) |
---|
672 | |
---|
673 | d.addCallback(lambda res: n.create_subdirectory(u"subdir2")) |
---|
674 | def _created2(subdir2): |
---|
675 | self.subdir2 = subdir2 |
---|
676 | # put something in the way, to make sure it gets overwritten |
---|
677 | return subdir2.add_file(u"child", upload.Data("overwrite me", |
---|
678 | "converge")) |
---|
679 | d.addCallback(_created2) |
---|
680 | |
---|
681 | d.addCallback(lambda res: |
---|
682 | n.move_child_to(u"child", self.subdir2)) |
---|
683 | d.addCallback(lambda res: n.list()) |
---|
684 | d.addCallback(lambda children: |
---|
685 | self.failUnlessEqual(sorted(children.keys()), |
---|
686 | sorted([u"newfile", u"subdir2"]))) |
---|
687 | d.addCallback(lambda res: self.subdir2.list()) |
---|
688 | d.addCallback(lambda children: |
---|
689 | self.failUnlessEqual(sorted(children.keys()), |
---|
690 | sorted([u"child"]))) |
---|
691 | d.addCallback(lambda res: self.subdir2.get(u"child")) |
---|
692 | d.addCallback(lambda child: |
---|
693 | self.failUnlessEqual(child.get_uri(), |
---|
694 | fake_file_uri)) |
---|
695 | |
---|
696 | # move it back, using new_child_name= |
---|
697 | d.addCallback(lambda res: |
---|
698 | self.subdir2.move_child_to(u"child", n, u"newchild")) |
---|
699 | d.addCallback(lambda res: n.list()) |
---|
700 | d.addCallback(lambda children: |
---|
701 | self.failUnlessEqual(sorted(children.keys()), |
---|
702 | sorted([u"newchild", u"newfile", |
---|
703 | u"subdir2"]))) |
---|
704 | d.addCallback(lambda res: self.subdir2.list()) |
---|
705 | d.addCallback(lambda children: |
---|
706 | self.failUnlessEqual(sorted(children.keys()), [])) |
---|
707 | |
---|
708 | # now make sure that we honor overwrite=False |
---|
709 | d.addCallback(lambda res: |
---|
710 | self.subdir2.set_uri(u"newchild", |
---|
711 | other_file_uri, other_file_uri)) |
---|
712 | |
---|
713 | d.addCallback(lambda res: |
---|
714 | self.shouldFail(ExistingChildError, "move_child_to-no", |
---|
715 | "child 'newchild' already exists", |
---|
716 | n.move_child_to, u"newchild", |
---|
717 | self.subdir2, |
---|
718 | overwrite=False)) |
---|
719 | d.addCallback(lambda res: self.subdir2.get(u"newchild")) |
---|
720 | d.addCallback(lambda child: |
---|
721 | self.failUnlessEqual(child.get_uri(), |
---|
722 | other_file_uri)) |
---|
723 | |
---|
724 | return d |
---|
725 | |
---|
726 | d.addCallback(_then) |
---|
727 | |
---|
728 | d.addErrback(self.explain_error) |
---|
729 | return d |
---|
730 | |
---|
731 | def test_create_subdirectory(self): |
---|
732 | self.basedir = "dirnode/Dirnode/test_create_subdirectory" |
---|
733 | self.set_up_grid() |
---|
734 | c = self.g.clients[0] |
---|
735 | nm = c.nodemaker |
---|
736 | |
---|
737 | d = c.create_dirnode() |
---|
738 | def _then(n): |
---|
739 | # / |
---|
740 | self.rootnode = n |
---|
741 | fake_file_uri = make_mutable_file_uri() |
---|
742 | other_file_uri = make_mutable_file_uri() |
---|
743 | md = {"metakey": "metavalue"} |
---|
744 | kids = {u"kid1": (nm.create_from_cap(fake_file_uri), {}), |
---|
745 | u"kid2": (nm.create_from_cap(other_file_uri), md), |
---|
746 | } |
---|
747 | d = n.create_subdirectory(u"subdir", kids) |
---|
748 | def _check(sub): |
---|
749 | d = n.get_child_at_path(u"subdir") |
---|
750 | d.addCallback(lambda sub2: self.failUnlessEqual(sub2.get_uri(), |
---|
751 | sub.get_uri())) |
---|
752 | d.addCallback(lambda ign: sub.list()) |
---|
753 | return d |
---|
754 | d.addCallback(_check) |
---|
755 | def _check_kids(kids2): |
---|
756 | self.failUnlessEqual(sorted(kids.keys()), sorted(kids2.keys())) |
---|
757 | self.failUnlessEqual(kids2[u"kid2"][1]["metakey"], "metavalue") |
---|
758 | d.addCallback(_check_kids) |
---|
759 | return d |
---|
760 | d.addCallback(_then) |
---|
761 | return d |
---|
762 | |
---|
class Packing(unittest.TestCase):
    # This is a base32-encoded representation of the directory tree
    # root/file1
    # root/file2
    # root/file3
    # as represented after being fed to _pack_contents.
    # We have it here so we can decode it, feed it to
    # _unpack_contents, and verify that _unpack_contents
    # works correctly.

    known_tree = "GM4TOORVHJTGS3DFGEWDSNJ2KVJESOSDJBFTU33MPB2GS3LZNVYG6N3GGI3WU5TIORTXC3DOMJ2G4NB2MVWXUZDONBVTE5LNGRZWK2LYN55GY23XGNYXQMTOMZUWU5TENN4DG23ZG5UTO2L2NQ2DO6LFMRWDMZJWGRQTUMZ2GEYDUMJQFQYTIMZ22XZKZORX5XS7CAQCSK3URR6QOHISHRCMGER5LRFSZRNAS5ZSALCS6TWFQAE754IVOIKJVK73WZPP3VUUEDTX3WHTBBZ5YX3CEKHCPG3ZWQLYA4QM6LDRCF7TJQYWLIZHKGN5ROA3AUZPXESBNLQQ6JTC2DBJU2D47IZJTLR3PKZ4RVF57XLPWY7FX7SZV3T6IJ3ORFW37FXUPGOE3ROPFNUX5DCGMAQJ3PGGULBRGM3TU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGI3TKNRWGEWCAITUMFUG6ZJCHIQHWITMNFXGW3LPORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBSG42TMNRRFQQCE3DJNZVWG4TUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQQCE3LUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQWDGOJRHI2TUZTJNRSTELBZGQ5FKUSJHJBUQSZ2MFYGKZ3SOBSWQ43IO52WO23CNAZWU3DUGVSWSNTIOE5DK33POVTW4ZLNMNWDK6DHPA2GS2THNF2W25DEN5VGY2LQNFRGG5DKNNRHO5TZPFTWI6LNMRYGQ2LCGJTHM4J2GM5DCMB2GQWDCNBSHKVVQBGRYMACKJ27CVQ6O6B4QPR72RFVTGOZUI76XUSWAX73JRV5PYRHMIFYZIA25MXDPGUGML6M2NMRSG4YD4W4K37ZDYSXHMJ3IUVT4F64YTQQVBJFFFOUC7J7LAB2VFCL5UKKGMR2D3F4EPOYC7UYWQZNR5KXHBSNXLCNBX2SNF22DCXJIHSMEKWEWOG5XCJEVVZ7UW5IB6I64XXQSJ34B5CAYZGZIIMR6LBRGMZTU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYFQQCE5DBNBXWKIR2EB5SE3DJNZVW233UNFWWKIR2EAYTENBWGY3DGOBZG4XDIMZQGIYTQLBAEJWGS3TLMNZHI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTAMRRHB6SYIBCNV2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYPUWCYMZZGU5DKOTGNFWGKMZMHE2DUVKSJE5EGSCLHJRW25DDPBYTO2DXPB3GM6DBNYZTI6LJMV3DM2LWNB4TU4LWMNSWW3LKORXWK5DEMN3TI23NNE3WEM3SORRGY5THPA3TKNBUMNZG453BOF2GSZLXMVWWI3DJOFZW623RHIZTUMJQHI2SYMJUGI5BOSHWDPG3WKPAVXCF3XMKA7QVIWPRMWJHDTQHD27AHDCPJWDQENQ5H5ZZILTXQNIXXCIW4LKQABU2GCFRG5FHQN7CHD7HF4EKNRZFIV2ZYQIBM7IQU7F4RGB3XCX3FREPBKQ7UCICHVWPCYFGA6OLH3J45LXQ6GWWICJ3PGWJNLZ7PCRNLAPNYUGU6BENS7OXMBEOOFRIZV3PF2FFWZ5WHDPKXERYP7GNHKRMGEZTOOT3EJRXI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTGNRSGY4SYIBCORQWQ33FEI5CA6ZCNRUW423NN52GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMZTMMRWHEWCAITMNFXGWY3SORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCAITNORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCY==="

    def test_unpack_and_pack_behavior(self):
        """Unpack the known tree, re-pack it, and unpack it again; the
        children must be identical both times."""
        known_tree = b32decode(self.known_tree)
        nodemaker = NodeMaker(None, None, None,
                              None, None, None,
                              {"k": 3, "n": 10}, None)
        writecap = "URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q"
        filenode = nodemaker.create_from_cap(writecap)
        node = dirnode.DirectoryNode(filenode, nodemaker, None)
        children = node._unpack_contents(known_tree)
        self._check_children(children)

        packed_children = node._pack_contents(children)
        children = node._unpack_contents(packed_children)
        self._check_children(children)

    def _check_one_child(self, children, name, cap, when):
        # Helper: verify one child's presence, link timestamps, and caps.
        # These are immutable CHK files, so the read-only cap is identical
        # to the read-write cap.
        self.failUnless(name in children)  # was children.has_key(): Py2-only idiom
        expected_metadata = {'ctime': when, 'mtime': when,
                             'tahoe': {'linkmotime': when,
                                       'linkcrtime': when}}
        self.failUnlessEqual(expected_metadata, children[name][1])
        self.failUnlessEqual(cap, children[name][0].get_readonly_uri())
        self.failUnlessEqual(cap, children[name][0].get_uri())

    def _check_children(self, children):
        # Are all the expected child nodes there, with the right caps and
        # metadata? (The three stanzas that used to live here were
        # copy-paste duplicates; they now share _check_one_child.)
        self._check_one_child(children, u'file3',
            "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5",
            1246663897.4336269)
        self._check_one_child(children, u'file2',
            "URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4",
            1246663897.430218)
        self._check_one_child(children, u'file1',
            "URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10",
            1246663897.4275661)
---|
825 | |
---|
class FakeMutableFile:
    """In-memory stand-in for a mutable file node, letting dirnode logic be
    exercised without touching the grid."""
    counter = 0  # class-wide counter so every instance gets a distinct cap

    def __init__(self, initial_contents=""):
        self.data = self._get_initial_contents(initial_contents)
        my_index = FakeMutableFile.counter
        FakeMutableFile.counter += 1
        wk = hashutil.ssk_writekey_hash(str(my_index))
        fp = hashutil.ssk_pubkey_fingerprint_hash(str(my_index))
        self.uri = uri.WriteableSSKFileURI(wk, fp)

    def _get_initial_contents(self, contents):
        # Accept None (empty file), a literal string, or a callable that
        # produces the contents when handed this node.
        if contents is None:
            return ""
        if isinstance(contents, str):
            return contents
        assert callable(contents), "%s should be callable, not %s" % \
               (contents, type(contents))
        return contents(self)

    def get_uri(self):
        return self.uri.to_string()

    def download_best_version(self):
        return defer.succeed(self.data)

    def get_writekey(self):
        return "writekey"

    def is_readonly(self):
        return False

    def is_mutable(self):
        return True

    def modify(self, modifier):
        # same calling convention as the real node: modifier(old, servermap,
        # first_time)
        self.data = modifier(self.data, None, True)
        return defer.succeed(None)
---|
858 | |
---|
class FakeNodeMaker(NodeMaker):
    """NodeMaker variant whose mutable files are in-memory fakes."""
    def create_mutable_file(self, contents="", keysize=None):
        fake = FakeMutableFile(contents)
        return defer.succeed(fake)
---|
862 | |
---|
class FakeClient2(Client):
    """Minimal Client: deliberately skips Client.__init__ (and all of its
    service/IO setup) and exposes only a FakeNodeMaker, which is all these
    tests need."""
    def __init__(self):
        self.nodemaker = FakeNodeMaker(None, None, None,
                                       None, None, None,
                                       {"k": 3, "n": 10}, None)

    def create_node_from_uri(self, rwcap, rocap):
        return self.nodemaker.create_from_cap(rwcap, rocap)
---|
870 | |
---|
class Dirnode2(unittest.TestCase, testutil.ShouldFailMixin):
    """Tests for dirnode handling of unknown (from-the-future) cap types,
    using fake in-memory nodes."""
    def setUp(self):
        self.client = FakeClient2()
        self.nodemaker = self.client.nodemaker

    def test_from_future(self):
        # create a dirnode that contains unknown URI types, and make sure we
        # tolerate them properly. Since dirnodes aren't allowed to add
        # unknown node types, we have to be tricky.
        d = self.nodemaker.create_new_mutable_directory()
        future_writecap = "x-tahoe-crazy://I_am_from_the_future."
        future_readcap = "x-tahoe-crazy-readonly://I_am_from_the_future."
        future_node = UnknownNode(future_writecap, future_readcap)
        def _then(n):
            self._node = n
            return n.set_node(u"future", future_node)
        d.addCallback(_then)

        # we should be prohibited from adding an unknown URI to a directory,
        # since we don't know how to diminish the cap to a readcap (for the
        # dirnode's rocap slot), and we don't want to accidentally grant
        # write access to a holder of the dirnode's readcap.
        d.addCallback(lambda ign:
             self.shouldFail(CannotPackUnknownNodeError,
                             "copy unknown",
                             "cannot pack unknown node as child add",
                             self._node.set_uri, u"add",
                             future_writecap, future_readcap))
        d.addCallback(lambda ign: self._node.list())
        def _check(children):
            self.failUnlessEqual(len(children), 1)
            (fn, metadata) = children[u"future"]
            self.failUnless(isinstance(fn, UnknownNode), fn)
            self.failUnlessEqual(fn.get_uri(), future_writecap)
            self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
            # but we *should* be allowed to copy this node, because the
            # UnknownNode contains all the information that was in the
            # original directory (readcap and writecap), so we're preserving
            # everything.
            return self._node.set_node(u"copy", fn)
        d.addCallback(_check)
        d.addCallback(lambda ign: self._node.list())
        def _check2(children):
            self.failUnlessEqual(len(children), 2)
            (fn, metadata) = children[u"copy"]
            self.failUnless(isinstance(fn, UnknownNode), fn)
            self.failUnlessEqual(fn.get_uri(), future_writecap)
            self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
        # BUG FIX: _check2 was defined but never attached to the Deferred
        # chain, so its assertions never ran.
        d.addCallback(_check2)
        return d
---|
920 | |
---|
921 | class DeepStats(unittest.TestCase): |
---|
922 | timeout = 240 # It takes longer than 120 seconds on Francois's arm box. |
---|
923 | def test_stats(self): |
---|
924 | ds = dirnode.DeepStats(None) |
---|
925 | ds.add("count-files") |
---|
926 | ds.add("size-immutable-files", 123) |
---|
927 | ds.histogram("size-files-histogram", 123) |
---|
928 | ds.max("largest-directory", 444) |
---|
929 | |
---|
930 | s = ds.get_results() |
---|
931 | self.failUnlessEqual(s["count-files"], 1) |
---|
932 | self.failUnlessEqual(s["size-immutable-files"], 123) |
---|
933 | self.failUnlessEqual(s["largest-directory"], 444) |
---|
934 | self.failUnlessEqual(s["count-literal-files"], 0) |
---|
935 | |
---|
936 | ds.add("count-files") |
---|
937 | ds.add("size-immutable-files", 321) |
---|
938 | ds.histogram("size-files-histogram", 321) |
---|
939 | ds.max("largest-directory", 2) |
---|
940 | |
---|
941 | s = ds.get_results() |
---|
942 | self.failUnlessEqual(s["count-files"], 2) |
---|
943 | self.failUnlessEqual(s["size-immutable-files"], 444) |
---|
944 | self.failUnlessEqual(s["largest-directory"], 444) |
---|
945 | self.failUnlessEqual(s["count-literal-files"], 0) |
---|
946 | self.failUnlessEqual(s["size-files-histogram"], |
---|
947 | [ (101, 316, 1), (317, 1000, 1) ]) |
---|
948 | |
---|
949 | ds = dirnode.DeepStats(None) |
---|
950 | for i in range(1, 1100): |
---|
951 | ds.histogram("size-files-histogram", i) |
---|
952 | ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB |
---|
953 | s = ds.get_results() |
---|
954 | self.failUnlessEqual(s["size-files-histogram"], |
---|
955 | [ (1, 3, 3), |
---|
956 | (4, 10, 7), |
---|
957 | (11, 31, 21), |
---|
958 | (32, 100, 69), |
---|
959 | (101, 316, 216), |
---|
960 | (317, 1000, 684), |
---|
961 | (1001, 3162, 99), |
---|
962 | (3162277660169L, 10000000000000L, 1), |
---|
963 | ]) |
---|
964 | |
---|
class UCWEingMutableFileNode(MutableFileNode):
    """MutableFileNode that can be armed to raise UncoordinatedWriteError
    exactly once, after the next otherwise-successful upload."""
    please_ucwe_after_next_upload = False

    def _upload(self, new_contents, servermap):
        d = MutableFileNode._upload(self, new_contents, servermap)
        def _maybe_fail(result):
            if self.please_ucwe_after_next_upload:
                # disarm first, so only a single UCWE is ever raised
                self.please_ucwe_after_next_upload = False
                raise UncoordinatedWriteError()
            return result
        d.addCallback(_maybe_fail)
        return d
---|
977 | |
---|
class UCWEingNodeMaker(NodeMaker):
    """NodeMaker whose mutable nodes are UCWEingMutableFileNodes."""
    def _create_mutable(self, cap):
        node = UCWEingMutableFileNode(self.storage_broker,
                                      self.secret_holder,
                                      self.default_encoding_parameters,
                                      self.history)
        return node.init_from_uri(cap)
---|
984 | |
---|
985 | |
---|
class Deleter(GridTestMixin, unittest.TestCase):
    timeout = 3600 # It takes longer than 433 seconds on Zandr's ARM box.

    def test_retry(self):
        # ticket #550: a dirnode.delete which experiences an
        # UncoordinatedWriteError will fail with an incorrect "you're
        # deleting something which isn't there" NoSuchChildError exception.

        # To trigger this, start with a directory holding a single file.
        # Then build a special dirnode around a modified MutableFileNode
        # that raises UncoordinatedWriteError once on demand, and call
        # dirnode.delete, which ought to retry and succeed.
        self.basedir = self.mktemp()
        self.set_up_grid()
        client = self.g.clients[0]
        d = client.create_dirnode()
        little_file = upload.Data("Small enough for a LIT", None)
        def _remember_root(dn):
            self.root = dn
            self.root_uri = dn.get_uri()
            return dn.add_file(u"file", little_file)
        d.addCallback(_remember_root)
        def _delete_with_ucwe(ign):
            nm = UCWEingNodeMaker(client.storage_broker,
                                  client._secret_holder,
                                  client.get_history(),
                                  client.getServiceNamed("uploader"),
                                  client.downloader,
                                  client.download_cache_dirman,
                                  client.get_encoding_parameters(),
                                  client._key_generator)
            n2 = nm.create_from_cap(self.root_uri)
            assert n2._node.please_ucwe_after_next_upload == False
            n2._node.please_ucwe_after_next_upload = True
            # This should succeed, not raise an exception
            return n2.delete(u"file")
        d.addCallback(_delete_with_ucwe)

        return d
---|
1024 | |
---|
1025 | class Adder(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): |
---|
1026 | |
---|
1027 | def test_overwrite(self): |
---|
1028 | # note: This functionality could be tested without actually creating |
---|
1029 | # several RSA keys. It would be faster without the GridTestMixin: use |
---|
1030 | # dn.set_node(nodemaker.create_from_cap(make_chk_file_uri())) instead |
---|
1031 | # of dn.add_file, and use a special NodeMaker that creates fake |
---|
1032 | # mutable files. |
---|
1033 | self.basedir = "dirnode/Adder/test_overwrite" |
---|
1034 | self.set_up_grid() |
---|
1035 | c = self.g.clients[0] |
---|
1036 | fileuri = make_chk_file_uri(1234) |
---|
1037 | filenode = c.nodemaker.create_from_cap(fileuri) |
---|
1038 | d = c.create_dirnode() |
---|
1039 | |
---|
1040 | def _create_directory_tree(root_node): |
---|
1041 | # Build |
---|
1042 | # root/file1 |
---|
1043 | # root/file2 |
---|
1044 | # root/dir1 |
---|
1045 | d = root_node.add_file(u'file1', upload.Data("Important Things", |
---|
1046 | None)) |
---|
1047 | d.addCallback(lambda res: |
---|
1048 | root_node.add_file(u'file2', upload.Data("Sekrit Codes", None))) |
---|
1049 | d.addCallback(lambda res: |
---|
1050 | root_node.create_subdirectory(u"dir1")) |
---|
1051 | d.addCallback(lambda res: root_node) |
---|
1052 | return d |
---|
1053 | |
---|
1054 | d.addCallback(_create_directory_tree) |
---|
1055 | |
---|
1056 | def _test_adder(root_node): |
---|
1057 | d = root_node.set_node(u'file1', filenode) |
---|
1058 | # We've overwritten file1. Let's try it with a directory |
---|
1059 | d.addCallback(lambda res: |
---|
1060 | root_node.create_subdirectory(u'dir2')) |
---|
1061 | d.addCallback(lambda res: |
---|
1062 | root_node.set_node(u'dir2', filenode)) |
---|
1063 | # We try overwriting a file with a child while also specifying |
---|
1064 | # overwrite=False. We should receive an ExistingChildError |
---|
1065 | # when we do this. |
---|
1066 | d.addCallback(lambda res: |
---|
1067 | self.shouldFail(ExistingChildError, "set_node", |
---|
1068 | "child 'file1' already exists", |
---|
1069 | root_node.set_node, u"file1", |
---|
1070 | filenode, overwrite=False)) |
---|
1071 | # If we try with a directory, we should see the same thing |
---|
1072 | d.addCallback(lambda res: |
---|
1073 | self.shouldFail(ExistingChildError, "set_node", |
---|
1074 | "child 'dir1' already exists", |
---|
1075 | root_node.set_node, u'dir1', filenode, |
---|
1076 | overwrite=False)) |
---|
1077 | d.addCallback(lambda res: |
---|
1078 | root_node.set_node(u'file1', filenode, |
---|
1079 | overwrite="only-files")) |
---|
1080 | d.addCallback(lambda res: |
---|
1081 | self.shouldFail(ExistingChildError, "set_node", |
---|
1082 | "child 'dir1' already exists", |
---|
1083 | root_node.set_node, u'dir1', filenode, |
---|
1084 | overwrite="only-files")) |
---|
1085 | return d |
---|
1086 | |
---|
1087 | d.addCallback(_test_adder) |
---|
1088 | return d |
---|