1 | # coding=utf-8 |
---|
2 | |
---|
3 | import os.path |
---|
4 | from twisted.trial import unittest |
---|
5 | from cStringIO import StringIO |
---|
6 | import urllib |
---|
7 | import re |
---|
8 | import simplejson |
---|
9 | |
---|
10 | from allmydata.util import fileutil, hashutil, base32 |
---|
11 | from allmydata import uri |
---|
12 | from allmydata.immutable import upload |
---|
13 | |
---|
14 | # Test that the scripts can be imported -- although the actual tests of their functionality are |
---|
15 | # done by invoking them in a subprocess. |
---|
16 | from allmydata.scripts import tahoe_ls, tahoe_get, tahoe_put, tahoe_rm, tahoe_cp |
---|
17 | _hush_pyflakes = [tahoe_ls, tahoe_get, tahoe_put, tahoe_rm, tahoe_cp] |
---|
18 | |
---|
19 | from allmydata.scripts import common |
---|
20 | from allmydata.scripts.common import DEFAULT_ALIAS, get_aliases, get_alias, \ |
---|
21 | DefaultAliasMarker |
---|
22 | |
---|
23 | from allmydata.scripts import cli, debug, runner, backupdb |
---|
24 | from allmydata.test.common_util import StallMixin |
---|
25 | from allmydata.test.no_network import GridTestMixin |
---|
26 | from twisted.internet import threads # CLI tests use deferToThread |
---|
27 | from twisted.python import usage |
---|
28 | |
---|
29 | timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s |
---|
30 | |
---|
class CLI(unittest.TestCase):
    # this test case only looks at argument-processing and simple stuff.
    def test_options(self):
        """Exercise ListOptions argument processing: reading node.url and the
        default alias from the node directory, --node-url override and
        validation, --dir-cap override, and the trailing positional path."""
        fileutil.rm_dir("cli/test_options")
        fileutil.make_dirs("cli/test_options")
        fileutil.make_dirs("cli/test_options/private")
        # Write the fixture files with an explicit close() instead of the
        # original open(...).write(...) one-liner: that relied on the file
        # object being garbage-collected to flush/close it, which is not
        # guaranteed on non-refcounting interpreters and leaks the handle.
        f = open("cli/test_options/node.url", "w")
        f.write("http://localhost:8080/\n")
        f.close()
        filenode_uri = uri.WriteableSSKFileURI(writekey="\x00"*16,
                                               fingerprint="\x00"*32)
        private_uri = uri.DirectoryURI(filenode_uri).to_string()
        f = open("cli/test_options/private/root_dir.cap", "w")
        f.write(private_uri + "\n")
        f.close()

        # with only --node-directory, node-url and the default alias are
        # read from the files we just created
        o = cli.ListOptions()
        o.parseOptions(["--node-directory", "cli/test_options"])
        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], private_uri)
        self.failUnlessEqual(o.where, "")

        # --node-url overrides the node.url file
        o = cli.ListOptions()
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--node-url", "http://example.org:8111/"])
        self.failUnlessEqual(o['node-url'], "http://example.org:8111/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], private_uri)
        self.failUnlessEqual(o.where, "")

        # --dir-cap overrides the default alias (even with a non-URI string)
        o = cli.ListOptions()
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--dir-cap", "root"])
        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], "root")
        self.failUnlessEqual(o.where, "")

        o = cli.ListOptions()
        other_filenode_uri = uri.WriteableSSKFileURI(writekey="\x11"*16,
                                                     fingerprint="\x11"*32)
        other_uri = uri.DirectoryURI(other_filenode_uri).to_string()
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--dir-cap", other_uri])
        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], other_uri)
        self.failUnlessEqual(o.where, "")

        # a trailing positional argument becomes the remote path ('where')
        o = cli.ListOptions()
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--dir-cap", other_uri, "subdir"])
        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], other_uri)
        self.failUnlessEqual(o.where, "subdir")

        # a malformed --node-url is rejected with a UsageError
        o = cli.ListOptions()
        self.failUnlessRaises(usage.UsageError,
                              o.parseOptions,
                              ["--node-directory", "cli/test_options",
                               "--node-url", "NOT-A-URL"])

        # a --node-url without a trailing slash gets one appended
        o = cli.ListOptions()
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--node-url", "http://localhost:8080"])
        self.failUnlessEqual(o["node-url"], "http://localhost:8080/")
---|
90 | def _dump_cap(self, *args): |
---|
91 | config = debug.DumpCapOptions() |
---|
92 | config.stdout,config.stderr = StringIO(), StringIO() |
---|
93 | config.parseOptions(args) |
---|
94 | debug.dump_cap(config) |
---|
95 | self.failIf(config.stderr.getvalue()) |
---|
96 | output = config.stdout.getvalue() |
---|
97 | return output |
---|
98 | |
---|
99 | def test_dump_cap_chk(self): |
---|
100 | key = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" |
---|
101 | uri_extension_hash = hashutil.uri_extension_hash("stuff") |
---|
102 | needed_shares = 25 |
---|
103 | total_shares = 100 |
---|
104 | size = 1234 |
---|
105 | u = uri.CHKFileURI(key=key, |
---|
106 | uri_extension_hash=uri_extension_hash, |
---|
107 | needed_shares=needed_shares, |
---|
108 | total_shares=total_shares, |
---|
109 | size=size) |
---|
110 | output = self._dump_cap(u.to_string()) |
---|
111 | self.failUnless("CHK File:" in output, output) |
---|
112 | self.failUnless("key: aaaqeayeaudaocajbifqydiob4" in output, output) |
---|
113 | self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output) |
---|
114 | self.failUnless("size: 1234" in output, output) |
---|
115 | self.failUnless("k/N: 25/100" in output, output) |
---|
116 | self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output) |
---|
117 | |
---|
118 | output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", |
---|
119 | u.to_string()) |
---|
120 | self.failUnless("client renewal secret: znxmki5zdibb5qlt46xbdvk2t55j7hibejq3i5ijyurkr6m6jkhq" in output, output) |
---|
121 | |
---|
122 | output = self._dump_cap(u.get_verify_cap().to_string()) |
---|
123 | self.failIf("key: " in output, output) |
---|
124 | self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output) |
---|
125 | self.failUnless("size: 1234" in output, output) |
---|
126 | self.failUnless("k/N: 25/100" in output, output) |
---|
127 | self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output) |
---|
128 | |
---|
129 | prefixed_u = "http://127.0.0.1/uri/%s" % urllib.quote(u.to_string()) |
---|
130 | output = self._dump_cap(prefixed_u) |
---|
131 | self.failUnless("CHK File:" in output, output) |
---|
132 | self.failUnless("key: aaaqeayeaudaocajbifqydiob4" in output, output) |
---|
133 | self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output) |
---|
134 | self.failUnless("size: 1234" in output, output) |
---|
135 | self.failUnless("k/N: 25/100" in output, output) |
---|
136 | self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output) |
---|
137 | |
---|
138 | def test_dump_cap_lit(self): |
---|
139 | u = uri.LiteralFileURI("this is some data") |
---|
140 | output = self._dump_cap(u.to_string()) |
---|
141 | self.failUnless("Literal File URI:" in output, output) |
---|
142 | self.failUnless("data: this is some data" in output, output) |
---|
143 | |
---|
144 | def test_dump_cap_ssk(self): |
---|
145 | writekey = "\x01" * 16 |
---|
146 | fingerprint = "\xfe" * 32 |
---|
147 | u = uri.WriteableSSKFileURI(writekey, fingerprint) |
---|
148 | |
---|
149 | output = self._dump_cap(u.to_string()) |
---|
150 | self.failUnless("SSK Writeable URI:" in output, output) |
---|
151 | self.failUnless("writekey: aeaqcaibaeaqcaibaeaqcaibae" in output, output) |
---|
152 | self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output) |
---|
153 | self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) |
---|
154 | self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) |
---|
155 | |
---|
156 | output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", |
---|
157 | u.to_string()) |
---|
158 | self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) |
---|
159 | |
---|
160 | fileutil.make_dirs("cli/test_dump_cap/private") |
---|
161 | f = open("cli/test_dump_cap/private/secret", "w") |
---|
162 | f.write("5s33nk3qpvnj2fw3z4mnm2y6fa\n") |
---|
163 | f.close() |
---|
164 | output = self._dump_cap("--client-dir", "cli/test_dump_cap", |
---|
165 | u.to_string()) |
---|
166 | self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) |
---|
167 | |
---|
168 | output = self._dump_cap("--client-dir", "cli/test_dump_cap_BOGUS", |
---|
169 | u.to_string()) |
---|
170 | self.failIf("file renewal secret:" in output, output) |
---|
171 | |
---|
172 | output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j", |
---|
173 | u.to_string()) |
---|
174 | self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output) |
---|
175 | self.failIf("file renewal secret:" in output, output) |
---|
176 | |
---|
177 | output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j", |
---|
178 | "--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", |
---|
179 | u.to_string()) |
---|
180 | self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output) |
---|
181 | self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) |
---|
182 | self.failUnless("lease renewal secret: 7pjtaumrb7znzkkbvekkmuwpqfjyfyamznfz4bwwvmh4nw33lorq" in output, output) |
---|
183 | |
---|
184 | u = u.get_readonly() |
---|
185 | output = self._dump_cap(u.to_string()) |
---|
186 | self.failUnless("SSK Read-only URI:" in output, output) |
---|
187 | self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output) |
---|
188 | self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) |
---|
189 | self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) |
---|
190 | |
---|
191 | u = u.get_verify_cap() |
---|
192 | output = self._dump_cap(u.to_string()) |
---|
193 | self.failUnless("SSK Verifier URI:" in output, output) |
---|
194 | self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) |
---|
195 | self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) |
---|
196 | |
---|
197 | def test_dump_cap_directory(self): |
---|
198 | writekey = "\x01" * 16 |
---|
199 | fingerprint = "\xfe" * 32 |
---|
200 | u1 = uri.WriteableSSKFileURI(writekey, fingerprint) |
---|
201 | u = uri.DirectoryURI(u1) |
---|
202 | |
---|
203 | output = self._dump_cap(u.to_string()) |
---|
204 | self.failUnless("Directory Writeable URI:" in output, output) |
---|
205 | self.failUnless("writekey: aeaqcaibaeaqcaibaeaqcaibae" in output, |
---|
206 | output) |
---|
207 | self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output) |
---|
208 | self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, |
---|
209 | output) |
---|
210 | self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) |
---|
211 | |
---|
212 | output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", |
---|
213 | u.to_string()) |
---|
214 | self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) |
---|
215 | |
---|
216 | output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j", |
---|
217 | u.to_string()) |
---|
218 | self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output) |
---|
219 | self.failIf("file renewal secret:" in output, output) |
---|
220 | |
---|
221 | output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j", |
---|
222 | "--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", |
---|
223 | u.to_string()) |
---|
224 | self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output) |
---|
225 | self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) |
---|
226 | self.failUnless("lease renewal secret: 7pjtaumrb7znzkkbvekkmuwpqfjyfyamznfz4bwwvmh4nw33lorq" in output, output) |
---|
227 | |
---|
228 | u = u.get_readonly() |
---|
229 | output = self._dump_cap(u.to_string()) |
---|
230 | self.failUnless("Directory Read-only URI:" in output, output) |
---|
231 | self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output) |
---|
232 | self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) |
---|
233 | self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) |
---|
234 | |
---|
235 | u = u.get_verify_cap() |
---|
236 | output = self._dump_cap(u.to_string()) |
---|
237 | self.failUnless("Directory Verifier URI:" in output, output) |
---|
238 | self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) |
---|
239 | self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) |
---|
240 | |
---|
241 | def _catalog_shares(self, *basedirs): |
---|
242 | o = debug.CatalogSharesOptions() |
---|
243 | o.stdout,o.stderr = StringIO(), StringIO() |
---|
244 | args = list(basedirs) |
---|
245 | o.parseOptions(args) |
---|
246 | debug.catalog_shares(o) |
---|
247 | out = o.stdout.getvalue() |
---|
248 | err = o.stderr.getvalue() |
---|
249 | return out, err |
---|
250 | |
---|
    def test_catalog_shares_error(self):
        """catalog-shares must survive (and report) corrupt share files and
        stray non-directory entries in the storage tree, instead of aborting
        the whole catalog run."""
        nodedir1 = "cli/test_catalog_shares/node1"
        sharedir = os.path.join(nodedir1, "storage", "shares", "mq", "mqfblse6m5a6dh45isu2cg7oji")
        fileutil.make_dirs(sharedir)
        f = open(os.path.join(sharedir, "8"), "wb")
        # a plain file where a share bucket directory is expected
        open("cli/test_catalog_shares/node1/storage/shares/mq/not-a-dir", "wb").close()
        # write a bogus share that looks a little bit like CHK
        f.write("\x00\x00\x00\x01" + "\xff" * 200) # this triggers an assert
        f.close()

        # a second (empty) node directory, plus another stray file one level up
        nodedir2 = "cli/test_catalog_shares/node2"
        fileutil.make_dirs(nodedir2)
        open("cli/test_catalog_shares/node1/storage/shares/not-a-dir", "wb").close()

        # now make sure that the 'catalog-shares' command survives the error
        out, err = self._catalog_shares(nodedir1, nodedir2)
        self.failUnlessEqual(out, "", out)
        self.failUnless("Error processing " in err,
                        "didn't see 'error processing' in '%s'" % err)
        #self.failUnless(nodedir1 in err,
        #                "didn't see '%s' in '%s'" % (nodedir1, err))
        # windows mangles the path, and os.path.join isn't enough to make
        # up for it, so just look for individual strings
        self.failUnless("node1" in err,
                        "didn't see 'node1' in '%s'" % err)
        self.failUnless("mqfblse6m5a6dh45isu2cg7oji" in err,
                        "didn't see 'mqfblse6m5a6dh45isu2cg7oji' in '%s'" % err)
---|
    def test_alias(self):
        """Exercise get_alias() path parsing against a fixed alias table:
        with a default alias ('tahoe get' style), with default=None
        ('tahoe cp' style, where letter-colon paths may be local files on
        Windows), and with a pretend-Windows platform flag."""
        aliases = {"tahoe": "TA",
                   "work": "WA",
                   "c": "CA"}
        # ga1: a non-None default alias means any bare path maps to it
        def ga1(path):
            return get_alias(aliases, path, "tahoe")
        uses_lettercolon = common.platform_uses_lettercolon_drivename()
        self.failUnlessEqual(ga1("bare"), ("TA", "bare"))
        self.failUnlessEqual(ga1("baredir/file"), ("TA", "baredir/file"))
        self.failUnlessEqual(ga1("baredir/file:7"), ("TA", "baredir/file:7"))
        self.failUnlessEqual(ga1("tahoe:"), ("TA", ""))
        self.failUnlessEqual(ga1("tahoe:file"), ("TA", "file"))
        self.failUnlessEqual(ga1("tahoe:dir/file"), ("TA", "dir/file"))
        self.failUnlessEqual(ga1("work:"), ("WA", ""))
        self.failUnlessEqual(ga1("work:file"), ("WA", "file"))
        self.failUnlessEqual(ga1("work:dir/file"), ("WA", "dir/file"))
        # default != None means we really expect a tahoe path, regardless of
        # whether we're on windows or not. This is what 'tahoe get' uses.
        self.failUnlessEqual(ga1("c:"), ("CA", ""))
        self.failUnlessEqual(ga1("c:file"), ("CA", "file"))
        self.failUnlessEqual(ga1("c:dir/file"), ("CA", "dir/file"))
        self.failUnlessEqual(ga1("URI:stuff"), ("URI:stuff", ""))
        self.failUnlessEqual(ga1("URI:stuff:./file"), ("URI:stuff", "file"))
        self.failUnlessEqual(ga1("URI:stuff:./dir/file"),
                             ("URI:stuff", "dir/file"))
        self.failUnlessRaises(common.UnknownAliasError, ga1, "missing:")
        self.failUnlessRaises(common.UnknownAliasError, ga1, "missing:dir")
        self.failUnlessRaises(common.UnknownAliasError, ga1, "missing:dir/file")

        # ga2: default=None means bare paths come back with the
        # DefaultAliasMarker sentinel (treated as local paths by 'tahoe cp')
        def ga2(path):
            return get_alias(aliases, path, None)
        self.failUnlessEqual(ga2("bare"), (DefaultAliasMarker, "bare"))
        self.failUnlessEqual(ga2("baredir/file"),
                             (DefaultAliasMarker, "baredir/file"))
        self.failUnlessEqual(ga2("baredir/file:7"),
                             (DefaultAliasMarker, "baredir/file:7"))
        self.failUnlessEqual(ga2("baredir/sub:1/file:7"),
                             (DefaultAliasMarker, "baredir/sub:1/file:7"))
        self.failUnlessEqual(ga2("tahoe:"), ("TA", ""))
        self.failUnlessEqual(ga2("tahoe:file"), ("TA", "file"))
        self.failUnlessEqual(ga2("tahoe:dir/file"), ("TA", "dir/file"))
        # on windows, we really want c:foo to indicate a local file.
        # default==None is what 'tahoe cp' uses.
        if uses_lettercolon:
            self.failUnlessEqual(ga2("c:"), (DefaultAliasMarker, "c:"))
            self.failUnlessEqual(ga2("c:file"), (DefaultAliasMarker, "c:file"))
            self.failUnlessEqual(ga2("c:dir/file"),
                                 (DefaultAliasMarker, "c:dir/file"))
        else:
            self.failUnlessEqual(ga2("c:"), ("CA", ""))
            self.failUnlessEqual(ga2("c:file"), ("CA", "file"))
            self.failUnlessEqual(ga2("c:dir/file"), ("CA", "dir/file"))
        self.failUnlessEqual(ga2("work:"), ("WA", ""))
        self.failUnlessEqual(ga2("work:file"), ("WA", "file"))
        self.failUnlessEqual(ga2("work:dir/file"), ("WA", "dir/file"))
        self.failUnlessEqual(ga2("URI:stuff"), ("URI:stuff", ""))
        self.failUnlessEqual(ga2("URI:stuff:./file"), ("URI:stuff", "file"))
        self.failUnlessEqual(ga2("URI:stuff:./dir/file"), ("URI:stuff", "dir/file"))
        self.failUnlessRaises(common.UnknownAliasError, ga2, "missing:")
        self.failUnlessRaises(common.UnknownAliasError, ga2, "missing:dir")
        self.failUnlessRaises(common.UnknownAliasError, ga2, "missing:dir/file")

        # ga3: like ga2 but with the platform forced (via the module-level
        # pretend flag, restored in a finally:) to behave like Windows, so
        # the letter-colon branch is exercised on every platform
        def ga3(path):
            old = common.pretend_platform_uses_lettercolon
            try:
                common.pretend_platform_uses_lettercolon = True
                retval = get_alias(aliases, path, None)
            finally:
                common.pretend_platform_uses_lettercolon = old
            return retval
        self.failUnlessEqual(ga3("bare"), (DefaultAliasMarker, "bare"))
        self.failUnlessEqual(ga3("baredir/file"),
                             (DefaultAliasMarker, "baredir/file"))
        self.failUnlessEqual(ga3("baredir/file:7"),
                             (DefaultAliasMarker, "baredir/file:7"))
        self.failUnlessEqual(ga3("baredir/sub:1/file:7"),
                             (DefaultAliasMarker, "baredir/sub:1/file:7"))
        self.failUnlessEqual(ga3("tahoe:"), ("TA", ""))
        self.failUnlessEqual(ga3("tahoe:file"), ("TA", "file"))
        self.failUnlessEqual(ga3("tahoe:dir/file"), ("TA", "dir/file"))
        self.failUnlessEqual(ga3("c:"), (DefaultAliasMarker, "c:"))
        self.failUnlessEqual(ga3("c:file"), (DefaultAliasMarker, "c:file"))
        self.failUnlessEqual(ga3("c:dir/file"),
                             (DefaultAliasMarker, "c:dir/file"))
        self.failUnlessEqual(ga3("work:"), ("WA", ""))
        self.failUnlessEqual(ga3("work:file"), ("WA", "file"))
        self.failUnlessEqual(ga3("work:dir/file"), ("WA", "dir/file"))
        self.failUnlessEqual(ga3("URI:stuff"), ("URI:stuff", ""))
        self.failUnlessEqual(ga3("URI:stuff:./file"), ("URI:stuff", "file"))
        self.failUnlessEqual(ga3("URI:stuff:./dir/file"), ("URI:stuff", "dir/file"))
        self.failUnlessRaises(common.UnknownAliasError, ga3, "missing:")
        self.failUnlessRaises(common.UnknownAliasError, ga3, "missing:dir")
        self.failUnlessRaises(common.UnknownAliasError, ga3, "missing:dir/file")
---|
372 | |
---|
373 | |
---|
class Help(unittest.TestCase):
    """Check that each CLI subcommand's --help text mentions its synopsis.

    Each test renders the Options class to a string and looks for the usage
    line. The local variable is named help_text (not 'help') to avoid
    shadowing the builtin."""

    def test_get(self):
        help_text = str(cli.GetOptions())
        self.failUnless("get REMOTE_FILE LOCAL_FILE" in help_text, help_text)
        self.failUnless("% tahoe get FOO |less" in help_text, help_text)

    def test_put(self):
        help_text = str(cli.PutOptions())
        self.failUnless("put LOCAL_FILE REMOTE_FILE" in help_text, help_text)
        self.failUnless("% cat FILE | tahoe put" in help_text, help_text)

    def test_rm(self):
        help_text = str(cli.RmOptions())
        self.failUnless("rm REMOTE_FILE" in help_text, help_text)

    def test_mv(self):
        help_text = str(cli.MvOptions())
        self.failUnless("mv FROM TO" in help_text, help_text)
        self.failUnless("Use 'tahoe mv' to move files" in help_text)

    def test_ln(self):
        help_text = str(cli.LnOptions())
        self.failUnless("ln FROM TO" in help_text, help_text)

    def test_backup(self):
        help_text = str(cli.BackupOptions())
        self.failUnless("backup FROM ALIAS:TO" in help_text, help_text)

    def test_webopen(self):
        help_text = str(cli.WebopenOptions())
        self.failUnless("webopen [ALIAS:PATH]" in help_text, help_text)

    def test_manifest(self):
        help_text = str(cli.ManifestOptions())
        self.failUnless("manifest [ALIAS:PATH]" in help_text, help_text)

    def test_stats(self):
        help_text = str(cli.StatsOptions())
        self.failUnless("stats [ALIAS:PATH]" in help_text, help_text)

    def test_check(self):
        help_text = str(cli.CheckOptions())
        self.failUnless("check [ALIAS:PATH]" in help_text, help_text)

    def test_deep_check(self):
        help_text = str(cli.DeepCheckOptions())
        self.failUnless("deep-check [ALIAS:PATH]" in help_text, help_text)

    def test_create_alias(self):
        help_text = str(cli.CreateAliasOptions())
        self.failUnless("create-alias ALIAS" in help_text, help_text)

    def test_add_aliases(self):
        help_text = str(cli.AddAliasOptions())
        self.failUnless("add-alias ALIAS DIRCAP" in help_text, help_text)
---|
class CLITestMixin:
    """Mixin for grid-based tests: run a tahoe CLI command in-process."""

    def do_cli(self, verb, *args, **kwargs):
        """Run 'tahoe <verb> --node-directory <clientdir> <args...>' via
        runner.runner in a worker thread (CLI code does blocking HTTP, so it
        must not run on the reactor thread).

        Accepts an optional 'stdin' keyword (a string, default "").
        Returns a Deferred firing with (rc, stdout, stderr) where stdout and
        stderr are the captured output strings.
        """
        nodeargs = [
            "--node-directory", self.get_clientdir(),
            ]
        argv = [verb] + nodeargs + list(args)
        stdin = kwargs.get("stdin", "")
        stdout, stderr = StringIO(), StringIO()
        # run_by_human=False makes runner return an exit code instead of
        # calling sys.exit
        d = threads.deferToThread(runner.runner, argv, run_by_human=False,
                                  stdin=StringIO(stdin),
                                  stdout=stdout, stderr=stderr)
        def _done(rc):
            return rc, stdout.getvalue(), stderr.getvalue()
        d.addCallback(_done)
        return d
---|
446 | |
---|
class CreateAlias(GridTestMixin, CLITestMixin, unittest.TestCase):

    def _test_webopen(self, args, expected_url):
        """Parse 'tahoe webopen <args>' and run it with a URL-collecting
        callback instead of a real browser opener; assert that it succeeds
        and that exactly the expected URL would have been opened."""
        options = cli.WebopenOptions()
        options.parseOptions(["--node-directory", self.get_clientdir()]
                             + list(args))
        opened = []
        rc = cli.webopen(options, opened.append)
        self.failUnlessEqual(rc, 0)
        self.failUnlessEqual(opened, [expected_url])
---|
    def test_create(self):
        """End-to-end alias handling against a live test grid: create-alias,
        duplicate detection for both create-alias and add-alias, webopen URL
        construction for every path form, and the ticket #741 fix (an alias
        file lacking a trailing newline must not be corrupted by appending)."""
        self.basedir = "cli/CreateAlias/create"
        self.set_up_grid()
        aliasfile = os.path.join(self.get_clientdir(), "private", "aliases")

        d = self.do_cli("create-alias", "tahoe")
        def _done((rc,stdout,stderr)):
            self.failUnless("Alias 'tahoe' created" in stdout)
            self.failIf(stderr)
            aliases = get_aliases(self.get_clientdir())
            self.failUnless("tahoe" in aliases)
            self.failUnless(aliases["tahoe"].startswith("URI:DIR2:"))
        d.addCallback(_done)
        d.addCallback(lambda res: self.do_cli("create-alias", "two"))

        # remember the webapi URLs corresponding to both aliases, for the
        # webopen checks below
        def _stash_urls(res):
            aliases = get_aliases(self.get_clientdir())
            node_url_file = os.path.join(self.get_clientdir(), "node.url")
            nodeurl = open(node_url_file, "r").read().strip()
            self.welcome_url = nodeurl
            uribase = nodeurl + "uri/"
            self.tahoe_url = uribase + urllib.quote(aliases["tahoe"])
            self.tahoe_subdir_url = self.tahoe_url + "/subdir"
            self.two_url = uribase + urllib.quote(aliases["two"])
            self.two_uri = aliases["two"]
        d.addCallback(_stash_urls)

        d.addCallback(lambda res: self.do_cli("create-alias", "two")) # dup
        def _check_create_duplicate((rc,stdout,stderr)):
            # a duplicate create-alias must fail and leave the alias intact
            self.failIfEqual(rc, 0)
            self.failUnless("Alias 'two' already exists!" in stderr)
            aliases = get_aliases(self.get_clientdir())
            self.failUnlessEqual(aliases["two"], self.two_uri)
        d.addCallback(_check_create_duplicate)

        d.addCallback(lambda res: self.do_cli("add-alias", "added", self.two_uri))
        def _check_add((rc,stdout,stderr)):
            self.failUnlessEqual(rc, 0)
            self.failUnless("Alias 'added' added" in stdout)
        d.addCallback(_check_add)

        # check add-alias with a duplicate
        d.addCallback(lambda res: self.do_cli("add-alias", "two", self.two_uri))
        def _check_add_duplicate((rc,stdout,stderr)):
            self.failIfEqual(rc, 0)
            self.failUnless("Alias 'two' already exists!" in stderr)
            aliases = get_aliases(self.get_clientdir())
            self.failUnlessEqual(aliases["two"], self.two_uri)
        d.addCallback(_check_add_duplicate)

        # every path form webopen accepts should produce the expected URL
        def _test_urls(junk):
            self._test_webopen([], self.welcome_url)
            self._test_webopen(["/"], self.tahoe_url)
            self._test_webopen(["tahoe:"], self.tahoe_url)
            self._test_webopen(["tahoe:/"], self.tahoe_url)
            self._test_webopen(["tahoe:subdir"], self.tahoe_subdir_url)
            self._test_webopen(["tahoe:subdir/"], self.tahoe_subdir_url + '/')
            self._test_webopen(["tahoe:subdir/file"], self.tahoe_subdir_url + '/file')
            # if "file" is indeed a file, then the url produced by webopen in
            # this case is disallowed by the webui. but by design, webopen
            # passes through the mistake from the user to the resultant
            # webopened url
            self._test_webopen(["tahoe:subdir/file/"], self.tahoe_subdir_url + '/file/')
            self._test_webopen(["two:"], self.two_url)
        d.addCallback(_test_urls)

        def _remove_trailing_newline_and_create_alias(ign):
            f = open(aliasfile, "r")
            old = f.read()
            f.close()
            # ticket #741 is about a manually-edited alias file (which
            # doesn't end in a newline) being corrupted by a subsequent
            # "tahoe create-alias"
            f = open(aliasfile, "w")
            f.write(old.rstrip())
            f.close()
            return self.do_cli("create-alias", "un-corrupted1")
        d.addCallback(_remove_trailing_newline_and_create_alias)
        def _check_not_corrupted1((rc,stdout,stderr)):
            self.failUnless("Alias 'un-corrupted1' created" in stdout, stdout)
            self.failIf(stderr)
            # the old behavior was to simply append the new record, causing a
            # line that looked like "NAME1: CAP1NAME2: CAP2". This won't look
            # like a valid dircap, so get_aliases() will raise an exception.
            aliases = get_aliases(self.get_clientdir())
            self.failUnless("added" in aliases)
            self.failUnless(aliases["added"].startswith("URI:DIR2:"))
            # to be safe, let's confirm that we don't see "NAME2:" in CAP1.
            # No chance of a false-negative, because the hyphen in
            # "un-corrupted1" is not a valid base32 character.
            self.failIfIn("un-corrupted1:", aliases["added"])
            self.failUnless("un-corrupted1" in aliases)
            self.failUnless(aliases["un-corrupted1"].startswith("URI:DIR2:"))
        d.addCallback(_check_not_corrupted1)

        def _remove_trailing_newline_and_add_alias(ign):
            # same thing, but for "tahoe add-alias"
            f = open(aliasfile, "r")
            old = f.read()
            f.close()
            f = open(aliasfile, "w")
            f.write(old.rstrip())
            f.close()
            return self.do_cli("add-alias", "un-corrupted2", self.two_uri)
        d.addCallback(_remove_trailing_newline_and_add_alias)
        def _check_not_corrupted((rc,stdout,stderr)):
            self.failUnless("Alias 'un-corrupted2' added" in stdout, stdout)
            self.failIf(stderr)
            aliases = get_aliases(self.get_clientdir())
            self.failUnless("un-corrupted1" in aliases)
            self.failUnless(aliases["un-corrupted1"].startswith("URI:DIR2:"))
            self.failIfIn("un-corrupted2:", aliases["un-corrupted1"])
            self.failUnless("un-corrupted2" in aliases)
            self.failUnless(aliases["un-corrupted2"].startswith("URI:DIR2:"))
        d.addCallback(_check_not_corrupted)

        return d
---|
576 | |
---|
class Put(GridTestMixin, CLITestMixin, unittest.TestCase):

    def test_unlinked_immutable_stdin(self):
        """Upload from stdin without a target path, then fetch the resulting
        CHK readcap; a second identical upload must converge on the same cap.

        Equivalent shell usage:
        # tahoe get `echo DATA | tahoe put`
        # tahoe get `echo DATA | tahoe put -`
        """
        self.basedir = "cli/Put/unlinked_immutable_stdin"
        # 400 bytes: large enough to get a CHK cap rather than a LIT cap
        DATA = "data" * 100
        self.set_up_grid()
        d = self.do_cli("put", stdin=DATA)
        def _uploaded(res):
            (rc, stdout, stderr) = res
            # progress/status messages go to stderr; the cap goes to stdout
            self.failUnless("waiting for file data on stdin.." in stderr)
            self.failUnless("200 OK" in stderr, stderr)
            self.readcap = stdout
            self.failUnless(self.readcap.startswith("URI:CHK:"))
        d.addCallback(_uploaded)
        d.addCallback(lambda res: self.do_cli("get", self.readcap))
        def _downloaded(res):
            (rc, stdout, stderr) = res
            self.failUnlessEqual(stderr, "")
            self.failUnlessEqual(stdout, DATA)
        d.addCallback(_downloaded)
        # 'put -' (explicit stdin marker) must yield the same readcap
        d.addCallback(lambda res: self.do_cli("put", "-", stdin=DATA))
        d.addCallback(lambda (rc,stdout,stderr):
                      self.failUnlessEqual(stdout, self.readcap))
        return d
---|
603 | |
---|
604 | def test_unlinked_immutable_from_file(self): |
---|
605 | # tahoe put file.txt |
---|
606 | # tahoe put ./file.txt |
---|
607 | # tahoe put /tmp/file.txt |
---|
608 | # tahoe put ~/file.txt |
---|
609 | self.basedir = "cli/Put/unlinked_immutable_from_file" |
---|
610 | self.set_up_grid() |
---|
611 | |
---|
612 | rel_fn = os.path.join(self.basedir, "DATAFILE") |
---|
613 | abs_fn = os.path.abspath(rel_fn) |
---|
614 | # we make the file small enough to fit in a LIT file, for speed |
---|
615 | f = open(rel_fn, "w") |
---|
616 | f.write("short file") |
---|
617 | f.close() |
---|
618 | d = self.do_cli("put", rel_fn) |
---|
619 | def _uploaded((rc,stdout,stderr)): |
---|
620 | readcap = stdout |
---|
621 | self.failUnless(readcap.startswith("URI:LIT:")) |
---|
622 | self.readcap = readcap |
---|
623 | d.addCallback(_uploaded) |
---|
624 | d.addCallback(lambda res: self.do_cli("put", "./" + rel_fn)) |
---|
625 | d.addCallback(lambda (rc,stdout,stderr): |
---|
626 | self.failUnlessEqual(stdout, self.readcap)) |
---|
627 | d.addCallback(lambda res: self.do_cli("put", abs_fn)) |
---|
628 | d.addCallback(lambda (rc,stdout,stderr): |
---|
629 | self.failUnlessEqual(stdout, self.readcap)) |
---|
630 | # we just have to assume that ~ is handled properly |
---|
631 | return d |
---|
632 | |
---|
633 | def test_immutable_from_file(self): |
---|
634 | # tahoe put file.txt uploaded.txt |
---|
635 | # tahoe - uploaded.txt |
---|
636 | # tahoe put file.txt subdir/uploaded.txt |
---|
637 | # tahoe put file.txt tahoe:uploaded.txt |
---|
638 | # tahoe put file.txt tahoe:subdir/uploaded.txt |
---|
639 | # tahoe put file.txt DIRCAP:./uploaded.txt |
---|
640 | # tahoe put file.txt DIRCAP:./subdir/uploaded.txt |
---|
641 | self.basedir = "cli/Put/immutable_from_file" |
---|
642 | self.set_up_grid() |
---|
643 | |
---|
644 | rel_fn = os.path.join(self.basedir, "DATAFILE") |
---|
645 | # we make the file small enough to fit in a LIT file, for speed |
---|
646 | DATA = "short file" |
---|
647 | DATA2 = "short file two" |
---|
648 | f = open(rel_fn, "w") |
---|
649 | f.write(DATA) |
---|
650 | f.close() |
---|
651 | |
---|
652 | d = self.do_cli("create-alias", "tahoe") |
---|
653 | |
---|
654 | d.addCallback(lambda res: |
---|
655 | self.do_cli("put", rel_fn, "uploaded.txt")) |
---|
656 | def _uploaded((rc,stdout,stderr)): |
---|
657 | readcap = stdout.strip() |
---|
658 | self.failUnless(readcap.startswith("URI:LIT:")) |
---|
659 | self.failUnless("201 Created" in stderr, stderr) |
---|
660 | self.readcap = readcap |
---|
661 | d.addCallback(_uploaded) |
---|
662 | d.addCallback(lambda res: |
---|
663 | self.do_cli("get", "tahoe:uploaded.txt")) |
---|
664 | d.addCallback(lambda (rc,stdout,stderr): |
---|
665 | self.failUnlessEqual(stdout, DATA)) |
---|
666 | |
---|
667 | d.addCallback(lambda res: |
---|
668 | self.do_cli("put", "-", "uploaded.txt", stdin=DATA2)) |
---|
669 | def _replaced((rc,stdout,stderr)): |
---|
670 | readcap = stdout.strip() |
---|
671 | self.failUnless(readcap.startswith("URI:LIT:")) |
---|
672 | self.failUnless("200 OK" in stderr, stderr) |
---|
673 | d.addCallback(_replaced) |
---|
674 | |
---|
675 | d.addCallback(lambda res: |
---|
676 | self.do_cli("put", rel_fn, "subdir/uploaded2.txt")) |
---|
677 | d.addCallback(lambda res: self.do_cli("get", "subdir/uploaded2.txt")) |
---|
678 | d.addCallback(lambda (rc,stdout,stderr): |
---|
679 | self.failUnlessEqual(stdout, DATA)) |
---|
680 | |
---|
681 | d.addCallback(lambda res: |
---|
682 | self.do_cli("put", rel_fn, "tahoe:uploaded3.txt")) |
---|
683 | d.addCallback(lambda res: self.do_cli("get", "tahoe:uploaded3.txt")) |
---|
684 | d.addCallback(lambda (rc,stdout,stderr): |
---|
685 | self.failUnlessEqual(stdout, DATA)) |
---|
686 | |
---|
687 | d.addCallback(lambda res: |
---|
688 | self.do_cli("put", rel_fn, "tahoe:subdir/uploaded4.txt")) |
---|
689 | d.addCallback(lambda res: |
---|
690 | self.do_cli("get", "tahoe:subdir/uploaded4.txt")) |
---|
691 | d.addCallback(lambda (rc,stdout,stderr): |
---|
692 | self.failUnlessEqual(stdout, DATA)) |
---|
693 | |
---|
694 | def _get_dircap(res): |
---|
695 | self.dircap = get_aliases(self.get_clientdir())["tahoe"] |
---|
696 | d.addCallback(_get_dircap) |
---|
697 | |
---|
698 | d.addCallback(lambda res: |
---|
699 | self.do_cli("put", rel_fn, |
---|
700 | self.dircap+":./uploaded5.txt")) |
---|
701 | d.addCallback(lambda res: |
---|
702 | self.do_cli("get", "tahoe:uploaded5.txt")) |
---|
703 | d.addCallback(lambda (rc,stdout,stderr): |
---|
704 | self.failUnlessEqual(stdout, DATA)) |
---|
705 | |
---|
706 | d.addCallback(lambda res: |
---|
707 | self.do_cli("put", rel_fn, |
---|
708 | self.dircap+":./subdir/uploaded6.txt")) |
---|
709 | d.addCallback(lambda res: |
---|
710 | self.do_cli("get", "tahoe:subdir/uploaded6.txt")) |
---|
711 | d.addCallback(lambda (rc,stdout,stderr): |
---|
712 | self.failUnlessEqual(stdout, DATA)) |
---|
713 | |
---|
714 | return d |
---|
715 | |
---|
716 | def test_mutable_unlinked(self): |
---|
717 | # FILECAP = `echo DATA | tahoe put --mutable` |
---|
718 | # tahoe get FILECAP, compare against DATA |
---|
719 | # echo DATA2 | tahoe put - FILECAP |
---|
720 | # tahoe get FILECAP, compare against DATA2 |
---|
721 | # tahoe put file.txt FILECAP |
---|
722 | self.basedir = "cli/Put/mutable_unlinked" |
---|
723 | self.set_up_grid() |
---|
724 | |
---|
725 | DATA = "data" * 100 |
---|
726 | DATA2 = "two" * 100 |
---|
727 | rel_fn = os.path.join(self.basedir, "DATAFILE") |
---|
728 | DATA3 = "three" * 100 |
---|
729 | f = open(rel_fn, "w") |
---|
730 | f.write(DATA3) |
---|
731 | f.close() |
---|
732 | |
---|
733 | d = self.do_cli("put", "--mutable", stdin=DATA) |
---|
734 | def _created(res): |
---|
735 | (rc, stdout, stderr) = res |
---|
736 | self.failUnless("waiting for file data on stdin.." in stderr) |
---|
737 | self.failUnless("200 OK" in stderr) |
---|
738 | self.filecap = stdout |
---|
739 | self.failUnless(self.filecap.startswith("URI:SSK:")) |
---|
740 | d.addCallback(_created) |
---|
741 | d.addCallback(lambda res: self.do_cli("get", self.filecap)) |
---|
742 | d.addCallback(lambda (rc,out,err): self.failUnlessEqual(out, DATA)) |
---|
743 | |
---|
744 | d.addCallback(lambda res: self.do_cli("put", "-", self.filecap, stdin=DATA2)) |
---|
745 | def _replaced(res): |
---|
746 | (rc, stdout, stderr) = res |
---|
747 | self.failUnless("waiting for file data on stdin.." in stderr) |
---|
748 | self.failUnless("200 OK" in stderr) |
---|
749 | self.failUnlessEqual(self.filecap, stdout) |
---|
750 | d.addCallback(_replaced) |
---|
751 | d.addCallback(lambda res: self.do_cli("get", self.filecap)) |
---|
752 | d.addCallback(lambda (rc,out,err): self.failUnlessEqual(out, DATA2)) |
---|
753 | |
---|
754 | d.addCallback(lambda res: self.do_cli("put", rel_fn, self.filecap)) |
---|
755 | def _replaced2(res): |
---|
756 | (rc, stdout, stderr) = res |
---|
757 | self.failUnless("200 OK" in stderr) |
---|
758 | self.failUnlessEqual(self.filecap, stdout) |
---|
759 | d.addCallback(_replaced2) |
---|
760 | d.addCallback(lambda res: self.do_cli("get", self.filecap)) |
---|
761 | d.addCallback(lambda (rc,out,err): self.failUnlessEqual(out, DATA3)) |
---|
762 | |
---|
763 | return d |
---|
764 | |
---|
765 | def test_mutable(self): |
---|
766 | # echo DATA1 | tahoe put --mutable - uploaded.txt |
---|
767 | # echo DATA2 | tahoe put - uploaded.txt # should modify-in-place |
---|
768 | # tahoe get uploaded.txt, compare against DATA2 |
---|
769 | |
---|
770 | self.basedir = "cli/Put/mutable" |
---|
771 | self.set_up_grid() |
---|
772 | |
---|
773 | DATA1 = "data" * 100 |
---|
774 | fn1 = os.path.join(self.basedir, "DATA1") |
---|
775 | f = open(fn1, "w") |
---|
776 | f.write(DATA1) |
---|
777 | f.close() |
---|
778 | DATA2 = "two" * 100 |
---|
779 | fn2 = os.path.join(self.basedir, "DATA2") |
---|
780 | f = open(fn2, "w") |
---|
781 | f.write(DATA2) |
---|
782 | f.close() |
---|
783 | |
---|
784 | d = self.do_cli("create-alias", "tahoe") |
---|
785 | d.addCallback(lambda res: |
---|
786 | self.do_cli("put", "--mutable", fn1, "tahoe:uploaded.txt")) |
---|
787 | d.addCallback(lambda res: |
---|
788 | self.do_cli("put", fn2, "tahoe:uploaded.txt")) |
---|
789 | d.addCallback(lambda res: |
---|
790 | self.do_cli("get", "tahoe:uploaded.txt")) |
---|
791 | d.addCallback(lambda (rc,out,err): self.failUnlessEqual(out, DATA2)) |
---|
792 | return d |
---|
793 | |
---|
class List(GridTestMixin, CLITestMixin, unittest.TestCase):
    """Exercise 'tahoe ls', including its behavior on unrecoverable
    directories and on raw filecaps."""

    def test_list(self):
        self.basedir = "cli/List/list"
        self.set_up_grid()
        c0 = self.g.clients[0]
        small = "small"
        d = c0.create_dirnode()
        def _stash_root_and_create_file(n):
            self.rootnode = n
            self.rooturi = n.get_uri()
            return n.add_file(u"good", upload.Data(small, convergence=""))
        d.addCallback(_stash_root_and_create_file)
        def _stash_goodcap(n):
            self.goodcap = n.get_uri()
        d.addCallback(_stash_goodcap)
        # "1share": delete all but one share, leaving the dir unrecoverable
        d.addCallback(lambda ign: self.rootnode.create_subdirectory(u"1share"))
        d.addCallback(lambda n:
                      self.delete_shares_numbered(n.get_uri(), range(1,10)))
        # "0share": delete every share
        d.addCallback(lambda ign: self.rootnode.create_subdirectory(u"0share"))
        d.addCallback(lambda n:
                      self.delete_shares_numbered(n.get_uri(), range(0,10)))
        d.addCallback(lambda ign:
                      self.do_cli("add-alias", "tahoe", self.rooturi))
        d.addCallback(lambda ign: self.do_cli("ls"))
        def _check1((rc,out,err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            self.failUnlessEqual(out.splitlines(), ["0share", "1share", "good"])
        d.addCallback(_check1)
        d.addCallback(lambda ign: self.do_cli("ls", "missing"))
        def _check2((rc,out,err)):
            # a nonexistent name must fail with a nonzero rc
            self.failIfEqual(rc, 0)
            self.failUnlessEqual(err.strip(), "No such file or directory")
            self.failUnlessEqual(out, "")
        d.addCallback(_check2)
        d.addCallback(lambda ign: self.do_cli("ls", "1share"))
        def _check3((rc,out,err)):
            # listing an unrecoverable directory should fail cleanly, not
            # explode
            self.failIfEqual(rc, 0)
            self.failUnlessIn("Error during GET: 410 Gone ", err)
            self.failUnlessIn("UnrecoverableFileError:", err)
            self.failUnlessIn("could not be retrieved, because there were "
                              "insufficient good shares.", err)
            self.failUnlessEqual(out, "")
        d.addCallback(_check3)
        # same expectations whether one share or zero shares remain
        d.addCallback(lambda ign: self.do_cli("ls", "0share"))
        d.addCallback(_check3)
        def _check4((rc, out, err)):
            # listing a file (as dir/filename) should have the edge metadata,
            # including the filename
            self.failUnlessEqual(rc, 0)
            self.failUnlessIn("good", out)
            self.failIfIn("-r-- %d -" % len(small), out,
                          "trailing hyphen means unknown date")
        d.addCallback(lambda ign: self.do_cli("ls", "-l", "good"))
        d.addCallback(_check4)
        def _check5((rc, out, err)):
            # listing a raw filecap should not explode, but it will have no
            # metadata, just the size
            self.failUnlessEqual(rc, 0)
            self.failUnlessEqual("-r-- %d -" % len(small), out.strip())
        d.addCallback(lambda ign: self.do_cli("ls", "-l", self.goodcap))
        d.addCallback(_check5)
        return d
---|
857 | |
---|
class Mv(GridTestMixin, CLITestMixin, unittest.TestCase):
    """Exercise 'tahoe mv': renames, overwrites, and moves into
    directories."""

    def test_mv_behavior(self):
        self.basedir = "cli/Mv/mv_behavior"
        self.set_up_grid()
        fn1 = os.path.join(self.basedir, "file1")
        DATA1 = "Nuclear launch codes"
        open(fn1, "wb").write(DATA1)
        fn2 = os.path.join(self.basedir, "file2")
        DATA2 = "UML diagrams"
        open(fn2, "wb").write(DATA2)
        # copy both files to the grid
        d = self.do_cli("create-alias", "tahoe")
        d.addCallback(lambda res:
                      self.do_cli("cp", fn1, "tahoe:"))
        d.addCallback(lambda res:
                      self.do_cli("cp", fn2, "tahoe:"))

        # do mv file1 file3
        # (we should be able to rename files)
        d.addCallback(lambda res:
                      self.do_cli("mv", "tahoe:file1", "tahoe:file3"))
        d.addCallback(lambda (rc, out, err):
                      self.failUnlessIn("OK", out, "mv didn't rename a file"))

        # do mv file3 file2
        # (This should succeed without issue)
        d.addCallback(lambda res:
                      self.do_cli("mv", "tahoe:file3", "tahoe:file2"))
        # Out should contain "OK" to show that the transfer worked.
        d.addCallback(lambda (rc,out,err):
                      self.failUnlessIn("OK", out, "mv didn't output OK after mving"))

        # Next, make a remote directory.
        d.addCallback(lambda res:
                      self.do_cli("mkdir", "tahoe:directory"))

        # mv file2 directory
        # (should fail with a descriptive error message; the CLI mv
        # client should support this)
        d.addCallback(lambda res:
                      self.do_cli("mv", "tahoe:file2", "tahoe:directory"))
        d.addCallback(lambda (rc, out, err):
                      self.failUnlessIn(
                          "Error: You can't overwrite a directory with a file", err,
                          "mv shouldn't overwrite directories" ))

        # mv file2 directory/
        # (should succeed by making file2 a child node of directory)
        d.addCallback(lambda res:
                      self.do_cli("mv", "tahoe:file2", "tahoe:directory/"))
        # We should see an "OK"...
        d.addCallback(lambda (rc, out, err):
                      self.failUnlessIn("OK", out,
                                        "mv didn't mv a file into a directory"))
        # ... and be able to GET the file
        # NOTE(review): self.basedir + "new" has no path separator, so the
        # destination is a sibling of basedir (".../mv_behaviornew") --
        # presumably good enough for this test; confirm before relying on
        # the location.
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:directory/file2", self.basedir + "new"))
        d.addCallback(lambda (rc, out, err):
                      self.failUnless(os.path.exists(self.basedir + "new"),
                                      "mv didn't write the destination file"))
        # ... and not find the file where it was before.
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:file2", "file2"))
        d.addCallback(lambda (rc, out, err):
                      self.failUnlessIn("404", err,
                                        "mv left the source file intact"))

        # Let's build:
        # directory/directory2/some_file
        # directory3
        d.addCallback(lambda res:
                      self.do_cli("mkdir", "tahoe:directory/directory2"))
        d.addCallback(lambda res:
                      self.do_cli("cp", fn2, "tahoe:directory/directory2/some_file"))
        d.addCallback(lambda res:
                      self.do_cli("mkdir", "tahoe:directory3"))

        # Let's now try to mv directory/directory2/some_file to
        # directory3/some_file
        d.addCallback(lambda res:
                      self.do_cli("mv", "tahoe:directory/directory2/some_file",
                                  "tahoe:directory3/"))
        # We should have just some_file in tahoe:directory3
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:directory3/some_file", "some_file"))
        d.addCallback(lambda (rc, out, err):
                      self.failUnless("404" not in err,
                                      "mv didn't handle nested directories correctly"))
        # the directory itself must NOT have been moved along
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:directory3/directory", "directory"))
        d.addCallback(lambda (rc, out, err):
                      self.failUnlessIn("404", err,
                                        "mv moved the wrong thing"))
        return d
---|
952 | |
---|
953 | class Cp(GridTestMixin, CLITestMixin, unittest.TestCase): |
---|
954 | |
---|
955 | def test_not_enough_args(self): |
---|
956 | o = cli.CpOptions() |
---|
957 | self.failUnlessRaises(usage.UsageError, |
---|
958 | o.parseOptions, ["onearg"]) |
---|
959 | |
---|
960 | def test_unicode_filename(self): |
---|
961 | self.basedir = "cli/Cp/unicode_filename" |
---|
962 | self.set_up_grid() |
---|
963 | |
---|
964 | fn1 = os.path.join(self.basedir, "Ärtonwall") |
---|
965 | DATA1 = "unicode file content" |
---|
966 | open(fn1, "wb").write(DATA1) |
---|
967 | |
---|
968 | fn2 = os.path.join(self.basedir, "Metallica") |
---|
969 | DATA2 = "non-unicode file content" |
---|
970 | open(fn2, "wb").write(DATA2) |
---|
971 | |
---|
972 | # Bug #534 |
---|
973 | # Assure that uploading a file whose name contains unicode character doesn't |
---|
974 | # prevent further uploads in the same directory |
---|
975 | d = self.do_cli("create-alias", "tahoe") |
---|
976 | d.addCallback(lambda res: self.do_cli("cp", fn1, "tahoe:")) |
---|
977 | d.addCallback(lambda res: self.do_cli("cp", fn2, "tahoe:")) |
---|
978 | |
---|
979 | d.addCallback(lambda res: self.do_cli("get", "tahoe:Ärtonwall")) |
---|
980 | d.addCallback(lambda (rc,out,err): self.failUnlessEqual(out, DATA1)) |
---|
981 | |
---|
982 | d.addCallback(lambda res: self.do_cli("get", "tahoe:Metallica")) |
---|
983 | d.addCallback(lambda (rc,out,err): self.failUnlessEqual(out, DATA2)) |
---|
984 | |
---|
985 | return d |
---|
986 | test_unicode_filename.todo = "This behavior is not yet supported, although it does happen to work (for reasons that are ill-understood) on many platforms. See issue ticket #534." |
---|
987 | |
---|
988 | def test_dangling_symlink_vs_recursion(self): |
---|
989 | if not hasattr(os, 'symlink'): |
---|
990 | raise unittest.SkipTest("There is no symlink on this platform.") |
---|
991 | # cp -r on a directory containing a dangling symlink shouldn't assert |
---|
992 | self.basedir = "cli/Cp/dangling_symlink_vs_recursion" |
---|
993 | self.set_up_grid() |
---|
994 | dn = os.path.join(self.basedir, "dir") |
---|
995 | os.mkdir(dn) |
---|
996 | fn = os.path.join(dn, "Fakebandica") |
---|
997 | ln = os.path.join(dn, "link") |
---|
998 | os.symlink(fn, ln) |
---|
999 | |
---|
1000 | d = self.do_cli("create-alias", "tahoe") |
---|
1001 | d.addCallback(lambda res: self.do_cli("cp", "--recursive", |
---|
1002 | dn, "tahoe:")) |
---|
1003 | return d |
---|
1004 | |
---|
1005 | def test_copy_using_filecap(self): |
---|
1006 | self.basedir = "cli/Cp/test_copy_using_filecap" |
---|
1007 | self.set_up_grid() |
---|
1008 | outdir = os.path.join(self.basedir, "outdir") |
---|
1009 | os.mkdir(outdir) |
---|
1010 | self.do_cli("create-alias", "tahoe") |
---|
1011 | fn1 = os.path.join(self.basedir, "Metallica") |
---|
1012 | fn2 = os.path.join(outdir, "Not Metallica") |
---|
1013 | fn3 = os.path.join(outdir, "test2") |
---|
1014 | DATA1 = "puppies" * 10000 |
---|
1015 | open(fn1, "wb").write(DATA1) |
---|
1016 | d = self.do_cli("put", fn1) |
---|
1017 | def _put_file((rc, out, err)): |
---|
1018 | self.failUnlessEqual(rc, 0) |
---|
1019 | # keep track of the filecap |
---|
1020 | self.filecap = out.strip() |
---|
1021 | d.addCallback(_put_file) |
---|
1022 | # Let's try copying this to the disk using the filecap |
---|
1023 | # cp FILECAP filename |
---|
1024 | d.addCallback(lambda res: self.do_cli("cp", self.filecap, fn2)) |
---|
1025 | def _copy_file((rc, out, err)): |
---|
1026 | self.failUnlessEqual(rc, 0) |
---|
1027 | results = open(fn2, "r").read() |
---|
1028 | self.failUnlessEqual(results, DATA1) |
---|
1029 | # Test with ./ (see #761) |
---|
1030 | # cp FILECAP localdir |
---|
1031 | d.addCallback(lambda res: self.do_cli("cp", self.filecap, outdir)) |
---|
1032 | def _resp((rc, out, err)): |
---|
1033 | self.failUnlessEqual(rc, 1) |
---|
1034 | self.failUnlessIn("error: you must specify a destination filename", |
---|
1035 | err) |
---|
1036 | d.addCallback(_resp) |
---|
1037 | # Create a directory, linked at tahoe:test |
---|
1038 | d.addCallback(lambda res: self.do_cli("mkdir", "tahoe:test")) |
---|
1039 | def _get_dir((rc, out, err)): |
---|
1040 | self.failUnlessEqual(rc, 0) |
---|
1041 | self.dircap = out.strip() |
---|
1042 | d.addCallback(_get_dir) |
---|
1043 | # Upload a file to the directory |
---|
1044 | d.addCallback(lambda res: |
---|
1045 | self.do_cli("put", fn1, "tahoe:test/test_file")) |
---|
1046 | d.addCallback(lambda (rc, out, err): self.failUnlessEqual(rc, 0)) |
---|
1047 | # cp DIRCAP/filename localdir |
---|
1048 | d.addCallback(lambda res: |
---|
1049 | self.do_cli("cp", self.dircap + "/test_file", outdir)) |
---|
1050 | def _get_resp((rc, out, err)): |
---|
1051 | self.failUnlessEqual(rc, 0) |
---|
1052 | results = open(os.path.join(outdir, "test_file"), "r").read() |
---|
1053 | self.failUnlessEqual(results, DATA1) |
---|
1054 | d.addCallback(_get_resp) |
---|
1055 | # cp -r DIRCAP/filename filename2 |
---|
1056 | d.addCallback(lambda res: |
---|
1057 | self.do_cli("cp", self.dircap + "/test_file", fn3)) |
---|
1058 | def _get_resp2((rc, out, err)): |
---|
1059 | self.failUnlessEqual(rc, 0) |
---|
1060 | results = open(fn3, "r").read() |
---|
1061 | self.failUnlessEqual(results, DATA1) |
---|
1062 | d.addCallback(_get_resp2) |
---|
1063 | return d |
---|
1064 | |
---|
1065 | class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase): |
---|
1066 | |
---|
1067 | def writeto(self, path, data): |
---|
1068 | d = os.path.dirname(os.path.join(self.basedir, "home", path)) |
---|
1069 | fileutil.make_dirs(d) |
---|
1070 | f = open(os.path.join(self.basedir, "home", path), "w") |
---|
1071 | f.write(data) |
---|
1072 | f.close() |
---|
1073 | |
---|
1074 | def count_output(self, out): |
---|
1075 | mo = re.search(r"(\d)+ files uploaded \((\d+) reused\), (\d+) directories created \((\d+) reused\)", out) |
---|
1076 | return [int(s) for s in mo.groups()] |
---|
1077 | |
---|
1078 | def count_output2(self, out): |
---|
1079 | mo = re.search(r"(\d)+ files checked, (\d+) directories checked", out) |
---|
1080 | return [int(s) for s in mo.groups()] |
---|
1081 | |
---|
1082 | def test_backup(self): |
---|
1083 | self.basedir = "cli/Backup/backup" |
---|
1084 | self.set_up_grid() |
---|
1085 | |
---|
1086 | # is the backupdb available? If so, we test that a second backup does |
---|
1087 | # not create new directories. |
---|
1088 | hush = StringIO() |
---|
1089 | have_bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"), |
---|
1090 | hush) |
---|
1091 | |
---|
1092 | # create a small local directory with a couple of files |
---|
1093 | source = os.path.join(self.basedir, "home") |
---|
1094 | fileutil.make_dirs(os.path.join(source, "empty")) |
---|
1095 | self.writeto("parent/subdir/foo.txt", "foo") |
---|
1096 | self.writeto("parent/subdir/bar.txt", "bar\n" * 1000) |
---|
1097 | self.writeto("parent/blah.txt", "blah") |
---|
1098 | |
---|
1099 | def do_backup(verbose=False): |
---|
1100 | cmd = ["backup"] |
---|
1101 | if verbose: |
---|
1102 | cmd.append("--verbose") |
---|
1103 | cmd.append(source) |
---|
1104 | cmd.append("tahoe:backups") |
---|
1105 | return self.do_cli(*cmd) |
---|
1106 | |
---|
1107 | d = self.do_cli("create-alias", "tahoe") |
---|
1108 | |
---|
1109 | if not have_bdb: |
---|
1110 | d.addCallback(lambda res: self.do_cli("backup", source, "tahoe:backups")) |
---|
1111 | def _should_complain((rc, out, err)): |
---|
1112 | self.failUnless("I was unable to import a python sqlite library" in err, err) |
---|
1113 | d.addCallback(_should_complain) |
---|
1114 | d.addCallback(self.stall, 1.1) # make sure the backups get distinct timestamps |
---|
1115 | |
---|
1116 | d.addCallback(lambda res: do_backup()) |
---|
1117 | def _check0((rc, out, err)): |
---|
1118 | self.failUnlessEqual(err, "") |
---|
1119 | self.failUnlessEqual(rc, 0) |
---|
1120 | fu, fr, dc, dr = self.count_output(out) |
---|
1121 | # foo.txt, bar.txt, blah.txt |
---|
1122 | self.failUnlessEqual(fu, 3) |
---|
1123 | self.failUnlessEqual(fr, 0) |
---|
1124 | # empty, home, home/parent, home/parent/subdir |
---|
1125 | self.failUnlessEqual(dc, 4) |
---|
1126 | self.failUnlessEqual(dr, 0) |
---|
1127 | d.addCallback(_check0) |
---|
1128 | |
---|
1129 | d.addCallback(lambda res: self.do_cli("ls", "--uri", "tahoe:backups")) |
---|
1130 | def _check1((rc, out, err)): |
---|
1131 | self.failUnlessEqual(err, "") |
---|
1132 | self.failUnlessEqual(rc, 0) |
---|
1133 | lines = out.split("\n") |
---|
1134 | children = dict([line.split() for line in lines if line]) |
---|
1135 | latest_uri = children["Latest"] |
---|
1136 | self.failUnless(latest_uri.startswith("URI:DIR2-CHK:"), latest_uri) |
---|
1137 | childnames = children.keys() |
---|
1138 | self.failUnlessEqual(sorted(childnames), ["Archives", "Latest"]) |
---|
1139 | d.addCallback(_check1) |
---|
1140 | d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Latest")) |
---|
1141 | def _check2((rc, out, err)): |
---|
1142 | self.failUnlessEqual(err, "") |
---|
1143 | self.failUnlessEqual(rc, 0) |
---|
1144 | self.failUnlessEqual(sorted(out.split()), ["empty", "parent"]) |
---|
1145 | d.addCallback(_check2) |
---|
1146 | d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Latest/empty")) |
---|
1147 | def _check2a((rc, out, err)): |
---|
1148 | self.failUnlessEqual(err, "") |
---|
1149 | self.failUnlessEqual(rc, 0) |
---|
1150 | self.failUnlessEqual(out.strip(), "") |
---|
1151 | d.addCallback(_check2a) |
---|
1152 | d.addCallback(lambda res: self.do_cli("get", "tahoe:backups/Latest/parent/subdir/foo.txt")) |
---|
1153 | def _check3((rc, out, err)): |
---|
1154 | self.failUnlessEqual(err, "") |
---|
1155 | self.failUnlessEqual(rc, 0) |
---|
1156 | self.failUnlessEqual(out, "foo") |
---|
1157 | d.addCallback(_check3) |
---|
1158 | d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives")) |
---|
1159 | def _check4((rc, out, err)): |
---|
1160 | self.failUnlessEqual(err, "") |
---|
1161 | self.failUnlessEqual(rc, 0) |
---|
1162 | self.old_archives = out.split() |
---|
1163 | self.failUnlessEqual(len(self.old_archives), 1) |
---|
1164 | d.addCallback(_check4) |
---|
1165 | |
---|
1166 | |
---|
1167 | d.addCallback(self.stall, 1.1) |
---|
1168 | d.addCallback(lambda res: do_backup()) |
---|
1169 | def _check4a((rc, out, err)): |
---|
1170 | # second backup should reuse everything, if the backupdb is |
---|
1171 | # available |
---|
1172 | self.failUnlessEqual(err, "") |
---|
1173 | self.failUnlessEqual(rc, 0) |
---|
1174 | if have_bdb: |
---|
1175 | fu, fr, dc, dr = self.count_output(out) |
---|
1176 | # foo.txt, bar.txt, blah.txt |
---|
1177 | self.failUnlessEqual(fu, 0) |
---|
1178 | self.failUnlessEqual(fr, 3) |
---|
1179 | # empty, home, home/parent, home/parent/subdir |
---|
1180 | self.failUnlessEqual(dc, 0) |
---|
1181 | self.failUnlessEqual(dr, 4) |
---|
1182 | d.addCallback(_check4a) |
---|
1183 | |
---|
1184 | if have_bdb: |
---|
1185 | # sneak into the backupdb, crank back the "last checked" |
---|
1186 | # timestamp to force a check on all files |
---|
1187 | def _reset_last_checked(res): |
---|
1188 | dbfile = os.path.join(self.get_clientdir(), |
---|
1189 | "private", "backupdb.sqlite") |
---|
1190 | self.failUnless(os.path.exists(dbfile), dbfile) |
---|
1191 | bdb = backupdb.get_backupdb(dbfile) |
---|
1192 | bdb.cursor.execute("UPDATE last_upload SET last_checked=0") |
---|
1193 | bdb.cursor.execute("UPDATE directories SET last_checked=0") |
---|
1194 | bdb.connection.commit() |
---|
1195 | |
---|
1196 | d.addCallback(_reset_last_checked) |
---|
1197 | |
---|
1198 | d.addCallback(self.stall, 1.1) |
---|
1199 | d.addCallback(lambda res: do_backup(verbose=True)) |
---|
1200 | def _check4b((rc, out, err)): |
---|
1201 | # we should check all files, and re-use all of them. None of |
---|
1202 | # the directories should have been changed, so we should |
---|
1203 | # re-use all of them too. |
---|
1204 | self.failUnlessEqual(err, "") |
---|
1205 | self.failUnlessEqual(rc, 0) |
---|
1206 | fu, fr, dc, dr = self.count_output(out) |
---|
1207 | fchecked, dchecked = self.count_output2(out) |
---|
1208 | self.failUnlessEqual(fchecked, 3) |
---|
1209 | self.failUnlessEqual(fu, 0) |
---|
1210 | self.failUnlessEqual(fr, 3) |
---|
1211 | self.failUnlessEqual(dchecked, 4) |
---|
1212 | self.failUnlessEqual(dc, 0) |
---|
1213 | self.failUnlessEqual(dr, 4) |
---|
1214 | d.addCallback(_check4b) |
---|
1215 | |
---|
1216 | d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives")) |
---|
1217 | def _check5((rc, out, err)): |
---|
1218 | self.failUnlessEqual(err, "") |
---|
1219 | self.failUnlessEqual(rc, 0) |
---|
1220 | self.new_archives = out.split() |
---|
1221 | expected_new = 2 |
---|
1222 | if have_bdb: |
---|
1223 | expected_new += 1 |
---|
1224 | self.failUnlessEqual(len(self.new_archives), expected_new, out) |
---|
1225 | # the original backup should still be the oldest (i.e. sorts |
---|
1226 | # alphabetically towards the beginning) |
---|
1227 | self.failUnlessEqual(sorted(self.new_archives)[0], |
---|
1228 | self.old_archives[0]) |
---|
1229 | d.addCallback(_check5) |
---|
1230 | |
---|
1231 | d.addCallback(self.stall, 1.1) |
---|
1232 | def _modify(res): |
---|
1233 | self.writeto("parent/subdir/foo.txt", "FOOF!") |
---|
1234 | # and turn a file into a directory |
---|
1235 | os.unlink(os.path.join(source, "parent/blah.txt")) |
---|
1236 | os.mkdir(os.path.join(source, "parent/blah.txt")) |
---|
1237 | self.writeto("parent/blah.txt/surprise file", "surprise") |
---|
1238 | self.writeto("parent/blah.txt/surprisedir/subfile", "surprise") |
---|
1239 | # turn a directory into a file |
---|
1240 | os.rmdir(os.path.join(source, "empty")) |
---|
1241 | self.writeto("empty", "imagine nothing being here") |
---|
1242 | return do_backup() |
---|
1243 | d.addCallback(_modify) |
---|
1244 | def _check5a((rc, out, err)): |
---|
1245 | # second backup should reuse bar.txt (if backupdb is available), |
---|
1246 | # and upload the rest. None of the directories can be reused. |
---|
1247 | self.failUnlessEqual(err, "") |
---|
1248 | self.failUnlessEqual(rc, 0) |
---|
1249 | if have_bdb: |
---|
1250 | fu, fr, dc, dr = self.count_output(out) |
---|
1251 | # new foo.txt, surprise file, subfile, empty |
---|
1252 | self.failUnlessEqual(fu, 4) |
---|
1253 | # old bar.txt |
---|
1254 | self.failUnlessEqual(fr, 1) |
---|
1255 | # home, parent, subdir, blah.txt, surprisedir |
---|
1256 | self.failUnlessEqual(dc, 5) |
---|
1257 | self.failUnlessEqual(dr, 0) |
---|
1258 | d.addCallback(_check5a) |
---|
1259 | d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives")) |
---|
1260 | def _check6((rc, out, err)): |
---|
1261 | self.failUnlessEqual(err, "") |
---|
1262 | self.failUnlessEqual(rc, 0) |
---|
1263 | self.new_archives = out.split() |
---|
1264 | expected_new = 3 |
---|
1265 | if have_bdb: |
---|
1266 | expected_new += 1 |
---|
1267 | self.failUnlessEqual(len(self.new_archives), expected_new) |
---|
1268 | self.failUnlessEqual(sorted(self.new_archives)[0], |
---|
1269 | self.old_archives[0]) |
---|
1270 | d.addCallback(_check6) |
---|
1271 | d.addCallback(lambda res: self.do_cli("get", "tahoe:backups/Latest/parent/subdir/foo.txt")) |
---|
1272 | def _check7((rc, out, err)): |
---|
1273 | self.failUnlessEqual(err, "") |
---|
1274 | self.failUnlessEqual(rc, 0) |
---|
1275 | self.failUnlessEqual(out, "FOOF!") |
---|
1276 | # the old snapshot should not be modified |
---|
1277 | return self.do_cli("get", "tahoe:backups/Archives/%s/parent/subdir/foo.txt" % self.old_archives[0]) |
---|
1278 | d.addCallback(_check7) |
---|
1279 | def _check8((rc, out, err)): |
---|
1280 | self.failUnlessEqual(err, "") |
---|
1281 | self.failUnlessEqual(rc, 0) |
---|
1282 | self.failUnlessEqual(out, "foo") |
---|
1283 | d.addCallback(_check8) |
---|
1284 | |
---|
1285 | return d |
---|
1286 | |
---|
    # On our old dapper buildslave, this test takes a long time (usually
    # 130s), so we have to bump up the default 120s timeout. The create-alias
    # and initial backup alone take 60s, probably because of the handful of
    # dirnodes being created (RSA key generation). The backup between check4
    # and check4a takes 6s, as does the backup before check4b.
    # (twisted.trial reads a per-test 'timeout' attribute, in seconds)
    test_backup.timeout = 3000
---|
1293 | |
---|
1294 | def test_exclude_options(self): |
---|
1295 | root_listdir = ('lib.a', '_darcs', 'subdir', 'nice_doc.lyx') |
---|
1296 | subdir_listdir = ('another_doc.lyx', 'run_snake_run.py', 'CVS', '.svn', '_darcs') |
---|
1297 | basedir = "cli/Backup/exclude_options" |
---|
1298 | fileutil.make_dirs(basedir) |
---|
1299 | nodeurl_path = os.path.join(basedir, 'node.url') |
---|
1300 | nodeurl = file(nodeurl_path, 'w') |
---|
1301 | nodeurl.write('http://example.net:2357/') |
---|
1302 | nodeurl.close() |
---|
1303 | |
---|
1304 | def _check_filtering(filtered, all, included, excluded): |
---|
1305 | filtered = set(filtered) |
---|
1306 | all = set(all) |
---|
1307 | included = set(included) |
---|
1308 | excluded = set(excluded) |
---|
1309 | self.failUnlessEqual(filtered, included) |
---|
1310 | self.failUnlessEqual(all.difference(filtered), excluded) |
---|
1311 | |
---|
1312 | # test simple exclude |
---|
1313 | backup_options = cli.BackupOptions() |
---|
1314 | backup_options.parseOptions(['--exclude', '*lyx', '--node-directory', |
---|
1315 | basedir, 'from', 'to']) |
---|
1316 | filtered = list(backup_options.filter_listdir(root_listdir)) |
---|
1317 | _check_filtering(filtered, root_listdir, ('lib.a', '_darcs', 'subdir'), |
---|
1318 | ('nice_doc.lyx',)) |
---|
1319 | # multiple exclude |
---|
1320 | backup_options = cli.BackupOptions() |
---|
1321 | backup_options.parseOptions(['--exclude', '*lyx', '--exclude', 'lib.?', '--node-directory', |
---|
1322 | basedir, 'from', 'to']) |
---|
1323 | filtered = list(backup_options.filter_listdir(root_listdir)) |
---|
1324 | _check_filtering(filtered, root_listdir, ('_darcs', 'subdir'), |
---|
1325 | ('nice_doc.lyx', 'lib.a')) |
---|
1326 | # vcs metadata exclusion |
---|
1327 | backup_options = cli.BackupOptions() |
---|
1328 | backup_options.parseOptions(['--exclude-vcs', '--node-directory', |
---|
1329 | basedir, 'from', 'to']) |
---|
1330 | filtered = list(backup_options.filter_listdir(subdir_listdir)) |
---|
1331 | _check_filtering(filtered, subdir_listdir, ('another_doc.lyx', 'run_snake_run.py',), |
---|
1332 | ('CVS', '.svn', '_darcs')) |
---|
1333 | # read exclude patterns from file |
---|
1334 | exclusion_string = "_darcs\n*py\n.svn" |
---|
1335 | excl_filepath = os.path.join(basedir, 'exclusion') |
---|
1336 | excl_file = file(excl_filepath, 'w') |
---|
1337 | excl_file.write(exclusion_string) |
---|
1338 | excl_file.close() |
---|
1339 | backup_options = cli.BackupOptions() |
---|
1340 | backup_options.parseOptions(['--exclude-from', excl_filepath, '--node-directory', |
---|
1341 | basedir, 'from', 'to']) |
---|
1342 | filtered = list(backup_options.filter_listdir(subdir_listdir)) |
---|
1343 | _check_filtering(filtered, subdir_listdir, ('another_doc.lyx', 'CVS'), |
---|
1344 | ('.svn', '_darcs', 'run_snake_run.py')) |
---|
1345 | # text BackupConfigurationError |
---|
1346 | self.failUnlessRaises(cli.BackupConfigurationError, |
---|
1347 | backup_options.parseOptions, |
---|
1348 | ['--exclude-from', excl_filepath + '.no', '--node-directory', |
---|
1349 | basedir, 'from', 'to']) |
---|
1350 | |
---|
1351 | # test that an iterator works too |
---|
1352 | backup_options = cli.BackupOptions() |
---|
1353 | backup_options.parseOptions(['--exclude', '*lyx', '--node-directory', |
---|
1354 | basedir, 'from', 'to']) |
---|
1355 | filtered = list(backup_options.filter_listdir(iter(root_listdir))) |
---|
1356 | _check_filtering(filtered, root_listdir, ('lib.a', '_darcs', 'subdir'), |
---|
1357 | ('nice_doc.lyx',)) |
---|
1358 | |
---|
class Check(GridTestMixin, CLITestMixin, unittest.TestCase):
    # CLI-level tests of "tahoe check" and "tahoe deep-check" against a
    # no-network grid: healthy objects, deleted/corrupted shares, --verify,
    # --repair, and the handling of an unrecoverable directory.

    def test_check(self):
        # Check a single mutable file: healthy at first, then with one
        # share deleted and a second corrupted, then verify and repair.
        self.basedir = "cli/Check/check"
        self.set_up_grid()
        c0 = self.g.clients[0]
        DATA = "data" * 100
        d = c0.create_mutable_file(DATA)
        def _stash_uri(n):
            self.uri = n.get_uri()
        d.addCallback(_stash_uri)

        # a plain check on a freshly-created file should report Healthy
        d.addCallback(lambda ign: self.do_cli("check", self.uri))
        def _check1((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            lines = out.splitlines()
            self.failUnless("Summary: Healthy" in lines, out)
            self.failUnless(" good-shares: 10 (encoding is 3-of-10)" in lines, out)
        d.addCallback(_check1)

        # --raw emits the check results as JSON
        d.addCallback(lambda ign: self.do_cli("check", "--raw", self.uri))
        def _check2((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            data = simplejson.loads(out)
            self.failUnlessEqual(data["summary"], "Healthy")
        d.addCallback(_check2)

        def _clobber_shares(ignored):
            # delete one, corrupt a second
            shares = self.find_shares(self.uri)
            self.failUnlessEqual(len(shares), 10)
            os.unlink(shares[0][2])
            cso = debug.CorruptShareOptions()
            cso.stdout = StringIO()
            cso.parseOptions([shares[1][2]])
            storage_index = uri.from_string(self.uri).get_storage_index()
            # remember the report line that "check" should print for the
            # corrupted share, so later callbacks can assert on it
            self._corrupt_share_line = " server %s, SI %s, shnum %d" % \
                                       (base32.b2a(shares[1][1]),
                                        base32.b2a(storage_index),
                                        shares[1][0])
            debug.corrupt_share(cso)
        d.addCallback(_clobber_shares)

        # --verify downloads and validates every share, so it should
        # notice both the missing share and the corrupted one
        d.addCallback(lambda ign: self.do_cli("check", "--verify", self.uri))
        def _check3((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            lines = out.splitlines()
            summary = [l for l in lines if l.startswith("Summary")][0]
            self.failUnless("Summary: Unhealthy: 8 shares (enc 3-of-10)"
                            in summary, summary)
            self.failUnless(" good-shares: 8 (encoding is 3-of-10)" in lines, out)
            self.failUnless(" corrupt shares:" in lines, out)
            self.failUnless(self._corrupt_share_line in lines, out)
        d.addCallback(_check3)

        # --repair should report the damage and then fix it
        d.addCallback(lambda ign:
                      self.do_cli("check", "--verify", "--repair", self.uri))
        def _check4((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            lines = out.splitlines()
            self.failUnless("Summary: not healthy" in lines, out)
            self.failUnless(" good-shares: 8 (encoding is 3-of-10)" in lines, out)
            self.failUnless(" corrupt shares:" in lines, out)
            self.failUnless(self._corrupt_share_line in lines, out)
            self.failUnless(" repair successful" in lines, out)
        d.addCallback(_check4)

        # a second verify+repair pass should now find everything healthy
        d.addCallback(lambda ign:
                      self.do_cli("check", "--verify", "--repair", self.uri))
        def _check5((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            lines = out.splitlines()
            self.failUnless("Summary: healthy" in lines, out)
            self.failUnless(" good-shares: 10 (encoding is 3-of-10)" in lines, out)
            self.failIf(" corrupt shares:" in lines, out)
        d.addCallback(_check5)

        return d

    def test_deep_check(self):
        # Build a small tree (root dir, an immutable file, a LIT file, a
        # mutable file), then exercise "tahoe deep-check" and "tahoe stats"
        # on it: damage shares, verify, repair, and finally add an
        # unrecoverable subdirectory to test error propagation.
        self.basedir = "cli/Check/deep_check"
        self.set_up_grid()
        c0 = self.g.clients[0]
        self.uris = {}
        self.fileurls = {}
        DATA = "data" * 100
        d = c0.create_dirnode()
        def _stash_root_and_create_file(n):
            self.rootnode = n
            self.rooturi = n.get_uri()
            return n.add_file(u"good", upload.Data(DATA, convergence=""))
        d.addCallback(_stash_root_and_create_file)
        def _stash_uri(fn, which):
            # record the node's URI under a short name, pass the node through
            self.uris[which] = fn.get_uri()
            return fn
        d.addCallback(_stash_uri, "good")
        d.addCallback(lambda ign:
                      self.rootnode.add_file(u"small",
                                             upload.Data("literal",
                                                         convergence="")))
        d.addCallback(_stash_uri, "small")
        d.addCallback(lambda ign: c0.create_mutable_file(DATA+"1"))
        d.addCallback(lambda fn: self.rootnode.set_node(u"mutable", fn))
        d.addCallback(_stash_uri, "mutable")

        # everything is freshly uploaded, so deep-check reports all healthy
        d.addCallback(lambda ign: self.do_cli("deep-check", self.rooturi))
        def _check1((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            lines = out.splitlines()
            self.failUnless("done: 4 objects checked, 4 healthy, 0 unhealthy"
                            in lines, out)
        d.addCallback(_check1)

        # root
        # root/good
        # root/small
        # root/mutable

        # --verbose prints one line per object
        d.addCallback(lambda ign: self.do_cli("deep-check", "--verbose",
                                              self.rooturi))
        def _check2((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            lines = out.splitlines()
            self.failUnless("<root>: Healthy" in lines, out)
            self.failUnless("small: Healthy (LIT)" in lines, out)
            self.failUnless("good: Healthy" in lines, out)
            self.failUnless("mutable: Healthy" in lines, out)
            self.failUnless("done: 4 objects checked, 4 healthy, 0 unhealthy"
                            in lines, out)
        d.addCallback(_check2)

        # "tahoe stats" should count the four objects and show a histogram
        d.addCallback(lambda ign: self.do_cli("stats", self.rooturi))
        def _check_stats((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            lines = out.splitlines()
            self.failUnlessIn(" count-immutable-files: 1", lines)
            self.failUnlessIn(" count-mutable-files: 1", lines)
            self.failUnlessIn(" count-literal-files: 1", lines)
            self.failUnlessIn(" count-directories: 1", lines)
            self.failUnlessIn(" size-immutable-files: 400", lines)
            self.failUnlessIn("Size Histogram:", lines)
            self.failUnlessIn(" 4-10 : 1 (10 B, 10 B)", lines)
            self.failUnlessIn(" 317-1000 : 1 (1000 B, 1000 B)", lines)
        d.addCallback(_check_stats)

        def _clobber_shares(ignored):
            # make "good" unhealthy by deleting one share, and corrupt one
            # share of "mutable"
            shares = self.find_shares(self.uris["good"])
            self.failUnlessEqual(len(shares), 10)
            os.unlink(shares[0][2])

            shares = self.find_shares(self.uris["mutable"])
            cso = debug.CorruptShareOptions()
            cso.stdout = StringIO()
            cso.parseOptions([shares[1][2]])
            storage_index = uri.from_string(self.uris["mutable"]).get_storage_index()
            # the report line a verifying check should emit for the
            # corrupted mutable share
            self._corrupt_share_line = " corrupt: server %s, SI %s, shnum %d" % \
                                       (base32.b2a(shares[1][1]),
                                        base32.b2a(storage_index),
                                        shares[1][0])
            debug.corrupt_share(cso)
        d.addCallback(_clobber_shares)

        # root
        # root/good [9 shares]
        # root/small
        # root/mutable [1 corrupt share]

        # without --verify, the corrupt mutable share goes unnoticed;
        # only the missing share of "good" is detected
        d.addCallback(lambda ign:
                      self.do_cli("deep-check", "--verbose", self.rooturi))
        def _check3((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            lines = out.splitlines()
            self.failUnless("<root>: Healthy" in lines, out)
            self.failUnless("small: Healthy (LIT)" in lines, out)
            self.failUnless("mutable: Healthy" in lines, out) # needs verifier
            self.failUnless("good: Not Healthy: 9 shares (enc 3-of-10)"
                            in lines, out)
            self.failIf(self._corrupt_share_line in lines, out)
            self.failUnless("done: 4 objects checked, 3 healthy, 1 unhealthy"
                            in lines, out)
        d.addCallback(_check3)

        # with --verify, both damaged objects should be reported
        d.addCallback(lambda ign:
                      self.do_cli("deep-check", "--verbose", "--verify",
                                  self.rooturi))
        def _check4((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            lines = out.splitlines()
            self.failUnless("<root>: Healthy" in lines, out)
            self.failUnless("small: Healthy (LIT)" in lines, out)
            mutable = [l for l in lines if l.startswith("mutable")][0]
            self.failUnless(mutable.startswith("mutable: Unhealthy: 9 shares (enc 3-of-10)"),
                            mutable)
            self.failUnless(self._corrupt_share_line in lines, out)
            self.failUnless("good: Not Healthy: 9 shares (enc 3-of-10)"
                            in lines, out)
            self.failUnless("done: 4 objects checked, 2 healthy, 2 unhealthy"
                            in lines, out)
        d.addCallback(_check4)

        # --raw emits one JSON unit per line
        d.addCallback(lambda ign:
                      self.do_cli("deep-check", "--raw",
                                  self.rooturi))
        def _check5((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            lines = out.splitlines()
            units = [simplejson.loads(line) for line in lines]
            # root, small, good, mutable, stats
            self.failUnlessEqual(len(units), 4+1)
        d.addCallback(_check5)

        # verify+repair should fix both unhealthy objects
        d.addCallback(lambda ign:
                      self.do_cli("deep-check",
                                  "--verbose", "--verify", "--repair",
                                  self.rooturi))
        def _check6((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            lines = out.splitlines()
            self.failUnless("<root>: healthy" in lines, out)
            self.failUnless("small: healthy" in lines, out)
            self.failUnless("mutable: not healthy" in lines, out)
            self.failUnless(self._corrupt_share_line in lines, out)
            self.failUnless("good: not healthy" in lines, out)
            self.failUnless("done: 4 objects checked" in lines, out)
            self.failUnless(" pre-repair: 2 healthy, 2 unhealthy" in lines, out)
            self.failUnless(" 2 repairs attempted, 2 successful, 0 failed"
                            in lines, out)
            self.failUnless(" post-repair: 4 healthy, 0 unhealthy" in lines,out)
        d.addCallback(_check6)

        # now add a subdir, and a file below that, then make the subdir
        # unrecoverable

        d.addCallback(lambda ign: self.rootnode.create_subdirectory(u"subdir"))
        d.addCallback(_stash_uri, "subdir")
        d.addCallback(lambda fn:
                      fn.add_file(u"subfile", upload.Data(DATA+"2", "")))
        d.addCallback(lambda ign:
                      self.delete_shares_numbered(self.uris["subdir"],
                                                  range(10)))

        # root
        # root/good
        # root/small
        # root/mutable
        # root/subdir [unrecoverable: 0 shares]
        # root/subfile

        # "manifest" should fail once it hits the dead directory
        d.addCallback(lambda ign: self.do_cli("manifest", self.rooturi))
        def _manifest_failed((rc, out, err)):
            self.failIfEqual(rc, 0)
            self.failUnlessIn("ERROR: UnrecoverableFileError", err)
            # the fatal directory should still show up, as the last line
            self.failUnlessIn(" subdir\n", out)
        d.addCallback(_manifest_failed)

        d.addCallback(lambda ign: self.do_cli("deep-check", self.rooturi))
        def _deep_check_failed((rc, out, err)):
            self.failIfEqual(rc, 0)
            self.failUnlessIn("ERROR: UnrecoverableFileError", err)
            # we want to make sure that the error indication is the last
            # thing that gets emitted
            self.failIf("done:" in out, out)
        d.addCallback(_deep_check_failed)

        # this test is disabled until the deep-repair response to an
        # unrepairable directory is fixed. The failure-to-repair should not
        # throw an exception, but the failure-to-traverse that follows
        # should throw UnrecoverableFileError.

        #d.addCallback(lambda ign:
        #              self.do_cli("deep-check", "--repair", self.rooturi))
        #def _deep_check_repair_failed((rc, out, err)):
        #    self.failIfEqual(rc, 0)
        #    print err
        #    self.failUnlessIn("ERROR: UnrecoverableFileError", err)
        #    self.failIf("done:" in out, out)
        #d.addCallback(_deep_check_repair_failed)

        return d
---|
1651 | |
---|
1652 | class Errors(GridTestMixin, CLITestMixin, unittest.TestCase): |
---|
1653 | def test_get(self): |
---|
1654 | self.basedir = "cli/Errors/get" |
---|
1655 | self.set_up_grid() |
---|
1656 | c0 = self.g.clients[0] |
---|
1657 | self.fileurls = {} |
---|
1658 | DATA = "data" * 100 |
---|
1659 | d = c0.upload(upload.Data(DATA, convergence="")) |
---|
1660 | def _stash_bad(ur): |
---|
1661 | self.uri_1share = ur.uri |
---|
1662 | self.delete_shares_numbered(ur.uri, range(1,10)) |
---|
1663 | d.addCallback(_stash_bad) |
---|
1664 | |
---|
1665 | d.addCallback(lambda ign: self.do_cli("get", self.uri_1share)) |
---|
1666 | def _check1((rc, out, err)): |
---|
1667 | self.failIfEqual(rc, 0) |
---|
1668 | self.failUnless("410 Gone" in err, err) |
---|
1669 | self.failUnlessIn("NotEnoughSharesError: ", err) |
---|
1670 | self.failUnlessIn("Failed to get enough shareholders: have 1, need 3", err) |
---|
1671 | d.addCallback(_check1) |
---|
1672 | |
---|
1673 | targetf = os.path.join(self.basedir, "output") |
---|
1674 | d.addCallback(lambda ign: self.do_cli("get", self.uri_1share, targetf)) |
---|
1675 | def _check2((rc, out, err)): |
---|
1676 | self.failIfEqual(rc, 0) |
---|
1677 | self.failUnless("410 Gone" in err, err) |
---|
1678 | self.failUnlessIn("NotEnoughSharesError: ", err) |
---|
1679 | self.failUnlessIn("Failed to get enough shareholders: have 1, need 3", err) |
---|
1680 | self.failIf(os.path.exists(targetf)) |
---|
1681 | d.addCallback(_check2) |
---|
1682 | |
---|
1683 | return d |
---|
1684 | |
---|
1685 | class Stats(GridTestMixin, CLITestMixin, unittest.TestCase): |
---|
1686 | def test_empty_directory(self): |
---|
1687 | self.basedir = "cli/Stats/empty_directory" |
---|
1688 | self.set_up_grid() |
---|
1689 | c0 = self.g.clients[0] |
---|
1690 | self.fileurls = {} |
---|
1691 | d = c0.create_dirnode() |
---|
1692 | def _stash_root(n): |
---|
1693 | self.rootnode = n |
---|
1694 | self.rooturi = n.get_uri() |
---|
1695 | d.addCallback(_stash_root) |
---|
1696 | |
---|
1697 | # make sure we can get stats on an empty directory too |
---|
1698 | d.addCallback(lambda ign: self.do_cli("stats", self.rooturi)) |
---|
1699 | def _check_stats((rc, out, err)): |
---|
1700 | self.failUnlessEqual(err, "") |
---|
1701 | self.failUnlessEqual(rc, 0) |
---|
1702 | lines = out.splitlines() |
---|
1703 | self.failUnlessIn(" count-immutable-files: 0", lines) |
---|
1704 | self.failUnlessIn(" count-mutable-files: 0", lines) |
---|
1705 | self.failUnlessIn(" count-literal-files: 0", lines) |
---|
1706 | self.failUnlessIn(" count-directories: 1", lines) |
---|
1707 | self.failUnlessIn(" size-immutable-files: 0", lines) |
---|
1708 | self.failIfIn("Size Histogram:", lines) |
---|
1709 | d.addCallback(_check_stats) |
---|
1710 | |
---|
1711 | return d |
---|