1 | from __future__ import annotations |
---|
2 | |
---|
3 | import os |
---|
4 | from unittest import skipIf |
---|
5 | from functools import ( |
---|
6 | partial, |
---|
7 | ) |
---|
8 | |
---|
9 | import twisted |
---|
10 | from yaml import ( |
---|
11 | safe_dump, |
---|
12 | ) |
---|
13 | from fixtures import ( |
---|
14 | Fixture, |
---|
15 | TempDir, |
---|
16 | ) |
---|
17 | |
---|
18 | from hypothesis import ( |
---|
19 | given, |
---|
20 | ) |
---|
21 | from hypothesis.strategies import ( |
---|
22 | sampled_from, |
---|
23 | booleans, |
---|
24 | ) |
---|
25 | |
---|
26 | from eliot.testing import ( |
---|
27 | assertHasAction, |
---|
28 | ) |
---|
29 | from twisted.trial import unittest |
---|
30 | from twisted.application import service |
---|
31 | from twisted.internet import defer |
---|
32 | from twisted.python.filepath import ( |
---|
33 | FilePath, |
---|
34 | ) |
---|
35 | from twisted.python.runtime import platform |
---|
36 | from testtools.matchers import ( |
---|
37 | Equals, |
---|
38 | AfterPreprocessing, |
---|
39 | MatchesListwise, |
---|
40 | MatchesDict, |
---|
41 | ContainsDict, |
---|
42 | Always, |
---|
43 | Is, |
---|
44 | raises, |
---|
45 | ) |
---|
46 | from testtools.twistedsupport import ( |
---|
47 | succeeded, |
---|
48 | failed, |
---|
49 | ) |
---|
50 | |
---|
51 | import allmydata |
---|
52 | import allmydata.util.log |
---|
53 | |
---|
54 | from allmydata.nodemaker import ( |
---|
55 | NodeMaker, |
---|
56 | ) |
---|
57 | from allmydata.node import OldConfigError, UnescapedHashError, create_node_dir |
---|
58 | from allmydata import client |
---|
59 | from allmydata.storage_client import ( |
---|
60 | StorageClientConfig, |
---|
61 | StorageFarmBroker, |
---|
62 | ) |
---|
63 | from allmydata.util import ( |
---|
64 | base32, |
---|
65 | fileutil, |
---|
66 | encodingutil, |
---|
67 | configutil, |
---|
68 | jsonbytes as json, |
---|
69 | ) |
---|
70 | from allmydata.util.eliotutil import capture_logging |
---|
71 | from allmydata.util.fileutil import abspath_expanduser_unicode |
---|
72 | from allmydata.interfaces import IFilesystemNode, IFileNode, \ |
---|
73 | IImmutableFileNode, IMutableFileNode, IDirectoryNode |
---|
74 | from allmydata.scripts.common import ( |
---|
75 | write_introducer, |
---|
76 | ) |
---|
77 | from foolscap.api import flushEventualQueue |
---|
78 | import allmydata.test.common_util as testutil |
---|
79 | from .common import ( |
---|
80 | superuser, |
---|
81 | EMPTY_CLIENT_CONFIG, |
---|
82 | SyncTestCase, |
---|
83 | AsyncBrokenTestCase, |
---|
84 | UseTestPlugins, |
---|
85 | MemoryIntroducerClient, |
---|
86 | get_published_announcements, |
---|
87 | UseNode, |
---|
88 | ) |
---|
89 | from .matchers import ( |
---|
90 | MatchesSameElements, |
---|
91 | matches_storage_announcement, |
---|
92 | matches_furl, |
---|
93 | ) |
---|
94 | from .strategies import ( |
---|
95 | write_capabilities, |
---|
96 | ) |
---|
97 | |
---|
# A syntactically plausible fURL used wherever these tests need a placeholder
# storage-server location; it never refers to a real server.
SOME_FURL = "pb://abcde@nowhere/fake"

# Minimal tahoe.cfg content: just an empty [client] section.
BASECONFIG = "[client]\n"
---|
101 | |
---|
class Basic(testutil.ReallyEqualMixin, unittest.TestCase):
    """
    Tests for basic creation and configuration handling of the Tahoe client
    node: config parsing, secrets, storage options, server permutation, and
    helper configuration.
    """
    def test_loadable(self):
        """
        A client can be created from a basedir containing a minimal tahoe.cfg.
        """
        basedir = "test_client.Basic.test_loadable"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG)
        return client.create_client(basedir)

    @defer.inlineCallbacks
    def test_unreadable_introducers(self):
        """
        The Deferred from create_client fails when
        private/introducers.yaml is unreadable (but exists)
        """
        basedir = "test_client.Basic.test_unreadable_introduers"
        os.mkdir(basedir, 0o700)
        os.mkdir(os.path.join(basedir, 'private'), 0o700)
        intro_fname = os.path.join(basedir, 'private', 'introducers.yaml')
        with open(intro_fname, 'w') as f:
            f.write("---\n")
        # remove all permissions so reading the file raises EnvironmentError
        os.chmod(intro_fname, 0o000)
        # restore permissions afterwards so trial can clean up the directory
        self.addCleanup(lambda: os.chmod(intro_fname, 0o700))

        with self.assertRaises(EnvironmentError):
            yield client.create_client(basedir)

    @defer.inlineCallbacks
    def test_comment(self):
        """
        A comment character (#) in a furl results in an
        UnescapedHashError Failure.
        """
        # every variant of an unescaped (or wrongly-escaped) '#' must fail
        should_fail = [r"test#test", r"#testtest", r"test\\#test", r"test\#test",
                       r"test\\\#test"]

        basedir = "test_client.Basic.test_comment"
        os.mkdir(basedir)

        def write_config(s):
            config = ("[client]\n"
                      "helper.furl = %s\n" % s)
            fileutil.write(os.path.join(basedir, "tahoe.cfg"), config)

        for s in should_fail:
            write_config(s)
            with self.assertRaises(UnescapedHashError) as ctx:
                yield client.create_client(basedir)
            # the error message should name the offending config item
            self.assertIn("[client]helper.furl", str(ctx.exception))

    # if somebody knows a clever way to do this (cause
    # EnvironmentError when reading a file that really exists), on
    # windows, please fix this
    @skipIf(platform.isWindows(), "We don't know how to set permissions on Windows.")
    @skipIf(superuser, "cannot test as superuser with all permissions")
    def test_unreadable_config(self):
        """
        An unreadable tahoe.cfg causes read_config to raise EnvironmentError.
        """
        basedir = "test_client.Basic.test_unreadable_config"
        os.mkdir(basedir)
        fn = os.path.join(basedir, "tahoe.cfg")
        fileutil.write(fn, BASECONFIG)
        old_mode = os.stat(fn).st_mode
        os.chmod(fn, 0)
        try:
            e = self.assertRaises(
                EnvironmentError,
                client.read_config,
                basedir,
                "client.port",
            )
            self.assertIn("Permission denied", str(e))
        finally:
            # don't leave undeleteable junk lying around
            os.chmod(fn, old_mode)

    def test_error_on_old_config_files(self):
        """
        Pre-Tahoe-LAFS-v1.3 config files in the basedir cause OldConfigError
        and are each mentioned by name, both in the exception and in a logged
        message.
        """
        basedir = "test_client.Basic.test_error_on_old_config_files"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG +
                       "[storage]\n" +
                       "enabled = false\n" +
                       "reserved_space = bogus\n")
        fileutil.write(os.path.join(basedir, "introducer.furl"), "")
        fileutil.write(os.path.join(basedir, "no_storage"), "")
        fileutil.write(os.path.join(basedir, "readonly_storage"), "")
        fileutil.write(os.path.join(basedir, "debug_discard_storage"), "")

        # capture twisted log messages so we can assert on what was logged
        logged_messages = []
        self.patch(twisted.python.log, 'msg', logged_messages.append)

        e = self.failUnlessRaises(
            OldConfigError,
            client.read_config,
            basedir,
            "client.port",
        )
        abs_basedir = fileutil.abspath_expanduser_unicode(str(basedir))
        self.failUnlessIn(os.path.join(abs_basedir, "introducer.furl"), e.args[0])
        self.failUnlessIn(os.path.join(abs_basedir, "no_storage"), e.args[0])
        self.failUnlessIn(os.path.join(abs_basedir, "readonly_storage"), e.args[0])
        self.failUnlessIn(os.path.join(abs_basedir, "debug_discard_storage"), e.args[0])

        # the old files we actually created must each have been logged about
        for oldfile in ['introducer.furl', 'no_storage', 'readonly_storage',
                        'debug_discard_storage']:
            logged = [ m for m in logged_messages if
                       ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m) and oldfile in str(m)) ]
            self.failUnless(logged, (oldfile, logged_messages))

        # old files we did NOT create must not appear in the log
        for oldfile in [
            'nickname', 'webport', 'keepalive_timeout', 'log_gatherer.furl',
            'disconnect_timeout', 'advertised_ip_addresses', 'helper.furl',
            'key_generator.furl', 'stats_gatherer.furl', 'sizelimit',
            'run_helper']:
            logged = [ m for m in logged_messages if
                       ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m) and oldfile in str(m)) ]
            self.failIf(logged, (oldfile, logged_messages))

    @defer.inlineCallbacks
    def test_secrets(self):
        """
        A new client has renewal + cancel secrets
        """
        basedir = "test_client.Basic.test_secrets"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG)
        c = yield client.create_client(basedir)
        secret_fname = os.path.join(basedir, "private", "secret")
        self.failUnless(os.path.exists(secret_fname), secret_fname)
        renew_secret = c.get_renewal_secret()
        self.failUnless(base32.b2a(renew_secret))
        cancel_secret = c.get_cancel_secret()
        self.failUnless(base32.b2a(cancel_secret))

    @defer.inlineCallbacks
    def test_nodekey_yes_storage(self):
        """
        We have a nodeid if we're providing storage
        """
        basedir = "test_client.Basic.test_nodekey_yes_storage"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG)
        c = yield client.create_client(basedir)
        self.failUnless(c.get_long_nodeid().startswith(b"v0-"))

    @defer.inlineCallbacks
    def test_nodekey_no_storage(self):
        """
        We have a nodeid if we're not providing storage
        """
        basedir = "test_client.Basic.test_nodekey_no_storage"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG + "[storage]\n" + "enabled = false\n")
        c = yield client.create_client(basedir)
        self.failUnless(c.get_long_nodeid().startswith(b"v0-"))

    def test_storage_anonymous_enabled_by_default(self):
        """
        Anonymous storage access is enabled if storage is enabled and *anonymous*
        is not set.
        """
        config = client.config_from_string(
            "test_storage_default_anonymous_enabled",
            "tub.port",
            BASECONFIG + (
                "[storage]\n"
                "enabled = true\n"
            )
        )
        self.assertTrue(client.anonymous_storage_enabled(config))

    def test_storage_anonymous_enabled_explicitly(self):
        """
        Anonymous storage access is enabled if storage is enabled and *anonymous*
        is set to true.
        """
        config = client.config_from_string(
            self.id(),
            "tub.port",
            BASECONFIG + (
                "[storage]\n"
                "enabled = true\n"
                "anonymous = true\n"
            )
        )
        self.assertTrue(client.anonymous_storage_enabled(config))

    def test_storage_anonymous_disabled_explicitly(self):
        """
        Anonymous storage access is disabled if storage is enabled and *anonymous*
        is set to false.
        """
        config = client.config_from_string(
            self.id(),
            "tub.port",
            BASECONFIG + (
                "[storage]\n"
                "enabled = true\n"
                "anonymous = false\n"
            )
        )
        self.assertFalse(client.anonymous_storage_enabled(config))

    def test_storage_anonymous_disabled_by_storage(self):
        """
        Anonymous storage access is disabled if storage is disabled and *anonymous*
        is set to true.
        """
        config = client.config_from_string(
            self.id(),
            "tub.port",
            BASECONFIG + (
                "[storage]\n"
                "enabled = false\n"
                "anonymous = true\n"
            )
        )
        self.assertFalse(client.anonymous_storage_enabled(config))

    @defer.inlineCallbacks
    def test_reserved_1(self):
        """
        reserved_space option is propagated
        """
        basedir = "client.Basic.test_reserved_1"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG + \
                       "[storage]\n" + \
                       "enabled = true\n" + \
                       "reserved_space = 1000\n")
        c = yield client.create_client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 1000)

    @defer.inlineCallbacks
    def test_reserved_2(self):
        """
        reserved_space option understands 'K' to mean kilobytes
        """
        basedir = "client.Basic.test_reserved_2"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG + \
                       "[storage]\n" + \
                       "enabled = true\n" + \
                       "reserved_space = 10K\n")
        c = yield client.create_client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 10*1000)

    @defer.inlineCallbacks
    def test_reserved_3(self):
        """
        reserved_space option understands 'mB' to mean megabytes
        """
        basedir = "client.Basic.test_reserved_3"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG + \
                       "[storage]\n" + \
                       "enabled = true\n" + \
                       "reserved_space = 5mB\n")
        c = yield client.create_client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
                             5*1000*1000)

    @defer.inlineCallbacks
    def test_reserved_4(self):
        """
        reserved_space option understands 'Gb' to mean gigabytes
        """
        basedir = "client.Basic.test_reserved_4"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG + \
                       "[storage]\n" + \
                       "enabled = true\n" + \
                       "reserved_space = 78Gb\n")
        c = yield client.create_client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
                             78*1000*1000*1000)

    @defer.inlineCallbacks
    def test_reserved_bad(self):
        """
        reserved_space option produces errors on non-numbers
        """
        basedir = "client.Basic.test_reserved_bad"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG + \
                       "[storage]\n" + \
                       "enabled = true\n" + \
                       "reserved_space = bogus\n")
        with self.assertRaises(ValueError):
            yield client.create_client(basedir)

    @defer.inlineCallbacks
    def test_web_apiauthtoken(self):
        """
        Client loads the proper API auth token from disk
        """
        basedir = u"client.Basic.test_web_apiauthtoken"
        create_node_dir(basedir, "testing")

        c = yield client.create_client(basedir)
        # this must come after we create the client, as it will create
        # a new, random authtoken itself
        with open(os.path.join(basedir, "private", "api_auth_token"), "w") as f:
            f.write("deadbeef")

        token = c.get_auth_token()
        self.assertEqual(b"deadbeef", token)

    @defer.inlineCallbacks
    def test_web_staticdir(self):
        """
        a relative web.static dir is expanded properly
        """
        basedir = u"client.Basic.test_web_staticdir"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG +
                       "[node]\n" +
                       "web.port = tcp:0:interface=127.0.0.1\n" +
                       "web.static = relative\n")
        c = yield client.create_client(basedir)
        w = c.getServiceNamed("webish")
        abs_basedir = fileutil.abspath_expanduser_unicode(basedir)
        expected = fileutil.abspath_expanduser_unicode(u"relative", abs_basedir)
        self.failUnlessReallyEqual(w.staticdir, expected)

    # TODO: also test config options for SFTP. See Git history for deleted FTP
    # tests that could be used as basis for these tests.

    @defer.inlineCallbacks
    def _storage_dir_test(self, basedir, storage_path, expected_path):
        """
        generic helper for following storage_dir tests
        """
        assert isinstance(basedir, str)
        assert isinstance(storage_path, (str, type(None)))
        assert isinstance(expected_path, str)
        os.mkdir(basedir)
        cfg_path = os.path.join(basedir, "tahoe.cfg")
        fileutil.write(
            cfg_path,
            BASECONFIG +
            "[storage]\n"
            "enabled = true\n",
        )
        if storage_path is not None:
            # append the storage_dir option to the already-written [storage]
            # section
            fileutil.write(
                cfg_path,
                "storage_dir = %s\n" % (storage_path,),
                mode="ab",
            )
        c = yield client.create_client(basedir)
        self.assertEqual(
            c.getServiceNamed("storage").storedir,
            expected_path,
        )

    def test_default_storage_dir(self):
        """
        If no value is given for ``storage_dir`` in the ``storage`` section of
        ``tahoe.cfg`` then the ``storage`` directory beneath the node
        directory is used.
        """
        basedir = u"client.Basic.test_default_storage_dir"
        config_path = None
        expected_path = os.path.join(
            abspath_expanduser_unicode(basedir),
            u"storage",
        )
        return self._storage_dir_test(
            basedir,
            config_path,
            expected_path,
        )

    def test_relative_storage_dir(self):
        """
        A storage node can be directed to use a particular directory for share
        file storage by setting ``storage_dir`` in the ``storage`` section of
        ``tahoe.cfg``.  If the path is relative, it is interpreted relative to
        the node's basedir.
        """
        basedir = u"client.Basic.test_relative_storage_dir"
        config_path = u"myowndir"
        expected_path = os.path.join(
            abspath_expanduser_unicode(basedir),
            u"myowndir",
        )
        return self._storage_dir_test(
            basedir,
            config_path,
            expected_path,
        )

    def test_absolute_storage_dir(self):
        """
        If the ``storage_dir`` item in the ``storage`` section of the
        configuration gives an absolute path then exactly that path is used.
        """
        basedir = u"client.Basic.test_absolute_storage_dir"
        # create_client is going to try to make the storage directory so we
        # don't want a literal absolute path like /myowndir which we won't
        # have write permission to.  So construct an absolute path that we
        # should be able to write to.
        base = u"\N{SNOWMAN}"
        if encodingutil.filesystem_encoding != "utf-8":
            base = u"melted_snowman"
        expected_path = abspath_expanduser_unicode(
            u"client.Basic.test_absolute_storage_dir_myowndir/" + base
        )
        config_path = expected_path
        return self._storage_dir_test(
            basedir,
            config_path,
            expected_path,
        )

    def _permute(self, sb, key):
        # helper: the permuted-server order the broker computes for this key
        return [ s.get_longname() for s in sb.get_servers_for_psi(key) ]

    def test_permute(self):
        """
        Permutations need to be stable across Tahoe releases, which is why we
        hardcode a specific expected order.

        This is because the order of these results determines which servers a
        client will choose to place shares on and which servers it will consult
        (and in what order) when trying to retrieve those shares.  If the order
        ever changes, all already-placed shares become (at best) harder to find
        or (at worst) impossible to find.
        """
        sb = StorageFarmBroker(True, None, EMPTY_CLIENT_CONFIG)
        ks = [b"%d" % i for i in range(5)]
        for k in ks:
            ann = {"anonymous-storage-FURL": SOME_FURL,
                   "permutation-seed-base32": base32.b2a(k) }
            sb.test_add_rref(k, "rref", ann)

        one = self._permute(sb, b"one")
        two = self._permute(sb, b"two")
        self.failUnlessReallyEqual(one, [b'3',b'1',b'0',b'4',b'2'])
        self.failUnlessReallyEqual(two, [b'0',b'4',b'2',b'1',b'3'])
        self.assertEqual(sorted(one), ks)
        self.assertEqual(sorted(two), ks)
        self.assertNotEqual(one, two)
        sb.servers.clear()
        self.failUnlessReallyEqual(self._permute(sb, b"one"), [])

    def test_permute_with_preferred(self):
        """
        Permutations need to be stable across Tahoe releases, which is why we
        hardcode a specific expected order.  In this case, two values are
        preferred and should come first.
        """
        sb = StorageFarmBroker(
            True,
            None,
            EMPTY_CLIENT_CONFIG,
            StorageClientConfig(preferred_peers=[b'1',b'4']),
        )
        ks = [b"%d" % i for i in range(5)]
        for k in [b"%d" % i for i in range(5)]:
            ann = {"anonymous-storage-FURL": SOME_FURL,
                   "permutation-seed-base32": base32.b2a(k) }
            sb.test_add_rref(k, "rref", ann)

        one = self._permute(sb, b"one")
        two = self._permute(sb, b"two")
        # preferred peers b'1' and b'4' must sort to the front
        self.failUnlessReallyEqual(b"".join(one), b'14302')
        self.failUnlessReallyEqual(b"".join(two), b'41023')
        self.assertEqual(sorted(one), ks)
        self.assertEqual(sorted(one[:2]), [b"1", b"4"])
        self.assertEqual(sorted(two), ks)
        self.assertEqual(sorted(two[:2]), [b"1", b"4"])
        self.assertNotEqual(one, two)
        sb.servers.clear()
        self.failUnlessReallyEqual(self._permute(sb, b"one"), [])

    @defer.inlineCallbacks
    def test_versions(self):
        """
        A client knows the versions of software it has
        """
        basedir = "test_client.Basic.test_versions"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                       BASECONFIG + \
                       "[storage]\n" + \
                       "enabled = true\n")
        c = yield client.create_client(basedir)
        ss = c.getServiceNamed("storage")
        verdict = ss.get_version()
        self.failUnlessReallyEqual(verdict[b"application-version"],
                                   allmydata.__full_version__.encode("ascii"))
        self.failIfEqual(str(allmydata.__version__), "unknown")
        self.failUnless("." in str(allmydata.__full_version__),
                        "non-numeric version in '%s'" % allmydata.__version__)
        # also test stats
        stats = c.get_stats()
        self.failUnless("node.uptime" in stats)
        self.failUnless(isinstance(stats["node.uptime"], float))

    @defer.inlineCallbacks
    def test_helper_furl(self):
        """
        various helper.furl arguments are parsed correctly
        """
        basedir = "test_client.Basic.test_helper_furl"
        os.mkdir(basedir)

        @defer.inlineCallbacks
        def _check(config, expected_furl):
            # rewrite tahoe.cfg with the given helper config, re-create the
            # client, and check what the uploader reports
            fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                           BASECONFIG + config)
            c = yield client.create_client(basedir)
            uploader = c.getServiceNamed("uploader")
            furl, connected = uploader.get_helper_info()
            self.failUnlessEqual(furl, expected_furl)

        yield _check("", None)
        yield _check("helper.furl =\n", None)
        yield _check("helper.furl = \n", None)
        yield _check("helper.furl = None", None)
        yield _check("helper.furl = pb://blah\n", "pb://blah")
---|
632 | |
---|
633 | |
---|
def flush_but_dont_ignore(res):
    """
    Flush foolscap's eventual-send queue, then pass ``res`` through unchanged.

    :return: a Deferred that fires with ``res`` once the queue has flushed.
    """
    flushed = flushEventualQueue()
    flushed.addCallback(lambda _ignored: res)
    return flushed
---|
640 | |
---|
641 | |
---|
class AnonymousStorage(SyncTestCase):
    """
    Tests for behaviors of the client object with respect to the anonymous
    storage service.
    """
    @defer.inlineCallbacks
    def test_anonymous_storage_enabled(self):
        """
        If anonymous storage access is enabled then the client announces it.
        """
        basedir = FilePath(self.id())
        basedir.child("private").makedirs()
        write_introducer(basedir, "someintroducer", SOME_FURL)
        config = client.config_from_string(
            basedir.path,
            "tub.port",
            BASECONFIG + (
                "[storage]\n"
                "enabled = true\n"
                "anonymous = true\n"
            )
        )
        # use the in-memory introducer client so we can inspect what would
        # have been announced without any networking
        node = yield client.create_client_from_config(
            config,
            _introducer_factory=MemoryIntroducerClient,
        )
        self.assertThat(
            get_published_announcements(node),
            MatchesListwise([
                matches_storage_announcement(
                    basedir.path,
                    anonymous=True,
                ),
            ]),
        )

    @defer.inlineCallbacks
    def test_anonymous_storage_disabled(self):
        """
        If anonymous storage access is disabled then the client does not announce
        it nor does it write a fURL for it to beneath the node directory.
        """
        basedir = FilePath(self.id())
        basedir.child("private").makedirs()
        write_introducer(basedir, "someintroducer", SOME_FURL)
        config = client.config_from_string(
            basedir.path,
            "tub.port",
            BASECONFIG + (
                "[storage]\n"
                "enabled = true\n"
                "anonymous = false\n"
            )
        )
        node = yield client.create_client_from_config(
            config,
            _introducer_factory=MemoryIntroducerClient,
        )
        # announcement exists but must not be anonymous ...
        self.expectThat(
            get_published_announcements(node),
            MatchesListwise([
                matches_storage_announcement(
                    basedir.path,
                    anonymous=False,
                ),
            ]),
        )
        # ... and no anonymous-storage fURL was persisted
        self.expectThat(
            config.get_private_config("storage.furl", default=None),
            Is(None),
        )

    @defer.inlineCallbacks
    def test_anonymous_storage_enabled_then_disabled(self):
        """
        If a node is run with anonymous storage enabled and then later anonymous
        storage is disabled in the configuration for that node, it is not
        possible to reach the anonymous storage server via the originally
        published fURL.
        """
        basedir = FilePath(self.id())
        basedir.child("private").makedirs()
        enabled_config = client.config_from_string(
            basedir.path,
            "tub.port",
            BASECONFIG + (
                "[storage]\n"
                "enabled = true\n"
                "anonymous = true\n"
            )
        )
        node = yield client.create_client_from_config(
            enabled_config,
            _introducer_factory=MemoryIntroducerClient,
        )
        anonymous_storage_furl = enabled_config.get_private_config("storage.furl")
        def check_furl():
            return node.tub.getReferenceForURL(anonymous_storage_furl)
        # Perform a sanity check that our test code makes sense: is this a
        # legit way to verify whether a fURL will refer to an object?
        self.assertThat(
            check_furl(),
            # If it doesn't raise a KeyError we're in business.
            Always(),
        )

        # now re-create the node with anonymous storage disabled
        disabled_config = client.config_from_string(
            basedir.path,
            "tub.port",
            BASECONFIG + (
                "[storage]\n"
                "enabled = true\n"
                "anonymous = false\n"
            )
        )
        node = yield client.create_client_from_config(
            disabled_config,
            _introducer_factory=MemoryIntroducerClient,
        )
        # the previously-published fURL must no longer resolve
        self.assertThat(
            check_furl,
            raises(KeyError),
        )
---|
765 | |
---|
766 | |
---|
class IntroducerClients(unittest.TestCase):
    """
    Tests for ``client.create_introducer_clients``.
    """

    def test_invalid_introducer_furl(self):
        """
        An introducer.furl of 'None' in the deprecated [client]introducer.furl
        field is invalid and causes `create_introducer_clients` to fail.
        """
        config = client.config_from_string(
            "basedir",
            "client.port",
            "[client]\n"
            "introducer.furl = None\n",
        )

        try:
            client.create_introducer_clients(config, main_tub=None)
        except ValueError as e:
            # the error should identify the bogus config value
            self.assertIn(
                "invalid 'introducer.furl = None'",
                str(e),
            )
        else:
            self.fail("create_introducer_clients accepted 'None' furl")
---|
786 | |
---|
787 | |
---|
def get_known_server_details(a_client):
    """
    Get some details about known storage servers from a client.

    :param _Client a_client: The client to inspect.

    :return: A ``list`` of two-tuples.  Each element of the list corresponds
        to a "known server".  The first element of each tuple is a server id.
        The second is the server's announcement.
    """
    known = a_client.storage_broker.get_known_servers()
    return [
        (server.get_serverid(), server.get_announcement())
        for server in known
    ]
---|
803 | |
---|
804 | |
---|
class StaticServers(Fixture):
    """
    Create a ``servers.yaml`` file beneath a node's private directory,
    describing the given static storage servers.
    """
    def __init__(self, basedir, server_details):
        super(StaticServers, self).__init__()
        # FilePath of the node's basedir
        self._basedir = basedir
        # iterable of (serverid, announcement) pairs
        self._server_details = server_details

    def _setUp(self):
        private = self._basedir.child(u"private")
        private.makedirs()
        # map each server id to its announcement, in the shape the
        # servers.yaml loader expects
        storage = {
            serverid: {u"ann": announcement}
            for (serverid, announcement) in self._server_details
        }
        servers_yaml = private.child(u"servers.yaml")
        servers_yaml.setContent(
            safe_dump({u"storage": storage}).encode("utf-8"),
        )
---|
827 | |
---|
828 | |
---|
class StorageClients(SyncTestCase):
    """
    Tests for storage-related behavior of ``_Client``.
    """
    def setUp(self):
        super(StorageClients, self).setUp()
        # Some other tests create Nodes and Node mutates tempfile.tempdir and
        # that screws us up because we're *not* making a Node.  "Fix" it.  See
        # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3052 for the real fix,
        # though.
        import tempfile
        tempfile.tempdir = None

        temp_dir = TempDir()
        self.useFixture(temp_dir)
        self.basedir = FilePath(temp_dir.path)

    @capture_logging(
        lambda case, logger: assertHasAction(
            case,
            logger,
            actionType=u"storage-client:broker:set-static-servers",
            succeeded=True,
        ),
        encoder_=json.AnyBytesJSONEncoder
    )
    def test_static_servers(self, logger):
        """
        Storage servers defined in ``private/servers.yaml`` are loaded into the
        storage broker.
        """
        server_id = u"v0-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
        ann = {
            u"nickname": u"some-storage-server",
            u"anonymous-storage-FURL": u"pb://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@tcp:storage.example:100/swissnum",
        }
        self.useFixture(StaticServers(self.basedir, [(server_id, ann)]))
        creating = client.create_client(self.basedir.asTextMode().path)
        self.assertThat(
            creating,
            succeeded(
                AfterPreprocessing(
                    get_known_server_details,
                    Equals([(server_id.encode("utf-8"), ann)]),
                ),
            ),
        )

    @capture_logging(
        lambda case, logger: assertHasAction(
            case,
            logger,
            actionType=u"storage-client:broker:make-storage-server",
            succeeded=False,
        ),
        encoder_=json.AnyBytesJSONEncoder
    )
    def test_invalid_static_server(self, logger):
        """
        An invalid announcement for a static server does not prevent other static
        servers from being loaded.
        """
        # Some good details
        server_id = u"v1-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
        ann = {
            u"nickname": u"some-storage-server",
            u"anonymous-storage-FURL": u"pb://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@tcp:storage.example:100/swissnum",
        }
        # Along with a "bad" server announcement.  Order in this list doesn't
        # matter, yaml serializer and Python dicts are going to shuffle
        # everything around kind of randomly.
        bad_id = u"v0-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
        bad_ann = {
            u"nickname": u"another-storage-server",
            u"anonymous-storage-FURL": None,
        }
        self.useFixture(
            StaticServers(
                self.basedir,
                [
                    (server_id.encode("ascii"), ann),
                    (bad_id, bad_ann),
                ],
            ),
        )
        self.assertThat(
            client.create_client(self.basedir.asTextMode().path),
            succeeded(
                AfterPreprocessing(
                    get_known_server_details,
                    # It should have the good server details.
                    Equals([(server_id.encode("utf-8"), ann)]),
                ),
            ),
        )
---|
925 | |
---|
926 | |
---|
class Run(unittest.TestCase, testutil.StallMixin):

    def setUp(self):
        # Parent service that created client nodes are attached to.
        self.sparent = service.MultiService()
        self.sparent.startService()

    def tearDown(self):
        stopping = self.sparent.stopService()
        stopping.addBoth(flush_but_dont_ignore)
        return stopping

    @defer.inlineCallbacks
    def test_loadable(self):
        """
        A configuration consisting only of an introducer can be turned into a
        client node.
        """
        basedir = FilePath("test_client.Run.test_loadable")
        basedir.child("private").makedirs()
        dummy_furl = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
        write_introducer(basedir, "someintroducer", dummy_furl)
        basedir.child("tahoe.cfg").setContent(BASECONFIG.encode("ascii"))
        basedir.child(client._Client.EXIT_TRIGGER_FILE).touch()
        yield client.create_client(basedir.path)

    @defer.inlineCallbacks
    def test_reloadable(self):
        from twisted.internet import reactor

        dummy_furl = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
        node_fixture = UseNode(
            None, None, FilePath(self.mktemp()), dummy_furl, reactor=reactor)
        node_fixture.setUp()
        self.addCleanup(node_fixture.cleanUp)

        first = yield node_fixture.create_node()
        first.setServiceParent(self.sparent)

        # delay to let the service start up completely. I'm not entirely sure
        # this is necessary.
        yield self.stall(delay=2.0)
        yield first.disownServiceParent()
        # the cygwin buildslave seems to need more time to let the old
        # service completely shut down. When delay=0.1, I saw this test fail,
        # probably due to the logport trying to reclaim the old socket
        # number. This suggests that either we're dropping a Deferred
        # somewhere in the shutdown sequence, or that cygwin is just cranky.
        yield self.stall(delay=2.0)

        # TODO: pause for slightly over one second, to let
        # Client._check_exit_trigger poll the file once. That will exercise
        # another few lines. Then add another test in which we don't
        # update the file at all, and watch to see the node shutdown.
        # (To do this, use a modified node which overrides Node.shutdown(),
        # also change _check_exit_trigger to use it instead of a raw
        # reactor.stop, also instrument the shutdown event in an
        # attribute that we can check.)
        second = yield node_fixture.create_node()
        second.setServiceParent(self.sparent)
        yield second.disownServiceParent()
---|
986 | |
---|
class NodeMakerTests(testutil.ReallyEqualMixin, AsyncBrokenTestCase):
    """
    Tests for ``NodeMaker``: node creation from capability strings and the
    (non-)caching behavior of the resulting node objects.
    """

    def _make_node_maker(self, mode, writecap, deep_immutable):
        """
        Create a callable which can create an ``IFilesystemNode`` provider for the
        given cap.

        :param unicode mode: The read/write combination to pass to
            ``NodeMaker.create_from_cap``.  If it contains ``u"r"`` then a
            readcap will be passed in.  If it contains ``u"w"`` then a
            writecap will be passed in.

        :param IURI writecap: The capability for which to create a node.

        :param bool deep_immutable: Whether to request a "deep immutable" node
            which forces the result to be an immutable ``IFilesystemNode`` (I
            think -exarkun).
        """
        if writecap.is_mutable():
            # It's just not a valid combination to have a mutable alongside
            # deep_immutable = True.  It's easier to fix deep_immutable than
            # writecap to clear up this conflict.
            deep_immutable = False

        # Derive the actual readcap/writecap arguments from the requested
        # mode; either may end up as None.
        if "r" in mode:
            readcap = writecap.get_readonly().to_string()
        else:
            readcap = None
        if "w" in mode:
            writecap = writecap.to_string()
        else:
            writecap = None

        # A NodeMaker with all collaborators stubbed out; only cap parsing
        # and node construction are exercised here.
        nm = NodeMaker(
            storage_broker=None,
            secret_holder=None,
            history=None,
            uploader=None,
            terminator=None,
            default_encoding_parameters={u"k": 1, u"n": 1},
            mutable_file_default=None,
            key_generator=None,
            blacklist=None,
        )
        return partial(
            nm.create_from_cap,
            writecap,
            readcap,
            deep_immutable,
        )

    @given(
        mode=sampled_from(["w", "r", "rw"]),
        writecap=write_capabilities(),
        deep_immutable=booleans(),
    )
    def test_cached_result(self, mode, writecap, deep_immutable):
        """
        ``NodeMaker.create_from_cap`` returns the same object when called with the
        same arguments.
        """
        make_node = self._make_node_maker(mode, writecap, deep_immutable)
        original = make_node()
        additional = make_node()

        # Identity, not mere equality: while `original` is alive, the cache
        # must hand back the very same node object.
        self.assertThat(
            original,
            Is(additional),
        )

    @given(
        mode=sampled_from(["w", "r", "rw"]),
        writecap=write_capabilities(),
        deep_immutable=booleans(),
    )
    def test_cache_expired(self, mode, writecap, deep_immutable):
        """
        After the node object returned by an earlier call to
        ``NodeMaker.create_from_cap`` has been garbage collected, a new call
        to ``NodeMaker.create_from_cap`` returns a node object, maybe even a
        new one although we can't really prove it.
        """
        make_node = self._make_node_maker(mode, writecap, deep_immutable)
        # The first result is deliberately not kept alive.
        make_node()
        additional = make_node()
        self.assertThat(
            additional,
            AfterPreprocessing(
                lambda node: node.get_readonly_uri(),
                Equals(writecap.get_readonly().to_string()),
            ),
        )

    @defer.inlineCallbacks
    def test_maker(self):
        """
        ``create_node_from_uri`` produces nodes providing the interfaces and
        readonly/mutable properties appropriate to each capability type
        (CHK, LIT, SSK, SSK-RO, DIR2, DIR2-RO, and unknown caps).
        """
        basedir = "client/NodeMaker/maker"
        fileutil.make_dirs(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG)
        c = yield client.create_client(basedir)

        # Immutable file (CHK) cap.
        n = c.create_node_from_uri(b"URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failUnless(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failIf(n.is_mutable())

        # Testing #1679. There was a bug that would occur when downloader was
        # downloading the same readcap more than once concurrently, so the
        # filenode object was cached, and there was a failure from one of the
        # servers in one of the download attempts. No subsequent download
        # attempt would attempt to use that server again, which would lead to
        # the file being undownloadable until the gateway was restarted. The
        # current fix for this (hopefully to be superceded by a better fix
        # eventually) is to prevent re-use of filenodes, so the NodeMaker is
        # hereby required *not* to cache and re-use filenodes for CHKs.
        other_n = c.create_node_from_uri(b"URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
        self.failIf(n is other_n, (n, other_n))

        # Literal (LIT) cap: immutable, data embedded in the cap itself.
        n = c.create_node_from_uri(b"URI:LIT:n5xgk")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failUnless(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failIf(n.is_mutable())

        # Mutable file write cap (SSK).
        n = c.create_node_from_uri(b"URI:SSK:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failUnless(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failIf(n.is_readonly())
        self.failUnless(n.is_mutable())

        # Mutable file read-only cap (SSK-RO): readonly *and* mutable.
        n = c.create_node_from_uri(b"URI:SSK-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failUnless(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failUnless(n.is_mutable())

        # Mutable directory write cap (DIR2).
        n = c.create_node_from_uri(b"URI:DIR2:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failUnless(IDirectoryNode.providedBy(n))
        self.failIf(n.is_readonly())
        self.failUnless(n.is_mutable())

        # Mutable directory read-only cap (DIR2-RO).
        n = c.create_node_from_uri(b"URI:DIR2-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failUnless(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failUnless(n.is_mutable())

        # Unrecognized cap formats produce "unknown" placeholder nodes.
        unknown_rw = b"lafs://from_the_future"
        unknown_ro = b"lafs://readonly_from_the_future"
        n = c.create_node_from_uri(unknown_rw, unknown_ro)
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_unknown())
        self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_readonly_uri(), b"ro." + unknown_ro)

        # Note: it isn't that we *intend* to deploy non-ASCII caps in
        # the future, it is that we want to make sure older Tahoe-LAFS
        # versions wouldn't choke on them if we were to do so. See
        # #1051 and wiki:NewCapDesign for details.
        unknown_rw = u"lafs://from_the_future_rw_\u263A".encode('utf-8')
        unknown_ro = u"lafs://readonly_from_the_future_ro_\u263A".encode('utf-8')
        n = c.create_node_from_uri(unknown_rw, unknown_ro)
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_unknown())
        self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_readonly_uri(), b"ro." + unknown_ro)
---|
1182 | |
---|
1183 | |
---|
1184 | |
---|
def matches_dummy_announcement(name, value):
    """
    Matches the portion of an announcement for the ``DummyStorage`` storage
    server plugin.

    :param unicode name: The name of the dummy plugin.

    :param unicode value: The arbitrary value in the dummy plugin
        announcement.

    :return: a testtools-style matcher
    """
    expected = {
        # Everyone gets a name and a fURL added to their announcement.
        u"name": Equals(name),
        u"storage-server-FURL": matches_furl(),
        # The plugin can contribute things, too.
        u"value": Equals(value),
    }
    return MatchesDict(expected)
---|
1204 | |
---|
1205 | |
---|
1206 | |
---|
1207 | class StorageAnnouncementTests(SyncTestCase): |
---|
1208 | """ |
---|
1209 | Tests for the storage announcement published by the client. |
---|
1210 | """ |
---|
1211 | def setUp(self): |
---|
1212 | super(StorageAnnouncementTests, self).setUp() |
---|
1213 | self.basedir = FilePath(self.useFixture(TempDir()).path) |
---|
1214 | create_node_dir(self.basedir.path, u"") |
---|
1215 | # Write an introducer configuration or we can't observer |
---|
1216 | # announcements. |
---|
1217 | write_introducer(self.basedir, "someintroducer", SOME_FURL) |
---|
1218 | |
---|
1219 | |
---|
1220 | def get_config(self, storage_enabled, more_storage="", more_sections=""): |
---|
1221 | return """ |
---|
1222 | [client] |
---|
1223 | # Empty |
---|
1224 | |
---|
1225 | [node] |
---|
1226 | tub.location = tcp:192.0.2.0:1234 |
---|
1227 | |
---|
1228 | [storage] |
---|
1229 | enabled = {storage_enabled} |
---|
1230 | {more_storage} |
---|
1231 | |
---|
1232 | {more_sections} |
---|
1233 | """.format( |
---|
1234 | storage_enabled=storage_enabled, |
---|
1235 | more_storage=more_storage, |
---|
1236 | more_sections=more_sections, |
---|
1237 | ) |
---|
1238 | |
---|
1239 | |
---|
1240 | def test_no_announcement(self): |
---|
1241 | """ |
---|
1242 | No storage announcement is published if storage is not enabled. |
---|
1243 | """ |
---|
1244 | config = client.config_from_string( |
---|
1245 | self.basedir.path, |
---|
1246 | "tub.port", |
---|
1247 | self.get_config(storage_enabled=False), |
---|
1248 | ) |
---|
1249 | self.assertThat( |
---|
1250 | client.create_client_from_config( |
---|
1251 | config, |
---|
1252 | _introducer_factory=MemoryIntroducerClient, |
---|
1253 | ), |
---|
1254 | succeeded(AfterPreprocessing( |
---|
1255 | get_published_announcements, |
---|
1256 | Equals([]), |
---|
1257 | )), |
---|
1258 | ) |
---|
1259 | |
---|
1260 | |
---|
1261 | def test_anonymous_storage_announcement(self): |
---|
1262 | """ |
---|
1263 | A storage announcement with the anonymous storage fURL is published when |
---|
1264 | storage is enabled. |
---|
1265 | """ |
---|
1266 | config = client.config_from_string( |
---|
1267 | self.basedir.path, |
---|
1268 | "tub.port", |
---|
1269 | self.get_config(storage_enabled=True), |
---|
1270 | ) |
---|
1271 | client_deferred = client.create_client_from_config( |
---|
1272 | config, |
---|
1273 | _introducer_factory=MemoryIntroducerClient, |
---|
1274 | ) |
---|
1275 | self.assertThat( |
---|
1276 | client_deferred, |
---|
1277 | # The Deferred succeeds |
---|
1278 | succeeded(AfterPreprocessing( |
---|
1279 | # The announcements published by the client should ... |
---|
1280 | get_published_announcements, |
---|
1281 | # Match the following list (of one element) ... |
---|
1282 | MatchesListwise([ |
---|
1283 | # The only element in the list ... |
---|
1284 | matches_storage_announcement(self.basedir.path), |
---|
1285 | ]), |
---|
1286 | )), |
---|
1287 | ) |
---|
1288 | |
---|
1289 | |
---|
1290 | def test_single_storage_plugin_announcement(self): |
---|
1291 | """ |
---|
1292 | The announcement from a single enabled storage plugin is published when |
---|
1293 | storage is enabled. |
---|
1294 | """ |
---|
1295 | self.useFixture(UseTestPlugins()) |
---|
1296 | |
---|
1297 | value = u"thing" |
---|
1298 | config = client.config_from_string( |
---|
1299 | self.basedir.path, |
---|
1300 | "tub.port", |
---|
1301 | self.get_config( |
---|
1302 | storage_enabled=True, |
---|
1303 | more_storage="plugins=tahoe-lafs-dummy-v1", |
---|
1304 | more_sections=( |
---|
1305 | "[storageserver.plugins.tahoe-lafs-dummy-v1]\n" |
---|
1306 | "some = {}\n".format(value) |
---|
1307 | ), |
---|
1308 | ), |
---|
1309 | ) |
---|
1310 | self.assertThat( |
---|
1311 | client.create_client_from_config( |
---|
1312 | config, |
---|
1313 | _introducer_factory=MemoryIntroducerClient, |
---|
1314 | ), |
---|
1315 | succeeded(AfterPreprocessing( |
---|
1316 | get_published_announcements, |
---|
1317 | MatchesListwise([ |
---|
1318 | matches_storage_announcement( |
---|
1319 | self.basedir.path, |
---|
1320 | options=[ |
---|
1321 | matches_dummy_announcement( |
---|
1322 | u"tahoe-lafs-dummy-v1", |
---|
1323 | value, |
---|
1324 | ), |
---|
1325 | ], |
---|
1326 | ), |
---|
1327 | ]), |
---|
1328 | )), |
---|
1329 | ) |
---|
1330 | |
---|
1331 | |
---|
1332 | def test_multiple_storage_plugin_announcements(self): |
---|
1333 | """ |
---|
1334 | The announcements from several enabled storage plugins are published when |
---|
1335 | storage is enabled. |
---|
1336 | """ |
---|
1337 | self.useFixture(UseTestPlugins()) |
---|
1338 | |
---|
1339 | config = client.config_from_string( |
---|
1340 | self.basedir.path, |
---|
1341 | "tub.port", |
---|
1342 | self.get_config( |
---|
1343 | storage_enabled=True, |
---|
1344 | more_storage="plugins=tahoe-lafs-dummy-v1,tahoe-lafs-dummy-v2", |
---|
1345 | more_sections=( |
---|
1346 | "[storageserver.plugins.tahoe-lafs-dummy-v1]\n" |
---|
1347 | "some = thing-1\n" |
---|
1348 | "[storageserver.plugins.tahoe-lafs-dummy-v2]\n" |
---|
1349 | "some = thing-2\n" |
---|
1350 | ), |
---|
1351 | ), |
---|
1352 | ) |
---|
1353 | self.assertThat( |
---|
1354 | client.create_client_from_config( |
---|
1355 | config, |
---|
1356 | _introducer_factory=MemoryIntroducerClient, |
---|
1357 | ), |
---|
1358 | succeeded(AfterPreprocessing( |
---|
1359 | get_published_announcements, |
---|
1360 | MatchesListwise([ |
---|
1361 | matches_storage_announcement( |
---|
1362 | self.basedir.path, |
---|
1363 | options=[ |
---|
1364 | matches_dummy_announcement( |
---|
1365 | u"tahoe-lafs-dummy-v1", |
---|
1366 | u"thing-1", |
---|
1367 | ), |
---|
1368 | matches_dummy_announcement( |
---|
1369 | u"tahoe-lafs-dummy-v2", |
---|
1370 | u"thing-2", |
---|
1371 | ), |
---|
1372 | ], |
---|
1373 | ), |
---|
1374 | ]), |
---|
1375 | )), |
---|
1376 | ) |
---|
1377 | |
---|
1378 | |
---|
1379 | def test_stable_storage_server_furl(self): |
---|
1380 | """ |
---|
1381 | The value for the ``storage-server-FURL`` item in the announcement for a |
---|
1382 | particular storage server plugin is stable across different node |
---|
1383 | instantiations. |
---|
1384 | """ |
---|
1385 | self.useFixture(UseTestPlugins()) |
---|
1386 | |
---|
1387 | config = client.config_from_string( |
---|
1388 | self.basedir.path, |
---|
1389 | "tub.port", |
---|
1390 | self.get_config( |
---|
1391 | storage_enabled=True, |
---|
1392 | more_storage="plugins=tahoe-lafs-dummy-v1", |
---|
1393 | more_sections=( |
---|
1394 | "[storageserver.plugins.tahoe-lafs-dummy-v1]\n" |
---|
1395 | "some = thing\n" |
---|
1396 | ), |
---|
1397 | ), |
---|
1398 | ) |
---|
1399 | node_a = client.create_client_from_config( |
---|
1400 | config, |
---|
1401 | _introducer_factory=MemoryIntroducerClient, |
---|
1402 | ) |
---|
1403 | node_b = client.create_client_from_config( |
---|
1404 | config, |
---|
1405 | _introducer_factory=MemoryIntroducerClient, |
---|
1406 | ) |
---|
1407 | |
---|
1408 | self.assertThat( |
---|
1409 | defer.gatherResults([node_a, node_b]), |
---|
1410 | succeeded(AfterPreprocessing( |
---|
1411 | partial(map, get_published_announcements), |
---|
1412 | MatchesSameElements(), |
---|
1413 | )), |
---|
1414 | ) |
---|
1415 | |
---|
1416 | |
---|
1417 | def test_storage_plugin_without_configuration(self): |
---|
1418 | """ |
---|
1419 | A storage plugin with no configuration is loaded and announced. |
---|
1420 | """ |
---|
1421 | self.useFixture(UseTestPlugins()) |
---|
1422 | |
---|
1423 | config = client.config_from_string( |
---|
1424 | self.basedir.path, |
---|
1425 | "tub.port", |
---|
1426 | self.get_config( |
---|
1427 | storage_enabled=True, |
---|
1428 | more_storage="plugins=tahoe-lafs-dummy-v1", |
---|
1429 | ), |
---|
1430 | ) |
---|
1431 | self.assertThat( |
---|
1432 | client.create_client_from_config( |
---|
1433 | config, |
---|
1434 | _introducer_factory=MemoryIntroducerClient, |
---|
1435 | ), |
---|
1436 | succeeded(AfterPreprocessing( |
---|
1437 | get_published_announcements, |
---|
1438 | MatchesListwise([ |
---|
1439 | matches_storage_announcement( |
---|
1440 | self.basedir.path, |
---|
1441 | options=[ |
---|
1442 | matches_dummy_announcement( |
---|
1443 | u"tahoe-lafs-dummy-v1", |
---|
1444 | u"default-value", |
---|
1445 | ), |
---|
1446 | ], |
---|
1447 | ), |
---|
1448 | ]), |
---|
1449 | )), |
---|
1450 | ) |
---|
1451 | |
---|
1452 | |
---|
1453 | def test_broken_storage_plugin(self): |
---|
1454 | """ |
---|
1455 | A storage plugin that raises an exception from ``get_storage_server`` |
---|
1456 | causes ``client.create_client_from_config`` to return ``Deferred`` |
---|
1457 | that fails. |
---|
1458 | """ |
---|
1459 | self.useFixture(UseTestPlugins()) |
---|
1460 | |
---|
1461 | config = client.config_from_string( |
---|
1462 | self.basedir.path, |
---|
1463 | "tub.port", |
---|
1464 | self.get_config( |
---|
1465 | storage_enabled=True, |
---|
1466 | more_storage="plugins=tahoe-lafs-dummy-v1", |
---|
1467 | more_sections=( |
---|
1468 | "[storageserver.plugins.tahoe-lafs-dummy-v1]\n" |
---|
1469 | # This will make it explode on instantiation. |
---|
1470 | "invalid = configuration\n" |
---|
1471 | ) |
---|
1472 | ), |
---|
1473 | ) |
---|
1474 | self.assertThat( |
---|
1475 | client.create_client_from_config( |
---|
1476 | config, |
---|
1477 | _introducer_factory=MemoryIntroducerClient, |
---|
1478 | ), |
---|
1479 | failed(Always()), |
---|
1480 | ) |
---|
1481 | |
---|
1482 | def test_storage_plugin_not_found(self): |
---|
1483 | """ |
---|
1484 | ``client.create_client_from_config`` raises ``UnknownConfigError`` when |
---|
1485 | called with a configuration which enables a storage plugin that is not |
---|
1486 | available on the system. |
---|
1487 | """ |
---|
1488 | config = client.config_from_string( |
---|
1489 | self.basedir.path, |
---|
1490 | "tub.port", |
---|
1491 | self.get_config( |
---|
1492 | storage_enabled=True, |
---|
1493 | more_storage="plugins=tahoe-lafs-dummy-vX", |
---|
1494 | ), |
---|
1495 | ) |
---|
1496 | self.assertThat( |
---|
1497 | client.create_client_from_config( |
---|
1498 | config, |
---|
1499 | _introducer_factory=MemoryIntroducerClient, |
---|
1500 | ), |
---|
1501 | failed( |
---|
1502 | AfterPreprocessing( |
---|
1503 | lambda f: f.type, |
---|
1504 | Equals(configutil.UnknownConfigError), |
---|
1505 | ), |
---|
1506 | ), |
---|
1507 | ) |
---|
1508 | |
---|
    def test_announcement_includes_grid_manager(self):
        """
        When Grid Manager is enabled certificates are included in the
        announcement
        """
        # A syntactically plausible certificate; the announcement machinery
        # only needs to carry it through, not validate the signature.
        fake_cert = {
            "certificate": "{\"expires\":1601687822,\"public_key\":\"pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga\",\"version\":1}",
            "signature": "fvjd3uvvupf2v6tnvkwjd473u3m3inyqkwiclhp7balmchkmn3px5pei3qyfjnhymq4cjcwvbpqmcwwnwswdtrfkpnlaxuih2zbdmda",
        }
        # NOTE(review): dumps_bytes returns bytes; presumably FilePath.open
        # opens in binary mode so this write succeeds — confirm against
        # twisted.python.filepath.
        with self.basedir.child("zero.cert").open("w") as f:
            f.write(json.dumps_bytes(fake_cert))
        with self.basedir.child("gm0.cert").open("w") as f:
            f.write(json.dumps_bytes(fake_cert))

        # Enable grid management and point the config at the certificate
        # written above.
        config = client.config_from_string(
            self.basedir.path,
            "tub.port",
            self.get_config(
                storage_enabled=True,
                more_storage="grid_management = True",
                more_sections=(
                    "[grid_managers]\n"
                    "gm0 = pub-v0-ibpbsexcjfbv3ni7gwlclgn6mldaqnqd5mrtan2fnq2b27xnovca\n"
                    "[grid_manager_certificates]\n"
                    "foo = zero.cert\n"
                )
            ),
        )

        # The first published announcement should carry the certificate under
        # the "grid-manager-certificates" key.
        self.assertThat(
            client.create_client_from_config(
                config,
                _introducer_factory=MemoryIntroducerClient,
            ),
            succeeded(AfterPreprocessing(
                lambda client: get_published_announcements(client)[0].ann,
                ContainsDict({
                    "grid-manager-certificates": Equals([fake_cert]),
                }),
            )),
        )
---|