"""
Tests for the ``IStorageServer`` interface.

Keep in mind that ``IStorageServer`` is actually the storage _client_ interface.

Note that for performance, in the future we might want the same node to be
reused across tests, so each test should be careful to generate unique storage
indexes.
"""

from __future__ import annotations

from future.utils import bchr

from random import Random
from unittest import SkipTest

from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.task import Clock
from foolscap.api import Referenceable, RemoteException

# A better name for this would be IStorageClient...
from allmydata.interfaces import IStorageServer

from .common_system import SystemTestMixin
from .common import AsyncTestCase
from allmydata.storage.server import StorageServer  # not a IStorageServer!!


# Use random generator with known seed, so results are reproducible if tests
# are run in the same order.
_RANDOM = Random(0)


def _randbytes(length):
    # type: (int) -> bytes
    """Return random bytes string of given length."""
    return b"".join([bchr(_RANDOM.randrange(0, 256)) for _ in range(length)])


def new_storage_index():
    # type: () -> bytes
    """Return a new random storage index."""
    return _randbytes(16)


def new_secret():
    # type: () -> bytes
    """Return a new random secret (for lease renewal or cancellation)."""
    return _randbytes(32)


class IStorageServerSharedAPIsTestsMixin(object):
    """
    Tests for ``IStorageServer``'s shared APIs.

    ``self.storage_client`` is expected to provide ``IStorageServer``.
    """

    @inlineCallbacks
    def test_version(self):
        """
        ``IStorageServer`` returns a dictionary where the key is an expected
        protocol version.
        """
        result = yield self.storage_client.get_version()
        self.assertIsInstance(result, dict)
        self.assertIn(b"http://allmydata.org/tahoe/protocols/storage/v1", result)


class IStorageServerImmutableAPIsTestsMixin(object):
    """
    Tests for ``IStorageServer``'s immutable APIs.

    ``self.storage_client`` is expected to provide ``IStorageServer``.

    ``self.disconnect()`` should disconnect and then reconnect, creating a new
    ``self.storage_client``. Some implementations may wish to skip tests using
    this; HTTP has no notion of disconnection.

    ``self.server`` is expected to be the corresponding
    ``allmydata.storage.server.StorageServer`` instance. Time should be
    instrumented, such that ``self.fake_time()`` and ``self.fake_sleep()``
    return and advance the server time, respectively.
    """
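    # Concrete test classes are expected to supply the attributes described
    # above; in this module that wiring is done by ``_SharedMixin`` further
    # down, combined with e.g. ``FoolscapImmutableAPIsTests`` and
    # ``HTTPImmutableAPIsTests``.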

    @inlineCallbacks
    def test_allocate_buckets_new(self):
        """
        allocate_buckets() with a new storage index returns the matching
        shares.
        """
        (already_got, allocated) = yield self.storage_client.allocate_buckets(
            new_storage_index(),
            renew_secret=new_secret(),
            cancel_secret=new_secret(),
            sharenums=set(range(5)),
            allocated_size=1024,
            canary=Referenceable(),
        )
        self.assertEqual(already_got, set())
        self.assertEqual(set(allocated.keys()), set(range(5)))
        # We validate the bucket objects' interface in a later test.

    @inlineCallbacks
    def test_allocate_buckets_repeat(self):
        """
        ``IStorageServer.allocate_buckets()`` with the same storage index does
        not return work-in-progress buckets, but will allocate any newly
        requested share numbers.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (already_got, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums=set(range(4)),
            allocated_size=1024,
            canary=Referenceable(),
        )
        (already_got2, allocated2) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            set(range(5)),
            1024,
            Referenceable(),
        )
        self.assertEqual(already_got, already_got2)
        self.assertEqual(set(allocated2.keys()), {4})

    @inlineCallbacks
    def abort_or_disconnect_half_way(self, abort_or_disconnect):
        """
        If we disconnect/abort in the middle of writing to a bucket, all data
        is wiped, and it's even possible to write different data to the bucket.

        (In the real world one shouldn't do that, but writing different data is
        a good way to test that the original data really was wiped.)

        ``abort_or_disconnect`` is a callback that takes a bucket and aborts
        the upload, or perhaps disconnects the whole connection.
        """
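        # This helper is exercised by ``test_abort`` below and by
        # ``test_disconnection`` in the Foolscap-specific subclass, which
        # passes a callback that severs the connection instead of aborting.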
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums={0},
            allocated_size=1024,
            canary=Referenceable(),
        )

        # Bucket 0 gets some data written (but not all of it, or else HTTP
        # would implicitly finish the upload):
        yield allocated[0].callRemote("write", 0, b"1" * 1023)

        # Disconnect or abort, depending on the test:
        yield abort_or_disconnect(allocated[0])

        # Write different data with no complaint:
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums={0},
            allocated_size=1024,
            canary=Referenceable(),
        )
        yield allocated[0].callRemote("write", 0, b"2" * 1024)

    @inlineCallbacks
    def test_written_shares_are_allocated(self):
        """
        Shares that are fully written to show up as allocated in the result
        from ``IStorageServer.allocate_buckets()``. Partially-written or empty
        shares don't.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums=set(range(5)),
            allocated_size=1024,
            canary=Referenceable(),
        )

        # Bucket 1 is fully written in one go.
        yield allocated[1].callRemote("write", 0, b"1" * 1024)
        yield allocated[1].callRemote("close")

        # Bucket 2 is fully written in two steps.
        yield allocated[2].callRemote("write", 0, b"1" * 512)
        yield allocated[2].callRemote("write", 512, b"2" * 512)
        yield allocated[2].callRemote("close")

        # Bucket 0 has a partial write.
        yield allocated[0].callRemote("write", 0, b"1" * 512)

        (already_got, _) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums=set(range(5)),
            allocated_size=1024,
            canary=Referenceable(),
        )
        self.assertEqual(already_got, {1, 2})

    @inlineCallbacks
    def test_written_shares_are_readable(self):
        """
        Shares that are fully written to can be read.

        The result is not affected by the order in which writes
        happened, only by their offsets.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums=set(range(5)),
            allocated_size=1024,
            canary=Referenceable(),
        )

        # Bucket 1 is fully written in order
        yield allocated[1].callRemote("write", 0, b"1" * 512)
        yield allocated[1].callRemote("write", 512, b"2" * 512)
        yield allocated[1].callRemote("close")

        # Bucket 2 is fully written in reverse.
        yield allocated[2].callRemote("write", 512, b"4" * 512)
        yield allocated[2].callRemote("write", 0, b"3" * 512)
        yield allocated[2].callRemote("close")

        buckets = yield self.storage_client.get_buckets(storage_index)
        self.assertEqual(set(buckets.keys()), {1, 2})

        self.assertEqual(
            (yield buckets[1].callRemote("read", 0, 1024)), b"1" * 512 + b"2" * 512
        )
        self.assertEqual(
            (yield buckets[2].callRemote("read", 0, 1024)), b"3" * 512 + b"4" * 512
        )

    @inlineCallbacks
    def test_non_matching_overlapping_writes(self):
        """
        When doing overlapping writes in immutable uploads, non-matching writes
        fail.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums={0},
            allocated_size=30,
            canary=Referenceable(),
        )

        yield allocated[0].callRemote("write", 0, b"1" * 25)
        # Overlapping write that doesn't match:
        with self.assertRaises(RemoteException):
            yield allocated[0].callRemote("write", 20, b"2" * 10)

    @inlineCallbacks
    def test_matching_overlapping_writes(self):
        """
        When doing overlapping writes in immutable uploads, matching writes
        succeed.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums={0},
            allocated_size=25,
            canary=Referenceable(),
        )

        yield allocated[0].callRemote("write", 0, b"1" * 10)
        # Overlapping write that matches:
        yield allocated[0].callRemote("write", 5, b"1" * 20)
        yield allocated[0].callRemote("close")

        buckets = yield self.storage_client.get_buckets(storage_index)
        self.assertEqual(set(buckets.keys()), {0})

        self.assertEqual((yield buckets[0].callRemote("read", 0, 25)), b"1" * 25)

    def test_abort(self):
        """
        If we call ``abort`` on the ``RIBucketWriter`` in the middle of writing
        to a bucket, all data is wiped, and it's even possible to write
        different data to the bucket.

        (In the real world one probably wouldn't do that, but writing different
        data is a good way to test that the original data really was wiped.)
        """
        return self.abort_or_disconnect_half_way(
            lambda bucket: bucket.callRemote("abort")
        )

    @inlineCallbacks
    def test_get_buckets_skips_unfinished_buckets(self):
        """
        Buckets that are not fully written are not returned by
        ``IStorageServer.get_buckets()`` implementations.
        """
        storage_index = new_storage_index()
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret=new_secret(),
            cancel_secret=new_secret(),
            sharenums=set(range(5)),
            allocated_size=10,
            canary=Referenceable(),
        )

        # Bucket 1 is fully written
        yield allocated[1].callRemote("write", 0, b"1" * 10)
        yield allocated[1].callRemote("close")

        # Bucket 2 is partially written
        yield allocated[2].callRemote("write", 0, b"1" * 5)

        buckets = yield self.storage_client.get_buckets(storage_index)
        self.assertEqual(set(buckets.keys()), {1})

    @inlineCallbacks
    def test_read_bucket_at_offset(self):
        """
        Given a read bucket returned from ``IStorageServer.get_buckets()``, it
        is possible to read at different offsets and lengths, with reads past
        the end resulting in empty bytes.
        """
        length = 256 * 17

        storage_index = new_storage_index()
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret=new_secret(),
            cancel_secret=new_secret(),
            sharenums=set(range(1)),
            allocated_size=length,
            canary=Referenceable(),
        )

        total_data = _randbytes(256 * 17)
        yield allocated[0].callRemote("write", 0, total_data)
        yield allocated[0].callRemote("close")

        buckets = yield self.storage_client.get_buckets(storage_index)
        bucket = buckets[0]
        for start, to_read in [
            (0, 250),  # fraction
            (0, length),  # whole thing
            (100, 1024),  # offset fraction
            (length + 1, 100),  # completely out of bounds
            (length - 100, 200),  # partially out of bounds
        ]:
            data = yield bucket.callRemote("read", start, to_read)
            self.assertEqual(
                data,
                total_data[start : start + to_read],
                "Didn't match for start {}, length {}".format(start, to_read),
            )

    @inlineCallbacks
    def create_share(self):
        """Create a share, return (storage_index, renew_secret, cancel_secret)."""
        storage_index = new_storage_index()
        renew_secret = new_secret()
        cancel_secret = new_secret()
        (_, allocated) = yield self.storage_client.allocate_buckets(
            storage_index,
            renew_secret=renew_secret,
            cancel_secret=cancel_secret,
            sharenums=set(range(1)),
            allocated_size=10,
            canary=Referenceable(),
        )

        yield allocated[0].callRemote("write", 0, b"0123456789")
        yield allocated[0].callRemote("close")
        returnValue((storage_index, renew_secret, cancel_secret))

    @inlineCallbacks
    def test_bucket_advise_corrupt_share(self):
        """
        Calling ``advise_corrupt_share()`` on a bucket returned by
        ``IStorageServer.get_buckets()`` does not result in error (other
        behavior is opaque at this level of abstraction).
        """
        storage_index, _, _ = yield self.create_share()
        buckets = yield self.storage_client.get_buckets(storage_index)
        yield buckets[0].callRemote("advise_corrupt_share", b"OH NO")

    @inlineCallbacks
    def test_advise_corrupt_share(self):
        """
        Calling ``advise_corrupt_share()`` on an immutable share does not
        result in error (other behavior is opaque at this level of
        abstraction).
        """
        storage_index, _, _ = yield self.create_share()
        yield self.storage_client.advise_corrupt_share(
            b"immutable", storage_index, 0, b"ono"
        )

    @inlineCallbacks
    def test_advise_corrupt_share_unknown_share_number(self):
        """
        Calling ``advise_corrupt_share()`` on an immutable share, with an
        unknown share number, does not result in error.
        """
        storage_index, _, _ = yield self.create_share()
        yield self.storage_client.advise_corrupt_share(
            b"immutable", storage_index, 999, b"ono"
        )

    @inlineCallbacks
    def test_allocate_buckets_creates_lease(self):
        """
        When buckets are created using ``allocate_buckets()``, a lease is
        created once writing is done.
        """
        storage_index, _, _ = yield self.create_share()
        [lease] = self.server.get_leases(storage_index)
        # Lease expires in 31 days.
        self.assertTrue(
            lease.get_expiration_time() - self.fake_time() > (31 * 24 * 60 * 60 - 10)
        )

    @inlineCallbacks
    def test_add_lease_non_existent(self):
        """
        If the storage index doesn't exist, adding the lease silently does nothing.
        """
        storage_index = new_storage_index()
        self.assertEqual(list(self.server.get_leases(storage_index)), [])

        renew_secret = new_secret()
        cancel_secret = new_secret()

        # Add a lease:
        yield self.storage_client.add_lease(storage_index, renew_secret, cancel_secret)
        self.assertEqual(list(self.server.get_leases(storage_index)), [])

    @inlineCallbacks
    def test_add_lease_renewal(self):
        """
        If the lease secret is reused, ``add_lease()`` extends the existing
        lease.
        """
        storage_index, renew_secret, cancel_secret = yield self.create_share()
        [lease] = self.server.get_leases(storage_index)
        initial_expiration_time = lease.get_expiration_time()

        # Time passes:
        self.fake_sleep(178)

        # We renew the lease:
        yield self.storage_client.add_lease(storage_index, renew_secret, cancel_secret)
        [lease] = self.server.get_leases(storage_index)
        new_expiration_time = lease.get_expiration_time()
        self.assertEqual(new_expiration_time - initial_expiration_time, 178)

    @inlineCallbacks
    def test_add_new_lease(self):
        """
        If a new lease secret is used, ``add_lease()`` creates a new lease.
        """
        storage_index, _, _ = yield self.create_share()
        [lease] = self.server.get_leases(storage_index)
        initial_expiration_time = lease.get_expiration_time()

        # Time passes:
        self.fake_sleep(167)

        # We create a new lease:
        renew_secret = new_secret()
        cancel_secret = new_secret()
        yield self.storage_client.add_lease(storage_index, renew_secret, cancel_secret)
        [lease1, lease2] = self.server.get_leases(storage_index)
        self.assertEqual(lease1.get_expiration_time(), initial_expiration_time)
        self.assertEqual(lease2.get_expiration_time() - initial_expiration_time, 167)


class IStorageServerMutableAPIsTestsMixin(object):
    """
    Tests for ``IStorageServer``'s mutable APIs.

    ``self.storage_client`` is expected to provide ``IStorageServer``.

    ``self.server`` is expected to be the corresponding
    ``allmydata.storage.server.StorageServer`` instance.

    ``STARAW`` is short for ``slot_testv_and_readv_and_writev``.
    """
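    # The tests below pass ``tw_vectors`` and ``r_vector`` in the shapes used
    # throughout this module: ``tw_vectors`` maps a share number to a
    # ``(test_vector, write_vector, new_length)`` tuple, where the test vector
    # is a list of ``(offset, length, expected_bytes)`` entries and the write
    # vector is a list of ``(offset, data)`` entries; ``r_vector`` is a list
    # of ``(offset, length)`` pairs to read back.  The call returns a
    # ``(success, reads)`` pair, with ``reads`` mapping share numbers to
    # lists of bytes.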

    def new_secrets(self):
        """Return a 3-tuple of secrets for STARAW calls."""
        return (new_secret(), new_secret(), new_secret())

    def staraw(self, *args, **kwargs):
        """Like ``slot_testv_and_readv_and_writev``, but less typing."""
        return self.storage_client.slot_testv_and_readv_and_writev(*args, **kwargs)

    @inlineCallbacks
    def test_STARAW_reads_after_write(self):
        """
        When data is written with
        ``IStorageServer.slot_testv_and_readv_and_writev``, it can then be read
        by a separate call using that API.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"abcdefg")], 7),
                1: ([], [(0, b"0123"), (4, b"456")], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            # Whole thing, partial, going beyond the edge, completely outside
            # range:
            r_vector=[(0, 7), (2, 3), (6, 8), (100, 10)],
        )
        self.assertEqual(
            reads,
            {0: [b"abcdefg", b"cde", b"g", b""], 1: [b"0123456", b"234", b"6", b""]},
        )

    @inlineCallbacks
    def test_SATRAW_reads_happen_before_writes_in_single_query(self):
        """
        If an ``IStorageServer.slot_testv_and_readv_and_writev`` command
        contains both reads and writes, the read returns results that precede
        the write.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"abcdefg")], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

        # Read and write in same command; read happens before write:
        (written, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"X" * 7)], 7),
            },
            r_vector=[(0, 7)],
        )
        self.assertEqual(written, True)
        self.assertEqual(reads, {0: [b"abcdefg"]})

        # The write is available in next read:
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            r_vector=[(0, 7)],
        )
        self.assertEqual(reads, {0: [b"X" * 7]})

    @inlineCallbacks
    def test_SATRAW_writes_happens_only_if_test_matches(self):
        """
        If an ``IStorageServer.slot_testv_and_readv_and_writev`` includes both
        a test and a write, the write succeeds if the test matches, and fails
        if the test does not match.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"1" * 7)], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

        # Test matches, so write happens:
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: (
                    [(0, 3, b"1" * 3), (3, 4, b"1" * 4)],
                    [(0, b"2" * 7)],
                    7,
                ),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            r_vector=[(0, 7)],
        )
        self.assertEqual(reads, {0: [b"2" * 7]})

        # Test does not match, so write does not happen:
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([(0, 7, b"1" * 7)], [(0, b"3" * 7)], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, False)
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            r_vector=[(0, 7)],
        )
        self.assertEqual(reads, {0: [b"2" * 7]})

    @inlineCallbacks
    def test_SATRAW_tests_past_end_of_data(self):
        """
        If an ``IStorageServer.slot_testv_and_readv_and_writev`` includes a
        test vector that reads past the end of the data, the result is limited
        to actual available data.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()

        # Since there is no data on the server, the test vector will return an
        # empty string, which matches the expected result, so the write will
        # succeed.
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([(0, 10, b"")], [(0, b"1" * 7)], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

        # Now the test vector is a 10-byte read of a 7-byte value, but the
        # expected value is still 7 bytes, so the write will again succeed.
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([(0, 10, b"1" * 7)], [(0, b"2" * 7)], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

    @inlineCallbacks
    def test_SATRAW_reads_past_end_of_data(self):
        """
        If an ``IStorageServer.slot_testv_and_readv_and_writev`` reads past the
        end of the data, the result is limited to actual available data.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()

        # Write some data
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"12345")], 5),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

        # Reads past end.
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            r_vector=[(0, 100), (2, 50)],
        )
        self.assertEqual(reads, {0: [b"12345", b"345"]})

    @inlineCallbacks
    def test_STARAW_write_enabler_must_match(self):
        """
        If the write enabler secret passed to
        ``IStorageServer.slot_testv_and_readv_and_writev`` doesn't match
        previous writes, the write fails.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"1" * 7)], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

        # Write enabler secret does not match, so write does not happen:
        bad_secrets = (new_secret(),) + secrets[1:]
        with self.assertRaises(RemoteException):
            yield self.staraw(
                storage_index,
                bad_secrets,
                tw_vectors={
                    0: ([], [(0, b"2" * 7)], 7),
                },
                r_vector=[],
            )
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            r_vector=[(0, 7)],
        )
        self.assertEqual(reads, {0: [b"1" * 7]})

    @inlineCallbacks
    def test_STARAW_zero_new_length_deletes(self):
        """
        A zero new length passed to
        ``IStorageServer.slot_testv_and_readv_and_writev`` deletes the share.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"1" * 7)], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

        # Write with new length of 0:
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"1" * 7)], 0),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

        # It's gone!
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            r_vector=[(0, 7)],
        )
        self.assertEqual(reads, {})

    @inlineCallbacks
    def test_slot_readv(self):
        """
        Data written with ``IStorageServer.slot_testv_and_readv_and_writev()``
        can be read using ``IStorageServer.slot_readv()``. Reads can't go past
        the end of the data.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"abcdefg")], 7),
                1: ([], [(0, b"0123"), (4, b"456")], 7),
                # This will never get read from, just here to show we only read
                # from shares explicitly requested by slot_readv:
                2: ([], [(0, b"XYZW")], 4),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

        reads = yield self.storage_client.slot_readv(
            storage_index,
            shares=[0, 1],
            # Whole thing, partial, going beyond the edge, completely outside
            # range:
            readv=[(0, 7), (2, 3), (6, 8), (100, 10)],
        )
        self.assertEqual(
            reads,
            {0: [b"abcdefg", b"cde", b"g", b""], 1: [b"0123456", b"234", b"6", b""]},
        )

    @inlineCallbacks
    def test_slot_readv_no_shares(self):
        """
        With no shares given, ``IStorageServer.slot_readv()`` reads from all shares.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"abcdefg")], 7),
                1: ([], [(0, b"0123456")], 7),
                2: ([], [(0, b"9876543")], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

        reads = yield self.storage_client.slot_readv(
            storage_index,
            shares=[],
            readv=[(0, 7)],
        )
        self.assertEqual(
            reads,
            {0: [b"abcdefg"], 1: [b"0123456"], 2: [b"9876543"]},
        )

    @inlineCallbacks
    def test_slot_readv_unknown_storage_index(self):
        """
        With an unknown storage index, ``IStorageServer.slot_readv()`` returns
        an empty dict.
        """
        storage_index = new_storage_index()
        reads = yield self.storage_client.slot_readv(
            storage_index,
            shares=[],
            readv=[(0, 7)],
        )
        self.assertEqual(
            reads,
            {},
        )

    @inlineCallbacks
    def create_slot(self):
        """Create a slot with sharenum 0, return ``(secrets, storage_index)``."""
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"abcdefg")], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        returnValue((secrets, storage_index))

    @inlineCallbacks
    def test_advise_corrupt_share(self):
        """
        Calling ``advise_corrupt_share()`` on a mutable share does not
        result in error (other behavior is opaque at this level of
        abstraction).
        """
        secrets, storage_index = yield self.create_slot()

        yield self.storage_client.advise_corrupt_share(
            b"mutable", storage_index, 0, b"ono"
        )

    @inlineCallbacks
    def test_advise_corrupt_share_unknown_share_number(self):
        """
        Calling ``advise_corrupt_share()`` on a mutable share with an unknown
        share number does not result in error (other behavior is opaque at this
        level of abstraction).
        """
        secrets, storage_index = yield self.create_slot()

        yield self.storage_client.advise_corrupt_share(
            b"mutable", storage_index, 999, b"ono"
        )

    @inlineCallbacks
    def test_STARAW_create_lease(self):
        """
        When STARAW creates a new slot, it also creates a lease.
        """
        _, storage_index = yield self.create_slot()
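        # Note: leases on mutable slots are read back via ``get_slot_leases()``
        # here, whereas the immutable tests above use ``get_leases()``.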
        [lease] = self.server.get_slot_leases(storage_index)
        # Lease expires in 31 days.
        self.assertTrue(
            lease.get_expiration_time() - self.fake_time() > (31 * 24 * 60 * 60 - 10)
        )

    @inlineCallbacks
    def test_STARAW_renews_lease(self):
        """
        When STARAW is run on an existing slot with the same renewal secret, it
        renews the lease.
        """
        secrets, storage_index = yield self.create_slot()
        [lease] = self.server.get_slot_leases(storage_index)
        initial_expire = lease.get_expiration_time()

        # Time passes...
        self.fake_sleep(17)

        # We do another write:
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"1234567")], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

        # The lease has been renewed:
        [lease] = self.server.get_slot_leases(storage_index)
        self.assertEqual(lease.get_expiration_time() - initial_expire, 17)

    @inlineCallbacks
    def test_STARAW_new_lease(self):
        """
        When STARAW is run with a new renewal secret on an existing slot, it
        adds a new lease.
        """
        secrets, storage_index = yield self.create_slot()
        [lease] = self.server.get_slot_leases(storage_index)
        initial_expire = lease.get_expiration_time()

        # Time passes...
        self.fake_sleep(19)

        # We do another write:
        (written, _) = yield self.staraw(
            storage_index,
            (secrets[0], new_secret(), new_secret()),
            tw_vectors={
                0: ([], [(0, b"1234567")], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

        # A new lease was added:
        [lease1, lease2] = self.server.get_slot_leases(storage_index)
        self.assertEqual(lease1.get_expiration_time(), initial_expire)
        self.assertEqual(lease2.get_expiration_time() - initial_expire, 19)

    @inlineCallbacks
    def test_add_lease_renewal(self):
        """
        If the lease secret is reused, ``add_lease()`` extends the existing
        lease.
        """
        secrets, storage_index = yield self.create_slot()
        [lease] = self.server.get_slot_leases(storage_index)
        initial_expiration_time = lease.get_expiration_time()

        # Time passes:
        self.fake_sleep(178)

        # We renew the lease:
        yield self.storage_client.add_lease(storage_index, secrets[1], secrets[2])
        [lease] = self.server.get_slot_leases(storage_index)
        new_expiration_time = lease.get_expiration_time()
        self.assertEqual(new_expiration_time - initial_expiration_time, 178)

    @inlineCallbacks
    def test_add_new_lease(self):
        """
        If a new lease secret is used, ``add_lease()`` creates a new lease.
        """
        secrets, storage_index = yield self.create_slot()
        [lease] = self.server.get_slot_leases(storage_index)
        initial_expiration_time = lease.get_expiration_time()

        # Time passes:
        self.fake_sleep(167)

        # We create a new lease:
        renew_secret = new_secret()
        cancel_secret = new_secret()
        yield self.storage_client.add_lease(storage_index, renew_secret, cancel_secret)
        [lease1, lease2] = self.server.get_slot_leases(storage_index)
        self.assertEqual(lease1.get_expiration_time(), initial_expiration_time)
        self.assertEqual(lease2.get_expiration_time() - initial_expiration_time, 167)


class _SharedMixin(SystemTestMixin):
    """Base class for Foolscap and HTTP mixins."""
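    # This mixin sets up a one-node grid, locates its ``StorageServer``
    # service, and swaps in a ``twisted.internet.task.Clock`` so that
    # ``fake_time()`` and ``fake_sleep()`` deterministically control the time
    # the server uses when creating and renewing leases.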
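    # Subclasses may list test method names here; ``setUp`` skips any test
    # whose name appears in this set (useful while a transport does not yet
    # support some API).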
    SKIP_TESTS: set[str] = set()

    def _get_istorage_server(self):
        native_server = next(iter(self.clients[0].storage_broker.get_known_servers()))
        client = native_server.get_storage_server()
        self.assertTrue(IStorageServer.providedBy(client))
        return client

    @inlineCallbacks
    def setUp(self):
        if self._testMethodName in self.SKIP_TESTS:
            raise SkipTest(
                "Test {} is still not supported".format(self._testMethodName)
            )

        AsyncTestCase.setUp(self)

        self.basedir = "test_istorageserver/" + self.id()
        yield SystemTestMixin.setUp(self)
        yield self.set_up_nodes(1)
        self.server = None
        for s in self.clients[0].services:
            if isinstance(s, StorageServer):
                self.server = s
                break
        assert self.server is not None, "Couldn't find StorageServer"
        self._clock = Clock()
        self._clock.advance(123456)
        self.server._clock = self._clock
        self.storage_client = self._get_istorage_server()

    def fake_time(self):
        """Return the current fake, test-controlled, time."""
        return self._clock.seconds()

    def fake_sleep(self, seconds):
        """Advance the fake time by the given number of seconds."""
        self._clock.advance(seconds)

    @inlineCallbacks
    def tearDown(self):
        AsyncTestCase.tearDown(self)
        yield SystemTestMixin.tearDown(self)


class FoolscapSharedAPIsTests(
    _SharedMixin, IStorageServerSharedAPIsTestsMixin, AsyncTestCase
):
    """Foolscap-specific tests for shared ``IStorageServer`` APIs."""

    FORCE_FOOLSCAP_FOR_STORAGE = True


class HTTPSharedAPIsTests(
    _SharedMixin, IStorageServerSharedAPIsTestsMixin, AsyncTestCase
):
    """HTTP-specific tests for shared ``IStorageServer`` APIs."""

    FORCE_FOOLSCAP_FOR_STORAGE = False


class FoolscapImmutableAPIsTests(
    _SharedMixin, IStorageServerImmutableAPIsTestsMixin, AsyncTestCase
):
    """Foolscap-specific tests for immutable ``IStorageServer`` APIs."""

    FORCE_FOOLSCAP_FOR_STORAGE = True

    def test_disconnection(self):
        """
        If we disconnect in the middle of writing to a bucket, all data is
        wiped, and it's even possible to write different data to the bucket.

        (In the real world one shouldn't do that, but writing different data is
        a good way to test that the original data really was wiped.)

        The HTTP protocol doesn't need this test, since disconnection is a
        meaningless concept for it; this is more about testing that the
        implicit contract the Foolscap implementation depends on doesn't
        change as we refactor things.
        """
        return self.abort_or_disconnect_half_way(lambda _: self.disconnect())

    @inlineCallbacks
    def disconnect(self):
        """
        Disconnect and then reconnect with a new ``IStorageServer``.
        """
        current = self.storage_client
        yield self.bounce_client(0)
        self.storage_client = self._get_istorage_server()
        assert self.storage_client is not current


class HTTPImmutableAPIsTests(
    _SharedMixin, IStorageServerImmutableAPIsTestsMixin, AsyncTestCase
):
    """HTTP-specific tests for immutable ``IStorageServer`` APIs."""

    FORCE_FOOLSCAP_FOR_STORAGE = False


class FoolscapMutableAPIsTests(
    _SharedMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase
):
    """Foolscap-specific tests for mutable ``IStorageServer`` APIs."""

    FORCE_FOOLSCAP_FOR_STORAGE = True


class HTTPMutableAPIsTests(
    _SharedMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase
):
    """HTTP-specific tests for mutable ``IStorageServer`` APIs."""

    FORCE_FOOLSCAP_FOR_STORAGE = False
---|