1 | """ |
---|
2 | Tests for twisted.storage that use Web APIs. |
---|
3 | |
---|
4 | Partially ported to Python 3. |
---|
5 | """ |
---|
6 | |
---|
7 | import time |
---|
8 | import os.path |
---|
9 | import re |
---|
10 | import json |
---|
11 | from unittest import skipIf |
---|
12 | from io import StringIO |
---|
13 | |
---|
14 | from twisted.trial import unittest |
---|
15 | from twisted.internet import defer |
---|
16 | from twisted.application import service |
---|
17 | from twisted.web.template import flattenString |
---|
18 | from twisted.python.filepath import FilePath |
---|
19 | from twisted.python.runtime import platform |
---|
20 | |
---|
21 | from foolscap.api import fireEventually |
---|
22 | from allmydata.util import fileutil, hashutil, base32, pollmixin |
---|
23 | from allmydata.storage.common import storage_index_to_dir, \ |
---|
24 | UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError |
---|
25 | from allmydata.storage.server import StorageServer |
---|
26 | from allmydata.storage.crawler import ( |
---|
27 | BucketCountingCrawler, |
---|
28 | _LeaseStateSerializer, |
---|
29 | ) |
---|
30 | from allmydata.storage.expirer import ( |
---|
31 | LeaseCheckingCrawler, |
---|
32 | _HistorySerializer, |
---|
33 | ) |
---|
34 | from allmydata.web.storage import ( |
---|
35 | StorageStatus, |
---|
36 | StorageStatusElement, |
---|
37 | remove_prefix |
---|
38 | ) |
---|
39 | from allmydata.scripts.admin import ( |
---|
40 | migrate_crawler, |
---|
41 | ) |
---|
42 | from allmydata.scripts.runner import ( |
---|
43 | Options, |
---|
44 | ) |
---|
45 | |
---|
46 | from .common_web import ( |
---|
47 | render, |
---|
48 | ) |
---|
49 | |
---|
def remove_tags(s):
    """
    Strip HTML tags from the bytestring ``s`` and collapse whitespace.

    Each tag is replaced by a single space, then every run of whitespace
    is squashed to a single space, which makes substring assertions
    against rendered pages much simpler.
    """
    without_tags = re.sub(br'<[^>]*>', b' ', s)
    return re.sub(br'\s+', b' ', without_tags)
---|
54 | |
---|
def renderSynchronously(ss):
    """
    Return fully rendered HTML document.

    :param _StorageStatus ss: a StorageStatus instance.
    """
    rendering = renderDeferred(ss)
    return unittest.TestCase().successResultOf(rendering)
---|
62 | |
---|
def renderDeferred(ss):
    """
    Return a `Deferred` HTML renderer.

    :param _StorageStatus ss: a StorageStatus instance.
    """
    return flattenString(None, StorageStatusElement(ss._storage, ss._nickname))
---|
71 | |
---|
def renderJSON(resource):
    """
    Render a JSON from the given resource.
    """
    json_args = {b"t": [b"json"]}
    return render(resource, json_args)
---|
77 | |
---|
class MyBucketCountingCrawler(BucketCountingCrawler):
    """
    A bucket-counting crawler that, after each finished prefix, fires the
    next Deferred queued on ``self.hook_ds`` so a test can observe the
    crawler's progress step by step.
    """
    def finished_prefix(self, cycle, prefix):
        BucketCountingCrawler.finished_prefix(self, cycle, prefix)
        if not self.hook_ds:
            return
        next_hook = self.hook_ds.pop(0)
        next_hook.callback(None)
---|
84 | |
---|
class MyStorageServer(StorageServer):
    """
    A storage server whose bucket counter is a ``MyBucketCountingCrawler``,
    giving tests per-prefix progress hooks.
    """
    def add_bucket_counter(self):
        state_path = os.path.join(self.storedir, "bucket_counter.state")
        counter = MyBucketCountingCrawler(self, state_path)
        self.bucket_counter = counter
        counter.setServiceParent(self)
---|
90 | |
---|
91 | |
---|
class BucketCounter(unittest.TestCase, pollmixin.PollMixin):
    """
    Tests for the bucket-counting crawler's state and for how that state
    is rendered on the storage-status web page.
    """

    def setUp(self):
        # every test gets a fresh, running MultiService to parent servers
        self.s = service.MultiService()
        self.s.startService()
    def tearDown(self):
        return self.s.stopService()

    def test_bucket_counter(self):
        """
        The status page tracks a counting cycle from "Not computed yet",
        through an in-progress crawl, to a finished bucket total.
        """
        basedir = "storage/BucketCounter/bucket_counter"
        fileutil.make_dirs(basedir)
        ss = StorageServer(basedir, b"\x00" * 20)
        # to make sure we capture the bucket-counting-crawler in the middle
        # of a cycle, we reach in and reduce its maximum slice time to 0. We
        # also make it start sooner than usual.
        ss.bucket_counter.slow_start = 0
        orig_cpu_slice = ss.bucket_counter.cpu_slice
        ss.bucket_counter.cpu_slice = 0
        ss.setServiceParent(self.s)

        w = StorageStatus(ss)

        # this sample is before the crawler has started doing anything
        html = renderSynchronously(w)
        self.failUnlessIn(b"<h1>Storage Server Status</h1>", html)
        s = remove_tags(html)
        self.failUnlessIn(b"Accepting new shares: Yes", s)
        self.failUnlessIn(b"Reserved space: - 0 B (0)", s)
        self.failUnlessIn(b"Total buckets: Not computed yet", s)
        self.failUnlessIn(b"Next crawl in", s)

        # give the bucket-counting-crawler one tick to get started. The
        # cpu_slice=0 will force it to yield right after it processes the
        # first prefix

        d = fireEventually()
        def _check(ignored):
            # are we really right after the first prefix?
            state = ss.bucket_counter.get_state()
            if state["last-complete-prefix"] is None:
                # not started yet: reschedule ourselves for the next tick
                d2 = fireEventually()
                d2.addCallback(_check)
                return d2
            self.failUnlessEqual(state["last-complete-prefix"],
                                 ss.bucket_counter.prefixes[0])
            ss.bucket_counter.cpu_slice = 100.0 # finish as fast as possible
            # mid-cycle, the page should describe the crawl in progress
            html = renderSynchronously(w)
            s = remove_tags(html)
            self.failUnlessIn(b" Current crawl ", s)
            self.failUnlessIn(b" (next work in ", s)
        d.addCallback(_check)

        # now give it enough time to complete a full cycle
        def _watch():
            return not ss.bucket_counter.get_progress()["cycle-in-progress"]
        d.addCallback(lambda ignored: self.poll(_watch))
        def _check2(ignored):
            ss.bucket_counter.cpu_slice = orig_cpu_slice
            html = renderSynchronously(w)
            s = remove_tags(html)
            self.failUnlessIn(b"Total buckets: 0 (the number of", s)
            self.failUnless(b"Next crawl in 59 minutes" in s or b"Next crawl in 60 minutes" in s, s)
        d.addCallback(_check2)
        return d

    def test_bucket_counter_cleanup(self):
        """
        Bogus entries injected into the crawler's state mid-cycle are
        discarded by the time the cycle finishes.
        """
        basedir = "storage/BucketCounter/bucket_counter_cleanup"
        fileutil.make_dirs(basedir)
        ss = StorageServer(basedir, b"\x00" * 20)
        # to make sure we capture the bucket-counting-crawler in the middle
        # of a cycle, we reach in and reduce its maximum slice time to 0.
        ss.bucket_counter.slow_start = 0
        orig_cpu_slice = ss.bucket_counter.cpu_slice
        ss.bucket_counter.cpu_slice = 0
        ss.setServiceParent(self.s)

        d = fireEventually()

        def _after_first_prefix(ignored):
            # note: raw .state, not get_state(), because we mutate it below
            state = ss.bucket_counter.state
            if state["last-complete-prefix"] is None:
                # not started yet: reschedule ourselves for the next tick
                d2 = fireEventually()
                d2.addCallback(_after_first_prefix)
                return d2
            ss.bucket_counter.cpu_slice = 100.0 # finish as fast as possible
            # now sneak in and mess with its state, to make sure it cleans up
            # properly at the end of the cycle
            self.failUnlessEqual(state["last-complete-prefix"],
                                 ss.bucket_counter.prefixes[0])
            state["bucket-counts"][-12] = {}
            state["storage-index-samples"]["bogusprefix!"] = (-12, [])
            ss.bucket_counter.save_state()
        d.addCallback(_after_first_prefix)

        # now give it enough time to complete a cycle
        def _watch():
            return not ss.bucket_counter.get_progress()["cycle-in-progress"]
        d.addCallback(lambda ignored: self.poll(_watch))
        def _check2(ignored):
            ss.bucket_counter.cpu_slice = orig_cpu_slice
            s = ss.bucket_counter.get_state()
            self.failIf(-12 in s["bucket-counts"], list(s["bucket-counts"].keys()))
            self.failIf("bogusprefix!" in s["storage-index-samples"],
                        list(s["storage-index-samples"].keys()))
        d.addCallback(_check2)
        return d

    def test_bucket_counter_eta(self):
        """
        No ETA is shown before the first prefix finishes; once one (and
        then two) prefixes finish, the page reports an ETA.
        """
        basedir = "storage/BucketCounter/bucket_counter_eta"
        fileutil.make_dirs(basedir)
        ss = MyStorageServer(basedir, b"\x00" * 20)
        ss.bucket_counter.slow_start = 0
        # these will be fired inside finished_prefix()
        hooks = ss.bucket_counter.hook_ds = [defer.Deferred() for i in range(3)]
        w = StorageStatus(ss)

        d = defer.Deferred()

        def _check_1(ignored):
            # no ETA is available yet
            html = renderSynchronously(w)
            s = remove_tags(html)
            self.failUnlessIn(b"complete (next work", s)

        def _check_2(ignored):
            # one prefix has finished, so an ETA based upon that elapsed time
            # should be available.
            html = renderSynchronously(w)
            s = remove_tags(html)
            self.failUnlessIn(b"complete (ETA ", s)

        def _check_3(ignored):
            # two prefixes have finished
            html = renderSynchronously(w)
            s = remove_tags(html)
            self.failUnlessIn(b"complete (ETA ", s)
            d.callback("done")

        hooks[0].addCallback(_check_1).addErrback(d.errback)
        hooks[1].addCallback(_check_2).addErrback(d.errback)
        hooks[2].addCallback(_check_3).addErrback(d.errback)

        ss.setServiceParent(self.s)
        return d
---|
236 | |
---|
class InstrumentedLeaseCheckingCrawler(LeaseCheckingCrawler):
    """
    A lease crawler which can be told to pause itself right after the
    first bucket it processes, so tests can inspect mid-cycle state.
    """
    stop_after_first_bucket = False

    def process_bucket(self, *args, **kwargs):
        LeaseCheckingCrawler.process_bucket(self, *args, **kwargs)
        if not self.stop_after_first_bucket:
            return
        # first bucket done: make the crawler yield immediately
        self.stop_after_first_bucket = False
        self.cpu_slice = -1.0

    def yielding(self, sleep_time):
        if not self.stop_after_first_bucket:
            self.cpu_slice = 500
---|
247 | |
---|
class BrokenStatResults(object):
    # Plain attribute container used by No_ST_BLOCKS_LeaseCheckingCrawler
    # to hold copied os.stat() fields while omitting st_blocks.
    pass
---|
250 | |
---|
class No_ST_BLOCKS_LeaseCheckingCrawler(LeaseCheckingCrawler):
    """
    A lease crawler whose stat() results lack the ``st_blocks``
    attribute, exercising the code path for platforms that do not
    report block usage.
    """
    def stat(self, fn):
        real = os.stat(fn)
        fake = BrokenStatResults()
        # copy every public attribute except st_blocks
        for name in dir(real):
            if name.startswith("_") or name == "st_blocks":
                continue
            setattr(fake, name, getattr(real, name))
        return fake
---|
262 | |
---|
class InstrumentedStorageServer(StorageServer):
    # storage server whose lease crawler can pause after the first bucket
    LeaseCheckerClass = InstrumentedLeaseCheckingCrawler
---|
class No_ST_BLOCKS_StorageServer(StorageServer):
    # storage server whose lease crawler's stat() lacks st_blocks
    LeaseCheckerClass = No_ST_BLOCKS_LeaseCheckingCrawler
---|
267 | |
---|
268 | class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): |
---|
269 | |
---|
    def setUp(self):
        # every test gets a fresh, running MultiService to parent servers
        self.s = service.MultiService()
        self.s.startService()
---|
    def tearDown(self):
        # stopping the service also stops any crawler parented under it
        return self.s.stopService()
---|
275 | |
---|
    def make_shares(self, ss):
        """
        Populate storage server ``ss`` with two immutable and two mutable
        shares (1000 bytes of 0xff each).

        The second immutable and second mutable share each get an extra
        lease.  Records the storage indexes in ``self.sis`` and the lease
        secrets in ``self.renew_secrets`` / ``self.cancel_secrets``.
        """
        def make(si):
            # (storage index, renew secret, cancel secret)
            return (si, hashutil.tagged_hash(b"renew", si),
                    hashutil.tagged_hash(b"cancel", si))
        def make_mutable(si):
            # same as make(), plus a write-enabler for mutable slots
            return (si, hashutil.tagged_hash(b"renew", si),
                    hashutil.tagged_hash(b"cancel", si),
                    hashutil.tagged_hash(b"write-enabler", si))
        def make_extra_lease(si, num):
            # distinct (renew, cancel) secrets for an additional lease
            return (hashutil.tagged_hash(b"renew-%d" % num, si),
                    hashutil.tagged_hash(b"cancel-%d" % num, si))

        immutable_si_0, rs0, cs0 = make(b"\x00" * 16)
        immutable_si_1, rs1, cs1 = make(b"\x01" * 16)
        rs1a, cs1a = make_extra_lease(immutable_si_1, 1)
        mutable_si_2, rs2, cs2, we2 = make_mutable(b"\x02" * 16)
        mutable_si_3, rs3, cs3, we3 = make_mutable(b"\x03" * 16)
        rs3a, cs3a = make_extra_lease(mutable_si_3, 1)
        sharenums = [0]
        # note: 'tahoe debug dump-share' will not handle this file, since the
        # inner contents are not a valid CHK share
        data = b"\xff" * 1000

        a,w = ss.allocate_buckets(immutable_si_0, rs0, cs0, sharenums,
                                  1000)
        w[0].write(0, data)
        w[0].close()

        a,w = ss.allocate_buckets(immutable_si_1, rs1, cs1, sharenums,
                                  1000)
        w[0].write(0, data)
        w[0].close()
        ss.add_lease(immutable_si_1, rs1a, cs1a)

        writev = ss.slot_testv_and_readv_and_writev
        writev(mutable_si_2, (we2, rs2, cs2),
               {0: ([], [(0,data)], len(data))}, [])
        writev(mutable_si_3, (we3, rs3, cs3),
               {0: ([], [(0,data)], len(data))}, [])
        ss.add_lease(mutable_si_3, rs3a, cs3a)

        self.sis = [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3]
        self.renew_secrets = [rs0, rs1, rs1a, rs2, rs3, rs3a]
        self.cancel_secrets = [cs0, cs1, cs1a, cs2, cs3, cs3a]
---|
320 | |
---|
    def test_basic(self):
        """
        Run one full lease-crawler cycle with expiration disabled:
        mid-cycle state, final history, the HTML rendering, and the JSON
        rendering all report the expected (zero-recovery) numbers.
        """
        basedir = "storage/LeaseCrawler/basic"
        fileutil.make_dirs(basedir)
        ss = InstrumentedStorageServer(basedir, b"\x00" * 20)
        # make it start sooner than usual.
        lc = ss.lease_checker
        lc.slow_start = 0
        lc.cpu_slice = 500
        lc.stop_after_first_bucket = True
        webstatus = StorageStatus(ss)

        # create a few shares, with some leases on them
        self.make_shares(ss)
        [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis

        # add a non-sharefile to exercise another code path
        fn = os.path.join(ss.sharedir,
                          storage_index_to_dir(immutable_si_0),
                          "not-a-share")
        f = open(fn, "wb")
        f.write(b"I am not a share.\n")
        f.close()

        # this is before the crawl has started, so we're not in a cycle yet
        initial_state = lc.get_state()
        self.failIf(lc.get_progress()["cycle-in-progress"])
        self.failIfIn("cycle-to-date", initial_state)
        self.failIfIn("estimated-remaining-cycle", initial_state)
        self.failIfIn("estimated-current-cycle", initial_state)
        self.failUnlessIn("history", initial_state)
        self.failUnlessEqual(initial_state["history"], {})

        ss.setServiceParent(self.s)

        DAY = 24*60*60

        d = fireEventually()

        # now examine the state right after the first bucket has been
        # processed.
        def _after_first_bucket(ignored):
            initial_state = lc.get_state()
            if "cycle-to-date" not in initial_state:
                # first bucket not processed yet: try again next tick
                d2 = fireEventually()
                d2.addCallback(_after_first_bucket)
                return d2
            self.failUnlessIn("cycle-to-date", initial_state)
            self.failUnlessIn("estimated-remaining-cycle", initial_state)
            self.failUnlessIn("estimated-current-cycle", initial_state)
            self.failUnlessIn("history", initial_state)
            self.failUnlessEqual(initial_state["history"], {})

            so_far = initial_state["cycle-to-date"]
            self.failUnlessEqual(so_far["expiration-enabled"], False)
            self.failUnlessIn("configured-expiration-mode", so_far)
            self.failUnlessIn("lease-age-histogram", so_far)
            lah = so_far["lease-age-histogram"]
            self.failUnlessEqual(type(lah), list)
            self.failUnlessEqual(len(lah), 1)
            self.failUnlessEqual(lah, [ (0.0, DAY, 1) ] )
            self.failUnlessEqual(so_far["leases-per-share-histogram"], {"1": 1})
            self.failUnlessEqual(so_far["corrupt-shares"], [])
            sr1 = so_far["space-recovered"]
            self.failUnlessEqual(sr1["examined-buckets"], 1)
            self.failUnlessEqual(sr1["examined-shares"], 1)
            self.failUnlessEqual(sr1["actual-shares"], 0)
            self.failUnlessEqual(sr1["configured-diskbytes"], 0)
            self.failUnlessEqual(sr1["original-sharebytes"], 0)
            left = initial_state["estimated-remaining-cycle"]
            sr2 = left["space-recovered"]
            self.failUnless(sr2["examined-buckets"] > 0, sr2["examined-buckets"])
            self.failUnless(sr2["examined-shares"] > 0, sr2["examined-shares"])
            self.failIfEqual(sr2["actual-shares"], None)
            self.failIfEqual(sr2["configured-diskbytes"], None)
            self.failIfEqual(sr2["original-sharebytes"], None)
        d.addCallback(_after_first_bucket)
        d.addCallback(lambda ign: renderDeferred(webstatus))
        def _check_html_in_cycle(html):
            s = remove_tags(html)
            self.failUnlessIn(b"So far, this cycle has examined "
                              b"1 shares in 1 buckets (0 mutable / 1 immutable) ", s)
            self.failUnlessIn(b"and has recovered: "
                              b"0 shares, 0 buckets (0 mutable / 0 immutable), "
                              b"0 B (0 B / 0 B)", s)
            self.failUnlessIn(b"If expiration were enabled, "
                              b"we would have recovered: "
                              b"0 shares, 0 buckets (0 mutable / 0 immutable),"
                              b" 0 B (0 B / 0 B) by now", s)
            self.failUnlessIn(b"and the remainder of this cycle "
                              b"would probably recover: "
                              b"0 shares, 0 buckets (0 mutable / 0 immutable),"
                              b" 0 B (0 B / 0 B)", s)
            self.failUnlessIn(b"and the whole cycle would probably recover: "
                              b"0 shares, 0 buckets (0 mutable / 0 immutable),"
                              b" 0 B (0 B / 0 B)", s)
            self.failUnlessIn(b"if we were strictly using each lease's default "
                              b"31-day lease lifetime", s)
            self.failUnlessIn(b"this cycle would be expected to recover: ", s)
        d.addCallback(_check_html_in_cycle)

        # wait for the crawler to finish the first cycle. Nothing should have
        # been removed.
        def _wait():
            return bool(lc.get_state()["last-cycle-finished"] is not None)
        d.addCallback(lambda ign: self.poll(_wait))

        def _after_first_cycle(ignored):
            s = lc.get_state()
            # mid-cycle keys must be gone once the cycle completes
            self.failIf("cycle-to-date" in s)
            self.failIf("estimated-remaining-cycle" in s)
            self.failIf("estimated-current-cycle" in s)
            last = s["history"]["0"]
            self.failUnlessIn("cycle-start-finish-times", last)
            self.failUnlessEqual(type(last["cycle-start-finish-times"]), list)
            self.failUnlessEqual(last["expiration-enabled"], False)
            self.failUnlessIn("configured-expiration-mode", last)

            self.failUnlessIn("lease-age-histogram", last)
            lah = last["lease-age-histogram"]
            self.failUnlessEqual(type(lah), list)
            self.failUnlessEqual(len(lah), 1)
            self.failUnlessEqual(lah, [ [0.0, DAY, 6] ] )

            self.failUnlessEqual(last["leases-per-share-histogram"], {"1": 2, "2": 2})
            self.failUnlessEqual(last["corrupt-shares"], [])

            rec = last["space-recovered"]
            self.failUnlessEqual(rec["examined-buckets"], 4)
            self.failUnlessEqual(rec["examined-shares"], 4)
            self.failUnlessEqual(rec["actual-buckets"], 0)
            self.failUnlessEqual(rec["original-buckets"], 0)
            self.failUnlessEqual(rec["configured-buckets"], 0)
            self.failUnlessEqual(rec["actual-shares"], 0)
            self.failUnlessEqual(rec["original-shares"], 0)
            self.failUnlessEqual(rec["configured-shares"], 0)
            self.failUnlessEqual(rec["actual-diskbytes"], 0)
            self.failUnlessEqual(rec["original-diskbytes"], 0)
            self.failUnlessEqual(rec["configured-diskbytes"], 0)
            self.failUnlessEqual(rec["actual-sharebytes"], 0)
            self.failUnlessEqual(rec["original-sharebytes"], 0)
            self.failUnlessEqual(rec["configured-sharebytes"], 0)

            def _get_sharefile(si):
                return list(ss._iter_share_files(si))[0]
            def count_leases(si):
                return len(list(_get_sharefile(si).get_leases()))
            # all leases should still be present
            self.failUnlessEqual(count_leases(immutable_si_0), 1)
            self.failUnlessEqual(count_leases(immutable_si_1), 2)
            self.failUnlessEqual(count_leases(mutable_si_2), 1)
            self.failUnlessEqual(count_leases(mutable_si_3), 2)
        d.addCallback(_after_first_cycle)
        d.addCallback(lambda ign: renderDeferred(webstatus))
        def _check_html(html):
            s = remove_tags(html)
            self.failUnlessIn(b"recovered: 0 shares, 0 buckets "
                              b"(0 mutable / 0 immutable), 0 B (0 B / 0 B) ", s)
            self.failUnlessIn(b"and saw a total of 4 shares, 4 buckets "
                              b"(2 mutable / 2 immutable),", s)
            self.failUnlessIn(b"but expiration was not enabled", s)
        d.addCallback(_check_html)
        d.addCallback(lambda ign: renderJSON(webstatus))
        def _check_json(raw):
            data = json.loads(raw)
            self.failUnlessIn("lease-checker", data)
            self.failUnlessIn("lease-checker-progress", data)
        d.addCallback(_check_json)
        return d
---|
488 | |
---|
    def backdate_lease(self, sf, renew_secret, new_expire_time):
        """
        Force the lease identified by ``renew_secret`` on share file
        ``sf`` to expire at ``new_expire_time`` (which may be in the
        past), to simulate an aged lease.
        """
        sf.renew_lease(renew_secret, new_expire_time, allow_backdate=True)
---|
491 | |
---|
    def test_expire_age(self):
        """
        With age-based expiration (override 2000s), backdated leases are
        expired: shares whose only lease was backdated are deleted, shares
        holding a fresh extra lease survive with one lease, and both the
        crawler state and the web rendering report the recovered space.
        """
        basedir = "storage/LeaseCrawler/expire_age"
        fileutil.make_dirs(basedir)
        # setting expiration_time to 2000 means that any lease which is more
        # than 2000s old will be expired.
        ss = InstrumentedStorageServer(basedir, b"\x00" * 20,
                                       expiration_enabled=True,
                                       expiration_mode="age",
                                       expiration_override_lease_duration=2000)
        # make it start sooner than usual.
        lc = ss.lease_checker
        lc.slow_start = 0
        lc.stop_after_first_bucket = True
        webstatus = StorageStatus(ss)

        # create a few shares, with some leases on them
        self.make_shares(ss)
        [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis

        def count_shares(si):
            return len(list(ss._iter_share_files(si)))
        def _get_sharefile(si):
            return list(ss._iter_share_files(si))[0]
        def count_leases(si):
            return len(list(_get_sharefile(si).get_leases()))

        # sanity-check the initial share/lease layout from make_shares()
        self.failUnlessEqual(count_shares(immutable_si_0), 1)
        self.failUnlessEqual(count_leases(immutable_si_0), 1)
        self.failUnlessEqual(count_shares(immutable_si_1), 1)
        self.failUnlessEqual(count_leases(immutable_si_1), 2)
        self.failUnlessEqual(count_shares(mutable_si_2), 1)
        self.failUnlessEqual(count_leases(mutable_si_2), 1)
        self.failUnlessEqual(count_shares(mutable_si_3), 1)
        self.failUnlessEqual(count_leases(mutable_si_3), 2)

        # artificially crank back the expiration time on the first lease of
        # each share, to make it look like it expired already (age=1000s).
        # Some shares have an extra lease which is set to expire at the
        # default time in 31 days from now (age=31days). We then run the
        # crawler, which will expire the first lease, making some shares get
        # deleted and others stay alive (with one remaining lease)
        now = time.time()

        sf0 = _get_sharefile(immutable_si_0)
        self.backdate_lease(sf0, self.renew_secrets[0], now - 1000)
        sf0_size = os.stat(sf0.home).st_size

        # immutable_si_1 gets an extra lease
        sf1 = _get_sharefile(immutable_si_1)
        self.backdate_lease(sf1, self.renew_secrets[1], now - 1000)

        sf2 = _get_sharefile(mutable_si_2)
        self.backdate_lease(sf2, self.renew_secrets[3], now - 1000)
        sf2_size = os.stat(sf2.home).st_size

        # mutable_si_3 gets an extra lease
        sf3 = _get_sharefile(mutable_si_3)
        self.backdate_lease(sf3, self.renew_secrets[4], now - 1000)

        ss.setServiceParent(self.s)

        d = fireEventually()
        # examine the state right after the first bucket has been processed
        def _after_first_bucket(ignored):
            p = lc.get_progress()
            if not p["cycle-in-progress"]:
                # cycle not underway yet: try again next tick
                d2 = fireEventually()
                d2.addCallback(_after_first_bucket)
                return d2
        d.addCallback(_after_first_bucket)
        d.addCallback(lambda ign: renderDeferred(webstatus))
        def _check_html_in_cycle(html):
            s = remove_tags(html)
            # the first bucket encountered gets deleted, and its prefix
            # happens to be about 1/5th of the way through the ring, so the
            # predictor thinks we'll have 5 shares and that we'll delete them
            # all. This part of the test depends upon the SIs landing right
            # where they do now.
            self.failUnlessIn(b"The remainder of this cycle is expected to "
                              b"recover: 4 shares, 4 buckets", s)
            self.failUnlessIn(b"The whole cycle is expected to examine "
                              b"5 shares in 5 buckets and to recover: "
                              b"5 shares, 5 buckets", s)
        d.addCallback(_check_html_in_cycle)

        # wait for the crawler to finish the first cycle. Two shares should
        # have been removed
        def _wait():
            return bool(lc.get_state()["last-cycle-finished"] is not None)
        d.addCallback(lambda ign: self.poll(_wait))

        def _after_first_cycle(ignored):
            # shares with only the backdated lease are gone; the others
            # survive with exactly one (fresh) lease remaining
            self.failUnlessEqual(count_shares(immutable_si_0), 0)
            self.failUnlessEqual(count_shares(immutable_si_1), 1)
            self.failUnlessEqual(count_leases(immutable_si_1), 1)
            self.failUnlessEqual(count_shares(mutable_si_2), 0)
            self.failUnlessEqual(count_shares(mutable_si_3), 1)
            self.failUnlessEqual(count_leases(mutable_si_3), 1)

            s = lc.get_state()
            last = s["history"]["0"]

            self.failUnlessEqual(last["expiration-enabled"], True)
            self.failUnlessEqual(last["configured-expiration-mode"],
                                 ["age", 2000, None, ["mutable", "immutable"]])
            self.failUnlessEqual(last["leases-per-share-histogram"], {"1": 2, "2": 2})

            rec = last["space-recovered"]
            self.failUnlessEqual(rec["examined-buckets"], 4)
            self.failUnlessEqual(rec["examined-shares"], 4)
            self.failUnlessEqual(rec["actual-buckets"], 2)
            self.failUnlessEqual(rec["original-buckets"], 2)
            self.failUnlessEqual(rec["configured-buckets"], 2)
            self.failUnlessEqual(rec["actual-shares"], 2)
            self.failUnlessEqual(rec["original-shares"], 2)
            self.failUnlessEqual(rec["configured-shares"], 2)
            size = sf0_size + sf2_size
            self.failUnlessEqual(rec["actual-sharebytes"], size)
            self.failUnlessEqual(rec["original-sharebytes"], size)
            self.failUnlessEqual(rec["configured-sharebytes"], size)
            # different platforms have different notions of "blocks used by
            # this file", so merely assert that it's a number
            self.failUnless(rec["actual-diskbytes"] >= 0,
                            rec["actual-diskbytes"])
            self.failUnless(rec["original-diskbytes"] >= 0,
                            rec["original-diskbytes"])
            self.failUnless(rec["configured-diskbytes"] >= 0,
                            rec["configured-diskbytes"])
        d.addCallback(_after_first_cycle)
        d.addCallback(lambda ign: renderDeferred(webstatus))
        def _check_html(html):
            s = remove_tags(html)
            self.failUnlessIn(b"Expiration Enabled: expired leases will be removed", s)
            self.failUnlessIn(b"Leases created or last renewed more than 33 minutes ago will be considered expired.", s)
            self.failUnlessIn(b" recovered: 2 shares, 2 buckets (1 mutable / 1 immutable), ", s)
        d.addCallback(_check_html)
        return d
---|
629 | |
---|
630 | def test_expire_cutoff_date(self): |
---|
631 | basedir = "storage/LeaseCrawler/expire_cutoff_date" |
---|
632 | fileutil.make_dirs(basedir) |
---|
633 | # setting cutoff-date to 2000 seconds ago means that any lease which |
---|
634 | # is more than 2000s old will be expired. |
---|
635 | now = time.time() |
---|
636 | then = int(now - 2000) |
---|
637 | ss = InstrumentedStorageServer(basedir, b"\x00" * 20, |
---|
638 | expiration_enabled=True, |
---|
639 | expiration_mode="cutoff-date", |
---|
640 | expiration_cutoff_date=then) |
---|
641 | # make it start sooner than usual. |
---|
642 | lc = ss.lease_checker |
---|
643 | lc.slow_start = 0 |
---|
644 | lc.stop_after_first_bucket = True |
---|
645 | webstatus = StorageStatus(ss) |
---|
646 | |
---|
647 | # create a few shares, with some leases on them |
---|
648 | self.make_shares(ss) |
---|
649 | [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis |
---|
650 | |
---|
651 | def count_shares(si): |
---|
652 | return len(list(ss._iter_share_files(si))) |
---|
653 | def _get_sharefile(si): |
---|
654 | return list(ss._iter_share_files(si))[0] |
---|
655 | def count_leases(si): |
---|
656 | return len(list(_get_sharefile(si).get_leases())) |
---|
657 | |
---|
658 | self.failUnlessEqual(count_shares(immutable_si_0), 1) |
---|
659 | self.failUnlessEqual(count_leases(immutable_si_0), 1) |
---|
660 | self.failUnlessEqual(count_shares(immutable_si_1), 1) |
---|
661 | self.failUnlessEqual(count_leases(immutable_si_1), 2) |
---|
662 | self.failUnlessEqual(count_shares(mutable_si_2), 1) |
---|
663 | self.failUnlessEqual(count_leases(mutable_si_2), 1) |
---|
664 | self.failUnlessEqual(count_shares(mutable_si_3), 1) |
---|
665 | self.failUnlessEqual(count_leases(mutable_si_3), 2) |
---|
666 | |
---|
667 | # artificially crank back the expiration time on the first lease of |
---|
668 | # each share, to make it look like was renewed 3000s ago. To achieve |
---|
669 | # this, we need to set the expiration time to now-3000+31days. This |
---|
670 | # will change when the lease format is improved to contain both |
---|
671 | # create/renew time and duration. |
---|
672 | new_expiration_time = now - 3000 + 31*24*60*60 |
---|
673 | |
---|
674 | # Some shares have an extra lease which is set to expire at the |
---|
675 | # default time in 31 days from now (age=31days). We then run the |
---|
676 | # crawler, which will expire the first lease, making some shares get |
---|
677 | # deleted and others stay alive (with one remaining lease) |
---|
678 | |
---|
679 | sf0 = _get_sharefile(immutable_si_0) |
---|
680 | self.backdate_lease(sf0, self.renew_secrets[0], new_expiration_time) |
---|
681 | sf0_size = os.stat(sf0.home).st_size |
---|
682 | |
---|
683 | # immutable_si_1 gets an extra lease |
---|
684 | sf1 = _get_sharefile(immutable_si_1) |
---|
685 | self.backdate_lease(sf1, self.renew_secrets[1], new_expiration_time) |
---|
686 | |
---|
687 | sf2 = _get_sharefile(mutable_si_2) |
---|
688 | self.backdate_lease(sf2, self.renew_secrets[3], new_expiration_time) |
---|
689 | sf2_size = os.stat(sf2.home).st_size |
---|
690 | |
---|
691 | # mutable_si_3 gets an extra lease |
---|
692 | sf3 = _get_sharefile(mutable_si_3) |
---|
693 | self.backdate_lease(sf3, self.renew_secrets[4], new_expiration_time) |
---|
694 | |
---|
695 | ss.setServiceParent(self.s) |
---|
696 | |
---|
697 | d = fireEventually() |
---|
698 | # examine the state right after the first bucket has been processed |
---|
699 | def _after_first_bucket(ignored): |
---|
700 | p = lc.get_progress() |
---|
701 | if not p["cycle-in-progress"]: |
---|
702 | d2 = fireEventually() |
---|
703 | d2.addCallback(_after_first_bucket) |
---|
704 | return d2 |
---|
705 | d.addCallback(_after_first_bucket) |
---|
706 | d.addCallback(lambda ign: renderDeferred(webstatus)) |
---|
707 | def _check_html_in_cycle(html): |
---|
708 | s = remove_tags(html) |
---|
709 | # the first bucket encountered gets deleted, and its prefix |
---|
710 | # happens to be about 1/5th of the way through the ring, so the |
---|
711 | # predictor thinks we'll have 5 shares and that we'll delete them |
---|
712 | # all. This part of the test depends upon the SIs landing right |
---|
713 | # where they do now. |
---|
714 | self.failUnlessIn(b"The remainder of this cycle is expected to " |
---|
715 | b"recover: 4 shares, 4 buckets", s) |
---|
716 | self.failUnlessIn(b"The whole cycle is expected to examine " |
---|
717 | b"5 shares in 5 buckets and to recover: " |
---|
718 | b"5 shares, 5 buckets", s) |
---|
719 | d.addCallback(_check_html_in_cycle) |
---|
720 | |
---|
721 | # wait for the crawler to finish the first cycle. Two shares should |
---|
722 | # have been removed |
---|
723 | def _wait(): |
---|
724 | return bool(lc.get_state()["last-cycle-finished"] is not None) |
---|
725 | d.addCallback(lambda ign: self.poll(_wait)) |
---|
726 | |
---|
727 | def _after_first_cycle(ignored): |
---|
728 | self.failUnlessEqual(count_shares(immutable_si_0), 0) |
---|
729 | self.failUnlessEqual(count_shares(immutable_si_1), 1) |
---|
730 | self.failUnlessEqual(count_leases(immutable_si_1), 1) |
---|
731 | self.failUnlessEqual(count_shares(mutable_si_2), 0) |
---|
732 | self.failUnlessEqual(count_shares(mutable_si_3), 1) |
---|
733 | self.failUnlessEqual(count_leases(mutable_si_3), 1) |
---|
734 | |
---|
735 | s = lc.get_state() |
---|
736 | last = s["history"]["0"] |
---|
737 | |
---|
738 | self.failUnlessEqual(last["expiration-enabled"], True) |
---|
739 | self.failUnlessEqual(last["configured-expiration-mode"], |
---|
740 | ["cutoff-date", None, then, |
---|
741 | ["mutable", "immutable"]]) |
---|
742 | self.failUnlessEqual(last["leases-per-share-histogram"], |
---|
743 | {"1": 2, "2": 2}) |
---|
744 | |
---|
745 | rec = last["space-recovered"] |
---|
746 | self.failUnlessEqual(rec["examined-buckets"], 4) |
---|
747 | self.failUnlessEqual(rec["examined-shares"], 4) |
---|
748 | self.failUnlessEqual(rec["actual-buckets"], 2) |
---|
749 | self.failUnlessEqual(rec["original-buckets"], 0) |
---|
750 | self.failUnlessEqual(rec["configured-buckets"], 2) |
---|
751 | self.failUnlessEqual(rec["actual-shares"], 2) |
---|
752 | self.failUnlessEqual(rec["original-shares"], 0) |
---|
753 | self.failUnlessEqual(rec["configured-shares"], 2) |
---|
754 | size = sf0_size + sf2_size |
---|
755 | self.failUnlessEqual(rec["actual-sharebytes"], size) |
---|
756 | self.failUnlessEqual(rec["original-sharebytes"], 0) |
---|
757 | self.failUnlessEqual(rec["configured-sharebytes"], size) |
---|
758 | # different platforms have different notions of "blocks used by |
---|
759 | # this file", so merely assert that it's a number |
---|
760 | self.failUnless(rec["actual-diskbytes"] >= 0, |
---|
761 | rec["actual-diskbytes"]) |
---|
762 | self.failUnless(rec["original-diskbytes"] >= 0, |
---|
763 | rec["original-diskbytes"]) |
---|
764 | self.failUnless(rec["configured-diskbytes"] >= 0, |
---|
765 | rec["configured-diskbytes"]) |
---|
766 | d.addCallback(_after_first_cycle) |
---|
767 | d.addCallback(lambda ign: renderDeferred(webstatus)) |
---|
768 | def _check_html(html): |
---|
769 | s = remove_tags(html) |
---|
770 | self.failUnlessIn(b"Expiration Enabled:" |
---|
771 | b" expired leases will be removed", s) |
---|
772 | date = time.strftime( |
---|
773 | u"%Y-%m-%d (%d-%b-%Y) UTC", time.gmtime(then)).encode("ascii") |
---|
774 | substr =b"Leases created or last renewed before %s will be considered expired." % date |
---|
775 | self.failUnlessIn(substr, s) |
---|
776 | self.failUnlessIn(b" recovered: 2 shares, 2 buckets (1 mutable / 1 immutable), ", s) |
---|
777 | d.addCallback(_check_html) |
---|
778 | return d |
---|
779 | |
---|
780 | def test_only_immutable(self): |
---|
781 | basedir = "storage/LeaseCrawler/only_immutable" |
---|
782 | fileutil.make_dirs(basedir) |
---|
783 | now = time.time() |
---|
784 | then = int(now - 2000) |
---|
785 | ss = StorageServer(basedir, b"\x00" * 20, |
---|
786 | expiration_enabled=True, |
---|
787 | expiration_mode="cutoff-date", |
---|
788 | expiration_cutoff_date=then, |
---|
789 | expiration_sharetypes=("immutable",)) |
---|
790 | lc = ss.lease_checker |
---|
791 | lc.slow_start = 0 |
---|
792 | webstatus = StorageStatus(ss) |
---|
793 | |
---|
794 | self.make_shares(ss) |
---|
795 | [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis |
---|
796 | # set all leases to be expirable |
---|
797 | new_expiration_time = now - 3000 + 31*24*60*60 |
---|
798 | |
---|
799 | def count_shares(si): |
---|
800 | return len(list(ss._iter_share_files(si))) |
---|
801 | def _get_sharefile(si): |
---|
802 | return list(ss._iter_share_files(si))[0] |
---|
803 | def count_leases(si): |
---|
804 | return len(list(_get_sharefile(si).get_leases())) |
---|
805 | |
---|
806 | sf0 = _get_sharefile(immutable_si_0) |
---|
807 | self.backdate_lease(sf0, self.renew_secrets[0], new_expiration_time) |
---|
808 | sf1 = _get_sharefile(immutable_si_1) |
---|
809 | self.backdate_lease(sf1, self.renew_secrets[1], new_expiration_time) |
---|
810 | self.backdate_lease(sf1, self.renew_secrets[2], new_expiration_time) |
---|
811 | sf2 = _get_sharefile(mutable_si_2) |
---|
812 | self.backdate_lease(sf2, self.renew_secrets[3], new_expiration_time) |
---|
813 | sf3 = _get_sharefile(mutable_si_3) |
---|
814 | self.backdate_lease(sf3, self.renew_secrets[4], new_expiration_time) |
---|
815 | self.backdate_lease(sf3, self.renew_secrets[5], new_expiration_time) |
---|
816 | |
---|
817 | ss.setServiceParent(self.s) |
---|
818 | def _wait(): |
---|
819 | return bool(lc.get_state()["last-cycle-finished"] is not None) |
---|
820 | d = self.poll(_wait) |
---|
821 | |
---|
822 | def _after_first_cycle(ignored): |
---|
823 | self.failUnlessEqual(count_shares(immutable_si_0), 0) |
---|
824 | self.failUnlessEqual(count_shares(immutable_si_1), 0) |
---|
825 | self.failUnlessEqual(count_shares(mutable_si_2), 1) |
---|
826 | self.failUnlessEqual(count_leases(mutable_si_2), 1) |
---|
827 | self.failUnlessEqual(count_shares(mutable_si_3), 1) |
---|
828 | self.failUnlessEqual(count_leases(mutable_si_3), 2) |
---|
829 | d.addCallback(_after_first_cycle) |
---|
830 | d.addCallback(lambda ign: renderDeferred(webstatus)) |
---|
831 | def _check_html(html): |
---|
832 | s = remove_tags(html) |
---|
833 | self.failUnlessIn(b"The following sharetypes will be expired: immutable.", s) |
---|
834 | d.addCallback(_check_html) |
---|
835 | return d |
---|
836 | |
---|
837 | def test_only_mutable(self): |
---|
838 | basedir = "storage/LeaseCrawler/only_mutable" |
---|
839 | fileutil.make_dirs(basedir) |
---|
840 | now = time.time() |
---|
841 | then = int(now - 2000) |
---|
842 | ss = StorageServer(basedir, b"\x00" * 20, |
---|
843 | expiration_enabled=True, |
---|
844 | expiration_mode="cutoff-date", |
---|
845 | expiration_cutoff_date=then, |
---|
846 | expiration_sharetypes=("mutable",)) |
---|
847 | lc = ss.lease_checker |
---|
848 | lc.slow_start = 0 |
---|
849 | webstatus = StorageStatus(ss) |
---|
850 | |
---|
851 | self.make_shares(ss) |
---|
852 | [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis |
---|
853 | # set all leases to be expirable |
---|
854 | new_expiration_time = now - 3000 + 31*24*60*60 |
---|
855 | |
---|
856 | def count_shares(si): |
---|
857 | return len(list(ss._iter_share_files(si))) |
---|
858 | def _get_sharefile(si): |
---|
859 | return list(ss._iter_share_files(si))[0] |
---|
860 | def count_leases(si): |
---|
861 | return len(list(_get_sharefile(si).get_leases())) |
---|
862 | |
---|
863 | sf0 = _get_sharefile(immutable_si_0) |
---|
864 | self.backdate_lease(sf0, self.renew_secrets[0], new_expiration_time) |
---|
865 | sf1 = _get_sharefile(immutable_si_1) |
---|
866 | self.backdate_lease(sf1, self.renew_secrets[1], new_expiration_time) |
---|
867 | self.backdate_lease(sf1, self.renew_secrets[2], new_expiration_time) |
---|
868 | sf2 = _get_sharefile(mutable_si_2) |
---|
869 | self.backdate_lease(sf2, self.renew_secrets[3], new_expiration_time) |
---|
870 | sf3 = _get_sharefile(mutable_si_3) |
---|
871 | self.backdate_lease(sf3, self.renew_secrets[4], new_expiration_time) |
---|
872 | self.backdate_lease(sf3, self.renew_secrets[5], new_expiration_time) |
---|
873 | |
---|
874 | ss.setServiceParent(self.s) |
---|
875 | def _wait(): |
---|
876 | return bool(lc.get_state()["last-cycle-finished"] is not None) |
---|
877 | d = self.poll(_wait) |
---|
878 | |
---|
879 | def _after_first_cycle(ignored): |
---|
880 | self.failUnlessEqual(count_shares(immutable_si_0), 1) |
---|
881 | self.failUnlessEqual(count_leases(immutable_si_0), 1) |
---|
882 | self.failUnlessEqual(count_shares(immutable_si_1), 1) |
---|
883 | self.failUnlessEqual(count_leases(immutable_si_1), 2) |
---|
884 | self.failUnlessEqual(count_shares(mutable_si_2), 0) |
---|
885 | self.failUnlessEqual(count_shares(mutable_si_3), 0) |
---|
886 | d.addCallback(_after_first_cycle) |
---|
887 | d.addCallback(lambda ign: renderDeferred(webstatus)) |
---|
888 | def _check_html(html): |
---|
889 | s = remove_tags(html) |
---|
890 | self.failUnlessIn(b"The following sharetypes will be expired: mutable.", s) |
---|
891 | d.addCallback(_check_html) |
---|
892 | return d |
---|
893 | |
---|
894 | def test_bad_mode(self): |
---|
895 | basedir = "storage/LeaseCrawler/bad_mode" |
---|
896 | fileutil.make_dirs(basedir) |
---|
897 | e = self.failUnlessRaises(ValueError, |
---|
898 | StorageServer, basedir, b"\x00" * 20, |
---|
899 | expiration_mode="bogus") |
---|
900 | self.failUnlessIn("GC mode 'bogus' must be 'age' or 'cutoff-date'", str(e)) |
---|
901 | |
---|
    def test_limited_history(self):
        """
        The lease checker retains only a bounded number (10) of per-cycle
        history entries; after 16 cycles (numbered 0..15) the oldest six
        have been discarded.
        """
        basedir = "storage/LeaseCrawler/limited_history"
        fileutil.make_dirs(basedir)
        ss = StorageServer(basedir, b"\x00" * 20)
        # make it start sooner than usual.
        lc = ss.lease_checker
        lc.slow_start = 0
        # generous cpu_slice so each cycle finishes quickly
        lc.cpu_slice = 500

        # create a few shares, with some leases on them
        self.make_shares(ss)

        ss.setServiceParent(self.s)

        def _wait_until_15_cycles_done():
            last = lc.state["last-cycle-finished"]
            if last is not None and last >= 15:
                return True
            # not done yet: kick the crawler's timer so the next cycle
            # starts right away instead of after its normal delay
            if lc.timer:
                lc.timer.reset(0)
            return False
        d = self.poll(_wait_until_15_cycles_done)

        def _check(ignored):
            s = lc.get_state()
            h = s["history"]
            # only the 10 most recent cycles (6..15) are retained
            self.failUnlessEqual(len(h), 10)
            self.failUnlessEqual(max(int(k) for k in h.keys()), 15)
            self.failUnlessEqual(min(int(k) for k in h.keys()), 6)
        d.addCallback(_check)
        return d
---|
933 | |
---|
934 | def test_unpredictable_future(self): |
---|
935 | basedir = "storage/LeaseCrawler/unpredictable_future" |
---|
936 | fileutil.make_dirs(basedir) |
---|
937 | ss = StorageServer(basedir, b"\x00" * 20) |
---|
938 | # make it start sooner than usual. |
---|
939 | lc = ss.lease_checker |
---|
940 | lc.slow_start = 0 |
---|
941 | lc.cpu_slice = -1.0 # stop quickly |
---|
942 | |
---|
943 | self.make_shares(ss) |
---|
944 | |
---|
945 | ss.setServiceParent(self.s) |
---|
946 | |
---|
947 | d = fireEventually() |
---|
948 | def _check(ignored): |
---|
949 | # this should fire after the first bucket is complete, but before |
---|
950 | # the first prefix is complete, so the progress-measurer won't |
---|
951 | # think we've gotten far enough to raise our percent-complete |
---|
952 | # above 0%, triggering the cannot-predict-the-future code in |
---|
953 | # expirer.py . This will have to change if/when the |
---|
954 | # progress-measurer gets smart enough to count buckets (we'll |
---|
955 | # have to interrupt it even earlier, before it's finished the |
---|
956 | # first bucket). |
---|
957 | s = lc.get_state() |
---|
958 | if "cycle-to-date" not in s: |
---|
959 | d2 = fireEventually() |
---|
960 | d2.addCallback(_check) |
---|
961 | return d2 |
---|
962 | self.failUnlessIn("cycle-to-date", s) |
---|
963 | self.failUnlessIn("estimated-remaining-cycle", s) |
---|
964 | self.failUnlessIn("estimated-current-cycle", s) |
---|
965 | |
---|
966 | left = s["estimated-remaining-cycle"]["space-recovered"] |
---|
967 | self.failUnlessEqual(left["actual-buckets"], None) |
---|
968 | self.failUnlessEqual(left["original-buckets"], None) |
---|
969 | self.failUnlessEqual(left["configured-buckets"], None) |
---|
970 | self.failUnlessEqual(left["actual-shares"], None) |
---|
971 | self.failUnlessEqual(left["original-shares"], None) |
---|
972 | self.failUnlessEqual(left["configured-shares"], None) |
---|
973 | self.failUnlessEqual(left["actual-diskbytes"], None) |
---|
974 | self.failUnlessEqual(left["original-diskbytes"], None) |
---|
975 | self.failUnlessEqual(left["configured-diskbytes"], None) |
---|
976 | self.failUnlessEqual(left["actual-sharebytes"], None) |
---|
977 | self.failUnlessEqual(left["original-sharebytes"], None) |
---|
978 | self.failUnlessEqual(left["configured-sharebytes"], None) |
---|
979 | |
---|
980 | full = s["estimated-remaining-cycle"]["space-recovered"] |
---|
981 | self.failUnlessEqual(full["actual-buckets"], None) |
---|
982 | self.failUnlessEqual(full["original-buckets"], None) |
---|
983 | self.failUnlessEqual(full["configured-buckets"], None) |
---|
984 | self.failUnlessEqual(full["actual-shares"], None) |
---|
985 | self.failUnlessEqual(full["original-shares"], None) |
---|
986 | self.failUnlessEqual(full["configured-shares"], None) |
---|
987 | self.failUnlessEqual(full["actual-diskbytes"], None) |
---|
988 | self.failUnlessEqual(full["original-diskbytes"], None) |
---|
989 | self.failUnlessEqual(full["configured-diskbytes"], None) |
---|
990 | self.failUnlessEqual(full["actual-sharebytes"], None) |
---|
991 | self.failUnlessEqual(full["original-sharebytes"], None) |
---|
992 | self.failUnlessEqual(full["configured-sharebytes"], None) |
---|
993 | |
---|
994 | d.addCallback(_check) |
---|
995 | return d |
---|
996 | |
---|
    def test_no_st_blocks(self):
        """
        When os.stat() results lack the .st_blocks field, the crawler
        should report diskbytes equal to sharebytes.
        (No_ST_BLOCKS_StorageServer presumably simulates such a platform
        — defined elsewhere in this file.)
        """
        basedir = "storage/LeaseCrawler/no_st_blocks"
        fileutil.make_dirs(basedir)
        ss = No_ST_BLOCKS_StorageServer(basedir, b"\x00" * 20,
                                        expiration_mode="age",
                                        expiration_override_lease_duration=-1000)
        # a negative expiration_time= means the "configured-"
        # space-recovered counts will be non-zero, since all shares will have
        # expired by then

        # make it start sooner than usual.
        lc = ss.lease_checker
        lc.slow_start = 0

        self.make_shares(ss)
        ss.setServiceParent(self.s)
        def _wait():
            return bool(lc.get_state()["last-cycle-finished"] is not None)
        d = self.poll(_wait)

        def _check(ignored):
            s = lc.get_state()
            last = s["history"]["0"]
            rec = last["space-recovered"]
            # all four buckets/shares were already expired (see above)
            self.failUnlessEqual(rec["configured-buckets"], 4)
            self.failUnlessEqual(rec["configured-shares"], 4)
            self.failUnless(rec["configured-sharebytes"] > 0,
                            rec["configured-sharebytes"])
            # without the .st_blocks field in os.stat() results, we should be
            # reporting diskbytes==sharebytes
            self.failUnlessEqual(rec["configured-sharebytes"],
                                 rec["configured-diskbytes"])
        d.addCallback(_check)
        return d
---|
1031 | |
---|
    def test_share_corruption(self):
        """
        A share whose container magic has been overwritten is reported in
        "corrupt-shares" — both in the in-progress cycle and in the
        finished history, via the JSON and HTML renderings — and the
        lease checker keeps crawling past it.
        """
        self._poll_should_ignore_these_errors = [
            UnknownMutableContainerVersionError,
            UnknownImmutableContainerVersionError,
        ]
        basedir = "storage/LeaseCrawler/share_corruption"
        fileutil.make_dirs(basedir)
        ss = InstrumentedStorageServer(basedir, b"\x00" * 20)
        w = StorageStatus(ss)
        # make it start sooner than usual.
        lc = ss.lease_checker
        lc.stop_after_first_bucket = True
        lc.slow_start = 0
        lc.cpu_slice = 500

        # create a few shares, with some leases on them
        self.make_shares(ss)

        # now corrupt one, and make sure the lease-checker keeps going
        [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis
        # corrupt the lexically-first SI so it is the first one crawled
        first = min(self.sis)
        first_b32 = base32.b2a(first)
        fn = os.path.join(ss.sharedir, storage_index_to_dir(first), "0")
        f = open(fn, "rb+")
        f.seek(0)
        f.write(b"BAD MAGIC")
        f.close()
        # if get_share_file() doesn't see the correct mutable magic, it
        # assumes the file is an immutable share, and then
        # immutable.ShareFile sees a bad version. So regardless of which kind
        # of share we corrupted, this will trigger an
        # UnknownImmutableContainerVersionError.

        # also create an empty bucket
        empty_si = base32.b2a(b"\x04"*16)
        empty_bucket_dir = os.path.join(ss.sharedir,
                                        storage_index_to_dir(empty_si))
        fileutil.make_dirs(empty_bucket_dir)

        ss.setServiceParent(self.s)

        d = fireEventually()

        # now examine the state right after the first bucket has been
        # processed.
        def _after_first_bucket(ignored):
            s = lc.get_state()
            if "cycle-to-date" not in s:
                # crawler hasn't started yet; retry on a later reactor turn
                d2 = fireEventually()
                d2.addCallback(_after_first_bucket)
                return d2
            so_far = s["cycle-to-date"]
            rec = so_far["space-recovered"]
            # one bucket examined, but its (corrupt) share was not counted
            self.failUnlessEqual(rec["examined-buckets"], 1)
            self.failUnlessEqual(rec["examined-shares"], 0)
            [(actual_b32, i)] = so_far["corrupt-shares"]
            actual_b32 = actual_b32.encode("ascii")
            self.failUnlessEqual((actual_b32, i), (first_b32, 0))
        d.addCallback(_after_first_bucket)

        d.addCallback(lambda ign: renderJSON(w))
        def _check_json(raw):
            data = json.loads(raw)
            # grr. json turns all dict keys into strings.
            so_far = data["lease-checker"]["cycle-to-date"]
            corrupt_shares = so_far["corrupt-shares"]
            # it also turns all tuples into lists, and result is unicode:
            [(actual_b32, i)] = corrupt_shares
            actual_b32 = actual_b32.encode("ascii")
            self.failUnlessEqual([actual_b32, i], [first_b32, 0])
        d.addCallback(_check_json)
        d.addCallback(lambda ign: renderDeferred(w))
        def _check_html(html):
            s = remove_tags(html)
            self.failUnlessIn(b"Corrupt shares: SI %s shnum 0" % first_b32, s)
        d.addCallback(_check_html)

        # wait for the crawler to finish the whole first cycle
        def _wait():
            return bool(lc.get_state()["last-cycle-finished"] is not None)
        d.addCallback(lambda ign: self.poll(_wait))

        def _after_first_cycle(ignored):
            s = lc.get_state()
            last = s["history"]["0"]
            rec = last["space-recovered"]
            # all buckets (including the empty one) examined; the three
            # healthy shares counted; the corrupt share still reported
            self.failUnlessEqual(rec["examined-buckets"], 5)
            self.failUnlessEqual(rec["examined-shares"], 3)
            [(actual_b32, i)] = last["corrupt-shares"]
            actual_b32 = actual_b32.encode("ascii")
            self.failUnlessEqual((actual_b32, i), (first_b32, 0))
        d.addCallback(_after_first_cycle)
        d.addCallback(lambda ign: renderJSON(w))
        def _check_json_history(raw):
            data = json.loads(raw)
            last = data["lease-checker"]["history"]["0"]
            [(actual_b32, i)] = last["corrupt-shares"]
            actual_b32 = actual_b32.encode("ascii")
            self.failUnlessEqual([actual_b32, i], [first_b32, 0])
        d.addCallback(_check_json_history)
        d.addCallback(lambda ign: renderDeferred(w))
        def _check_html_history(html):
            s = remove_tags(html)
            self.failUnlessIn(b"Corrupt shares: SI %s shnum 0" % first_b32, s)
        d.addCallback(_check_html_history)

        def _cleanup(res):
            # flush the container-version errors that the corruption
            # deliberately provoked, so trial doesn't flag them
            self.flushLoggedErrors(UnknownMutableContainerVersionError,
                                   UnknownImmutableContainerVersionError)
            return res
        d.addBoth(_cleanup)
        return d
---|
1143 | |
---|
    @skipIf(platform.isWindows(), "pickle test-data can't be loaded on windows")
    def test_deserialize_pickle(self):
        """
        The crawler can read existing state from the old pickle format.

        The "admin migrate-crawler" command converts the pickle file to
        JSON in place; the converted state must match this known-good
        snapshot, and loading it twice must give equal results.
        """
        # this file came from an "in the wild" tahoe version 1.16.0
        original_pickle = FilePath(__file__).parent().child("data").child("lease_checker.state.txt")
        root = FilePath(self.mktemp())
        storage = root.child("storage")
        storage.makedirs()
        test_pickle = storage.child("lease_checker.state")
        with test_pickle.open("wb") as local, original_pickle.open("rb") as remote:
            local.write(remote.read())

        # convert from pickle format to JSON
        top = Options()
        top.parseOptions([
            "admin", "migrate-crawler",
            "--basedir", storage.parent().path,
        ])
        # drill down to the innermost subcommand's options object
        options = top.subOptions
        while hasattr(options, "subOptions"):
            options = options.subOptions
        options.stdout = StringIO()
        migrate_crawler(options)

        # the (existing) state file should have been upgraded to JSON
        self.assertFalse(test_pickle.exists())
        self.assertTrue(test_pickle.siblingExtension(".json").exists())
        serial = _LeaseStateSerializer(test_pickle.path)

        # the loaded state must exactly match this known-good snapshot
        self.assertEqual(
            serial.load(),
            {
                u'last-complete-prefix': None,
                u'version': 1,
                u'current-cycle-start-time': 1635003106.611748,
                u'last-cycle-finished': 312,
                u'cycle-to-date': {
                    u'leases-per-share-histogram': {
                        u'1': 36793,
                        u'2': 1,
                    },
                    u'space-recovered': {
                        u'examined-buckets-immutable': 17183,
                        u'configured-buckets-mutable': 0,
                        u'examined-shares-mutable': 1796,
                        u'original-shares-mutable': 1563,
                        u'configured-buckets-immutable': 0,
                        u'original-shares-immutable': 27926,
                        u'original-diskbytes-immutable': 431149056,
                        u'examined-shares-immutable': 34998,
                        u'original-buckets': 14661,
                        u'actual-shares-immutable': 0,
                        u'configured-shares': 0,
                        u'original-buckets-mutable': 899,
                        u'actual-diskbytes': 4096,
                        u'actual-shares-mutable': 0,
                        u'configured-buckets': 1,
                        u'examined-buckets-unknown': 14,
                        u'actual-sharebytes': 0,
                        u'original-shares': 29489,
                        u'actual-buckets-immutable': 0,
                        u'original-sharebytes': 312664812,
                        u'examined-sharebytes-immutable': 383801602,
                        u'actual-shares': 0,
                        u'actual-sharebytes-immutable': 0,
                        u'original-diskbytes': 441643008,
                        u'configured-diskbytes-mutable': 0,
                        u'configured-sharebytes-immutable': 0,
                        u'configured-shares-mutable': 0,
                        u'actual-diskbytes-immutable': 0,
                        u'configured-diskbytes-immutable': 0,
                        u'original-diskbytes-mutable': 10489856,
                        u'actual-sharebytes-mutable': 0,
                        u'configured-sharebytes': 0,
                        u'examined-shares': 36794,
                        u'actual-diskbytes-mutable': 0,
                        u'actual-buckets': 1,
                        u'original-buckets-immutable': 13761,
                        u'configured-sharebytes-mutable': 0,
                        u'examined-sharebytes': 390369660,
                        u'original-sharebytes-immutable': 308125753,
                        u'original-sharebytes-mutable': 4539059,
                        u'actual-buckets-mutable': 0,
                        u'examined-buckets-mutable': 1043,
                        u'configured-shares-immutable': 0,
                        u'examined-diskbytes': 476598272,
                        u'examined-diskbytes-mutable': 9154560,
                        u'examined-sharebytes-mutable': 6568058,
                        u'examined-buckets': 18241,
                        u'configured-diskbytes': 4096,
                        u'examined-diskbytes-immutable': 467443712},
                    u'corrupt-shares': [
                        [u'2dn6xnlnsqwtnapwxfdivpm3s4', 4],
                        [u'2dn6xnlnsqwtnapwxfdivpm3s4', 1],
                        [u'2rrzthwsrrxolevmwdvbdy3rqi', 4],
                        [u'2rrzthwsrrxolevmwdvbdy3rqi', 1],
                        [u'2skfngcto6h7eqmn4uo7ntk3ne', 4],
                        [u'2skfngcto6h7eqmn4uo7ntk3ne', 1],
                        [u'32d5swqpqx2mwix7xmqzvhdwje', 4],
                        [u'32d5swqpqx2mwix7xmqzvhdwje', 1],
                        [u'5mmayp66yflmpon3o6unsnbaca', 4],
                        [u'5mmayp66yflmpon3o6unsnbaca', 1],
                        [u'6ixhpvbtre7fnrl6pehlrlflc4', 4],
                        [u'6ixhpvbtre7fnrl6pehlrlflc4', 1],
                        [u'ewzhvswjsz4vp2bqkb6mi3bz2u', 4],
                        [u'ewzhvswjsz4vp2bqkb6mi3bz2u', 1],
                        [u'fu7pazf6ogavkqj6z4q5qqex3u', 4],
                        [u'fu7pazf6ogavkqj6z4q5qqex3u', 1],
                        [u'hbyjtqvpcimwxiyqbcbbdn2i4a', 4],
                        [u'hbyjtqvpcimwxiyqbcbbdn2i4a', 1],
                        [u'pmcjbdkbjdl26k3e6yja77femq', 4],
                        [u'pmcjbdkbjdl26k3e6yja77femq', 1],
                        [u'r6swof4v2uttbiiqwj5pi32cm4', 4],
                        [u'r6swof4v2uttbiiqwj5pi32cm4', 1],
                        [u't45v5akoktf53evc2fi6gwnv6y', 4],
                        [u't45v5akoktf53evc2fi6gwnv6y', 1],
                        [u'y6zb4faar3rdvn3e6pfg4wlotm', 4],
                        [u'y6zb4faar3rdvn3e6pfg4wlotm', 1],
                        [u'z3yghutvqoqbchjao4lndnrh3a', 4],
                        [u'z3yghutvqoqbchjao4lndnrh3a', 1],
                    ],
                    u'lease-age-histogram': {
                        "1641600,1728000": 78,
                        "12441600,12528000": 78,
                        "8640000,8726400": 32,
                        "1814400,1900800": 1860,
                        "2764800,2851200": 76,
                        "11491200,11577600": 20,
                        "10713600,10800000": 183,
                        "47865600,47952000": 7,
                        "3110400,3196800": 328,
                        "10627200,10713600": 43,
                        "45619200,45705600": 4,
                        "12873600,12960000": 5,
                        "7430400,7516800": 7228,
                        "1555200,1641600": 492,
                        "38880000,38966400": 3,
                        "12528000,12614400": 193,
                        "7344000,7430400": 12689,
                        "2678400,2764800": 278,
                        "2332800,2419200": 12,
                        "9244800,9331200": 73,
                        "12787200,12873600": 218,
                        "49075200,49161600": 19,
                        "10368000,10454400": 117,
                        "4665600,4752000": 256,
                        "7516800,7603200": 993,
                        "42336000,42422400": 33,
                        "10972800,11059200": 122,
                        "39052800,39139200": 51,
                        "12614400,12700800": 210,
                        "7603200,7689600": 2004,
                        "10540800,10627200": 16,
                        "950400,1036800": 4435,
                        "42076800,42163200": 4,
                        "8812800,8899200": 57,
                        "5788800,5875200": 954,
                        "36374400,36460800": 3,
                        "9331200,9417600": 12,
                        "30499200,30585600": 5,
                        "12700800,12787200": 25,
                        "2073600,2160000": 388,
                        "12960000,13046400": 8,
                        "11923200,12009600": 89,
                        "3369600,3456000": 79,
                        "3196800,3283200": 628,
                        "37497600,37584000": 11,
                        "33436800,33523200": 7,
                        "44928000,45014400": 2,
                        "37929600,38016000": 3,
                        "38966400,39052800": 61,
                        "3283200,3369600": 86,
                        "11750400,11836800": 7,
                        "3801600,3888000": 32,
                        "46310400,46396800": 1,
                        "4838400,4924800": 386,
                        "8208000,8294400": 38,
                        "37411200,37497600": 4,
                        "12009600,12096000": 329,
                        "10454400,10540800": 1239,
                        "40176000,40262400": 1,
                        "3715200,3801600": 104,
                        "44409600,44496000": 13,
                        "38361600,38448000": 5,
                        "12268800,12355200": 2,
                        "28771200,28857600": 6,
                        "41990400,42076800": 10,
                        "2592000,2678400": 40,
                    },
                },
                'current-cycle': None,
                'last-complete-bucket': None,
            }
        )
        # a second serializer over the same path must load identical state
        # (i.e. loading is idempotent and the JSON round-trips)
        second_serial = _LeaseStateSerializer(serial._path.path)
        self.assertEqual(
            serial.load(),
            second_serial.load(),
        )
---|
1345 | |
---|
    @skipIf(platform.isWindows(), "pickle test-data can't be loaded on windows")
    def test_deserialize_history_pickle(self):
        """
        The crawler can read existing history state from the old pickle
        format.

        A lease-checker history pickle captured from a real deployment is
        copied into a fresh storage directory, converted with the
        ``tahoe admin migrate-crawler`` command, and the migrated JSON
        state must match the expected history exactly.
        """
        # this file came from an "in the wild" tahoe version 1.16.0
        original_pickle = FilePath(__file__).parent().child("data").child("lease_checker.history.txt")
        root = FilePath(self.mktemp())
        storage = root.child("storage")
        storage.makedirs()
        # Copy the fixture to the filename/location the migration command
        # expects inside the node's storage directory.
        test_pickle = storage.child("lease_checker.history")
        with test_pickle.open("wb") as local, original_pickle.open("rb") as remote:
            local.write(remote.read())

        # convert from pickle format to JSON
        # Parse the full CLI ("admin migrate-crawler") and drill down to the
        # innermost subcommand's options object.
        top = Options()
        top.parseOptions([
            "admin", "migrate-crawler",
            "--basedir", storage.parent().path,
        ])
        options = top.subOptions
        while hasattr(options, "subOptions"):
            options = options.subOptions
        # Capture the command's console output instead of printing it.
        options.stdout = StringIO()
        migrate_crawler(options)

        # Load the migrated state from the same path the pickle occupied.
        serial = _HistorySerializer(test_pickle.path)

        self.maxDiff = None
        # "363" is the only entry in this capture -- presumably the crawler
        # cycle number, serialized as a string key.
        self.assertEqual(
            serial.load(),
            {
                "363": {
                    'configured-expiration-mode': ['age', None, None, ['immutable', 'mutable']],
                    'expiration-enabled': False,
                    'leases-per-share-histogram': {
                        '1': 39774,
                    },
                    'lease-age-histogram': [
                        [0, 86400, 3125],
                        [345600, 432000, 4175],
                        [950400, 1036800, 141],
                        [1036800, 1123200, 345],
                        [1123200, 1209600, 81],
                        [1296000, 1382400, 1832],
                        [1555200, 1641600, 390],
                        [1728000, 1814400, 12],
                        [2073600, 2160000, 84],
                        [2160000, 2246400, 228],
                        [2246400, 2332800, 75],
                        [2592000, 2678400, 644],
                        [2678400, 2764800, 273],
                        [2764800, 2851200, 94],
                        [2851200, 2937600, 97],
                        [3196800, 3283200, 143],
                        [3283200, 3369600, 48],
                        [4147200, 4233600, 374],
                        [4320000, 4406400, 534],
                        [5270400, 5356800, 1005],
                        [6739200, 6825600, 8704],
                        [6825600, 6912000, 3986],
                        [6912000, 6998400, 7592],
                        [6998400, 7084800, 2607],
                        [7689600, 7776000, 35],
                        [8035200, 8121600, 33],
                        [8294400, 8380800, 54],
                        [8640000, 8726400, 45],
                        [8726400, 8812800, 27],
                        [8812800, 8899200, 12],
                        [9763200, 9849600, 77],
                        [9849600, 9936000, 91],
                        [9936000, 10022400, 1210],
                        [10022400, 10108800, 45],
                        [10108800, 10195200, 186],
                        [10368000, 10454400, 113],
                        [10972800, 11059200, 21],
                        [11232000, 11318400, 5],
                        [11318400, 11404800, 19],
                        [11404800, 11491200, 238],
                        [11491200, 11577600, 159],
                        [11750400, 11836800, 1],
                        [11836800, 11923200, 32],
                        [11923200, 12009600, 192],
                        [12009600, 12096000, 222],
                        [12096000, 12182400, 18],
                        [12182400, 12268800, 224],
                        [12268800, 12355200, 9],
                        [12355200, 12441600, 9],
                        [12441600, 12528000, 10],
                        [12528000, 12614400, 6],
                        [12614400, 12700800, 6],
                        [12700800, 12787200, 18],
                        [12787200, 12873600, 6],
                        [12873600, 12960000, 62],
                    ],
                    'cycle-start-finish-times': [1634446505.241972, 1634446666.055401],
                    'space-recovered': {
                        'examined-buckets-immutable': 17896,
                        'configured-buckets-mutable': 0,
                        'examined-shares-mutable': 2473,
                        'original-shares-mutable': 1185,
                        'configured-buckets-immutable': 0,
                        'original-shares-immutable': 27457,
                        'original-diskbytes-immutable': 2810982400,
                        'examined-shares-immutable': 37301,
                        'original-buckets': 14047,
                        'actual-shares-immutable': 0,
                        'configured-shares': 0,
                        'original-buckets-mutable': 691,
                        'actual-diskbytes': 4096,
                        'actual-shares-mutable': 0,
                        'configured-buckets': 1,
                        'examined-buckets-unknown': 14,
                        'actual-sharebytes': 0,
                        'original-shares': 28642,
                        'actual-buckets-immutable': 0,
                        'original-sharebytes': 2695552941,
                        'examined-sharebytes-immutable': 2754798505,
                        'actual-shares': 0,
                        'actual-sharebytes-immutable': 0,
                        'original-diskbytes': 2818981888,
                        'configured-diskbytes-mutable': 0,
                        'configured-sharebytes-immutable': 0,
                        'configured-shares-mutable': 0,
                        'actual-diskbytes-immutable': 0,
                        'configured-diskbytes-immutable': 0,
                        'original-diskbytes-mutable': 7995392,
                        'actual-sharebytes-mutable': 0,
                        'configured-sharebytes': 0,
                        'examined-shares': 39774,
                        'actual-diskbytes-mutable': 0,
                        'actual-buckets': 1,
                        'original-buckets-immutable': 13355,
                        'configured-sharebytes-mutable': 0,
                        'examined-sharebytes': 2763646972,
                        'original-sharebytes-immutable': 2692076909,
                        'original-sharebytes-mutable': 3476032,
                        'actual-buckets-mutable': 0,
                        'examined-buckets-mutable': 1286,
                        'configured-shares-immutable': 0,
                        'examined-diskbytes': 2854801408,
                        'examined-diskbytes-mutable': 12161024,
                        'examined-sharebytes-mutable': 8848467,
                        'examined-buckets': 19197,
                        'configured-diskbytes': 4096,
                        'examined-diskbytes-immutable': 2842640384
                    },
                    'corrupt-shares': [
                        ['2dn6xnlnsqwtnapwxfdivpm3s4', 3],
                        ['2dn6xnlnsqwtnapwxfdivpm3s4', 0],
                        ['2rrzthwsrrxolevmwdvbdy3rqi', 3],
                        ['2rrzthwsrrxolevmwdvbdy3rqi', 0],
                        ['2skfngcto6h7eqmn4uo7ntk3ne', 3],
                        ['2skfngcto6h7eqmn4uo7ntk3ne', 0],
                        ['32d5swqpqx2mwix7xmqzvhdwje', 3],
                        ['32d5swqpqx2mwix7xmqzvhdwje', 0],
                        ['5mmayp66yflmpon3o6unsnbaca', 3],
                        ['5mmayp66yflmpon3o6unsnbaca', 0],
                        ['6ixhpvbtre7fnrl6pehlrlflc4', 3],
                        ['6ixhpvbtre7fnrl6pehlrlflc4', 0],
                        ['ewzhvswjsz4vp2bqkb6mi3bz2u', 3],
                        ['ewzhvswjsz4vp2bqkb6mi3bz2u', 0],
                        ['fu7pazf6ogavkqj6z4q5qqex3u', 3],
                        ['fu7pazf6ogavkqj6z4q5qqex3u', 0],
                        ['hbyjtqvpcimwxiyqbcbbdn2i4a', 3],
                        ['hbyjtqvpcimwxiyqbcbbdn2i4a', 0],
                        ['pmcjbdkbjdl26k3e6yja77femq', 3],
                        ['pmcjbdkbjdl26k3e6yja77femq', 0],
                        ['r6swof4v2uttbiiqwj5pi32cm4', 3],
                        ['r6swof4v2uttbiiqwj5pi32cm4', 0],
                        ['t45v5akoktf53evc2fi6gwnv6y', 3],
                        ['t45v5akoktf53evc2fi6gwnv6y', 0],
                        ['y6zb4faar3rdvn3e6pfg4wlotm', 3],
                        ['y6zb4faar3rdvn3e6pfg4wlotm', 0],
                        ['z3yghutvqoqbchjao4lndnrh3a', 3],
                        ['z3yghutvqoqbchjao4lndnrh3a', 0],
                    ]
                }
            }
        )
1527 | |
---|
1528 | |
---|
class WebStatus(unittest.TestCase, pollmixin.PollMixin):
    """
    Tests for the rendered storage-server web status page.
    """

    def setUp(self):
        # Parent service each test attaches its StorageServer to; it is
        # started here and stopped again in tearDown.
        self.s = service.MultiService()
        self.s.startService()
    def tearDown(self):
        # Stop the parent service (and any attached StorageServer); the
        # result is returned so the test framework can wait for shutdown.
        return self.s.stopService()
1536 | |
---|
1537 | def test_no_server(self): |
---|
1538 | w = StorageStatus(None) |
---|
1539 | html = renderSynchronously(w) |
---|
1540 | self.failUnlessIn(b"<h1>No Storage Server Running</h1>", html) |
---|
1541 | |
---|
1542 | def test_status(self): |
---|
1543 | basedir = "storage/WebStatus/status" |
---|
1544 | fileutil.make_dirs(basedir) |
---|
1545 | nodeid = b"\x00" * 20 |
---|
1546 | ss = StorageServer(basedir, nodeid) |
---|
1547 | ss.setServiceParent(self.s) |
---|
1548 | w = StorageStatus(ss, "nickname") |
---|
1549 | d = renderDeferred(w) |
---|
1550 | def _check_html(html): |
---|
1551 | self.failUnlessIn(b"<h1>Storage Server Status</h1>", html) |
---|
1552 | s = remove_tags(html) |
---|
1553 | self.failUnlessIn(b"Server Nickname: nickname", s) |
---|
1554 | self.failUnlessIn(b"Server Nodeid: %s" % base32.b2a(nodeid), s) |
---|
1555 | self.failUnlessIn(b"Accepting new shares: Yes", s) |
---|
1556 | self.failUnlessIn(b"Reserved space: - 0 B (0)", s) |
---|
1557 | d.addCallback(_check_html) |
---|
1558 | d.addCallback(lambda ign: renderJSON(w)) |
---|
1559 | def _check_json(raw): |
---|
1560 | data = json.loads(raw) |
---|
1561 | s = data["stats"] |
---|
1562 | self.failUnlessEqual(s["storage_server.accepting_immutable_shares"], 1) |
---|
1563 | self.failUnlessEqual(s["storage_server.reserved_space"], 0) |
---|
1564 | self.failUnlessIn("bucket-counter", data) |
---|
1565 | self.failUnlessIn("lease-checker", data) |
---|
1566 | d.addCallback(_check_json) |
---|
1567 | return d |
---|
1568 | |
---|
1569 | |
---|
1570 | def test_status_no_disk_stats(self): |
---|
1571 | def call_get_disk_stats(whichdir, reserved_space=0): |
---|
1572 | raise AttributeError() |
---|
1573 | self.patch(fileutil, 'get_disk_stats', call_get_disk_stats) |
---|
1574 | |
---|
1575 | # Some platforms may have no disk stats API. Make sure the code can handle that |
---|
1576 | # (test runs on all platforms). |
---|
1577 | basedir = "storage/WebStatus/status_no_disk_stats" |
---|
1578 | fileutil.make_dirs(basedir) |
---|
1579 | ss = StorageServer(basedir, b"\x00" * 20) |
---|
1580 | ss.setServiceParent(self.s) |
---|
1581 | w = StorageStatus(ss) |
---|
1582 | html = renderSynchronously(w) |
---|
1583 | self.failUnlessIn(b"<h1>Storage Server Status</h1>", html) |
---|
1584 | s = remove_tags(html) |
---|
1585 | self.failUnlessIn(b"Accepting new shares: Yes", s) |
---|
1586 | self.failUnlessIn(b"Total disk space: ?", s) |
---|
1587 | self.failUnlessIn(b"Space Available to Tahoe: ?", s) |
---|
1588 | self.failUnless(ss.get_available_space() is None) |
---|
1589 | |
---|
1590 | def test_status_bad_disk_stats(self): |
---|
1591 | def call_get_disk_stats(whichdir, reserved_space=0): |
---|
1592 | raise OSError() |
---|
1593 | self.patch(fileutil, 'get_disk_stats', call_get_disk_stats) |
---|
1594 | |
---|
1595 | # If the API to get disk stats exists but a call to it fails, then the status should |
---|
1596 | # show that no shares will be accepted, and get_available_space() should be 0. |
---|
1597 | basedir = "storage/WebStatus/status_bad_disk_stats" |
---|
1598 | fileutil.make_dirs(basedir) |
---|
1599 | ss = StorageServer(basedir, b"\x00" * 20) |
---|
1600 | ss.setServiceParent(self.s) |
---|
1601 | w = StorageStatus(ss) |
---|
1602 | html = renderSynchronously(w) |
---|
1603 | self.failUnlessIn(b"<h1>Storage Server Status</h1>", html) |
---|
1604 | s = remove_tags(html) |
---|
1605 | self.failUnlessIn(b"Accepting new shares: No", s) |
---|
1606 | self.failUnlessIn(b"Total disk space: ?", s) |
---|
1607 | self.failUnlessIn(b"Space Available to Tahoe: ?", s) |
---|
1608 | self.failUnlessEqual(ss.get_available_space(), 0) |
---|
1609 | |
---|
1610 | def test_status_right_disk_stats(self): |
---|
1611 | GB = 1000000000 |
---|
1612 | total = 5*GB |
---|
1613 | free_for_root = 4*GB |
---|
1614 | free_for_nonroot = 3*GB |
---|
1615 | reserved = 1*GB |
---|
1616 | |
---|
1617 | basedir = "storage/WebStatus/status_right_disk_stats" |
---|
1618 | fileutil.make_dirs(basedir) |
---|
1619 | ss = StorageServer(basedir, b"\x00" * 20, reserved_space=reserved) |
---|
1620 | expecteddir = ss.sharedir |
---|
1621 | |
---|
1622 | def call_get_disk_stats(whichdir, reserved_space=0): |
---|
1623 | self.failUnlessEqual(whichdir, expecteddir) |
---|
1624 | self.failUnlessEqual(reserved_space, reserved) |
---|
1625 | used = total - free_for_root |
---|
1626 | avail = max(free_for_nonroot - reserved_space, 0) |
---|
1627 | return { |
---|
1628 | 'total': total, |
---|
1629 | 'free_for_root': free_for_root, |
---|
1630 | 'free_for_nonroot': free_for_nonroot, |
---|
1631 | 'used': used, |
---|
1632 | 'avail': avail, |
---|
1633 | } |
---|
1634 | self.patch(fileutil, 'get_disk_stats', call_get_disk_stats) |
---|
1635 | |
---|
1636 | ss.setServiceParent(self.s) |
---|
1637 | w = StorageStatus(ss) |
---|
1638 | html = renderSynchronously(w) |
---|
1639 | |
---|
1640 | self.failUnlessIn(b"<h1>Storage Server Status</h1>", html) |
---|
1641 | s = remove_tags(html) |
---|
1642 | self.failUnlessIn(b"Total disk space: 5.00 GB", s) |
---|
1643 | self.failUnlessIn(b"Disk space used: - 1.00 GB", s) |
---|
1644 | self.failUnlessIn(b"Disk space free (root): 4.00 GB", s) |
---|
1645 | self.failUnlessIn(b"Disk space free (non-root): 3.00 GB", s) |
---|
1646 | self.failUnlessIn(b"Reserved space: - 1.00 GB", s) |
---|
1647 | self.failUnlessIn(b"Space Available to Tahoe: 2.00 GB", s) |
---|
1648 | self.failUnlessEqual(ss.get_available_space(), 2*GB) |
---|
1649 | |
---|
1650 | def test_readonly(self): |
---|
1651 | basedir = "storage/WebStatus/readonly" |
---|
1652 | fileutil.make_dirs(basedir) |
---|
1653 | ss = StorageServer(basedir, b"\x00" * 20, readonly_storage=True) |
---|
1654 | ss.setServiceParent(self.s) |
---|
1655 | w = StorageStatus(ss) |
---|
1656 | html = renderSynchronously(w) |
---|
1657 | self.failUnlessIn(b"<h1>Storage Server Status</h1>", html) |
---|
1658 | s = remove_tags(html) |
---|
1659 | self.failUnlessIn(b"Accepting new shares: No", s) |
---|
1660 | |
---|
1661 | def test_reserved(self): |
---|
1662 | basedir = "storage/WebStatus/reserved" |
---|
1663 | fileutil.make_dirs(basedir) |
---|
1664 | ss = StorageServer(basedir, b"\x00" * 20, reserved_space=10e6) |
---|
1665 | ss.setServiceParent(self.s) |
---|
1666 | w = StorageStatus(ss) |
---|
1667 | html = renderSynchronously(w) |
---|
1668 | self.failUnlessIn(b"<h1>Storage Server Status</h1>", html) |
---|
1669 | s = remove_tags(html) |
---|
1670 | self.failUnlessIn(b"Reserved space: - 10.00 MB (10000000)", s) |
---|
1671 | |
---|
1672 | def test_huge_reserved(self): |
---|
1673 | basedir = "storage/WebStatus/reserved" |
---|
1674 | fileutil.make_dirs(basedir) |
---|
1675 | ss = StorageServer(basedir, b"\x00" * 20, reserved_space=10e6) |
---|
1676 | ss.setServiceParent(self.s) |
---|
1677 | w = StorageStatus(ss) |
---|
1678 | html = renderSynchronously(w) |
---|
1679 | self.failUnlessIn(b"<h1>Storage Server Status</h1>", html) |
---|
1680 | s = remove_tags(html) |
---|
1681 | self.failUnlessIn(b"Reserved space: - 10.00 MB (10000000)", s) |
---|
1682 | |
---|
1683 | def test_util(self): |
---|
1684 | w = StorageStatusElement(None, None) |
---|
1685 | self.failUnlessEqual(w.render_space(None), "?") |
---|
1686 | self.failUnlessEqual(w.render_space(10e6), "10000000") |
---|
1687 | self.failUnlessEqual(w.render_abbrev_space(None), "?") |
---|
1688 | self.failUnlessEqual(w.render_abbrev_space(10e6), "10.00 MB") |
---|
1689 | self.failUnlessEqual(remove_prefix("foo.bar", "foo."), "bar") |
---|
1690 | self.failUnlessEqual(remove_prefix("foo.bar", "baz."), None) |
---|