Changeset fe62573 in trunk


Ignore:
Timestamp:
2020-08-20T16:59:42Z (5 years ago)
Author:
GitHub <noreply@…>
Branches:
master
Children:
356a5a32, 46b498f9, f227b1b
Parents:
6969872a (diff), ed2f6bf (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
git-author:
Itamar Turner-Trauring <itamar@…> (2020-08-20 16:59:42)
git-committer:
GitHub <noreply@…> (2020-08-20 16:59:42)
Message:

Merge pull request #782 from tahoe-lafs/3383.storage-tests-run-py-3

Make storage tests run (not pass, run) on Python 3

Files:
2 added
3 edited

Legend:

Unmodified
Added
Removed
  • TabularUnified src/allmydata/interfaces.py

    r6969872a rfe62573  
     1
     2from past.builtins import long
    13
    24from zope.interface import Interface, Attribute
  • TabularUnified src/allmydata/storage/crawler.py

    r6969872a rfe62573  
    11
    22import os, time, struct
    3 import cPickle as pickle
     3try:
     4    import cPickle as pickle
     5except ImportError:
     6    import pickle
    47from twisted.internet import reactor
    58from twisted.application import service
  • TabularUnified src/allmydata/test/test_storage.py

    r6969872a rfe62573  
    33import platform
    44import stat
    5 import re
    6 import json
    75import struct
    86import shutil
     
    1210
    1311from twisted.internet import defer
    14 from twisted.application import service
    15 from twisted.web.template import flattenString
    16 
    17 # We need to use `nevow.inevow.IRequest` for now for compatibility
    18 # with the code in web/common.py.  Once nevow bits are gone from
    19 # web/common.py, we can use `twisted.web.iweb.IRequest` here.
    20 from nevow.inevow import IRequest
    21 
    22 from twisted.web.server import Request
    23 from twisted.web.test.requesthelper import DummyChannel
    24 from zope.interface import implementer
    25 
    26 from foolscap.api import fireEventually
     12
    2713import itertools
    2814from allmydata import interfaces
    29 from allmydata.util import fileutil, hashutil, base32, pollmixin
     15from allmydata.util import fileutil, hashutil, base32
    3016from allmydata.storage.server import StorageServer
    3117from allmydata.storage.mutable import MutableShareFile
     
    3420     UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError
    3521from allmydata.storage.lease import LeaseInfo
    36 from allmydata.storage.crawler import BucketCountingCrawler
    37 from allmydata.storage.expirer import LeaseCheckingCrawler
    3822from allmydata.immutable.layout import WriteBucketProxy, WriteBucketProxy_v2, \
    3923     ReadBucketProxy
     
    4933from allmydata.test.common import LoggingServiceParent, ShouldFailMixin
    5034from allmydata.test.no_network import NoNetworkServer
    51 from allmydata.web.storage import (
    52     StorageStatus,
    53     StorageStatusElement,
    54     remove_prefix
    55 )
    5635from allmydata.storage_client import (
    5736    _StorageServer,
     
    29812960        self.failUnless(output["get"]["99_0_percentile"] is None, output)
    29822961        self.failUnless(output["get"]["99_9_percentile"] is None, output)
    2983 
    2984 def remove_tags(s):
    2985     s = re.sub(r'<[^>]*>', ' ', s)
    2986     s = re.sub(r'\s+', ' ', s)
    2987     return s
    2988 
    2989 def renderSynchronously(ss):
    2990     """
    2991     Return fully rendered HTML document.
    2992 
    2993     :param _StorageStatus ss: a StorageStatus instance.
    2994     """
    2995     return unittest.TestCase().successResultOf(renderDeferred(ss))
    2996 
    2997 def renderDeferred(ss):
    2998     """
    2999     Return a `Deferred` HTML renderer.
    3000 
    3001     :param _StorageStatus ss: a StorageStatus instance.
    3002     """
    3003     elem = StorageStatusElement(ss._storage, ss._nickname)
    3004     return flattenString(None, elem)
    3005 
    3006 def renderJSON(resource):
    3007     """Render a JSON from the given resource."""
    3008 
    3009     @implementer(IRequest)
    3010     class JSONRequest(Request):
    3011         """
    3012         A Request with t=json argument added to it.  This is useful to
    3013         invoke a Resouce.render_JSON() method.
    3014         """
    3015         def __init__(self):
    3016             Request.__init__(self, DummyChannel())
    3017             self.args = {"t": ["json"]}
    3018             self.fields = {}
    3019 
    3020     return resource.render(JSONRequest())
    3021 
    3022 class MyBucketCountingCrawler(BucketCountingCrawler):
    3023     def finished_prefix(self, cycle, prefix):
    3024         BucketCountingCrawler.finished_prefix(self, cycle, prefix)
    3025         if self.hook_ds:
    3026             d = self.hook_ds.pop(0)
    3027             d.callback(None)
    3028 
    3029 class MyStorageServer(StorageServer):
    3030     def add_bucket_counter(self):
    3031         statefile = os.path.join(self.storedir, "bucket_counter.state")
    3032         self.bucket_counter = MyBucketCountingCrawler(self, statefile)
    3033         self.bucket_counter.setServiceParent(self)
    3034 
class BucketCounter(unittest.TestCase, pollmixin.PollMixin):
    """Tests for the bucket-counting crawler and its rendered status page."""

    def setUp(self):
        # parent service that hosts the storage servers under test
        self.s = service.MultiService()
        self.s.startService()
    def tearDown(self):
        return self.s.stopService()

    def test_bucket_counter(self):
        """Watch one full bucket-counting cycle via the rendered status page."""
        basedir = "storage/BucketCounter/bucket_counter"
        fileutil.make_dirs(basedir)
        ss = StorageServer(basedir, "\x00" * 20)
        # to make sure we capture the bucket-counting-crawler in the middle
        # of a cycle, we reach in and reduce its maximum slice time to 0. We
        # also make it start sooner than usual.
        ss.bucket_counter.slow_start = 0
        orig_cpu_slice = ss.bucket_counter.cpu_slice
        ss.bucket_counter.cpu_slice = 0
        ss.setServiceParent(self.s)

        w = StorageStatus(ss)

        # this sample is before the crawler has started doing anything
        html = renderSynchronously(w)
        self.failUnlessIn("<h1>Storage Server Status</h1>", html)
        s = remove_tags(html)
        self.failUnlessIn("Accepting new shares: Yes", s)
        self.failUnlessIn("Reserved space: - 0 B (0)", s)
        self.failUnlessIn("Total buckets: Not computed yet", s)
        self.failUnlessIn("Next crawl in", s)

        # give the bucket-counting-crawler one tick to get started. The
        # cpu_slice=0 will force it to yield right after it processes the
        # first prefix

        d = fireEventually()
        def _check(ignored):
            # are we really right after the first prefix?
            state = ss.bucket_counter.get_state()
            if state["last-complete-prefix"] is None:
                # not yet -- reschedule this check for a later turn
                d2 = fireEventually()
                d2.addCallback(_check)
                return d2
            self.failUnlessEqual(state["last-complete-prefix"],
                                 ss.bucket_counter.prefixes[0])
            ss.bucket_counter.cpu_slice = 100.0 # finish as fast as possible
            # mid-cycle, the page should show crawl progress
            html = renderSynchronously(w)
            s = remove_tags(html)
            self.failUnlessIn(" Current crawl ", s)
            self.failUnlessIn(" (next work in ", s)
        d.addCallback(_check)

        # now give it enough time to complete a full cycle
        def _watch():
            return not ss.bucket_counter.get_progress()["cycle-in-progress"]
        d.addCallback(lambda ignored: self.poll(_watch))
        def _check2(ignored):
            # after a full cycle the total bucket count is known and the
            # next crawl is scheduled roughly an hour out
            ss.bucket_counter.cpu_slice = orig_cpu_slice
            html = renderSynchronously(w)
            s = remove_tags(html)
            self.failUnlessIn("Total buckets: 0 (the number of", s)
            self.failUnless("Next crawl in 59 minutes" in s or "Next crawl in 60 minutes" in s, s)
        d.addCallback(_check2)
        return d

    def test_bucket_counter_cleanup(self):
        """Bogus entries injected mid-cycle must be gone after the cycle ends."""
        basedir = "storage/BucketCounter/bucket_counter_cleanup"
        fileutil.make_dirs(basedir)
        ss = StorageServer(basedir, "\x00" * 20)
        # to make sure we capture the bucket-counting-crawler in the middle
        # of a cycle, we reach in and reduce its maximum slice time to 0.
        ss.bucket_counter.slow_start = 0
        orig_cpu_slice = ss.bucket_counter.cpu_slice
        ss.bucket_counter.cpu_slice = 0
        ss.setServiceParent(self.s)

        d = fireEventually()

        def _after_first_prefix(ignored):
            state = ss.bucket_counter.state
            if state["last-complete-prefix"] is None:
                # crawler has not finished its first prefix yet; retry later
                d2 = fireEventually()
                d2.addCallback(_after_first_prefix)
                return d2
            ss.bucket_counter.cpu_slice = 100.0 # finish as fast as possible
            # now sneak in and mess with its state, to make sure it cleans up
            # properly at the end of the cycle
            self.failUnlessEqual(state["last-complete-prefix"],
                                 ss.bucket_counter.prefixes[0])
            state["bucket-counts"][-12] = {}
            state["storage-index-samples"]["bogusprefix!"] = (-12, [])
            ss.bucket_counter.save_state()
        d.addCallback(_after_first_prefix)

        # now give it enough time to complete a cycle
        def _watch():
            return not ss.bucket_counter.get_progress()["cycle-in-progress"]
        d.addCallback(lambda ignored: self.poll(_watch))
        def _check2(ignored):
            # the injected bogus keys must have been discarded
            ss.bucket_counter.cpu_slice = orig_cpu_slice
            s = ss.bucket_counter.get_state()
            self.failIf(-12 in s["bucket-counts"], s["bucket-counts"].keys())
            self.failIf("bogusprefix!" in s["storage-index-samples"],
                        s["storage-index-samples"].keys())
        d.addCallback(_check2)
        return d

    def test_bucket_counter_eta(self):
        """The status page should show an ETA once at least one prefix is done."""
        basedir = "storage/BucketCounter/bucket_counter_eta"
        fileutil.make_dirs(basedir)
        ss = MyStorageServer(basedir, "\x00" * 20)
        ss.bucket_counter.slow_start = 0
        # these will be fired inside finished_prefix()
        hooks = ss.bucket_counter.hook_ds = [defer.Deferred() for i in range(3)]
        w = StorageStatus(ss)

        d = defer.Deferred()

        def _check_1(ignored):
            # no ETA is available yet
            html = renderSynchronously(w)
            s = remove_tags(html)
            self.failUnlessIn("complete (next work", s)

        def _check_2(ignored):
            # one prefix has finished, so an ETA based upon that elapsed time
            # should be available.
            html = renderSynchronously(w)
            s = remove_tags(html)
            self.failUnlessIn("complete (ETA ", s)

        def _check_3(ignored):
            # two prefixes have finished
            html = renderSynchronously(w)
            s = remove_tags(html)
            self.failUnlessIn("complete (ETA ", s)
            d.callback("done")

        hooks[0].addCallback(_check_1).addErrback(d.errback)
        hooks[1].addCallback(_check_2).addErrback(d.errback)
        hooks[2].addCallback(_check_3).addErrback(d.errback)

        ss.setServiceParent(self.s)
        return d
    3179 
class InstrumentedLeaseCheckingCrawler(LeaseCheckingCrawler):
    """LeaseCheckingCrawler that can pause itself after the first bucket."""
    # When True, the first process_bucket() call forces an immediate yield
    # (negative cpu_slice) so tests can inspect mid-cycle state.
    stop_after_first_bucket = False
    def process_bucket(self, *args, **kwargs):
        LeaseCheckingCrawler.process_bucket(self, *args, **kwargs)
        if self.stop_after_first_bucket:
            self.stop_after_first_bucket = False
            self.cpu_slice = -1.0
    def yielding(self, sleep_time):
        # After the one-shot pause has triggered, allow very long slices so
        # the remainder of the cycle finishes quickly.
        if not self.stop_after_first_bucket:
            self.cpu_slice = 500
    3190 
    3191 class BrokenStatResults(object):
    3192     pass
    3193 
    3194 class No_ST_BLOCKS_LeaseCheckingCrawler(LeaseCheckingCrawler):
    3195     def stat(self, fn):
    3196         s = os.stat(fn)
    3197         bsr = BrokenStatResults()
    3198         for attrname in dir(s):
    3199             if attrname.startswith("_"):
    3200                 continue
    3201             if attrname == "st_blocks":
    3202                 continue
    3203             setattr(bsr, attrname, getattr(s, attrname))
    3204         return bsr
    3205 
class InstrumentedStorageServer(StorageServer):
    # Storage server wired to the pausable lease crawler, so tests can stop
    # right after the first bucket.
    LeaseCheckerClass = InstrumentedLeaseCheckingCrawler
class No_ST_BLOCKS_StorageServer(StorageServer):
    # Storage server whose lease crawler sees stat results with no st_blocks.
    LeaseCheckerClass = No_ST_BLOCKS_LeaseCheckingCrawler
    3210 
    3211 class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin):
    3212 
    def setUp(self):
        """Create and start the parent MultiService for crawler tests."""
        parent = service.MultiService()
        self.s = parent
        parent.startService()
    def tearDown(self):
        """Stop the parent service; return its Deferred so trial waits."""
        stopping = self.s.stopService()
        return stopping
    3218 
    def make_shares(self, ss):
        """
        Populate *ss* with two immutable and two mutable shares.

        Stores the four storage indexes in self.sis and the matching
        renew/cancel secrets in self.renew_secrets / self.cancel_secrets.
        immutable_si_1 and mutable_si_3 each receive a second lease.
        """
        def make(si):
            # -> (storage index, renew secret, cancel secret)
            return (si, hashutil.tagged_hash("renew", si),
                    hashutil.tagged_hash("cancel", si))
        def make_mutable(si):
            # like make(), plus a write-enabler for the mutable container
            return (si, hashutil.tagged_hash("renew", si),
                    hashutil.tagged_hash("cancel", si),
                    hashutil.tagged_hash("write-enabler", si))
        def make_extra_lease(si, num):
            # distinct secrets for an additional lease on the same share
            return (hashutil.tagged_hash("renew-%d" % num, si),
                    hashutil.tagged_hash("cancel-%d" % num, si))

        immutable_si_0, rs0, cs0 = make("\x00" * 16)
        immutable_si_1, rs1, cs1 = make("\x01" * 16)
        rs1a, cs1a = make_extra_lease(immutable_si_1, 1)
        mutable_si_2, rs2, cs2, we2 = make_mutable("\x02" * 16)
        mutable_si_3, rs3, cs3, we3 = make_mutable("\x03" * 16)
        rs3a, cs3a = make_extra_lease(mutable_si_3, 1)
        sharenums = [0]
        canary = FakeCanary()
        # note: 'tahoe debug dump-share' will not handle this file, since the
        # inner contents are not a valid CHK share
        data = "\xff" * 1000

        # two immutable shares, written through the bucket-writer interface
        a,w = ss.remote_allocate_buckets(immutable_si_0, rs0, cs0, sharenums,
                                         1000, canary)
        w[0].remote_write(0, data)
        w[0].remote_close()

        a,w = ss.remote_allocate_buckets(immutable_si_1, rs1, cs1, sharenums,
                                         1000, canary)
        w[0].remote_write(0, data)
        w[0].remote_close()
        ss.remote_add_lease(immutable_si_1, rs1a, cs1a)

        # two mutable shares, written via test-and-set writev
        writev = ss.remote_slot_testv_and_readv_and_writev
        writev(mutable_si_2, (we2, rs2, cs2),
               {0: ([], [(0,data)], len(data))}, [])
        writev(mutable_si_3, (we3, rs3, cs3),
               {0: ([], [(0,data)], len(data))}, [])
        ss.remote_add_lease(mutable_si_3, rs3a, cs3a)

        self.sis = [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3]
        self.renew_secrets = [rs0, rs1, rs1a, rs2, rs3, rs3a]
        self.cancel_secrets = [cs0, cs1, cs1a, cs2, cs3, cs3a]
    3264 
    def test_basic(self):
        """
        Run one full lease-crawler cycle with expiration disabled and check
        the crawler state, the rendered HTML, and the JSON status at each
        stage (before the cycle, mid-cycle, and after the cycle).
        """
        basedir = "storage/LeaseCrawler/basic"
        fileutil.make_dirs(basedir)
        ss = InstrumentedStorageServer(basedir, "\x00" * 20)
        # make it start sooner than usual.
        lc = ss.lease_checker
        lc.slow_start = 0
        lc.cpu_slice = 500
        lc.stop_after_first_bucket = True
        webstatus = StorageStatus(ss)

        # create a few shares, with some leases on them
        self.make_shares(ss)
        [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis

        # add a non-sharefile to exercise another code path
        fn = os.path.join(ss.sharedir,
                          storage_index_to_dir(immutable_si_0),
                          "not-a-share")
        f = open(fn, "wb")
        f.write("I am not a share.\n")
        f.close()

        # this is before the crawl has started, so we're not in a cycle yet
        initial_state = lc.get_state()
        self.failIf(lc.get_progress()["cycle-in-progress"])
        self.failIfIn("cycle-to-date", initial_state)
        self.failIfIn("estimated-remaining-cycle", initial_state)
        self.failIfIn("estimated-current-cycle", initial_state)
        self.failUnlessIn("history", initial_state)
        self.failUnlessEqual(initial_state["history"], {})

        ss.setServiceParent(self.s)

        DAY = 24*60*60

        d = fireEventually()

        # now examine the state right after the first bucket has been
        # processed.
        def _after_first_bucket(ignored):
            initial_state = lc.get_state()
            if "cycle-to-date" not in initial_state:
                # the crawler has not processed its first bucket yet; retry
                d2 = fireEventually()
                d2.addCallback(_after_first_bucket)
                return d2
            self.failUnlessIn("cycle-to-date", initial_state)
            self.failUnlessIn("estimated-remaining-cycle", initial_state)
            self.failUnlessIn("estimated-current-cycle", initial_state)
            self.failUnlessIn("history", initial_state)
            self.failUnlessEqual(initial_state["history"], {})

            # mid-cycle counters: exactly one bucket/share examined so far
            so_far = initial_state["cycle-to-date"]
            self.failUnlessEqual(so_far["expiration-enabled"], False)
            self.failUnlessIn("configured-expiration-mode", so_far)
            self.failUnlessIn("lease-age-histogram", so_far)
            lah = so_far["lease-age-histogram"]
            self.failUnlessEqual(type(lah), list)
            self.failUnlessEqual(len(lah), 1)
            self.failUnlessEqual(lah, [ (0.0, DAY, 1) ] )
            self.failUnlessEqual(so_far["leases-per-share-histogram"], {1: 1})
            self.failUnlessEqual(so_far["corrupt-shares"], [])
            sr1 = so_far["space-recovered"]
            self.failUnlessEqual(sr1["examined-buckets"], 1)
            self.failUnlessEqual(sr1["examined-shares"], 1)
            self.failUnlessEqual(sr1["actual-shares"], 0)
            self.failUnlessEqual(sr1["configured-diskbytes"], 0)
            self.failUnlessEqual(sr1["original-sharebytes"], 0)
            # the remaining-cycle estimate should be populated and non-None
            left = initial_state["estimated-remaining-cycle"]
            sr2 = left["space-recovered"]
            self.failUnless(sr2["examined-buckets"] > 0, sr2["examined-buckets"])
            self.failUnless(sr2["examined-shares"] > 0, sr2["examined-shares"])
            self.failIfEqual(sr2["actual-shares"], None)
            self.failIfEqual(sr2["configured-diskbytes"], None)
            self.failIfEqual(sr2["original-sharebytes"], None)
        d.addCallback(_after_first_bucket)
        d.addCallback(lambda ign: renderDeferred(webstatus))
        def _check_html_in_cycle(html):
            # the mid-cycle HTML should describe progress and predictions
            s = remove_tags(html)
            self.failUnlessIn("So far, this cycle has examined "
                              "1 shares in 1 buckets (0 mutable / 1 immutable) ", s)
            self.failUnlessIn("and has recovered: "
                              "0 shares, 0 buckets (0 mutable / 0 immutable), "
                              "0 B (0 B / 0 B)", s)
            self.failUnlessIn("If expiration were enabled, "
                              "we would have recovered: "
                              "0 shares, 0 buckets (0 mutable / 0 immutable),"
                              " 0 B (0 B / 0 B) by now", s)
            self.failUnlessIn("and the remainder of this cycle "
                              "would probably recover: "
                              "0 shares, 0 buckets (0 mutable / 0 immutable),"
                              " 0 B (0 B / 0 B)", s)
            self.failUnlessIn("and the whole cycle would probably recover: "
                              "0 shares, 0 buckets (0 mutable / 0 immutable),"
                              " 0 B (0 B / 0 B)", s)
            self.failUnlessIn("if we were strictly using each lease's default "
                              "31-day lease lifetime", s)
            self.failUnlessIn("this cycle would be expected to recover: ", s)
        d.addCallback(_check_html_in_cycle)

        # wait for the crawler to finish the first cycle. Nothing should have
        # been removed.
        def _wait():
            return bool(lc.get_state()["last-cycle-finished"] is not None)
        d.addCallback(lambda ign: self.poll(_wait))

        def _after_first_cycle(ignored):
            # once the cycle completes, mid-cycle keys disappear and the
            # results move into history[0]
            s = lc.get_state()
            self.failIf("cycle-to-date" in s)
            self.failIf("estimated-remaining-cycle" in s)
            self.failIf("estimated-current-cycle" in s)
            last = s["history"][0]
            self.failUnlessIn("cycle-start-finish-times", last)
            self.failUnlessEqual(type(last["cycle-start-finish-times"]), tuple)
            self.failUnlessEqual(last["expiration-enabled"], False)
            self.failUnlessIn("configured-expiration-mode", last)

            self.failUnlessIn("lease-age-histogram", last)
            lah = last["lease-age-histogram"]
            self.failUnlessEqual(type(lah), list)
            self.failUnlessEqual(len(lah), 1)
            self.failUnlessEqual(lah, [ (0.0, DAY, 6) ] )

            self.failUnlessEqual(last["leases-per-share-histogram"], {1: 2, 2: 2})
            self.failUnlessEqual(last["corrupt-shares"], [])

            # with expiration disabled, everything was examined, nothing
            # was recovered
            rec = last["space-recovered"]
            self.failUnlessEqual(rec["examined-buckets"], 4)
            self.failUnlessEqual(rec["examined-shares"], 4)
            self.failUnlessEqual(rec["actual-buckets"], 0)
            self.failUnlessEqual(rec["original-buckets"], 0)
            self.failUnlessEqual(rec["configured-buckets"], 0)
            self.failUnlessEqual(rec["actual-shares"], 0)
            self.failUnlessEqual(rec["original-shares"], 0)
            self.failUnlessEqual(rec["configured-shares"], 0)
            self.failUnlessEqual(rec["actual-diskbytes"], 0)
            self.failUnlessEqual(rec["original-diskbytes"], 0)
            self.failUnlessEqual(rec["configured-diskbytes"], 0)
            self.failUnlessEqual(rec["actual-sharebytes"], 0)
            self.failUnlessEqual(rec["original-sharebytes"], 0)
            self.failUnlessEqual(rec["configured-sharebytes"], 0)

            # all leases (including the extra ones) must still be present
            def _get_sharefile(si):
                return list(ss._iter_share_files(si))[0]
            def count_leases(si):
                return len(list(_get_sharefile(si).get_leases()))
            self.failUnlessEqual(count_leases(immutable_si_0), 1)
            self.failUnlessEqual(count_leases(immutable_si_1), 2)
            self.failUnlessEqual(count_leases(mutable_si_2), 1)
            self.failUnlessEqual(count_leases(mutable_si_3), 2)
        d.addCallback(_after_first_cycle)
        d.addCallback(lambda ign: renderDeferred(webstatus))
        def _check_html(html):
            s = remove_tags(html)
            self.failUnlessIn("recovered: 0 shares, 0 buckets "
                              "(0 mutable / 0 immutable), 0 B (0 B / 0 B) ", s)
            self.failUnlessIn("and saw a total of 4 shares, 4 buckets "
                              "(2 mutable / 2 immutable),", s)
            self.failUnlessIn("but expiration was not enabled", s)
        d.addCallback(_check_html)
        d.addCallback(lambda ign: renderJSON(webstatus))
        def _check_json(raw):
            # the JSON view should expose the lease-checker sections
            data = json.loads(raw)
            self.failUnlessIn("lease-checker", data)
            self.failUnlessIn("lease-checker-progress", data)
        d.addCallback(_check_json)
        return d
    3432 
    def backdate_lease(self, sf, renew_secret, new_expire_time):
        """Force a lease's expiration time backwards on disk.

        ShareFile.renew_lease ignores attempts to back-date a lease (i.e.
        "renew" a lease with a new_expire_time that is older than what the
        current lease has), so we have to reach inside it.
        """
        for idx, lease in enumerate(sf.get_leases()):
            if lease.renew_secret != renew_secret:
                continue
            lease.expiration_time = new_expire_time
            with open(sf.home, 'rb+') as f:
                sf._write_lease_record(f, idx, lease)
            return
        raise IndexError("unable to renew non-existent lease")
    3445 
    3446     def test_expire_age(self):
    3447         basedir = "storage/LeaseCrawler/expire_age"
    3448         fileutil.make_dirs(basedir)
    3449         # setting expiration_time to 2000 means that any lease which is more
    3450         # than 2000s old will be expired.
    3451         ss = InstrumentedStorageServer(basedir, "\x00" * 20,
    3452                                        expiration_enabled=True,
    3453                                        expiration_mode="age",
    3454                                        expiration_override_lease_duration=2000)
    3455         # make it start sooner than usual.
    3456         lc = ss.lease_checker
    3457         lc.slow_start = 0
    3458         lc.stop_after_first_bucket = True
    3459         webstatus = StorageStatus(ss)
    3460 
    3461         # create a few shares, with some leases on them
    3462         self.make_shares(ss)
    3463         [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis
    3464 
    3465         def count_shares(si):
    3466             return len(list(ss._iter_share_files(si)))
    3467         def _get_sharefile(si):
    3468             return list(ss._iter_share_files(si))[0]
    3469         def count_leases(si):
    3470             return len(list(_get_sharefile(si).get_leases()))
    3471 
    3472         self.failUnlessEqual(count_shares(immutable_si_0), 1)
    3473         self.failUnlessEqual(count_leases(immutable_si_0), 1)
    3474         self.failUnlessEqual(count_shares(immutable_si_1), 1)
    3475         self.failUnlessEqual(count_leases(immutable_si_1), 2)
    3476         self.failUnlessEqual(count_shares(mutable_si_2), 1)
    3477         self.failUnlessEqual(count_leases(mutable_si_2), 1)
    3478         self.failUnlessEqual(count_shares(mutable_si_3), 1)
    3479         self.failUnlessEqual(count_leases(mutable_si_3), 2)
    3480 
    3481         # artificially crank back the expiration time on the first lease of
    3482         # each share, to make it look like it expired already (age=1000s).
    3483         # Some shares have an extra lease which is set to expire at the
    3484         # default time in 31 days from now (age=31days). We then run the
    3485         # crawler, which will expire the first lease, making some shares get
    3486         # deleted and others stay alive (with one remaining lease)
    3487         now = time.time()
    3488 
    3489         sf0 = _get_sharefile(immutable_si_0)
    3490         self.backdate_lease(sf0, self.renew_secrets[0], now - 1000)
    3491         sf0_size = os.stat(sf0.home).st_size
    3492 
    3493         # immutable_si_1 gets an extra lease
    3494         sf1 = _get_sharefile(immutable_si_1)
    3495         self.backdate_lease(sf1, self.renew_secrets[1], now - 1000)
    3496 
    3497         sf2 = _get_sharefile(mutable_si_2)
    3498         self.backdate_lease(sf2, self.renew_secrets[3], now - 1000)
    3499         sf2_size = os.stat(sf2.home).st_size
    3500 
    3501         # mutable_si_3 gets an extra lease
    3502         sf3 = _get_sharefile(mutable_si_3)
    3503         self.backdate_lease(sf3, self.renew_secrets[4], now - 1000)
    3504 
    3505         ss.setServiceParent(self.s)
    3506 
    3507         d = fireEventually()
    3508         # examine the state right after the first bucket has been processed
    3509         def _after_first_bucket(ignored):
    3510             p = lc.get_progress()
    3511             if not p["cycle-in-progress"]:
    3512                 d2 = fireEventually()
    3513                 d2.addCallback(_after_first_bucket)
    3514                 return d2
    3515         d.addCallback(_after_first_bucket)
    3516         d.addCallback(lambda ign: renderDeferred(webstatus))
    3517         def _check_html_in_cycle(html):
    3518             s = remove_tags(html)
    3519             # the first bucket encountered gets deleted, and its prefix
    3520             # happens to be about 1/5th of the way through the ring, so the
    3521             # predictor thinks we'll have 5 shares and that we'll delete them
    3522             # all. This part of the test depends upon the SIs landing right
    3523             # where they do now.
    3524             self.failUnlessIn("The remainder of this cycle is expected to "
    3525                               "recover: 4 shares, 4 buckets", s)
    3526             self.failUnlessIn("The whole cycle is expected to examine "
    3527                               "5 shares in 5 buckets and to recover: "
    3528                               "5 shares, 5 buckets", s)
    3529         d.addCallback(_check_html_in_cycle)
    3530 
    3531         # wait for the crawler to finish the first cycle. Two shares should
    3532         # have been removed
    3533         def _wait():
    3534             return bool(lc.get_state()["last-cycle-finished"] is not None)
    3535         d.addCallback(lambda ign: self.poll(_wait))
    3536 
    3537         def _after_first_cycle(ignored):
    3538             self.failUnlessEqual(count_shares(immutable_si_0), 0)
    3539             self.failUnlessEqual(count_shares(immutable_si_1), 1)
    3540             self.failUnlessEqual(count_leases(immutable_si_1), 1)
    3541             self.failUnlessEqual(count_shares(mutable_si_2), 0)
    3542             self.failUnlessEqual(count_shares(mutable_si_3), 1)
    3543             self.failUnlessEqual(count_leases(mutable_si_3), 1)
    3544 
    3545             s = lc.get_state()
    3546             last = s["history"][0]
    3547 
    3548             self.failUnlessEqual(last["expiration-enabled"], True)
    3549             self.failUnlessEqual(last["configured-expiration-mode"],
    3550                                  ("age", 2000, None, ("mutable", "immutable")))
    3551             self.failUnlessEqual(last["leases-per-share-histogram"], {1: 2, 2: 2})
    3552 
    3553             rec = last["space-recovered"]
    3554             self.failUnlessEqual(rec["examined-buckets"], 4)
    3555             self.failUnlessEqual(rec["examined-shares"], 4)
    3556             self.failUnlessEqual(rec["actual-buckets"], 2)
    3557             self.failUnlessEqual(rec["original-buckets"], 2)
    3558             self.failUnlessEqual(rec["configured-buckets"], 2)
    3559             self.failUnlessEqual(rec["actual-shares"], 2)
    3560             self.failUnlessEqual(rec["original-shares"], 2)
    3561             self.failUnlessEqual(rec["configured-shares"], 2)
    3562             size = sf0_size + sf2_size
    3563             self.failUnlessEqual(rec["actual-sharebytes"], size)
    3564             self.failUnlessEqual(rec["original-sharebytes"], size)
    3565             self.failUnlessEqual(rec["configured-sharebytes"], size)
    3566             # different platforms have different notions of "blocks used by
    3567             # this file", so merely assert that it's a number
    3568             self.failUnless(rec["actual-diskbytes"] >= 0,
    3569                             rec["actual-diskbytes"])
    3570             self.failUnless(rec["original-diskbytes"] >= 0,
    3571                             rec["original-diskbytes"])
    3572             self.failUnless(rec["configured-diskbytes"] >= 0,
    3573                             rec["configured-diskbytes"])
    3574         d.addCallback(_after_first_cycle)
    3575         d.addCallback(lambda ign: renderDeferred(webstatus))
    3576         def _check_html(html):
    3577             s = remove_tags(html)
    3578             self.failUnlessIn("Expiration Enabled: expired leases will be removed", s)
    3579             self.failUnlessIn("Leases created or last renewed more than 33 minutes ago will be considered expired.", s)
    3580             self.failUnlessIn(" recovered: 2 shares, 2 buckets (1 mutable / 1 immutable), ", s)
    3581         d.addCallback(_check_html)
    3582         return d
    3583 
    def test_expire_cutoff_date(self):
        """
        Exercise the lease crawler in ``cutoff-date`` expiration mode:
        leases last renewed before the cutoff are expired, shares whose last
        lease expires are deleted, and the web status page reports progress
        and the recovered space.
        """
        basedir = "storage/LeaseCrawler/expire_cutoff_date"
        fileutil.make_dirs(basedir)
        # setting cutoff-date to 2000 seconds ago means that any lease which
        # is more than 2000s old will be expired.
        now = time.time()
        then = int(now - 2000)
        ss = InstrumentedStorageServer(basedir, "\x00" * 20,
                                       expiration_enabled=True,
                                       expiration_mode="cutoff-date",
                                       expiration_cutoff_date=then)
        # make it start sooner than usual.
        lc = ss.lease_checker
        lc.slow_start = 0
        # pause after one bucket so we can inspect mid-cycle state below
        lc.stop_after_first_bucket = True
        webstatus = StorageStatus(ss)

        # create a few shares, with some leases on them
        self.make_shares(ss)
        [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis

        def count_shares(si):
            # number of share files currently stored for this storage index
            return len(list(ss._iter_share_files(si)))
        def _get_sharefile(si):
            # the single share file for this storage index
            return list(ss._iter_share_files(si))[0]
        def count_leases(si):
            # number of leases on that share file
            return len(list(_get_sharefile(si).get_leases()))

        # baseline created by make_shares(): shares 1 and 3 carry two leases
        self.failUnlessEqual(count_shares(immutable_si_0), 1)
        self.failUnlessEqual(count_leases(immutable_si_0), 1)
        self.failUnlessEqual(count_shares(immutable_si_1), 1)
        self.failUnlessEqual(count_leases(immutable_si_1), 2)
        self.failUnlessEqual(count_shares(mutable_si_2), 1)
        self.failUnlessEqual(count_leases(mutable_si_2), 1)
        self.failUnlessEqual(count_shares(mutable_si_3), 1)
        self.failUnlessEqual(count_leases(mutable_si_3), 2)

        # artificially crank back the expiration time on the first lease of
        # each share, to make it look like was renewed 3000s ago. To achieve
        # this, we need to set the expiration time to now-3000+31days. This
        # will change when the lease format is improved to contain both
        # create/renew time and duration.
        new_expiration_time = now - 3000 + 31*24*60*60

        # Some shares have an extra lease which is set to expire at the
        # default time in 31 days from now (age=31days). We then run the
        # crawler, which will expire the first lease, making some shares get
        # deleted and others stay alive (with one remaining lease)

        sf0 = _get_sharefile(immutable_si_0)
        self.backdate_lease(sf0, self.renew_secrets[0], new_expiration_time)
        sf0_size = os.stat(sf0.home).st_size

        # immutable_si_1 gets an extra lease
        sf1 = _get_sharefile(immutable_si_1)
        self.backdate_lease(sf1, self.renew_secrets[1], new_expiration_time)

        sf2 = _get_sharefile(mutable_si_2)
        self.backdate_lease(sf2, self.renew_secrets[3], new_expiration_time)
        sf2_size = os.stat(sf2.home).st_size

        # mutable_si_3 gets an extra lease
        sf3 = _get_sharefile(mutable_si_3)
        self.backdate_lease(sf3, self.renew_secrets[4], new_expiration_time)

        ss.setServiceParent(self.s)

        d = fireEventually()
        # examine the state right after the first bucket has been processed
        def _after_first_bucket(ignored):
            p = lc.get_progress()
            if not p["cycle-in-progress"]:
                # crawler hasn't started yet; reschedule ourselves
                d2 = fireEventually()
                d2.addCallback(_after_first_bucket)
                return d2
        d.addCallback(_after_first_bucket)
        d.addCallback(lambda ign: renderDeferred(webstatus))
        def _check_html_in_cycle(html):
            s = remove_tags(html)
            # the first bucket encountered gets deleted, and its prefix
            # happens to be about 1/5th of the way through the ring, so the
            # predictor thinks we'll have 5 shares and that we'll delete them
            # all. This part of the test depends upon the SIs landing right
            # where they do now.
            self.failUnlessIn("The remainder of this cycle is expected to "
                              "recover: 4 shares, 4 buckets", s)
            self.failUnlessIn("The whole cycle is expected to examine "
                              "5 shares in 5 buckets and to recover: "
                              "5 shares, 5 buckets", s)
        d.addCallback(_check_html_in_cycle)

        # wait for the crawler to finish the first cycle. Two shares should
        # have been removed
        def _wait():
            return bool(lc.get_state()["last-cycle-finished"] is not None)
        d.addCallback(lambda ign: self.poll(_wait))

        def _after_first_cycle(ignored):
            # shares whose only lease was backdated past the cutoff are gone;
            # the ones with an extra (fresh) lease survive with one lease left
            self.failUnlessEqual(count_shares(immutable_si_0), 0)
            self.failUnlessEqual(count_shares(immutable_si_1), 1)
            self.failUnlessEqual(count_leases(immutable_si_1), 1)
            self.failUnlessEqual(count_shares(mutable_si_2), 0)
            self.failUnlessEqual(count_shares(mutable_si_3), 1)
            self.failUnlessEqual(count_leases(mutable_si_3), 1)

            s = lc.get_state()
            last = s["history"][0]

            self.failUnlessEqual(last["expiration-enabled"], True)
            self.failUnlessEqual(last["configured-expiration-mode"],
                                 ("cutoff-date", None, then,
                                  ("mutable", "immutable")))
            self.failUnlessEqual(last["leases-per-share-histogram"],
                                 {1: 2, 2: 2})

            rec = last["space-recovered"]
            self.failUnlessEqual(rec["examined-buckets"], 4)
            self.failUnlessEqual(rec["examined-shares"], 4)
            self.failUnlessEqual(rec["actual-buckets"], 2)
            # "original" counts use the default 31-day policy, under which
            # nothing would have been reclaimed yet
            self.failUnlessEqual(rec["original-buckets"], 0)
            self.failUnlessEqual(rec["configured-buckets"], 2)
            self.failUnlessEqual(rec["actual-shares"], 2)
            self.failUnlessEqual(rec["original-shares"], 0)
            self.failUnlessEqual(rec["configured-shares"], 2)
            size = sf0_size + sf2_size
            self.failUnlessEqual(rec["actual-sharebytes"], size)
            self.failUnlessEqual(rec["original-sharebytes"], 0)
            self.failUnlessEqual(rec["configured-sharebytes"], size)
            # different platforms have different notions of "blocks used by
            # this file", so merely assert that it's a number
            self.failUnless(rec["actual-diskbytes"] >= 0,
                            rec["actual-diskbytes"])
            self.failUnless(rec["original-diskbytes"] >= 0,
                            rec["original-diskbytes"])
            self.failUnless(rec["configured-diskbytes"] >= 0,
                            rec["configured-diskbytes"])
        d.addCallback(_after_first_cycle)
        d.addCallback(lambda ign: renderDeferred(webstatus))
        def _check_html(html):
            s = remove_tags(html)
            self.failUnlessIn("Expiration Enabled:"
                              " expired leases will be removed", s)
            date = time.strftime("%Y-%m-%d (%d-%b-%Y) UTC", time.gmtime(then))
            substr = "Leases created or last renewed before %s will be considered expired." % date
            self.failUnlessIn(substr, s)
            self.failUnlessIn(" recovered: 2 shares, 2 buckets (1 mutable / 1 immutable), ", s)
        d.addCallback(_check_html)
        return d
    3732 
    def test_only_immutable(self):
        """
        With ``expiration_sharetypes=("immutable",)``, a cutoff-date crawl
        deletes only the immutable shares; the mutable shares keep all of
        their leases and survive untouched.
        """
        basedir = "storage/LeaseCrawler/only_immutable"
        fileutil.make_dirs(basedir)
        now = time.time()
        then = int(now - 2000)
        ss = StorageServer(basedir, "\x00" * 20,
                           expiration_enabled=True,
                           expiration_mode="cutoff-date",
                           expiration_cutoff_date=then,
                           expiration_sharetypes=("immutable",))
        lc = ss.lease_checker
        lc.slow_start = 0  # begin crawling right away
        webstatus = StorageStatus(ss)

        self.make_shares(ss)
        [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis
        # push every lease behind the cutoff, so each one is a candidate for
        # expiration; only the sharetype filter should save any share
        new_expiration_time = now - 3000 + 31*24*60*60

        def num_shares(si):
            # how many share files exist for this storage index
            return len(list(ss._iter_share_files(si)))
        def first_sharefile(si):
            # the single share file stored for this storage index
            return list(ss._iter_share_files(si))[0]
        def num_leases(si):
            # how many leases that share file carries
            return len(list(first_sharefile(si).get_leases()))

        # backdate every lease on every share (shares 1 and 3 carry two)
        for si, secret_indices in [(immutable_si_0, [0]),
                                   (immutable_si_1, [1, 2]),
                                   (mutable_si_2, [3]),
                                   (mutable_si_3, [4, 5])]:
            shf = first_sharefile(si)
            for idx in secret_indices:
                self.backdate_lease(shf, self.renew_secrets[idx],
                                    new_expiration_time)

        ss.setServiceParent(self.s)
        def _cycle_done():
            return bool(lc.get_state()["last-cycle-finished"] is not None)
        d = self.poll(_cycle_done)

        def _check_survivors(ignored):
            # both immutable shares were reclaimed; the mutable shares remain
            # with their full complement of leases
            self.failUnlessEqual(num_shares(immutable_si_0), 0)
            self.failUnlessEqual(num_shares(immutable_si_1), 0)
            self.failUnlessEqual(num_shares(mutable_si_2), 1)
            self.failUnlessEqual(num_leases(mutable_si_2), 1)
            self.failUnlessEqual(num_shares(mutable_si_3), 1)
            self.failUnlessEqual(num_leases(mutable_si_3), 2)
        d.addCallback(_check_survivors)
        d.addCallback(lambda ign: renderDeferred(webstatus))
        def _check_webstatus(html):
            self.failUnlessIn("The following sharetypes will be expired: immutable.",
                              remove_tags(html))
        d.addCallback(_check_webstatus)
        return d
    3789 
    def test_only_mutable(self):
        """
        With ``expiration_sharetypes=("mutable",)``, a cutoff-date crawl
        deletes only the mutable shares; the immutable shares keep all of
        their leases and survive untouched.
        """
        basedir = "storage/LeaseCrawler/only_mutable"
        fileutil.make_dirs(basedir)
        now = time.time()
        then = int(now - 2000)
        ss = StorageServer(basedir, "\x00" * 20,
                           expiration_enabled=True,
                           expiration_mode="cutoff-date",
                           expiration_cutoff_date=then,
                           expiration_sharetypes=("mutable",))
        lc = ss.lease_checker
        # make the crawler start right away
        lc.slow_start = 0
        webstatus = StorageStatus(ss)

        self.make_shares(ss)
        [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis
        # set all leases to be expirable
        new_expiration_time = now - 3000 + 31*24*60*60

        def count_shares(si):
            # number of share files present for this storage index
            return len(list(ss._iter_share_files(si)))
        def _get_sharefile(si):
            # the single share file for this storage index
            return list(ss._iter_share_files(si))[0]
        def count_leases(si):
            # number of leases on that share file
            return len(list(_get_sharefile(si).get_leases()))

        # backdate every lease on every share (shares 1 and 3 carry two)
        sf0 = _get_sharefile(immutable_si_0)
        self.backdate_lease(sf0, self.renew_secrets[0], new_expiration_time)
        sf1 = _get_sharefile(immutable_si_1)
        self.backdate_lease(sf1, self.renew_secrets[1], new_expiration_time)
        self.backdate_lease(sf1, self.renew_secrets[2], new_expiration_time)
        sf2 = _get_sharefile(mutable_si_2)
        self.backdate_lease(sf2, self.renew_secrets[3], new_expiration_time)
        sf3 = _get_sharefile(mutable_si_3)
        self.backdate_lease(sf3, self.renew_secrets[4], new_expiration_time)
        self.backdate_lease(sf3, self.renew_secrets[5], new_expiration_time)

        ss.setServiceParent(self.s)
        def _wait():
            # true once the crawler has completed its first full cycle
            return bool(lc.get_state()["last-cycle-finished"] is not None)
        d = self.poll(_wait)

        def _after_first_cycle(ignored):
            # immutable shares survive with all leases; mutable ones are gone
            self.failUnlessEqual(count_shares(immutable_si_0), 1)
            self.failUnlessEqual(count_leases(immutable_si_0), 1)
            self.failUnlessEqual(count_shares(immutable_si_1), 1)
            self.failUnlessEqual(count_leases(immutable_si_1), 2)
            self.failUnlessEqual(count_shares(mutable_si_2), 0)
            self.failUnlessEqual(count_shares(mutable_si_3), 0)
        d.addCallback(_after_first_cycle)
        d.addCallback(lambda ign: renderDeferred(webstatus))
        def _check_html(html):
            s = remove_tags(html)
            self.failUnlessIn("The following sharetypes will be expired: mutable.", s)
        d.addCallback(_check_html)
        return d
    3846 
    def test_bad_mode(self):
        """
        Constructing a StorageServer with an unrecognized expiration_mode=
        raises ValueError with a message naming the valid modes.
        """
        basedir = "storage/LeaseCrawler/bad_mode"
        fileutil.make_dirs(basedir)
        err = self.failUnlessRaises(
            ValueError,
            StorageServer, basedir, "\x00" * 20, expiration_mode="bogus")
        self.failUnlessIn(
            "GC mode 'bogus' must be 'age' or 'cutoff-date'", str(err))
    3854 
    def test_limited_history(self):
        """
        The crawler retains only a bounded history: after 15 completed
        cycles, exactly the 10 most recent entries (cycles 6..15) remain.
        """
        basedir = "storage/LeaseCrawler/limited_history"
        fileutil.make_dirs(basedir)
        ss = StorageServer(basedir, "\x00" * 20)
        # make it start sooner than usual.
        lc = ss.lease_checker
        lc.slow_start = 0
        # a huge cpu_slice lets each cycle finish in one pass
        lc.cpu_slice = 500

        # create a few shares, with some leases on them
        self.make_shares(ss)

        ss.setServiceParent(self.s)

        def _wait_until_15_cycles_done():
            last = lc.state["last-cycle-finished"]
            if last is not None and last >= 15:
                return True
            if lc.timer:
                # don't wait out the inter-cycle delay: fire the timer now
                lc.timer.reset(0)
            return False
        d = self.poll(_wait_until_15_cycles_done)

        def _check(ignored):
            s = lc.get_state()
            h = s["history"]
            # only the most recent 10 cycles are kept
            self.failUnlessEqual(len(h), 10)
            self.failUnlessEqual(max(h.keys()), 15)
            self.failUnlessEqual(min(h.keys()), 6)
        d.addCallback(_check)
        return d
    3886 
    def test_unpredictable_future(self):
        """
        If the crawler is interrupted before it has made enough progress to
        extrapolate, both the remaining-cycle and the whole-cycle
        space-recovered estimates must decline to predict anything: every
        field is None.

        Bug fix: the original test read ``s["estimated-remaining-cycle"]``
        twice; the second assertion block (bound to a variable named
        ``full``) was clearly intended to inspect
        ``s["estimated-current-cycle"]``, which was asserted present but
        never examined.
        """
        basedir = "storage/LeaseCrawler/unpredictable_future"
        fileutil.make_dirs(basedir)
        ss = StorageServer(basedir, "\x00" * 20)
        # make it start sooner than usual.
        lc = ss.lease_checker
        lc.slow_start = 0
        lc.cpu_slice = -1.0 # stop quickly

        self.make_shares(ss)

        ss.setServiceParent(self.s)

        d = fireEventually()
        def _check(ignored):
            # this should fire after the first bucket is complete, but before
            # the first prefix is complete, so the progress-measurer won't
            # think we've gotten far enough to raise our percent-complete
            # above 0%, triggering the cannot-predict-the-future code in
            # expirer.py . This will have to change if/when the
            # progress-measurer gets smart enough to count buckets (we'll
            # have to interrupt it even earlier, before it's finished the
            # first bucket).
            s = lc.get_state()
            if "cycle-to-date" not in s:
                # crawler hasn't started yet; reschedule ourselves
                d2 = fireEventually()
                d2.addCallback(_check)
                return d2
            self.failUnlessIn("cycle-to-date", s)
            self.failUnlessIn("estimated-remaining-cycle", s)
            self.failUnlessIn("estimated-current-cycle", s)

            # both estimates must report "can't predict", encoded as None
            # in every space-recovered field
            for estimate_key in ["estimated-remaining-cycle",
                                 "estimated-current-cycle"]:
                rec = s[estimate_key]["space-recovered"]
                for prefix in ["actual", "original", "configured"]:
                    for what in ["buckets", "shares",
                                 "diskbytes", "sharebytes"]:
                        k = "%s-%s" % (prefix, what)
                        self.failUnlessEqual(rec[k], None, (estimate_key, k))

        d.addCallback(_check)
        return d
    3949 
    def test_no_st_blocks(self):
        """
        When os.stat() results have no .st_blocks field, the crawler must
        fall back to reporting diskbytes equal to sharebytes.
        """
        basedir = "storage/LeaseCrawler/no_st_blocks"
        fileutil.make_dirs(basedir)
        # a negative lease duration means every lease has already expired,
        # so the "configured-" space-recovered counters will be non-zero
        ss = No_ST_BLOCKS_StorageServer(basedir, "\x00" * 20,
                                        expiration_mode="age",
                                        expiration_override_lease_duration=-1000)

        lc = ss.lease_checker
        lc.slow_start = 0  # make it start sooner than usual

        self.make_shares(ss)
        ss.setServiceParent(self.s)

        def _cycle_finished():
            return bool(lc.get_state()["last-cycle-finished"] is not None)
        d = self.poll(_cycle_finished)

        def _examine(ignored):
            rec = lc.get_state()["history"][0]["space-recovered"]
            self.failUnlessEqual(rec["configured-buckets"], 4)
            self.failUnlessEqual(rec["configured-shares"], 4)
            self.failUnless(rec["configured-sharebytes"] > 0,
                            rec["configured-sharebytes"])
            # with no .st_blocks available, diskbytes must mirror sharebytes
            self.failUnlessEqual(rec["configured-sharebytes"],
                                 rec["configured-diskbytes"])
        d.addCallback(_examine)
        return d
    3984 
    def test_share_corruption(self):
        """
        A share file with a corrupted header must not stop the lease
        crawler: the bucket is reported in corrupt-shares (in both the JSON
        and HTML status renderings), the cycle still completes, and the
        corrupt share is excluded from the examined-shares count.

        Bug fix: the corrupting write used a ``str`` literal on a file
        opened in binary mode ("rb+"), which raises TypeError on Python 3;
        it now writes ``b"BAD MAGIC"`` (identical on Python 2). The SI fed
        to ``base32.b2a`` is likewise a bytes literal now.
        """
        # poll() would normally flunk the test on these logged errors;
        # corruption is exactly what we are provoking here.
        self._poll_should_ignore_these_errors = [
            UnknownMutableContainerVersionError,
            UnknownImmutableContainerVersionError,
            ]
        basedir = "storage/LeaseCrawler/share_corruption"
        fileutil.make_dirs(basedir)
        ss = InstrumentedStorageServer(basedir, "\x00" * 20)
        w = StorageStatus(ss)
        # make it start sooner than usual.
        lc = ss.lease_checker
        lc.stop_after_first_bucket = True
        lc.slow_start = 0
        lc.cpu_slice = 500

        # create a few shares, with some leases on them
        self.make_shares(ss)

        # now corrupt one, and make sure the lease-checker keeps going
        [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis
        first = min(self.sis)
        first_b32 = base32.b2a(first)
        fn = os.path.join(ss.sharedir, storage_index_to_dir(first), "0")
        # the file is open in binary mode, so the overwrite must be bytes
        with open(fn, "rb+") as f:
            f.seek(0)
            f.write(b"BAD MAGIC")
        # if get_share_file() doesn't see the correct mutable magic, it
        # assumes the file is an immutable share, and then
        # immutable.ShareFile sees a bad version. So regardless of which kind
        # of share we corrupted, this will trigger an
        # UnknownImmutableContainerVersionError.

        # also create an empty bucket
        empty_si = base32.b2a(b"\x04"*16)
        empty_bucket_dir = os.path.join(ss.sharedir,
                                        storage_index_to_dir(empty_si))
        fileutil.make_dirs(empty_bucket_dir)

        ss.setServiceParent(self.s)

        d = fireEventually()

        # now examine the state right after the first bucket has been
        # processed.
        def _after_first_bucket(ignored):
            s = lc.get_state()
            if "cycle-to-date" not in s:
                # crawler hasn't started yet; reschedule ourselves
                d2 = fireEventually()
                d2.addCallback(_after_first_bucket)
                return d2
            so_far = s["cycle-to-date"]
            rec = so_far["space-recovered"]
            # the corrupt bucket was visited but its share wasn't countable
            self.failUnlessEqual(rec["examined-buckets"], 1)
            self.failUnlessEqual(rec["examined-shares"], 0)
            self.failUnlessEqual(so_far["corrupt-shares"], [(first_b32, 0)])
        d.addCallback(_after_first_bucket)

        d.addCallback(lambda ign: renderJSON(w))
        def _check_json(raw):
            data = json.loads(raw)
            # grr. json turns all dict keys into strings.
            so_far = data["lease-checker"]["cycle-to-date"]
            corrupt_shares = so_far["corrupt-shares"]
            # it also turns all tuples into lists
            self.failUnlessEqual(corrupt_shares, [[first_b32, 0]])
        d.addCallback(_check_json)
        d.addCallback(lambda ign: renderDeferred(w))
        def _check_html(html):
            s = remove_tags(html)
            self.failUnlessIn("Corrupt shares: SI %s shnum 0" % first_b32, s)
        d.addCallback(_check_html)

        def _wait():
            return bool(lc.get_state()["last-cycle-finished"] is not None)
        d.addCallback(lambda ign: self.poll(_wait))

        def _after_first_cycle(ignored):
            s = lc.get_state()
            last = s["history"][0]
            rec = last["space-recovered"]
            # 4 real buckets plus the empty one; the corrupt share is not
            # counted among the examined shares
            self.failUnlessEqual(rec["examined-buckets"], 5)
            self.failUnlessEqual(rec["examined-shares"], 3)
            self.failUnlessEqual(last["corrupt-shares"], [(first_b32, 0)])
        d.addCallback(_after_first_cycle)
        d.addCallback(lambda ign: renderJSON(w))
        def _check_json_history(raw):
            data = json.loads(raw)
            last = data["lease-checker"]["history"]["0"]
            corrupt_shares = last["corrupt-shares"]
            self.failUnlessEqual(corrupt_shares, [[first_b32, 0]])
        d.addCallback(_check_json_history)
        d.addCallback(lambda ign: renderDeferred(w))
        def _check_html_history(html):
            s = remove_tags(html)
            self.failUnlessIn("Corrupt shares: SI %s shnum 0" % first_b32, s)
        d.addCallback(_check_html_history)

        def _cleanup(res):
            # mark the provoked errors as expected so trial doesn't flag them
            self.flushLoggedErrors(UnknownMutableContainerVersionError,
                                   UnknownImmutableContainerVersionError)
            return res
        d.addBoth(_cleanup)
        return d
    4089 
    4090 
    4091 class WebStatus(unittest.TestCase, pollmixin.PollMixin):
    4092 
    def setUp(self):
        # parent service for the storage servers created by each test; they
        # are attached via setServiceParent() so teardown can stop them all
        self.s = service.MultiService()
        self.s.startService()
    def tearDown(self):
        # stopping the MultiService stops every storage server attached to it;
        # return the Deferred so trial waits for shutdown to complete
        return self.s.stopService()
    4098 
    def test_no_server(self):
        """
        Rendering a StorageStatus page with no storage server attached
        produces the "No Storage Server Running" placeholder.
        """
        page = renderSynchronously(StorageStatus(None))
        self.failUnlessIn("<h1>No Storage Server Running</h1>", page)
    4103 
    def test_status(self):
        """
        The status page shows the nickname, nodeid, share-acceptance flag
        and reserved space, and the JSON rendering carries the matching
        stats counters plus the crawler sections.
        """
        basedir = "storage/WebStatus/status"
        fileutil.make_dirs(basedir)
        nodeid = "\x00" * 20
        ss = StorageServer(basedir, nodeid)
        ss.setServiceParent(self.s)
        w = StorageStatus(ss, "nickname")
        d = renderDeferred(w)
        def _examine_html(html):
            self.failUnlessIn("<h1>Storage Server Status</h1>", html)
            text = remove_tags(html)
            for fragment in ["Server Nickname: nickname",
                             "Server Nodeid: %s" % base32.b2a(nodeid),
                             "Accepting new shares: Yes",
                             "Reserved space: - 0 B (0)"]:
                self.failUnlessIn(fragment, text)
        d.addCallback(_examine_html)
        d.addCallback(lambda ign: renderJSON(w))
        def _examine_json(raw):
            data = json.loads(raw)
            stats = data["stats"]
            self.failUnlessEqual(stats["storage_server.accepting_immutable_shares"], 1)
            self.failUnlessEqual(stats["storage_server.reserved_space"], 0)
            self.failUnlessIn("bucket-counter", data)
            self.failUnlessIn("lease-checker", data)
        d.addCallback(_examine_json)
        return d
    4130 
    4131 
    4132     def test_status_no_disk_stats(self):
    4133         def call_get_disk_stats(whichdir, reserved_space=0):
    4134             raise AttributeError()
    4135         self.patch(fileutil, 'get_disk_stats', call_get_disk_stats)
    4136 
    4137         # Some platforms may have no disk stats API. Make sure the code can handle that
    4138         # (test runs on all platforms).
    4139         basedir = "storage/WebStatus/status_no_disk_stats"
    4140         fileutil.make_dirs(basedir)
    4141         ss = StorageServer(basedir, "\x00" * 20)
    4142         ss.setServiceParent(self.s)
    4143         w = StorageStatus(ss)
    4144         html = renderSynchronously(w)
    4145         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
    4146         s = remove_tags(html)
    4147         self.failUnlessIn("Accepting new shares: Yes", s)
    4148         self.failUnlessIn("Total disk space: ?", s)
    4149         self.failUnlessIn("Space Available to Tahoe: ?", s)
    4150         self.failUnless(ss.get_available_space() is None)
    4151 
    4152     def test_status_bad_disk_stats(self):
    4153         def call_get_disk_stats(whichdir, reserved_space=0):
    4154             raise OSError()
    4155         self.patch(fileutil, 'get_disk_stats', call_get_disk_stats)
    4156 
    4157         # If the API to get disk stats exists but a call to it fails, then the status should
    4158         # show that no shares will be accepted, and get_available_space() should be 0.
    4159         basedir = "storage/WebStatus/status_bad_disk_stats"
    4160         fileutil.make_dirs(basedir)
    4161         ss = StorageServer(basedir, "\x00" * 20)
    4162         ss.setServiceParent(self.s)
    4163         w = StorageStatus(ss)
    4164         html = renderSynchronously(w)
    4165         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
    4166         s = remove_tags(html)
    4167         self.failUnlessIn("Accepting new shares: No", s)
    4168         self.failUnlessIn("Total disk space: ?", s)
    4169         self.failUnlessIn("Space Available to Tahoe: ?", s)
    4170         self.failUnlessEqual(ss.get_available_space(), 0)
    4171 
    4172     def test_status_right_disk_stats(self):
    4173         GB = 1000000000
    4174         total            = 5*GB
    4175         free_for_root    = 4*GB
    4176         free_for_nonroot = 3*GB
    4177         reserved         = 1*GB
    4178 
    4179         basedir = "storage/WebStatus/status_right_disk_stats"
    4180         fileutil.make_dirs(basedir)
    4181         ss = StorageServer(basedir, "\x00" * 20, reserved_space=reserved)
    4182         expecteddir = ss.sharedir
    4183 
    4184         def call_get_disk_stats(whichdir, reserved_space=0):
    4185             self.failUnlessEqual(whichdir, expecteddir)
    4186             self.failUnlessEqual(reserved_space, reserved)
    4187             used = total - free_for_root
    4188             avail = max(free_for_nonroot - reserved_space, 0)
    4189             return {
    4190               'total': total,
    4191               'free_for_root': free_for_root,
    4192               'free_for_nonroot': free_for_nonroot,
    4193               'used': used,
    4194               'avail': avail,
    4195             }
    4196         self.patch(fileutil, 'get_disk_stats', call_get_disk_stats)
    4197 
    4198         ss.setServiceParent(self.s)
    4199         w = StorageStatus(ss)
    4200         html = renderSynchronously(w)
    4201 
    4202         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
    4203         s = remove_tags(html)
    4204         self.failUnlessIn("Total disk space: 5.00 GB", s)
    4205         self.failUnlessIn("Disk space used: - 1.00 GB", s)
    4206         self.failUnlessIn("Disk space free (root): 4.00 GB", s)
    4207         self.failUnlessIn("Disk space free (non-root): 3.00 GB", s)
    4208         self.failUnlessIn("Reserved space: - 1.00 GB", s)
    4209         self.failUnlessIn("Space Available to Tahoe: 2.00 GB", s)
    4210         self.failUnlessEqual(ss.get_available_space(), 2*GB)
    4211 
    4212     def test_readonly(self):
    4213         basedir = "storage/WebStatus/readonly"
    4214         fileutil.make_dirs(basedir)
    4215         ss = StorageServer(basedir, "\x00" * 20, readonly_storage=True)
    4216         ss.setServiceParent(self.s)
    4217         w = StorageStatus(ss)
    4218         html = renderSynchronously(w)
    4219         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
    4220         s = remove_tags(html)
    4221         self.failUnlessIn("Accepting new shares: No", s)
    4222 
    4223     def test_reserved(self):
    4224         basedir = "storage/WebStatus/reserved"
    4225         fileutil.make_dirs(basedir)
    4226         ss = StorageServer(basedir, "\x00" * 20, reserved_space=10e6)
    4227         ss.setServiceParent(self.s)
    4228         w = StorageStatus(ss)
    4229         html = renderSynchronously(w)
    4230         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
    4231         s = remove_tags(html)
    4232         self.failUnlessIn("Reserved space: - 10.00 MB (10000000)", s)
    4233 
    4234     def test_huge_reserved(self):
    4235         basedir = "storage/WebStatus/reserved"
    4236         fileutil.make_dirs(basedir)
    4237         ss = StorageServer(basedir, "\x00" * 20, reserved_space=10e6)
    4238         ss.setServiceParent(self.s)
    4239         w = StorageStatus(ss)
    4240         html = renderSynchronously(w)
    4241         self.failUnlessIn("<h1>Storage Server Status</h1>", html)
    4242         s = remove_tags(html)
    4243         self.failUnlessIn("Reserved space: - 10.00 MB (10000000)", s)
    4244 
    4245     def test_util(self):
    4246         w = StorageStatusElement(None, None)
    4247         self.failUnlessEqual(w.render_space(None), "?")
    4248         self.failUnlessEqual(w.render_space(10e6), "10000000")
    4249         self.failUnlessEqual(w.render_abbrev_space(None), "?")
    4250         self.failUnlessEqual(w.render_abbrev_space(10e6), "10.00 MB")
    4251         self.failUnlessEqual(remove_prefix("foo.bar", "foo."), "bar")
    4252         self.failUnlessEqual(remove_prefix("foo.bar", "baz."), None)
Note: See TracChangeset for help on using the changeset viewer.