Changeset 67ad0175 in trunk
- Timestamp: 2011-05-27T12:01:35Z (14 years ago)
- Branches: master
- Children: ff136b8e
- Parents: d566e46
- Files: 5 edited
NEWS.rst
--- NEWS.rst (rd566e46)
+++ NEWS.rst (r67ad0175)
@@ -1,5 +1,15 @@
 ==================================
 User-Visible Changes in Tahoe-LAFS
 ==================================
+
+Release 1.9.0 (2011-??-??)
+--------------------------
+
+
+- Nodes now emit "None" for percentiles with higher implied precision
+  than the number of observations can support. Older stats gatherers
+  will throw an exception if they gather stats from a new storage
+  server and it sends a "None" for a percentile. (`#1392`_)
+
 
 Release 1.8.2 (2011-01-30)
docs/stats.rst
--- docs/stats.rst (rd566e46)
+++ docs/stats.rst (r67ad0175)
@@ -1,3 +1,3 @@
 ================
 Tahoe Statistics
 ================
@@ -45,5 +45,5 @@
   by client-only nodes which have been configured to not run a storage server
   (with [storage]enabled=false in tahoe.cfg)

 allocate, write, close, abort
   these are for immutable file uploads. 'allocate' is incremented when a
@@ -135,4 +135,12 @@
   given number, and is the same threshold used by Amazon's
   internal SLA, according to the Dynamo paper).
+  Percentiles are only reported in the case of a sufficient
+  number of observations for unambiguous interpretation. For
+  example, the 99.9th percentile is (at the level of thousandths
+  precision) 9 thousandths greater than the 99th
+  percentile for sample sizes greater than or equal to 1000,
+  thus the 99.9th percentile is only reported for samples of 1000
+  or more observations.
+

 **counters.uploader.files_uploaded**
@@ -196,5 +204,5 @@
 active_uploads
   how many files are currently being uploaded. 0 when idle.

 incoming_count
   how many cache files are present in the incoming/ directory,
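The rule described above generalizes beyond the 99.9th percentile: each reported percentile has a minimum sample size below which it is withheld. The sketch below restates that rule as standalone Python; the threshold table mirrors the orderstatlist introduced in src/allmydata/storage/server.py by this changeset, but the helper itself is illustrative and not part of Tahoe-LAFS.

# Illustrative sketch (not Tahoe-LAFS code): report a percentile only when
# the sample is large enough to pin it down unambiguously, else use None.
# Thresholds mirror the orderstatlist added to storage/server.py below.
PERCENTILE_THRESHOLDS = [
    (0.01,  "01_0_percentile", 100),
    (0.10,  "10_0_percentile", 10),
    (0.50,  "50_0_percentile", 10),
    (0.90,  "90_0_percentile", 10),
    (0.95,  "95_0_percentile", 20),
    (0.99,  "99_0_percentile", 100),
    (0.999, "99_9_percentile", 1000),
]

def report_percentiles(samples):
    """Map percentile name -> order statistic, or None when the sample
    is too small for that percentile to be meaningful."""
    ordered = sorted(samples)
    count = len(ordered)
    stats = {}
    for fraction, name, min_count in PERCENTILE_THRESHOLDS:
        if count >= min_count:
            stats[name] = ordered[int(fraction * count)]
        else:
            stats[name] = None
    return stats

# With 20 samples the 95th percentile is reported but the 99th is not:
# report_percentiles(range(20)) includes "95_0_percentile": 19 and
# "99_0_percentile": None.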
src/allmydata/interfaces.py
--- src/allmydata/interfaces.py (rd566e46)
+++ src/allmydata/interfaces.py (r67ad0175)
@@ -2391,10 +2391,10 @@
         """
         returns a dictionary containing 'counters' and 'stats', each a
-        dictionary with string counter/stat name keys, and numeric values.
+        dictionary with string counter/stat name keys, and numeric or None values.
         counters are monotonically increasing measures of work done, and
         stats are instantaneous measures (potentially time averaged
         internally)
         """
-        return DictOf(str, DictOf(str, ChoiceOf(float, int, long)))
+        return DictOf(str, DictOf(str, ChoiceOf(float, int, long, None)))

 class RIStatsGatherer(RemoteInterface):
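For illustration only, here is the kind of value the widened schema now admits over the wire. The category and key names below are made up for the example; what is grounded in the docstring is the two-level shape ('counters' and 'stats', string keys, numeric-or-None values).

# Hypothetical example of a get_stats() result that the new schema accepts:
# string keys throughout, values either numeric or None.
example_stats = {
    "counters": {
        "example.operations_completed": 42,   # monotonically increasing
    },
    "stats": {
        "example.latency.samplesize": 1,      # instantaneous measure
        "example.latency.mean": None,         # too few samples to report
        "example.latency.99_9_percentile": None,
    },
}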
src/allmydata/storage/server.py
--- src/allmydata/storage/server.py (rd566e46)
+++ src/allmydata/storage/server.py (r67ad0175)
@@ -117,10 +117,13 @@
     def get_latencies(self):
         """Return a dict, indexed by category, that contains a dict of
-        latency numbers for each category. Each dict will contain the
+        latency numbers for each category. If there are sufficient samples
+        for unambiguous interpretation, each dict will contain the
         following keys: mean, 01_0_percentile, 10_0_percentile,
         50_0_percentile (median), 90_0_percentile, 95_0_percentile,
-        99_0_percentile, 99_9_percentile. If no samples have been collected
-        for the given category, then that category name will not be present
-        in the return value."""
+        99_0_percentile, 99_9_percentile. If there are insufficient
+        samples for a given percentile to be interpreted unambiguously
+        that percentile will be reported as None. If no samples have been
+        collected for the given category, then that category name will
+        not be present in the return value. """
         # note that Amazon's Dynamo paper says they use 99.9% percentile.
         output = {}
@@ -130,14 +133,23 @@
             stats = {}
             samples = self.latencies[category][:]
+            count = len(samples)
+            stats["samplesize"] = count
             samples.sort()
-            count = len(samples)
-            stats["mean"] = sum(samples) / count
-            stats["01_0_percentile"] = samples[int(0.01 * count)]
-            stats["10_0_percentile"] = samples[int(0.1 * count)]
-            stats["50_0_percentile"] = samples[int(0.5 * count)]
-            stats["90_0_percentile"] = samples[int(0.9 * count)]
-            stats["95_0_percentile"] = samples[int(0.95 * count)]
-            stats["99_0_percentile"] = samples[int(0.99 * count)]
-            stats["99_9_percentile"] = samples[int(0.999 * count)]
+            if count > 1:
+                stats["mean"] = sum(samples) / count
+            else:
+                stats["mean"] = None
+
+            orderstatlist = [(0.01, "01_0_percentile", 100), (0.1, "10_0_percentile", 10),\
+                             (0.50, "50_0_percentile", 10), (0.90, "90_0_percentile", 10),\
+                             (0.95, "95_0_percentile", 20), (0.99, "99_0_percentile", 100),\
+                             (0.999, "99_9_percentile", 1000)]
+
+            for percentile, percentilestring, minnumtoobserve in orderstatlist:
+                if count >= minnumtoobserve:
+                    stats[percentilestring] = samples[int(percentile*count)]
+                else:
+                    stats[percentilestring] = None
+
             output[category] = stats
         return output
@@ -552,3 +564,2 @@
                          level=log.SCARY, umid="SGx2fA")
             return None
-
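Because an older stats gatherer will raise when it receives one of these None values (see the NEWS entry above), consumers written against this change need to tolerate None explicitly. A hypothetical consumer-side sketch, assuming only the dict shape that get_latencies() returns:

# Hypothetical consumer of StorageServer.get_latencies(): skip any
# percentile that was withheld as None instead of formatting it.
def summarize_latencies(latencies):
    lines = []
    for category in sorted(latencies):
        stats = latencies[category]
        reported = ["%s=%s" % (name, stats[name])
                    for name in sorted(stats)
                    if stats[name] is not None]
        lines.append("%s: %s" % (category, ", ".join(reported)))
    return lines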
src/allmydata/test/test_storage.py
--- src/allmydata/test/test_storage.py (rd566e46)
+++ src/allmydata/test/test_storage.py (r67ad0175)
@@ -1312,4 +1312,6 @@
         for i in range(1000):
             ss.add_latency("renew", 1.0 * i)
+        for i in range(20):
+            ss.add_latency("write", 1.0 * i)
         for i in range(10):
             ss.add_latency("cancel", 2.0 * i)
@@ -1319,5 +1321,5 @@

         self.failUnlessEqual(sorted(output.keys()),
-                             sorted(["allocate", "renew", "cancel", "get"]))
+                             sorted(["allocate", "renew", "cancel", "write", "get"]))
         self.failUnlessEqual(len(ss.latencies["allocate"]), 1000)
         self.failUnless(abs(output["allocate"]["mean"] - 9500) < 1, output)
@@ -1340,23 +1342,33 @@
         self.failUnless(abs(output["renew"]["99_9_percentile"] - 999) < 1, output)

+        self.failUnlessEqual(len(ss.latencies["write"]), 20)
+        self.failUnless(abs(output["write"]["mean"] - 9) < 1, output)
+        self.failUnless(output["write"]["01_0_percentile"] is None, output)
+        self.failUnless(abs(output["write"]["10_0_percentile"] - 2) < 1, output)
+        self.failUnless(abs(output["write"]["50_0_percentile"] - 10) < 1, output)
+        self.failUnless(abs(output["write"]["90_0_percentile"] - 18) < 1, output)
+        self.failUnless(abs(output["write"]["95_0_percentile"] - 19) < 1, output)
+        self.failUnless(output["write"]["99_0_percentile"] is None, output)
+        self.failUnless(output["write"]["99_9_percentile"] is None, output)
+
         self.failUnlessEqual(len(ss.latencies["cancel"]), 10)
         self.failUnless(abs(output["cancel"]["mean"] - 9) < 1, output)
-        self.failUnless(abs(output["cancel"]["01_0_percentile"] - 0) < 1, output)
+        self.failUnless(output["cancel"]["01_0_percentile"] is None, output)
         self.failUnless(abs(output["cancel"]["10_0_percentile"] - 2) < 1, output)
         self.failUnless(abs(output["cancel"]["50_0_percentile"] - 10) < 1, output)
         self.failUnless(abs(output["cancel"]["90_0_percentile"] - 18) < 1, output)
-        self.failUnless(abs(output["cancel"]["95_0_percentile"] - 18) < 1, output)
-        self.failUnless(abs(output["cancel"]["99_0_percentile"] - 18) < 1, output)
-        self.failUnless(abs(output["cancel"]["99_9_percentile"] - 18) < 1, output)
+        self.failUnless(output["cancel"]["95_0_percentile"] is None, output)
+        self.failUnless(output["cancel"]["99_0_percentile"] is None, output)
+        self.failUnless(output["cancel"]["99_9_percentile"] is None, output)

         self.failUnlessEqual(len(ss.latencies["get"]), 1)
-        self.failUnless(abs(output["get"]["mean"] - 5) < 1, output)
-        self.failUnless(abs(output["get"]["01_0_percentile"] - 5) < 1, output)
-        self.failUnless(abs(output["get"]["10_0_percentile"] - 5) < 1, output)
-        self.failUnless(abs(output["get"]["50_0_percentile"] - 5) < 1, output)
-        self.failUnless(abs(output["get"]["90_0_percentile"] - 5) < 1, output)
-        self.failUnless(abs(output["get"]["95_0_percentile"] - 5) < 1, output)
-        self.failUnless(abs(output["get"]["99_0_percentile"] - 5) < 1, output)
-        self.failUnless(abs(output["get"]["99_9_percentile"] - 5) < 1, output)
+        self.failUnless(output["get"]["mean"] is None, output)
+        self.failUnless(output["get"]["01_0_percentile"] is None, output)
+        self.failUnless(output["get"]["10_0_percentile"] is None, output)
+        self.failUnless(output["get"]["50_0_percentile"] is None, output)
+        self.failUnless(output["get"]["90_0_percentile"] is None, output)
+        self.failUnless(output["get"]["95_0_percentile"] is None, output)
+        self.failUnless(output["get"]["99_0_percentile"] is None, output)
+        self.failUnless(output["get"]["99_9_percentile"] is None, output)

 def remove_tags(s):