27 patches for repository http://tahoe-lafs.org/source/tahoe/trunk:

Thu Aug 25 01:32:17 BST 2011  david-sarah@jacaranda.org
  * interfaces.py: 'which -> that' grammar cleanup.

Tue Sep 20 00:29:26 BST 2011  david-sarah@jacaranda.org
  * Pluggable backends -- new and moved files, changes to moved files. refs #999

Tue Sep 20 00:32:56 BST 2011  david-sarah@jacaranda.org
  * Pluggable backends -- all other changes. refs #999

Tue Sep 20 04:38:03 BST 2011  david-sarah@jacaranda.org
  * Work-in-progress, includes fix to bug involving BucketWriter. refs #999

Tue Sep 20 18:17:37 BST 2011  david-sarah@jacaranda.org
  * docs/backends: document the configuration options for the pluggable backends scheme. refs #999

Wed Sep 21 04:12:07 BST 2011  david-sarah@jacaranda.org
  * Fix some incorrect attribute accesses. refs #999

Wed Sep 21 04:16:25 BST 2011  david-sarah@jacaranda.org
  * docs/backends/S3.rst: remove Issues section. refs #999

Wed Sep 21 04:17:05 BST 2011  david-sarah@jacaranda.org
  * docs/backends/S3.rst, disk.rst: describe type of space settings as 'quantity of space', not 'str'. refs #999

Wed Sep 21 19:46:49 BST 2011  david-sarah@jacaranda.org
  * More fixes to tests needed for pluggable backends. refs #999

Wed Sep 21 23:14:21 BST 2011  david-sarah@jacaranda.org
  * Fix more shallow bugs, mainly FilePathification. Also, remove the max_space_per_bucket parameter from BucketWriter since it can be obtained from the _max_size attribute of the share (via a new get_allocated_size() accessor). refs #999

Wed Sep 21 23:20:38 BST 2011  david-sarah@jacaranda.org
  * uri.py: resolve a conflict between trunk and the pluggable-backends patches. refs #999

Thu Sep 22 05:54:51 BST 2011  david-sarah@jacaranda.org
  * Fix some more test failures. refs #999

Thu Sep 22 19:30:08 BST 2011  david-sarah@jacaranda.org
  * Fix most of the crawler tests. refs #999

Thu Sep 22 19:33:23 BST 2011  david-sarah@jacaranda.org
  * Reinstate the cancel_lease methods of ImmutableDiskShare and MutableDiskShare, since they are needed for lease expiry. refs #999

Fri Sep 23 02:20:44 BST 2011  david-sarah@jacaranda.org
  * Blank line cleanups.

Fri Sep 23 05:08:25 BST 2011  david-sarah@jacaranda.org
  * mutable/publish.py: elements should not be removed from a dictionary while it is being iterated over. refs #393

Fri Sep 23 05:10:03 BST 2011  david-sarah@jacaranda.org
  * A few comment cleanups. refs #999

Fri Sep 23 05:11:15 BST 2011  david-sarah@jacaranda.org
  * Move advise_corrupt_share to allmydata/storage/backends/base.py, since it will be common to the disk and S3 backends. refs #999

Fri Sep 23 05:13:14 BST 2011  david-sarah@jacaranda.org
  * Add incomplete S3 backend. refs #999

Fri Sep 23 21:37:23 BST 2011  david-sarah@jacaranda.org
  * interfaces.py: add fill_in_space_stats method to IStorageBackend. refs #999

Fri Sep 23 21:44:25 BST 2011  david-sarah@jacaranda.org
  * Remove redundant si_s argument from check_write_enabler. refs #999

Fri Sep 23 21:46:11 BST 2011  david-sarah@jacaranda.org
  * Implement readv for immutable shares. refs #999

Fri Sep 23 21:49:14 BST 2011  david-sarah@jacaranda.org
  * The cancel secret needs to be unique, even if it isn't explicitly provided. refs #999

Fri Sep 23 21:49:45 BST 2011  david-sarah@jacaranda.org
  * Make EmptyShare.check_testv a simple function. refs #999

Fri Sep 23 21:52:19 BST 2011  david-sarah@jacaranda.org
  * Update the null backend to take into account interface changes. Also, it now records which shares are present, but not their contents. refs #999

Fri Sep 23 21:53:45 BST 2011  david-sarah@jacaranda.org
  * Update the S3 backend. refs #999

Fri Sep 23 21:55:10 BST 2011  david-sarah@jacaranda.org
  * Minor cleanup to disk backend. refs #999
New patches:

[interfaces.py: 'which -> that' grammar cleanup.
david-sarah@jacaranda.org**20110825003217
 Ignore-this: a3e15f3676de1b346ad78aabdfb8cac6
] {
hunk ./src/allmydata/interfaces.py 38
     the StubClient. This object doesn't actually offer any services, but the
     announcement helps the Introducer keep track of which clients are
     subscribed (so the grid admin can keep track of things like the size of
-    the grid and the client versions in use. This is the (empty)
+    the grid and the client versions in use). This is the (empty)
     RemoteInterface for the StubClient."""

 class RIBucketWriter(RemoteInterface):
hunk ./src/allmydata/interfaces.py 276
         (binary) storage index string, and 'shnum' is the integer share
         number. 'reason' is a human-readable explanation of the problem,
         probably including some expected hash values and the computed ones
-        which did not match. Corruption advisories for mutable shares should
+        that did not match. Corruption advisories for mutable shares should
         include a hash of the public key (the same value that appears in the
         mutable-file verify-cap), since the current share format does not
         store that on disk.
hunk ./src/allmydata/interfaces.py 413
         remote_host: the IAddress, if connected, otherwise None

         This method is intended for monitoring interfaces, such as a web page
-        which describes connecting and connected peers.
+        that describes connecting and connected peers.
         """

     def get_all_peerids():
hunk ./src/allmydata/interfaces.py 515

     # TODO: rename to get_read_cap()
     def get_readonly():
-        """Return another IURI instance, which represents a read-only form of
+        """Return another IURI instance that represents a read-only form of
         this one. If is_readonly() is True, this returns self."""

     def get_verify_cap():
hunk ./src/allmydata/interfaces.py 542
         passing into init_from_string."""

 class IDirnodeURI(Interface):
-    """I am a URI which represents a dirnode."""
+    """I am a URI that represents a dirnode."""

 class IFileURI(Interface):
hunk ./src/allmydata/interfaces.py 545
-    """I am a URI which represents a filenode."""
+    """I am a URI that represents a filenode."""
     def get_size():
         """Return the length (in bytes) of the file that I represent."""

hunk ./src/allmydata/interfaces.py 553
     pass

 class IMutableFileURI(Interface):
-    """I am a URI which represents a mutable filenode."""
+    """I am a URI that represents a mutable filenode."""
     def get_extension_params():
         """Return the extension parameters in the URI"""

hunk ./src/allmydata/interfaces.py 856
     """

 class IFileNode(IFilesystemNode):
-    """I am a node which represents a file: a sequence of bytes. I am not a
+    """I am a node that represents a file: a sequence of bytes. I am not a
     container, like IDirectoryNode."""
     def get_best_readable_version():
         """Return a Deferred that fires with an IReadable for the 'best'
hunk ./src/allmydata/interfaces.py 905
     multiple versions of a file present in the grid, some of which might be
     unrecoverable (i.e. have fewer than 'k' shares). These versions are
     loosely ordered: each has a sequence number and a hash, and any version
-    with seqnum=N was uploaded by a node which has seen at least one version
+    with seqnum=N was uploaded by a node that has seen at least one version
     with seqnum=N-1.

     The 'servermap' (an instance of IMutableFileServerMap) is used to
hunk ./src/allmydata/interfaces.py 1014
         as a guide to where the shares are located.

         I return a Deferred that fires with the requested contents, or
-        errbacks with UnrecoverableFileError. Note that a servermap which was
+        errbacks with UnrecoverableFileError. Note that a servermap that was
         updated with MODE_ANYTHING or MODE_READ may not know about shares for
         all versions (those modes stop querying servers as soon as they can
         fulfil their goals), so you may want to use MODE_CHECK (which checks
hunk ./src/allmydata/interfaces.py 1073
     """Upload was unable to satisfy 'servers_of_happiness'"""

 class UnableToFetchCriticalDownloadDataError(Exception):
-    """I was unable to fetch some piece of critical data which is supposed to
+    """I was unable to fetch some piece of critical data that is supposed to
     be identically present in all shares."""

 class NoServersError(Exception):
hunk ./src/allmydata/interfaces.py 1085
     exists, and overwrite= was set to False."""

 class NoSuchChildError(Exception):
-    """A directory node was asked to fetch a child which does not exist."""
+    """A directory node was asked to fetch a child that does not exist."""

 class ChildOfWrongTypeError(Exception):
     """An operation was attempted on a child of the wrong type (file or directory)."""
hunk ./src/allmydata/interfaces.py 1403
         if you initially thought you were going to use 10 peers, started
         encoding, and then two of the peers dropped out: you could use
         desired_share_ids= to skip the work (both memory and CPU) of
-        producing shares for the peers which are no longer available.
+        producing shares for the peers that are no longer available.

         """

hunk ./src/allmydata/interfaces.py 1478
         if you initially thought you were going to use 10 peers, started
         encoding, and then two of the peers dropped out: you could use
         desired_share_ids= to skip the work (both memory and CPU) of
-        producing shares for the peers which are no longer available.
+        producing shares for the peers that are no longer available.

         For each call, encode() will return a Deferred that fires with two
         lists, one containing shares and the other containing the shareids.
hunk ./src/allmydata/interfaces.py 1535
         required to be of the same length. The i'th element of their_shareids
         is required to be the shareid of the i'th buffer in some_shares.

-        This returns a Deferred which fires with a sequence of buffers. This
+        This returns a Deferred that fires with a sequence of buffers. This
         sequence will contain all of the segments of the original data, in
         order. The sum of the lengths of all of the buffers will be the
         'data_size' value passed into the original ICodecEncode.set_params()
hunk ./src/allmydata/interfaces.py 1582
         Encoding parameters can be set in three ways. 1: The Encoder class
         provides defaults (3/7/10). 2: the Encoder can be constructed with
         an 'options' dictionary, in which the
-        needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3:
+        'needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3:
         set_params((k,d,n)) can be called.

         If you intend to use set_params(), you must call it before
hunk ./src/allmydata/interfaces.py 1780
         produced, so that the segment hashes can be generated with only a
         single pass.

-        This returns a Deferred which fires with a sequence of hashes, using:
+        This returns a Deferred that fires with a sequence of hashes, using:

          tuple(segment_hashes[first:last])

hunk ./src/allmydata/interfaces.py 1796
     def get_plaintext_hash():
         """OBSOLETE; Get the hash of the whole plaintext.

-        This returns a Deferred which fires with a tagged SHA-256 hash of the
+        This returns a Deferred that fires with a tagged SHA-256 hash of the
         whole plaintext, obtained from hashutil.plaintext_hash(data).
         """

hunk ./src/allmydata/interfaces.py 1856
         be used to encrypt the data. The key will also be hashed to derive
         the StorageIndex.

-        Uploadables which want to achieve convergence should hash their file
+        Uploadables that want to achieve convergence should hash their file
         contents and the serialized_encoding_parameters to form the key
         (which of course requires a full pass over the data). Uploadables can
         use the upload.ConvergentUploadMixin class to achieve this
hunk ./src/allmydata/interfaces.py 1862
         automatically.

-        Uploadables which do not care about convergence (or do not wish to
+        Uploadables that do not care about convergence (or do not wish to
         make multiple passes over the data) can simply return a
         strongly-random 16 byte string.

hunk ./src/allmydata/interfaces.py 1872

     def read(length):
         """Return a Deferred that fires with a list of strings (perhaps with
-        only a single element) which, when concatenated together, contain the
+        only a single element) that, when concatenated together, contain the
         next 'length' bytes of data. If EOF is near, this may provide fewer
         than 'length' bytes. The total number of bytes provided by read()
         before it signals EOF must equal the size provided by get_size().
hunk ./src/allmydata/interfaces.py 1919

     def read(length):
         """
-        Returns a list of strings which, when concatenated, are the next
+        Returns a list of strings that, when concatenated, are the next
         length bytes of the file, or fewer if there are fewer bytes
         between the current location and the end of the file.
         """
hunk ./src/allmydata/interfaces.py 1932

 class IUploadResults(Interface):
     """I am returned by upload() methods. I contain a number of public
-    attributes which can be read to determine the results of the upload. Some
+    attributes that can be read to determine the results of the upload. Some
     of these are functional, some are timing information. All of these may be
     None.

hunk ./src/allmydata/interfaces.py 1965

 class IDownloadResults(Interface):
     """I am created internally by download() methods. I contain a number of
-    public attributes which contain details about the download process.::
+    public attributes that contain details about the download process.::

     .file_size : the size of the file, in bytes
     .servers_used : set of server peerids that were used during download
hunk ./src/allmydata/interfaces.py 1991
 class IUploader(Interface):
     def upload(uploadable):
         """Upload the file. 'uploadable' must impement IUploadable. This
-        returns a Deferred which fires with an IUploadResults instance, from
+        returns a Deferred that fires with an IUploadResults instance, from
         which the URI of the file can be obtained as results.uri ."""

     def upload_ssk(write_capability, new_version, uploadable):
hunk ./src/allmydata/interfaces.py 2041
         kind of lease that is obtained (which account number to claim, etc).

         TODO: any problems seen during checking will be reported to the
-        health-manager.furl, a centralized object which is responsible for
+        health-manager.furl, a centralized object that is responsible for
         figuring out why files are unhealthy so corrective action can be
         taken.
         """
hunk ./src/allmydata/interfaces.py 2056
         will be put in the check-and-repair results. The Deferred will not
         fire until the repair is complete.

-        This returns a Deferred which fires with an instance of
+        This returns a Deferred that fires with an instance of
         ICheckAndRepairResults."""

 class IDeepCheckable(Interface):
hunk ./src/allmydata/interfaces.py 2141
                          that was found to be corrupt. Each share
                          locator is a list of (serverid, storage_index,
                          sharenum).
-    count-incompatible-shares: the number of shares which are of a share
+    count-incompatible-shares: the number of shares that are of a share
                                format unknown to this checker
     list-incompatible-shares: a list of 'share locators', one for each
                               share that was found to be of an unknown
hunk ./src/allmydata/interfaces.py 2148
                               format. Each share locator is a list of
                               (serverid, storage_index, sharenum).
     servers-responding: list of (binary) storage server identifiers,
-                        one for each server which responded to the share
+                        one for each server that responded to the share
                         query (even if they said they didn't have
                         shares, and even if they said they did have
                         shares but then didn't send them when asked, or
hunk ./src/allmydata/interfaces.py 2345
         will use the data in the checker results to guide the repair process,
         such as which servers provided bad data and should therefore be
         avoided. The ICheckResults object is inside the
-        ICheckAndRepairResults object, which is returned by the
+        ICheckAndRepairResults object that is returned by the
         ICheckable.check() method::

          d = filenode.check(repair=False)
hunk ./src/allmydata/interfaces.py 2436
         methods to create new objects. I return synchronously."""

     def create_mutable_file(contents=None, keysize=None):
-        """I create a new mutable file, and return a Deferred which will fire
+        """I create a new mutable file, and return a Deferred that will fire
         with the IMutableFileNode instance when it is ready. If contents= is
         provided (a bytestring), it will be used as the initial contents of
         the new file, otherwise the file will contain zero bytes. keysize= is
hunk ./src/allmydata/interfaces.py 2444
         usual."""

     def create_new_mutable_directory(initial_children={}):
-        """I create a new mutable directory, and return a Deferred which will
+        """I create a new mutable directory, and return a Deferred that will
         fire with the IDirectoryNode instance when it is ready. If
         initial_children= is provided (a dict mapping unicode child name to
         (childnode, metadata_dict) tuples), the directory will be populated
hunk ./src/allmydata/interfaces.py 2452

 class IClientStatus(Interface):
     def list_all_uploads():
-        """Return a list of uploader objects, one for each upload which
+        """Return a list of uploader objects, one for each upload that
         currently has an object available (tracked with weakrefs). This is
         intended for debugging purposes."""
     def list_active_uploads():
hunk ./src/allmydata/interfaces.py 2462
         started uploads."""

     def list_all_downloads():
-        """Return a list of downloader objects, one for each download which
+        """Return a list of downloader objects, one for each download that
         currently has an object available (tracked with weakrefs). This is
         intended for debugging purposes."""
     def list_active_downloads():
hunk ./src/allmydata/interfaces.py 2689

     def provide(provider=RIStatsProvider, nickname=str):
         """
-        @param provider: a stats collector instance which should be polled
+        @param provider: a stats collector instance that should be polled
                          periodically by the gatherer to collect stats.
         @param nickname: a name useful to identify the provided client
         """
hunk ./src/allmydata/interfaces.py 2722

 class IValidatedThingProxy(Interface):
     def start():
-        """ Acquire a thing and validate it. Return a deferred which is
+        """ Acquire a thing and validate it. Return a deferred that is
         eventually fired with self if the thing is valid or errbacked if it
         can't be acquired or validated."""

}
[Pluggable backends -- new and moved files, changes to moved files. refs #999
david-sarah@jacaranda.org**20110919232926
 Ignore-this: ec5d2d1362a092d919e84327d3092424
] {
adddir ./src/allmydata/storage/backends
adddir ./src/allmydata/storage/backends/disk
move ./src/allmydata/storage/immutable.py ./src/allmydata/storage/backends/disk/immutable.py
move ./src/allmydata/storage/mutable.py ./src/allmydata/storage/backends/disk/mutable.py
adddir ./src/allmydata/storage/backends/null
addfile ./src/allmydata/storage/backends/__init__.py
addfile ./src/allmydata/storage/backends/base.py
hunk ./src/allmydata/storage/backends/base.py 1
+
+from twisted.application import service
+
+from allmydata.storage.common import si_b2a
+from allmydata.storage.lease import LeaseInfo
+from allmydata.storage.bucket import BucketReader
+
+
+class Backend(service.MultiService):
+    def __init__(self):
+        service.MultiService.__init__(self)
+
+
+class ShareSet(object):
+    """
+    This class implements shareset logic that could work for all backends, but
+    might be useful to override for efficiency.
+    """
+
+    def __init__(self, storageindex):
+        self.storageindex = storageindex
+
+    def get_storage_index(self):
+        return self.storageindex
+
+    def get_storage_index_string(self):
+        return si_b2a(self.storageindex)
+
+    def renew_lease(self, renew_secret, new_expiration_time):
+        found_shares = False
+        for share in self.get_shares():
+            found_shares = True
+            share.renew_lease(renew_secret, new_expiration_time)
+
+        if not found_shares:
+            raise IndexError("no such lease to renew")
+
+    def get_leases(self):
+        # Since all shares get the same lease data, we just grab the leases
+        # from the first share.
+        try:
+            sf = self.get_shares().next()
+            return sf.get_leases()
+        except StopIteration:
+            return iter([])
+
+    def add_or_renew_lease(self, lease_info):
+        # This implementation assumes that lease data is duplicated in
+        # all shares of a shareset, which might not be true for all backends.
+        for share in self.get_shares():
+            share.add_or_renew_lease(lease_info)
+
+    def make_bucket_reader(self, storageserver, share):
+        return BucketReader(storageserver, share)
+
+    def testv_and_readv_and_writev(self, storageserver, secrets,
+                                   test_and_write_vectors, read_vector,
+                                   expiration_time):
+        # The implementation here depends on the following helper methods,
+        # which must be provided by subclasses:
+        #
+        # def _clean_up_after_unlink(self):
+        #     """clean up resources associated with the shareset after some
+        #     shares might have been deleted"""
+        #
+        # def _create_mutable_share(self, storageserver, shnum, write_enabler):
+        #     """create a mutable share with the given shnum and write_enabler"""
+
+        # secrets might be a triple with cancel_secret in secrets[2], but if
+        # so we ignore the cancel_secret.
+        write_enabler = secrets[0]
+        renew_secret = secrets[1]
+
+        si_s = self.get_storage_index_string()
+        shares = {}
+        for share in self.get_shares():
+            # XXX is it correct to ignore immutable shares? Maybe get_shares should
+            # have a parameter saying what type it's expecting.
+            if share.sharetype == "mutable":
+                share.check_write_enabler(write_enabler, si_s)
+                shares[share.get_shnum()] = share
+
+        # write_enabler is good for all existing shares
+
+        # now evaluate test vectors
+        testv_is_good = True
+        for sharenum in test_and_write_vectors:
+            (testv, datav, new_length) = test_and_write_vectors[sharenum]
+            if sharenum in shares:
+                if not shares[sharenum].check_testv(testv):
+                    self.log("testv failed: [%d]: %r" % (sharenum, testv))
+                    testv_is_good = False
+                    break
+            else:
+                # compare the vectors against an empty share, in which all
+                # reads return empty strings
+                if not EmptyShare().check_testv(testv):
+                    self.log("testv failed (empty): [%d] %r" % (sharenum,
+                                                                testv))
+                    testv_is_good = False
+                    break
+
+        # gather the read vectors, before we do any writes
+        read_data = {}
+        for shnum, share in shares.items():
+            read_data[shnum] = share.readv(read_vector)
+
+        ownerid = 1 # TODO
+        lease_info = LeaseInfo(ownerid, renew_secret,
+                               expiration_time, storageserver.get_serverid())
+
+        if testv_is_good:
+            # now apply the write vectors
+            for shnum in test_and_write_vectors:
+                (testv, datav, new_length) = test_and_write_vectors[shnum]
+                if new_length == 0:
+                    if shnum in shares:
+                        shares[shnum].unlink()
+                else:
+                    if shnum not in shares:
+                        # allocate a new share
+                        share = self._create_mutable_share(storageserver, shnum, write_enabler)
+                        shares[shnum] = share
+                    shares[shnum].writev(datav, new_length)
+                    # and update the lease
+                    shares[shnum].add_or_renew_lease(lease_info)
+
+            if new_length == 0:
+                self._clean_up_after_unlink()
+
+        return (testv_is_good, read_data)
+
+    def readv(self, wanted_shnums, read_vector):
+        """
+        Read a vector from the numbered shares in this shareset. An empty
+        shares list means to return data from all known shares.
+
+        @param wanted_shnums=ListOf(int)
+        @param read_vector=ReadVector
+        @return DictOf(int, ReadData): shnum -> results, with one key per share
+        """
+        datavs = {}
+        for share in self.get_shares():
+            shnum = share.get_shnum()
+            if not wanted_shnums or shnum in wanted_shnums:
+                datavs[shnum] = share.readv(read_vector)
+
+        return datavs
+
+
+def testv_compare(a, op, b):
+    assert op in ("lt", "le", "eq", "ne", "ge", "gt")
+    if op == "lt":
+        return a < b
+    if op == "le":
+        return a <= b
+    if op == "eq":
+        return a == b
+    if op == "ne":
+        return a != b
+    if op == "ge":
+        return a >= b
+    if op == "gt":
+        return a > b
+    # never reached
+
+
+class EmptyShare:
+    def check_testv(self, testv):
+        test_good = True
+        for (offset, length, operator, specimen) in testv:
+            data = ""
+            if not testv_compare(data, operator, specimen):
+                test_good = False
+                break
+        return test_good
+
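
The testv_and_readv_and_writev implementation above is written against a small contract that each concrete shareset must supply: a get_shares() iterator, a log() method, and the two hooks named in its leading comment, _create_mutable_share() and _clean_up_after_unlink(). The following is a minimal sketch of an in-memory shareset satisfying that contract, covering only what testv_and_readv_and_writev touches. It is illustrative only: MemoryShare and MemoryShareSet do not appear anywhere in these patches, and it assumes read_vector is a list of (offset, length) pairs and datav a list of (offset, data) pairs, matching the disk backend's usage.

    from allmydata.storage.backends.base import ShareSet, testv_compare

    class MemoryShare(object):
        """Illustrative stand-in for a mutable share, held entirely in memory."""
        sharetype = "mutable"

        def __init__(self, shnum, write_enabler):
            self._shnum = shnum
            self._write_enabler = write_enabler
            self._data = ""
            self._leases = []

        def get_shnum(self):
            return self._shnum

        def check_write_enabler(self, write_enabler, si_s):
            # The base class calls this for every existing mutable share.
            if write_enabler != self._write_enabler:
                raise Exception("bad write enabler for SI %s" % (si_s,))

        def check_testv(self, testv):
            # Same comparison loop as EmptyShare.check_testv, but against real data.
            for (offset, length, operator, specimen) in testv:
                if not testv_compare(self._data[offset:offset+length], operator, specimen):
                    return False
            return True

        def readv(self, read_vector):
            return [self._data[o:o+l] for (o, l) in read_vector]

        def writev(self, datav, new_length):
            for (offset, data) in datav:
                # pad with NULs if writing past the current end
                d = self._data.ljust(offset, '\x00')
                self._data = d[:offset] + data + d[offset+len(data):]
            if new_length is not None:
                self._data = self._data[:new_length]

        def unlink(self):
            self._data = None

        def add_or_renew_lease(self, lease_info):
            self._leases.append(lease_info)

    class MemoryShareSet(ShareSet):
        """Illustrative ShareSet subclass supplying the hooks the base class needs."""
        def __init__(self, storageindex):
            ShareSet.__init__(self, storageindex)
            self._shares = {}

        def get_shares(self):
            return iter(self._shares.values())

        def log(self, msg):
            pass  # invoked by the base class when a test vector fails

        def _create_mutable_share(self, storageserver, shnum, write_enabler):
            share = MemoryShare(shnum, write_enabler)
            self._shares[shnum] = share
            return share

        def _clean_up_after_unlink(self):
            # drop shares whose data was unlinked
            self._shares = dict([(k, v) for (k, v) in self._shares.items()
                                 if v._data is not None])

The null backend updated later in this series takes a similar shape: per its patch description, it records which shares are present but not their contents.
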
addfile ./src/allmydata/storage/backends/disk/__init__.py
addfile ./src/allmydata/storage/backends/disk/disk_backend.py
hunk ./src/allmydata/storage/backends/disk/disk_backend.py 1
+
+import re
+
+from twisted.python.filepath import UnlistableError
+
+from zope.interface import implements
+from allmydata.interfaces import IStorageBackend, IShareSet
+from allmydata.util import fileutil, log, time_format
+from allmydata.storage.common import si_b2a, si_a2b
+from allmydata.storage.bucket import BucketWriter
+from allmydata.storage.backends.base import Backend, ShareSet
+from allmydata.storage.backends.disk.immutable import ImmutableDiskShare
+from allmydata.storage.backends.disk.mutable import MutableDiskShare, create_mutable_disk_share
+
+# storage/
+# storage/shares/incoming
+#   incoming/ holds temp dirs named $START/$STORAGEINDEX/$SHARENUM which will
+#   be moved to storage/shares/$START/$STORAGEINDEX/$SHARENUM upon success
+# storage/shares/$START/$STORAGEINDEX
+# storage/shares/$START/$STORAGEINDEX/$SHARENUM
+
+# Where "$START" denotes the first 10 bits worth of $STORAGEINDEX (that's 2
+# base-32 chars).
+# $SHARENUM matches this regex:
+NUM_RE=re.compile("^[0-9]+$")
+
+
+def si_si2dir(startfp, storageindex):
+    sia = si_b2a(storageindex)
+    newfp = startfp.child(sia[:2])
+    return newfp.child(sia)
+
+
+def get_share(fp):
+    f = fp.open('rb')
+    try:
+        prefix = f.read(32)
+    finally:
+        f.close()
+
+    if prefix == MutableDiskShare.MAGIC:
+        return MutableDiskShare(fp)
+    else:
+        # assume it's immutable
+        return ImmutableDiskShare(fp)
+
+
+class DiskBackend(Backend):
+    implements(IStorageBackend)
+
+    def __init__(self, storedir, readonly=False, reserved_space=0, discard_storage=False):
+        Backend.__init__(self)
+        self._setup_storage(storedir, readonly, reserved_space, discard_storage)
+        self._setup_corruption_advisory()
+
+    def _setup_storage(self, storedir, readonly, reserved_space, discard_storage):
+        self._storedir = storedir
+        self._readonly = readonly
+        self._reserved_space = int(reserved_space)
+        self._discard_storage = discard_storage
+        self._sharedir = self._storedir.child("shares")
+        fileutil.fp_make_dirs(self._sharedir)
+        self._incomingdir = self._sharedir.child('incoming')
+        self._clean_incomplete()
+        if self._reserved_space and (self.get_available_space() is None):
+            log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
+                    umid="0wZ27w", level=log.UNUSUAL)
+
+    def _clean_incomplete(self):
+        fileutil.fp_remove(self._incomingdir)
+        fileutil.fp_make_dirs(self._incomingdir)
+
+    def _setup_corruption_advisory(self):
+        # we don't actually create the corruption-advisory dir until necessary
+        self._corruption_advisory_dir = self._storedir.child("corruption-advisories")
+
+    def _make_shareset(self, sharehomedir):
+        return self.get_shareset(si_a2b(sharehomedir.basename()))
+
+    def get_sharesets_for_prefix(self, prefix):
+        prefixfp = self._sharedir.child(prefix)
+        try:
+            sharesets = map(self._make_shareset, prefixfp.children())
+            def _by_base32si(b):
+                return b.get_storage_index_string()
+            sharesets.sort(key=_by_base32si)
+        except EnvironmentError:
+            sharesets = []
+        return sharesets
+
+    def get_shareset(self, storageindex):
+        sharehomedir = si_si2dir(self._sharedir, storageindex)
+        incominghomedir = si_si2dir(self._incomingdir, storageindex)
+        return DiskShareSet(storageindex, sharehomedir, incominghomedir, discard_storage=self._discard_storage)
+
+    def fill_in_space_stats(self, stats):
+        stats['storage_server.reserved_space'] = self._reserved_space
+        try:
+            disk = fileutil.get_disk_stats(self._sharedir, self._reserved_space)
+            writeable = disk['avail'] > 0
+
+            # spacetime predictors should use disk_avail / (d(disk_used)/dt)
+            stats['storage_server.disk_total'] = disk['total']
+            stats['storage_server.disk_used'] = disk['used']
+            stats['storage_server.disk_free_for_root'] = disk['free_for_root']
+            stats['storage_server.disk_free_for_nonroot'] = disk['free_for_nonroot']
+            stats['storage_server.disk_avail'] = disk['avail']
+        except AttributeError:
+            writeable = True
+        except EnvironmentError:
+            log.msg("OS call to get disk statistics failed", level=log.UNUSUAL)
+            writeable = False
+
+        if self._readonly:
+            stats['storage_server.disk_avail'] = 0
+            writeable = False
+
+        stats['storage_server.accepting_immutable_shares'] = int(writeable)
+
+    def get_available_space(self):
+        if self._readonly:
+            return 0
+        return fileutil.get_available_space(self._sharedir, self._reserved_space)
+
+    def advise_corrupt_share(self, sharetype, storageindex, shnum, reason):
+        fileutil.fp_make_dirs(self._corruption_advisory_dir)
+        now = time_format.iso_utc(sep="T")
+        si_s = si_b2a(storageindex)
+
+        # Windows can't handle colons in the filename.
+        name = ("%s--%s-%d" % (now, si_s, shnum)).replace(":", "")
+        f = self._corruption_advisory_dir.child(name).open("w")
+        try:
+            f.write("report: Share Corruption\n")
+            f.write("type: %s\n" % sharetype)
+            f.write("storage_index: %s\n" % si_s)
+            f.write("share_number: %d\n" % shnum)
+            f.write("\n")
+            f.write(reason)
+            f.write("\n")
+        finally:
+            f.close()
+
+        log.msg(format=("client claims corruption in (%(share_type)s) " +
+                        "%(si)s-%(shnum)d: %(reason)s"),
+                share_type=sharetype, si=si_s, shnum=shnum, reason=reason,
+                level=log.SCARY, umid="SGx2fA")
+
+
+class DiskShareSet(ShareSet):
+    implements(IShareSet)
+
+    def __init__(self, storageindex, sharehomedir, incominghomedir=None, discard_storage=False):
+        ShareSet.__init__(self, storageindex)
+        self._sharehomedir = sharehomedir
+        self._incominghomedir = incominghomedir
+        self._discard_storage = discard_storage
+
+    def get_overhead(self):
+        return (fileutil.get_disk_usage(self._sharehomedir) +
+                fileutil.get_disk_usage(self._incominghomedir))
+
+    def get_shares(self):
+        """
+        Generate IStorageBackendShare objects for shares we have for this storage index.
+        ("Shares we have" means completed ones, excluding incoming ones.)
+        """
+        try:
+            for fp in self._sharehomedir.children():
+                shnumstr = fp.basename()
+                if not NUM_RE.match(shnumstr):
+                    continue
+                sharehome = self._sharehomedir.child(shnumstr)
+                yield self.get_share(sharehome)
+        except UnlistableError:
+            # There is no shares directory at all.
+            pass
+
+    def has_incoming(self, shnum):
+        if self._incominghomedir is None:
+            return False
+        return self._incominghomedir.child(str(shnum)).exists()
+
+    def make_bucket_writer(self, storageserver, shnum, max_space_per_bucket, lease_info, canary):
+        sharehome = self._sharehomedir.child(str(shnum))
+        incominghome = self._incominghomedir.child(str(shnum))
+        immsh = ImmutableDiskShare(self.get_storage_index(), shnum, sharehome, incominghome,
+                                   max_size=max_space_per_bucket, create=True)
+        bw = BucketWriter(storageserver, immsh, max_space_per_bucket, lease_info, canary)
+        if self._discard_storage:
+            bw.throw_out_all_data = True
+        return bw
+
+    def _create_mutable_share(self, storageserver, shnum, write_enabler):
+        fileutil.fp_make_dirs(self._sharehomedir)
+        sharehome = self._sharehomedir.child(str(shnum))
+        serverid = storageserver.get_serverid()
+        return create_mutable_disk_share(sharehome, serverid, write_enabler, storageserver)
+
+    def _clean_up_after_unlink(self):
+        fileutil.fp_rmdir_if_empty(self._sharehomedir)
+
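
For reference, the directory layout that si_si2dir and the comment above describe can be traced end to end in a few standalone lines. This sketch is not part of the patch: si_b2a_standin is a simplified stand-in for Tahoe's si_b2a (which uses its own lowercase base-32 alphabet), and share_path just spells out the $START/$STORAGEINDEX/$SHARENUM rule, where $START is the first two base-32 characters (the first 10 bits) of the storage index.

    import base64

    def si_b2a_standin(storageindex):
        # Approximation of allmydata.storage.common.si_b2a: lowercase,
        # unpadded base-32 of the 16-byte binary storage index.
        return base64.b32encode(storageindex).rstrip("=").lower()

    def share_path(storedir, storageindex, shnum):
        sia = si_b2a_standin(storageindex)
        # $START = sia[:2] spreads sharesets across 32*32 = 1024 prefix
        # directories, keeping any single directory from growing too large.
        return "%s/shares/%s/%s/%d" % (storedir, sia[:2], sia, shnum)

    print share_path("storage", "\x00" * 16, 3)
    # prints: storage/shares/aa/aaaaaaaaaaaaaaaaaaaaaaaaaa/3
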
hunk ./src/allmydata/storage/backends/disk/immutable.py 1
-import os, stat, struct, time

hunk ./src/allmydata/storage/backends/disk/immutable.py 2
-from foolscap.api import Referenceable
+import struct

 from zope.interface import implements
hunk ./src/allmydata/storage/backends/disk/immutable.py 5
-from allmydata.interfaces import RIBucketWriter, RIBucketReader
-from allmydata.util import base32, fileutil, log
+
+from allmydata.interfaces import IStoredShare
+from allmydata.util import fileutil
 from allmydata.util.assertutil import precondition
hunk ./src/allmydata/storage/backends/disk/immutable.py 9
+from allmydata.util.fileutil import fp_make_dirs
 from allmydata.util.hashutil import constant_time_compare
hunk ./src/allmydata/storage/backends/disk/immutable.py 11
+from allmydata.util.encodingutil import quote_filepath
+from allmydata.storage.common import si_b2a, UnknownImmutableContainerVersionError, DataTooLargeError
 from allmydata.storage.lease import LeaseInfo
hunk ./src/allmydata/storage/backends/disk/immutable.py 14
-from allmydata.storage.common import UnknownImmutableContainerVersionError, \
-     DataTooLargeError
+

 # each share file (in storage/shares/$SI/$SHNUM) contains lease information
 # and share data. The share data is accessed by RIBucketWriter.write and
hunk ./src/allmydata/storage/backends/disk/immutable.py 41
 # then the value stored in this field will be the actual share data length
 # modulo 2**32.

-class ShareFile:
-    LEASE_SIZE = struct.calcsize(">L32s32sL")
+class ImmutableDiskShare(object):
+    implements(IStoredShare)
+
     sharetype = "immutable"
hunk ./src/allmydata/storage/backends/disk/immutable.py 45
+    LEASE_SIZE = struct.calcsize(">L32s32sL")
+

hunk ./src/allmydata/storage/backends/disk/immutable.py 48
-    def __init__(self, filename, max_size=None, create=False):
-        """ If max_size is not None then I won't allow more than max_size to be written to me. If create=True and max_size must not be None. """
+    def __init__(self, storageindex, shnum, finalhome=None, incominghome=None, max_size=None, create=False):
+        """ If max_size is not None then I won't allow more than
+        max_size to be written to me. If create=True then max_size
+        must not be None. """
         precondition((max_size is not None) or (not create), max_size, create)
hunk ./src/allmydata/storage/backends/disk/immutable.py 53
-        self.home = filename
+        self._storageindex = storageindex
         self._max_size = max_size
hunk ./src/allmydata/storage/backends/disk/immutable.py 55
+        self._incominghome = incominghome
+        self._home = finalhome
+        self._shnum = shnum
         if create:
             # touch the file, so later callers will see that we're working on
             # it. Also construct the metadata.
hunk ./src/allmydata/storage/backends/disk/immutable.py 61
-            assert not os.path.exists(self.home)
-            fileutil.make_dirs(os.path.dirname(self.home))
-            f = open(self.home, 'wb')
+            assert not finalhome.exists()
+            fp_make_dirs(self._incominghome.parent())
             # The second field -- the four-byte share data length -- is no
             # longer used as of Tahoe v1.3.0, but we continue to write it in
             # there in case someone downgrades a storage server from >=
hunk ./src/allmydata/storage/backends/disk/immutable.py 72
             # the largest length that can fit into the field. That way, even
             # if this does happen, the old < v1.3.0 server will still allow
             # clients to read the first part of the share.
-            f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0))
-            f.close()
+            self._incominghome.setContent(struct.pack(">LLL", 1, min(2**32-1, max_size), 0) )
             self._lease_offset = max_size + 0x0c
             self._num_leases = 0
         else:
hunk ./src/allmydata/storage/backends/disk/immutable.py 76
-            f = open(self.home, 'rb')
-            filesize = os.path.getsize(self.home)
-            (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
-            f.close()
+            f = self._home.open(mode='rb')
+            try:
+                (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
+            finally:
+                f.close()
+            filesize = self._home.getsize()
             if version != 1:
                 msg = "sharefile %s had version %d but we wanted 1" % \
hunk ./src/allmydata/storage/backends/disk/immutable.py 84
-                      (filename, version)
+                      (self._home, version)
                 raise UnknownImmutableContainerVersionError(msg)
             self._num_leases = num_leases
             self._lease_offset = filesize - (num_leases * self.LEASE_SIZE)
hunk ./src/allmydata/storage/backends/disk/immutable.py 90
         self._data_offset = 0xc

+    def __repr__(self):
+        return ("<ImmutableDiskShare %s:%r at %s>"
+                % (si_b2a(self._storageindex), self._shnum, quote_filepath(self._home)))
+
+    def close(self):
+        fileutil.fp_make_dirs(self._home.parent())
+        self._incominghome.moveTo(self._home)
+        try:
+            # self._incominghome is like storage/shares/incoming/ab/abcde/4 .
+            # We try to delete the parent (.../ab/abcde) to avoid leaving
+            # these directories lying around forever, but the delete might
+            # fail if we're working on another share for the same storage
+            # index (like ab/abcde/5). The alternative approach would be to
+            # use a hierarchy of objects (PrefixHolder, BucketHolder,
+            # ShareWriter), each of which is responsible for a single
+            # directory on disk, and have them use reference counting of
+            # their children to know when they should do the rmdir. This
+            # approach is simpler, but relies on os.rmdir refusing to delete
+            # a non-empty directory. Do *not* use fileutil.fp_remove() here!
+            fileutil.fp_rmdir_if_empty(self._incominghome.parent())
+            # we also delete the grandparent (prefix) directory, .../ab ,
+            # again to avoid leaving directories lying around. This might
+            # fail if there is another bucket open that shares a prefix (like
+            # ab/abfff).
+            fileutil.fp_rmdir_if_empty(self._incominghome.parent().parent())
+            # we leave the great-grandparent (incoming/) directory in place.
+        except EnvironmentError:
+            # ignore the "can't rmdir because the directory is not empty"
+            # exceptions, those are normal consequences of the
+            # above-mentioned conditions.
+            pass
+        pass
+
+    def get_used_space(self):
+        return (fileutil.get_used_space(self._home) +
+                fileutil.get_used_space(self._incominghome))
+
+    def get_storage_index(self):
+        return self._storageindex
+
+    def get_shnum(self):
+        return self._shnum
+
     def unlink(self):
hunk ./src/allmydata/storage/backends/disk/immutable.py 134
-        os.unlink(self.home)
+        self._home.remove()
+
+    def get_size(self):
+        return self._home.getsize()
+
+    def get_data_length(self):
+        return self._lease_offset - self._data_offset
+
+    #def readv(self, read_vector):
+    #    ...

     def read_share_data(self, offset, length):
         precondition(offset >= 0)
hunk ./src/allmydata/storage/backends/disk/immutable.py 147
-        # reads beyond the end of the data are truncated. Reads that start
+
+        # Reads beyond the end of the data are truncated. Reads that start
         # beyond the end of the data return an empty string.
         seekpos = self._data_offset+offset
         actuallength = max(0, min(length, self._lease_offset-seekpos))
hunk ./src/allmydata/storage/backends/disk/immutable.py 154
         if actuallength == 0:
             return ""
-        f = open(self.home, 'rb')
-        f.seek(seekpos)
-        return f.read(actuallength)
+        f = self._home.open(mode='rb')
+        try:
+            f.seek(seekpos)
+            sharedata = f.read(actuallength)
+        finally:
+            f.close()
+        return sharedata

     def write_share_data(self, offset, data):
         length = len(data)
hunk ./src/allmydata/storage/backends/disk/immutable.py 167
         precondition(offset >= 0, offset)
         if self._max_size is not None and offset+length > self._max_size:
             raise DataTooLargeError(self._max_size, offset, length)
-        f = open(self.home, 'rb+')
-        real_offset = self._data_offset+offset
-        f.seek(real_offset)
-        assert f.tell() == real_offset
-        f.write(data)
-        f.close()
+        f = self._incominghome.open(mode='rb+')
+        try:
+            real_offset = self._data_offset+offset
+            f.seek(real_offset)
+            assert f.tell() == real_offset
+            f.write(data)
+        finally:
+            f.close()

     def _write_lease_record(self, f, lease_number, lease_info):
         offset = self._lease_offset + lease_number * self.LEASE_SIZE
hunk ./src/allmydata/storage/backends/disk/immutable.py 184

     def _read_num_leases(self, f):
         f.seek(0x08)
-        (num_leases,) = struct.unpack(">L", f.read(4))
+        ro = f.read(4)
+        (num_leases,) = struct.unpack(">L", ro)
         return num_leases

     def _write_num_leases(self, f, num_leases):
hunk ./src/allmydata/storage/backends/disk/immutable.py 195
     def _truncate_leases(self, f, num_leases):
         f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE)

+    # These lease operations are intended for use by disk_backend.py.
+    # Other clients should not depend on the fact that the disk backend
+    # stores leases in share files.
+
     def get_leases(self):
         """Yields a LeaseInfo instance for all leases."""
hunk ./src/allmydata/storage/backends/disk/immutable.py 201
-        f = open(self.home, 'rb')
-        (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
-        f.seek(self._lease_offset)
-        for i in range(num_leases):
-            data = f.read(self.LEASE_SIZE)
-            if data:
-                yield LeaseInfo().from_immutable_data(data)
+        f = self._home.open(mode='rb')
+        try:
+            (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
+            f.seek(self._lease_offset)
+            for i in range(num_leases):
+                data = f.read(self.LEASE_SIZE)
+                if data:
+                    yield LeaseInfo().from_immutable_data(data)
+        finally:
+            f.close()

     def add_lease(self, lease_info):
hunk ./src/allmydata/storage/backends/disk/immutable.py 213
-        f = open(self.home, 'rb+')
-        num_leases = self._read_num_leases(f)
-        self._write_lease_record(f, num_leases, lease_info)
-        self._write_num_leases(f, num_leases+1)
-        f.close()
+        f = self._incominghome.open(mode='rb')
+        try:
+            num_leases = self._read_num_leases(f)
+        finally:
+            f.close()
+        f = self._home.open(mode='wb+')
+        try:
+            self._write_lease_record(f, num_leases, lease_info)
+            self._write_num_leases(f, num_leases+1)
+        finally:
+            f.close()

     def renew_lease(self, renew_secret, new_expire_time):
hunk ./src/allmydata/storage/backends/disk/immutable.py 226
-        for i,lease in enumerate(self.get_leases()):
-            if constant_time_compare(lease.renew_secret, renew_secret):
-                # yup. See if we need to update the owner time.
-                if new_expire_time > lease.expiration_time:
-                    # yes
-                    lease.expiration_time = new_expire_time
-                    f = open(self.home, 'rb+')
-                    self._write_lease_record(f, i, lease)
-                    f.close()
-                return
+        try:
+            for i, lease in enumerate(self.get_leases()):
+                if constant_time_compare(lease.renew_secret, renew_secret):
+                    # yup. See if we need to update the owner time.
+                    if new_expire_time > lease.expiration_time:
+                        # yes
+                        lease.expiration_time = new_expire_time
+                        f = self._home.open('rb+')
+                        try:
+                            self._write_lease_record(f, i, lease)
+                        finally:
+                            f.close()
+                    return
+        except IndexError, e:
+            raise Exception("IndexError: %s" % (e,))
         raise IndexError("unable to renew non-existent lease")

     def add_or_renew_lease(self, lease_info):
hunk ./src/allmydata/storage/backends/disk/immutable.py 249
                              lease_info.expiration_time)
         except IndexError:
             self.add_lease(lease_info)
-
-
-    def cancel_lease(self, cancel_secret):
-        """Remove a lease with the given cancel_secret. If the last lease is
-        cancelled, the file will be removed. Return the number of bytes that
-        were freed (by truncating the list of leases, and possibly by
-        deleting the file. Raise IndexError if there was no lease with the
-        given cancel_secret.
-        """
-
-        leases = list(self.get_leases())
-        num_leases_removed = 0
-        for i,lease in enumerate(leases):
-            if constant_time_compare(lease.cancel_secret, cancel_secret):
-                leases[i] = None
-                num_leases_removed += 1
-        if not num_leases_removed:
-            raise IndexError("unable to find matching lease to cancel")
-        if num_leases_removed:
-            # pack and write out the remaining leases. We write these out in
-            # the same order as they were added, so that if we crash while
-            # doing this, we won't lose any non-cancelled leases.
-            leases = [l for l in leases if l] # remove the cancelled leases
-            f = open(self.home, 'rb+')
-            for i,lease in enumerate(leases):
-                self._write_lease_record(f, i, lease)
-            self._write_num_leases(f, len(leases))
-            self._truncate_leases(f, len(leases))
-            f.close()
-            space_freed = self.LEASE_SIZE * num_leases_removed
-            if not len(leases):
-                space_freed += os.stat(self.home)[stat.ST_SIZE]
-                self.unlink()
-            return space_freed
-
-
-class BucketWriter(Referenceable):
-    implements(RIBucketWriter)
-
-    def __init__(self, ss, incominghome, finalhome, max_size, lease_info, canary):
-        self.ss = ss
-        self.incominghome = incominghome
-        self.finalhome = finalhome
-        self._max_size = max_size # don't allow the client to write more than this
-        self._canary = canary
-        self._disconnect_marker = canary.notifyOnDisconnect(self._disconnected)
-        self.closed = False
-        self.throw_out_all_data = False
-        self._sharefile = ShareFile(incominghome, create=True, max_size=max_size)
-        # also, add our lease to the file now, so that other ones can be
-        # added by simultaneous uploaders
-        self._sharefile.add_lease(lease_info)
-
-    def allocated_size(self):
-        return self._max_size
-
-    def remote_write(self, offset, data):
-        start = time.time()
-        precondition(not self.closed)
-        if self.throw_out_all_data:
-            return
-        self._sharefile.write_share_data(offset, data)
-        self.ss.add_latency("write", time.time() - start)
-        self.ss.count("write")
-
-    def remote_close(self):
-        precondition(not self.closed)
-        start = time.time()
-
-        fileutil.make_dirs(os.path.dirname(self.finalhome))
-        fileutil.rename(self.incominghome, self.finalhome)
-        try:
-            # self.incominghome is like storage/shares/incoming/ab/abcde/4 .
-            # We try to delete the parent (.../ab/abcde) to avoid leaving
-            # these directories lying around forever, but the delete might
-            # fail if we're working on another share for the same storage
-            # index (like ab/abcde/5). The alternative approach would be to
-            # use a hierarchy of objects (PrefixHolder, BucketHolder,
-            # ShareWriter), each of which is responsible for a single
-            # directory on disk, and have them use reference counting of
-            # their children to know when they should do the rmdir. This
-            # approach is simpler, but relies on os.rmdir refusing to delete
-            # a non-empty directory. Do *not* use fileutil.rm_dir() here!
-            os.rmdir(os.path.dirname(self.incominghome))
-            # we also delete the grandparent (prefix) directory, .../ab ,
-            # again to avoid leaving directories lying around. This might
-            # fail if there is another bucket open that shares a prefix (like
-            # ab/abfff).
-            os.rmdir(os.path.dirname(os.path.dirname(self.incominghome))
---|
1191 | - # we leave the great-grandparent (incoming/) directory in place. |
---|
1192 | - except EnvironmentError: |
---|
1193 | - # ignore the "can't rmdir because the directory is not empty" |
---|
1194 | - # exceptions, those are normal consequences of the |
---|
1195 | - # above-mentioned conditions. |
---|
1196 | - pass |
---|
1197 | - self._sharefile = None |
---|
1198 | - self.closed = True |
---|
1199 | - self._canary.dontNotifyOnDisconnect(self._disconnect_marker) |
---|
1200 | - |
---|
1201 | - filelen = os.stat(self.finalhome)[stat.ST_SIZE] |
---|
1202 | - self.ss.bucket_writer_closed(self, filelen) |
---|
1203 | - self.ss.add_latency("close", time.time() - start) |
---|
1204 | - self.ss.count("close") |
---|
1205 | - |
---|
1206 | - def _disconnected(self): |
---|
1207 | - if not self.closed: |
---|
1208 | - self._abort() |
---|
1209 | - |
---|
1210 | - def remote_abort(self): |
---|
1211 | - log.msg("storage: aborting sharefile %s" % self.incominghome, |
---|
1212 | - facility="tahoe.storage", level=log.UNUSUAL) |
---|
1213 | - if not self.closed: |
---|
1214 | - self._canary.dontNotifyOnDisconnect(self._disconnect_marker) |
---|
1215 | - self._abort() |
---|
1216 | - self.ss.count("abort") |
---|
1217 | - |
---|
1218 | - def _abort(self): |
---|
1219 | - if self.closed: |
---|
1220 | - return |
---|
1221 | - |
---|
1222 | - os.remove(self.incominghome) |
---|
1223 | - # if we were the last share to be moved, remove the incoming/ |
---|
1224 | - # directory that was our parent |
---|
1225 | - parentdir = os.path.split(self.incominghome)[0] |
---|
1226 | - if not os.listdir(parentdir): |
---|
1227 | - os.rmdir(parentdir) |
---|
1228 | - self._sharefile = None |
---|
1229 | - |
---|
1230 | - # We are now considered closed for further writing. We must tell |
---|
1231 | - # the storage server about this so that it stops expecting us to |
---|
1232 | - # use the space it allocated for us earlier. |
---|
1233 | - self.closed = True |
---|
1234 | - self.ss.bucket_writer_closed(self, 0) |
---|
1235 | - |
---|
1236 | - |
---|
1237 | -class BucketReader(Referenceable): |
---|
1238 | - implements(RIBucketReader) |
---|
1239 | - |
---|
1240 | - def __init__(self, ss, sharefname, storage_index=None, shnum=None): |
---|
1241 | - self.ss = ss |
---|
1242 | - self._share_file = ShareFile(sharefname) |
---|
1243 | - self.storage_index = storage_index |
---|
1244 | - self.shnum = shnum |
---|
1245 | - |
---|
1246 | - def __repr__(self): |
---|
1247 | - return "<%s %s %s>" % (self.__class__.__name__, |
---|
1248 | - base32.b2a_l(self.storage_index[:8], 60), |
---|
1249 | - self.shnum) |
---|
1250 | - |
---|
1251 | - def remote_read(self, offset, length): |
---|
1252 | - start = time.time() |
---|
1253 | - data = self._share_file.read_share_data(offset, length) |
---|
1254 | - self.ss.add_latency("read", time.time() - start) |
---|
1255 | - self.ss.count("read") |
---|
1256 | - return data |
---|
1257 | - |
---|
1258 | - def remote_advise_corrupt_share(self, reason): |
---|
1259 | - return self.ss.remote_advise_corrupt_share("immutable", |
---|
1260 | - self.storage_index, |
---|
1261 | - self.shnum, |
---|
1262 | - reason) |
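
The lease methods above settle into a small protocol that the rest of this series leans on: renew_lease raises IndexError when no lease matches the given renew secret, and add_or_renew_lease catches exactly that to fall back to add_lease. A minimal sketch of the pattern, using the same constant_time_compare helper the share classes import (the list-based lease store here is illustrative, not from the patch):

    # Sketch of the renew-or-add protocol; `leases` is a hypothetical
    # mutable list of LeaseInfo-like records.
    from allmydata.util.hashutil import constant_time_compare

    def renew_lease(leases, renew_secret, new_expire_time):
        for lease in leases:
            # compare secrets in constant time to avoid a timing attack
            if constant_time_compare(lease.renew_secret, renew_secret):
                if new_expire_time > lease.expiration_time:
                    lease.expiration_time = new_expire_time
                return
        raise IndexError("unable to renew non-existent lease")

    def add_or_renew_lease(leases, lease_info):
        try:
            renew_lease(leases, lease_info.renew_secret,
                        lease_info.expiration_time)
        except IndexError:
            leases.append(lease_info)
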
hunk ./src/allmydata/storage/backends/disk/mutable.py 1
-import os, stat, struct
 
hunk ./src/allmydata/storage/backends/disk/mutable.py 2
-from allmydata.interfaces import BadWriteEnablerError
-from allmydata.util import idlib, log
+import struct
+
+from zope.interface import implements
+
+from allmydata.interfaces import IStoredMutableShare, BadWriteEnablerError
+from allmydata.util import fileutil, idlib, log
 from allmydata.util.assertutil import precondition
 from allmydata.util.hashutil import constant_time_compare
hunk ./src/allmydata/storage/backends/disk/mutable.py 10
-from allmydata.storage.lease import LeaseInfo
-from allmydata.storage.common import UnknownMutableContainerVersionError, \
+from allmydata.util.encodingutil import quote_filepath
+from allmydata.storage.common import si_b2a, UnknownMutableContainerVersionError, \
      DataTooLargeError
hunk ./src/allmydata/storage/backends/disk/mutable.py 13
+from allmydata.storage.lease import LeaseInfo
+from allmydata.storage.backends.base import testv_compare
 
hunk ./src/allmydata/storage/backends/disk/mutable.py 16
-# the MutableShareFile is like the ShareFile, but used for mutable data. It
-# has a different layout. See docs/mutable.txt for more details.
+
+# The MutableDiskShare is like the ImmutableDiskShare, but used for mutable data.
+# It has a different layout. See docs/mutable.rst for more details.
 
 # #   offset    size    name
 # 1   0         32      magic verstr "tahoe mutable container v1" plus binary
hunk ./src/allmydata/storage/backends/disk/mutable.py 31
 #                        4    4   expiration timestamp
 #                        8   32   renewal token
 #                       40   32   cancel token
-#                       72   20   nodeid which accepted the tokens
+#                       72   20   nodeid that accepted the tokens
 # 7   468       (a)     data
 # 8   ??        4       count of extra leases
 # 9   ??        n*92    extra leases
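
For orientation, the sizes implied by this layout can be checked with the struct module. The header format string below is the one the patched code itself uses; the per-lease format is inferred from the field sizes in the comment above, and the arithmetic reproduces the 468-byte data offset asserted further down. A sketch for the reader, not part of the patch:

    import struct

    # Fixed header: magic, write enabler's nodeid, write enabler,
    # data length, offset of the extra-lease count.
    HEADER_FORMAT = ">32s20s32sQQ"
    HEADER_SIZE = struct.calcsize(HEADER_FORMAT)     # 100
    # Per-lease record (inferred from the comment): ownerid, expiration,
    # renew secret, cancel secret, nodeid -- 4+4+32+32+20 bytes.
    LEASE_SIZE = struct.calcsize(">LL32s32s20s")     # 92, as asserted below
    DATA_OFFSET = HEADER_SIZE + 4*LEASE_SIZE         # 468, matching item 7

    def read_mutable_header(f):
        f.seek(0)
        (magic, write_enabler_nodeid, write_enabler,
         data_length, extra_lease_offset) = struct.unpack(
            HEADER_FORMAT, f.read(HEADER_SIZE))
        return magic, data_length, extra_lease_offset
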
hunk ./src/allmydata/storage/backends/disk/mutable.py 37
 
 
-# The struct module doc says that L's are 4 bytes in size., and that Q's are
+# The struct module doc says that L's are 4 bytes in size, and that Q's are
 # 8 bytes in size. Since compatibility depends upon this, double-check it.
 assert struct.calcsize(">L") == 4, struct.calcsize(">L")
 assert struct.calcsize(">Q") == 8, struct.calcsize(">Q")
hunk ./src/allmydata/storage/backends/disk/mutable.py 42
 
-class MutableShareFile:
+
+class MutableDiskShare(object):
+    implements(IStoredMutableShare)
 
     sharetype = "mutable"
     DATA_LENGTH_OFFSET = struct.calcsize(">32s20s32s")
hunk ./src/allmydata/storage/backends/disk/mutable.py 54
     assert LEASE_SIZE == 92
     DATA_OFFSET = HEADER_SIZE + 4*LEASE_SIZE
     assert DATA_OFFSET == 468, DATA_OFFSET
+
     # our sharefiles share with a recognizable string, plus some random
     # binary data to reduce the chance that a regular text file will look
     # like a sharefile.
hunk ./src/allmydata/storage/backends/disk/mutable.py 63
     MAX_SIZE = 2*1000*1000*1000 # 2GB, kind of arbitrary
     # TODO: decide upon a policy for max share size
 
-    def __init__(self, filename, parent=None):
-        self.home = filename
-        if os.path.exists(self.home):
+    def __init__(self, storageindex, shnum, home, parent=None):
+        self._storageindex = storageindex
+        self._shnum = shnum
+        self._home = home
+        if self._home.exists():
             # we don't cache anything, just check the magic
hunk ./src/allmydata/storage/backends/disk/mutable.py 69
-            f = open(self.home, 'rb')
-            data = f.read(self.HEADER_SIZE)
-            (magic,
-             write_enabler_nodeid, write_enabler,
-             data_length, extra_least_offset) = \
-             struct.unpack(">32s20s32sQQ", data)
-            if magic != self.MAGIC:
-                msg = "sharefile %s had magic '%r' but we wanted '%r'" % \
-                      (filename, magic, self.MAGIC)
-                raise UnknownMutableContainerVersionError(msg)
+            f = self._home.open('rb')
+            try:
+                data = f.read(self.HEADER_SIZE)
+                (magic,
+                 write_enabler_nodeid, write_enabler,
+                 data_length, extra_lease_offset) = \
+                 struct.unpack(">32s20s32sQQ", data)
+                if magic != self.MAGIC:
+                    msg = "sharefile %s had magic '%r' but we wanted '%r'" % \
+                          (quote_filepath(self._home), magic, self.MAGIC)
+                    raise UnknownMutableContainerVersionError(msg)
+            finally:
+                f.close()
         self.parent = parent # for logging
 
     def log(self, *args, **kwargs):
hunk ./src/allmydata/storage/backends/disk/mutable.py 87
         return self.parent.log(*args, **kwargs)
 
-    def create(self, my_nodeid, write_enabler):
-        assert not os.path.exists(self.home)
+    def create(self, serverid, write_enabler):
+        assert not self._home.exists()
         data_length = 0
         extra_lease_offset = (self.HEADER_SIZE
                               + 4 * self.LEASE_SIZE
hunk ./src/allmydata/storage/backends/disk/mutable.py 95
                               + data_length)
         assert extra_lease_offset == self.DATA_OFFSET # true at creation
         num_extra_leases = 0
-        f = open(self.home, 'wb')
-        header = struct.pack(">32s20s32sQQ",
-                             self.MAGIC, my_nodeid, write_enabler,
-                             data_length, extra_lease_offset,
-                             )
-        leases = ("\x00"*self.LEASE_SIZE) * 4
-        f.write(header + leases)
-        # data goes here, empty after creation
-        f.write(struct.pack(">L", num_extra_leases))
-        # extra leases go here, none at creation
-        f.close()
+        f = self._home.open('wb')
+        try:
+            header = struct.pack(">32s20s32sQQ",
+                                 self.MAGIC, serverid, write_enabler,
+                                 data_length, extra_lease_offset,
+                                 )
+            leases = ("\x00"*self.LEASE_SIZE) * 4
+            f.write(header + leases)
+            # data goes here, empty after creation
+            f.write(struct.pack(">L", num_extra_leases))
+            # extra leases go here, none at creation
+        finally:
+            f.close()
+
+    def __repr__(self):
+        return ("<MutableDiskShare %s:%r at %s>"
+                % (si_b2a(self._storageindex), self._shnum, quote_filepath(self._home)))
+
+    def get_used_space(self):
+        return fileutil.get_used_space(self._home)
+
+    def get_storage_index(self):
+        return self._storageindex
+
+    def get_shnum(self):
+        return self._shnum
 
     def unlink(self):
hunk ./src/allmydata/storage/backends/disk/mutable.py 123
-        os.unlink(self.home)
+        self._home.remove()
 
     def _read_data_length(self, f):
         f.seek(self.DATA_LENGTH_OFFSET)
hunk ./src/allmydata/storage/backends/disk/mutable.py 291
 
     def get_leases(self):
         """Yields a LeaseInfo instance for all leases."""
-        f = open(self.home, 'rb')
-        for i, lease in self._enumerate_leases(f):
-            yield lease
-        f.close()
+        f = self._home.open('rb')
+        try:
+            for i, lease in self._enumerate_leases(f):
+                yield lease
+        finally:
+            f.close()
 
     def _enumerate_leases(self, f):
         for i in range(self._get_num_lease_slots(f)):
hunk ./src/allmydata/storage/backends/disk/mutable.py 303
             try:
                 data = self._read_lease_record(f, i)
                 if data is not None:
-                    yield i,data
+                    yield i, data
             except IndexError:
                 return
 
hunk ./src/allmydata/storage/backends/disk/mutable.py 307
+    # These lease operations are intended for use by disk_backend.py.
+    # Other non-test clients should not depend on the fact that the disk
+    # backend stores leases in share files.
+
     def add_lease(self, lease_info):
         precondition(lease_info.owner_num != 0) # 0 means "no lease here"
hunk ./src/allmydata/storage/backends/disk/mutable.py 313
-        f = open(self.home, 'rb+')
-        num_lease_slots = self._get_num_lease_slots(f)
-        empty_slot = self._get_first_empty_lease_slot(f)
-        if empty_slot is not None:
-            self._write_lease_record(f, empty_slot, lease_info)
-        else:
-            self._write_lease_record(f, num_lease_slots, lease_info)
-        f.close()
+        f = self._home.open('rb+')
+        try:
+            num_lease_slots = self._get_num_lease_slots(f)
+            empty_slot = self._get_first_empty_lease_slot(f)
+            if empty_slot is not None:
+                self._write_lease_record(f, empty_slot, lease_info)
+            else:
+                self._write_lease_record(f, num_lease_slots, lease_info)
+        finally:
+            f.close()
 
     def renew_lease(self, renew_secret, new_expire_time):
         accepting_nodeids = set()
hunk ./src/allmydata/storage/backends/disk/mutable.py 326
-        f = open(self.home, 'rb+')
-        for (leasenum,lease) in self._enumerate_leases(f):
-            if constant_time_compare(lease.renew_secret, renew_secret):
-                # yup. See if we need to update the owner time.
-                if new_expire_time > lease.expiration_time:
-                    # yes
-                    lease.expiration_time = new_expire_time
-                    self._write_lease_record(f, leasenum, lease)
-                f.close()
-                return
-            accepting_nodeids.add(lease.nodeid)
-        f.close()
+        f = self._home.open('rb+')
+        try:
+            for (leasenum, lease) in self._enumerate_leases(f):
+                if constant_time_compare(lease.renew_secret, renew_secret):
+                    # yup. See if we need to update the owner time.
+                    if new_expire_time > lease.expiration_time:
+                        # yes
+                        lease.expiration_time = new_expire_time
+                        self._write_lease_record(f, leasenum, lease)
+                    return
+                accepting_nodeids.add(lease.nodeid)
+        finally:
+            f.close()
         # Return the accepting_nodeids set, to give the client a chance to
hunk ./src/allmydata/storage/backends/disk/mutable.py 340
-        # update the leases on a share which has been migrated from its
+        # update the leases on a share that has been migrated from its
         # original server to a new one.
         msg = ("Unable to renew non-existent lease. I have leases accepted by"
                " nodeids: ")
hunk ./src/allmydata/storage/backends/disk/mutable.py 357
         except IndexError:
             self.add_lease(lease_info)
 
-    def cancel_lease(self, cancel_secret):
-        """Remove any leases with the given cancel_secret. If the last lease
-        is cancelled, the file will be removed. Return the number of bytes
-        that were freed (by truncating the list of leases, and possibly by
-        deleting the file. Raise IndexError if there was no lease with the
-        given cancel_secret."""
-
-        accepting_nodeids = set()
-        modified = 0
-        remaining = 0
-        blank_lease = LeaseInfo(owner_num=0,
-                                renew_secret="\x00"*32,
-                                cancel_secret="\x00"*32,
-                                expiration_time=0,
-                                nodeid="\x00"*20)
-        f = open(self.home, 'rb+')
-        for (leasenum,lease) in self._enumerate_leases(f):
-            accepting_nodeids.add(lease.nodeid)
-            if constant_time_compare(lease.cancel_secret, cancel_secret):
-                self._write_lease_record(f, leasenum, blank_lease)
-                modified += 1
-            else:
-                remaining += 1
-        if modified:
-            freed_space = self._pack_leases(f)
-            f.close()
-            if not remaining:
-                freed_space += os.stat(self.home)[stat.ST_SIZE]
-                self.unlink()
-            return freed_space
-
-        msg = ("Unable to cancel non-existent lease. I have leases "
-               "accepted by nodeids: ")
-        msg += ",".join([("'%s'" % idlib.nodeid_b2a(anid))
-                         for anid in accepting_nodeids])
-        msg += " ."
-        raise IndexError(msg)
-
-    def _pack_leases(self, f):
-        # TODO: reclaim space from cancelled leases
-        return 0
-
     def _read_write_enabler_and_nodeid(self, f):
         f.seek(0)
         data = f.read(self.HEADER_SIZE)
hunk ./src/allmydata/storage/backends/disk/mutable.py 369
 
     def readv(self, readv):
         datav = []
-        f = open(self.home, 'rb')
-        for (offset, length) in readv:
-            datav.append(self._read_share_data(f, offset, length))
-        f.close()
+        f = self._home.open('rb')
+        try:
+            for (offset, length) in readv:
+                datav.append(self._read_share_data(f, offset, length))
+        finally:
+            f.close()
         return datav
 
hunk ./src/allmydata/storage/backends/disk/mutable.py 377
-#    def remote_get_length(self):
-#        f = open(self.home, 'rb')
-#        data_length = self._read_data_length(f)
-#        f.close()
-#        return data_length
+    def get_size(self):
+        return self._home.getsize()
+
+    def get_data_length(self):
+        f = self._home.open('rb')
+        try:
+            data_length = self._read_data_length(f)
+        finally:
+            f.close()
+        return data_length
 
     def check_write_enabler(self, write_enabler, si_s):
hunk ./src/allmydata/storage/backends/disk/mutable.py 389
-        f = open(self.home, 'rb+')
-        (real_write_enabler, write_enabler_nodeid) = \
-            self._read_write_enabler_and_nodeid(f)
-        f.close()
+        f = self._home.open('rb+')
+        try:
+            (real_write_enabler, write_enabler_nodeid) = self._read_write_enabler_and_nodeid(f)
+        finally:
+            f.close()
         # avoid a timing attack
         #if write_enabler != real_write_enabler:
         if not constant_time_compare(write_enabler, real_write_enabler):
hunk ./src/allmydata/storage/backends/disk/mutable.py 410
 
     def check_testv(self, testv):
         test_good = True
-        f = open(self.home, 'rb+')
-        for (offset, length, operator, specimen) in testv:
-            data = self._read_share_data(f, offset, length)
-            if not testv_compare(data, operator, specimen):
-                test_good = False
-                break
-        f.close()
+        f = self._home.open('rb+')
+        try:
+            for (offset, length, operator, specimen) in testv:
+                data = self._read_share_data(f, offset, length)
+                if not testv_compare(data, operator, specimen):
+                    test_good = False
+                    break
+        finally:
+            f.close()
         return test_good
 
     def writev(self, datav, new_length):
hunk ./src/allmydata/storage/backends/disk/mutable.py 422
-        f = open(self.home, 'rb+')
-        for (offset, data) in datav:
-            self._write_share_data(f, offset, data)
-        if new_length is not None:
-            cur_length = self._read_data_length(f)
-            if new_length < cur_length:
-                self._write_data_length(f, new_length)
-                # TODO: if we're going to shrink the share file when the
-                # share data has shrunk, then call
-                # self._change_container_size() here.
-        f.close()
-
-def testv_compare(a, op, b):
-    assert op in ("lt", "le", "eq", "ne", "ge", "gt")
-    if op == "lt":
-        return a < b
-    if op == "le":
-        return a <= b
-    if op == "eq":
-        return a == b
-    if op == "ne":
-        return a != b
-    if op == "ge":
-        return a >= b
-    if op == "gt":
-        return a > b
-    # never reached
+        f = self._home.open('rb+')
+        try:
+            for (offset, data) in datav:
+                self._write_share_data(f, offset, data)
+            if new_length is not None:
+                cur_length = self._read_data_length(f)
+                if new_length < cur_length:
+                    self._write_data_length(f, new_length)
+                    # TODO: if we're going to shrink the share file when the
+                    # share data has shrunk, then call
+                    # self._change_container_size() here.
+        finally:
+            f.close()
 
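
testv_compare (removed above) and EmptyShare (removed just below) move to allmydata/storage/backends/base.py, which this file now imports from. For reference, a small usage sketch of the test-vector semantics that check_testv relies on, assuming the moved function keeps the signature shown here:

    # A test vector is a list of (offset, length, operator, specimen)
    # tuples; a conditional write proceeds only if every comparison
    # against the current share data holds.
    from allmydata.storage.backends.base import testv_compare

    def check(data, testv):
        for (offset, length, operator, specimen) in testv:
            if not testv_compare(data[offset:offset+length], operator, specimen):
                return False
        return True

    testv = [(0, 5, "eq", "hello"),   # bytes 0..4 must equal "hello"
             (5, 1, "ne", "!")]       # byte 5 must differ from "!"
    assert check("hello world", testv)
    assert testv_compare("a", "lt", "b")  # operators: lt le eq ne ge gt
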
hunk ./src/allmydata/storage/backends/disk/mutable.py 436
-class EmptyShare:
+    def close(self):
+        pass
 
hunk ./src/allmydata/storage/backends/disk/mutable.py 439
-    def check_testv(self, testv):
-        test_good = True
-        for (offset, length, operator, specimen) in testv:
-            data = ""
-            if not testv_compare(data, operator, specimen):
-                test_good = False
-                break
-        return test_good
 
hunk ./src/allmydata/storage/backends/disk/mutable.py 440
-def create_mutable_sharefile(filename, my_nodeid, write_enabler, parent):
-    ms = MutableShareFile(filename, parent)
-    ms.create(my_nodeid, write_enabler)
+def create_mutable_disk_share(storageindex, shnum, fp, serverid, write_enabler, parent):
+    ms = MutableDiskShare(storageindex, shnum, fp, parent)
+    ms.create(serverid, write_enabler)
     del ms
hunk ./src/allmydata/storage/backends/disk/mutable.py 444
-    return MutableShareFile(filename, parent)
-
+    return MutableDiskShare(storageindex, shnum, fp, parent)
addfile ./src/allmydata/storage/backends/null/__init__.py
addfile ./src/allmydata/storage/backends/null/null_backend.py
hunk ./src/allmydata/storage/backends/null/null_backend.py 2
 
+import os, struct
+
+from zope.interface import implements
+
+from allmydata.interfaces import IStorageBackend, IShareSet, IStoredShare, IStoredMutableShare
+from allmydata.util.assertutil import precondition
+from allmydata.util.hashutil import constant_time_compare
+from allmydata.storage.backends.base import Backend, ShareSet
+from allmydata.storage.bucket import BucketWriter
+from allmydata.storage.common import si_b2a
+from allmydata.storage.lease import LeaseInfo
+
+
+class NullBackend(Backend):
+    implements(IStorageBackend)
+
+    def __init__(self):
+        Backend.__init__(self)
+
+    def get_available_space(self, reserved_space):
+        return None
+
+    def get_sharesets_for_prefix(self, prefix):
+        pass
+
+    def get_shareset(self, storageindex):
+        return NullShareSet(storageindex)
+
+    def fill_in_space_stats(self, stats):
+        pass
+
+    def set_storage_server(self, ss):
+        self.ss = ss
+
+    def advise_corrupt_share(self, sharetype, storageindex, shnum, reason):
+        pass
+
+
+class NullShareSet(ShareSet):
+    implements(IShareSet)
+
+    def __init__(self, storageindex):
+        self.storageindex = storageindex
+
+    def get_overhead(self):
+        return 0
+
+    def get_incoming_shnums(self):
+        return frozenset()
+
+    def get_shares(self):
+        pass
+
+    def get_share(self, shnum):
+        return None
+
+    def get_storage_index(self):
+        return self.storageindex
+
+    def get_storage_index_string(self):
+        return si_b2a(self.storageindex)
+
+    def make_bucket_writer(self, storageserver, shnum, max_space_per_bucket, lease_info, canary):
+        immutableshare = ImmutableNullShare()
+        return BucketWriter(storageserver, immutableshare, max_space_per_bucket, lease_info, canary)
+
+    def _create_mutable_share(self, storageserver, shnum, write_enabler):
+        return MutableNullShare()
+
+    def _clean_up_after_unlink(self):
+        pass
+
+
+class ImmutableNullShare:
+    implements(IStoredShare)
+    sharetype = "immutable"
+
+    def __init__(self):
+        """ If max_size is not None then I won't allow more than
+        max_size to be written to me. If create=True then max_size
+        must not be None. """
+        pass
+
+    def get_shnum(self):
+        return self.shnum
+
+    def unlink(self):
+        os.unlink(self.fname)
+
+    def read_share_data(self, offset, length):
+        precondition(offset >= 0)
+        # Reads beyond the end of the data are truncated. Reads that start
+        # beyond the end of the data return an empty string.
+        seekpos = self._data_offset+offset
+        fsize = os.path.getsize(self.fname)
+        actuallength = max(0, min(length, fsize-seekpos)) # XXX #1528
+        if actuallength == 0:
+            return ""
+        f = open(self.fname, 'rb')
+        f.seek(seekpos)
+        return f.read(actuallength)
+
+    def write_share_data(self, offset, data):
+        pass
+
+    def _write_lease_record(self, f, lease_number, lease_info):
+        offset = self._lease_offset + lease_number * self.LEASE_SIZE
+        f.seek(offset)
+        assert f.tell() == offset
+        f.write(lease_info.to_immutable_data())
+
+    def _read_num_leases(self, f):
+        f.seek(0x08)
+        (num_leases,) = struct.unpack(">L", f.read(4))
+        return num_leases
+
+    def _write_num_leases(self, f, num_leases):
+        f.seek(0x08)
+        f.write(struct.pack(">L", num_leases))
+
+    def _truncate_leases(self, f, num_leases):
+        f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE)
+
+    def get_leases(self):
+        """Yields a LeaseInfo instance for all leases."""
+        f = open(self.fname, 'rb')
+        (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
+        f.seek(self._lease_offset)
+        for i in range(num_leases):
+            data = f.read(self.LEASE_SIZE)
+            if data:
+                yield LeaseInfo().from_immutable_data(data)
+
+    def add_lease(self, lease):
+        pass
+
+    def renew_lease(self, renew_secret, new_expire_time):
+        for i, lease in enumerate(self.get_leases()):
+            if constant_time_compare(lease.renew_secret, renew_secret):
+                # yup. See if we need to update the owner time.
+                if new_expire_time > lease.expiration_time:
+                    # yes
+                    lease.expiration_time = new_expire_time
+                    f = open(self.fname, 'rb+')
+                    self._write_lease_record(f, i, lease)
+                    f.close()
+                return
+        raise IndexError("unable to renew non-existent lease")
+
+    def add_or_renew_lease(self, lease_info):
+        try:
+            self.renew_lease(lease_info.renew_secret,
+                             lease_info.expiration_time)
+        except IndexError:
+            self.add_lease(lease_info)
+
+
+class MutableNullShare:
+    implements(IStoredMutableShare)
+    sharetype = "mutable"
+
+    """ XXX: TODO """
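
Because every write is discarded, the null backend lets StorageServer be exercised without touching the filesystem; the test module added below constructs it exactly this way. A minimal sketch, assuming the work-in-progress pieces fit together as those tests expect (mock.Mock() stands in for the Foolscap canary, as in the tests):

    import mock
    from allmydata.storage.server import StorageServer
    from allmydata.storage.backends.null.null_backend import NullBackend

    # Nothing is persisted: ImmutableNullShare.write_share_data is a no-op.
    ss = StorageServer('testnodeidxxxxxxxxxx', NullBackend())
    alreadygot, writers = ss.remote_allocate_buckets(
        'teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
    writers[0].remote_write(0, 'a')
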
addfile ./src/allmydata/storage/bucket.py
hunk ./src/allmydata/storage/bucket.py 1
+
+import time
+
+from foolscap.api import Referenceable
+
+from zope.interface import implements
+from allmydata.interfaces import RIBucketWriter, RIBucketReader
+from allmydata.util import base32, log
+from allmydata.util.assertutil import precondition
+
+
+class BucketWriter(Referenceable):
+    implements(RIBucketWriter)
+
+    def __init__(self, ss, immutableshare, max_size, lease_info, canary):
+        self.ss = ss
+        self._max_size = max_size # don't allow the client to write more than this
+        self._canary = canary
+        self._disconnect_marker = canary.notifyOnDisconnect(self._disconnected)
+        self.closed = False
+        self.throw_out_all_data = False
+        self._share = immutableshare
+        # also, add our lease to the file now, so that other ones can be
+        # added by simultaneous uploaders
+        self._share.add_lease(lease_info)
+
+    def allocated_size(self):
+        return self._max_size
+
+    def remote_write(self, offset, data):
+        start = time.time()
+        precondition(not self.closed)
+        if self.throw_out_all_data:
+            return
+        self._share.write_share_data(offset, data)
+        self.ss.add_latency("write", time.time() - start)
+        self.ss.count("write")
+
+    def remote_close(self):
+        precondition(not self.closed)
+        start = time.time()
+
+        self._share.close()
+        filelen = self._share.stat()
+        self._share = None
+
+        self.closed = True
+        self._canary.dontNotifyOnDisconnect(self._disconnect_marker)
+
+        self.ss.bucket_writer_closed(self, filelen)
+        self.ss.add_latency("close", time.time() - start)
+        self.ss.count("close")
+
+    def _disconnected(self):
+        if not self.closed:
+            self._abort()
+
+    def remote_abort(self):
+        log.msg("storage: aborting write to share %r" % self._share,
+                facility="tahoe.storage", level=log.UNUSUAL)
+        if not self.closed:
+            self._canary.dontNotifyOnDisconnect(self._disconnect_marker)
+        self._abort()
+        self.ss.count("abort")
+
+    def _abort(self):
+        if self.closed:
+            return
+        self._share.unlink()
+        self._share = None
+
+        # We are now considered closed for further writing. We must tell
+        # the storage server about this so that it stops expecting us to
+        # use the space it allocated for us earlier.
+        self.closed = True
+        self.ss.bucket_writer_closed(self, 0)
+
+
+class BucketReader(Referenceable):
+    implements(RIBucketReader)
+
+    def __init__(self, ss, share):
+        self.ss = ss
+        self._share = share
+        self.storageindex = share.storageindex
+        self.shnum = share.shnum
+
+    def __repr__(self):
+        return "<%s %s %s>" % (self.__class__.__name__,
+                               base32.b2a_l(self.storageindex[:8], 60),
+                               self.shnum)
+
+    def remote_read(self, offset, length):
+        start = time.time()
+        data = self._share.read_share_data(offset, length)
+        self.ss.add_latency("read", time.time() - start)
+        self.ss.count("read")
+        return data
+
+    def remote_advise_corrupt_share(self, reason):
+        return self.ss.remote_advise_corrupt_share("immutable",
+                                                   self.storageindex,
+                                                   self.shnum,
+                                                   reason)
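
BucketWriter now drives an abstract share object rather than a pair of filenames, so the contract it needs is just the handful of methods called above. A sketch of the minimal duck type (the method names come from the call sites in BucketWriter; that stat() returns the final share length is an assumption remote_close depends on):

    class MinimalShare(object):
        # Smallest object the BucketWriter above can drive (illustrative
        # only; assumes in-order, non-overlapping writes).
        def __init__(self):
            self.data = ""
        def add_lease(self, lease_info):            # called from __init__
            pass
        def write_share_data(self, offset, data):   # called from remote_write
            self.data = self.data[:offset] + data
        def close(self):                            # called from remote_close
            pass
        def stat(self):                             # remote_close expects the final length
            return len(self.data)
        def unlink(self):                           # called from _abort
            self.data = ""
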
addfile ./src/allmydata/test/test_backends.py
hunk ./src/allmydata/test/test_backends.py 1
+import os, stat
+from twisted.trial import unittest
+from allmydata.util.log import msg
+from allmydata.test.common_util import ReallyEqualMixin
+import mock
+
+# This is the code that we're going to be testing.
+from allmydata.storage.server import StorageServer
+from allmydata.storage.backends.disk.disk_backend import DiskBackend, si_si2dir
+from allmydata.storage.backends.null.null_backend import NullBackend
+
+# The following share file content was generated with
+# storage.immutable.ShareFile from Tahoe-LAFS v1.8.2
+# with share data == 'a'. The total size of this input
+# is 85 bytes.
+shareversionnumber = '\x00\x00\x00\x01'
+sharedatalength = '\x00\x00\x00\x01'
+numberofleases = '\x00\x00\x00\x01'
+shareinputdata = 'a'
+ownernumber = '\x00\x00\x00\x00'
+renewsecret = 'x'*32
+cancelsecret = 'y'*32
+expirationtime = '\x00(\xde\x80'
+nextlease = ''
+containerdata = shareversionnumber + sharedatalength + numberofleases
+client_data = shareinputdata + ownernumber + renewsecret + \
+              cancelsecret + expirationtime + nextlease
+share_data = containerdata + client_data
+testnodeid = 'testnodeidxxxxxxxxxx'
+
+
+class MockFileSystem(unittest.TestCase):
+    """ I simulate a filesystem that the code under test can use. I simulate
+    just the parts of the filesystem that the current implementation of Disk
+    backend needs. """
+    def setUp(self):
+        # Make patcher, patch, and effects for disk-using functions.
+        msg("%s.setUp()" % (self,))
+        self.mockedfilepaths = {}
+        # keys are pathnames, values are MockFilePath objects. This is necessary because
+        # MockFilePath behavior sometimes depends on the filesystem. Where it does,
+        # self.mockedfilepaths has the relevant information.
+        self.storedir = MockFilePath('teststoredir', self.mockedfilepaths)
+        self.basedir = self.storedir.child('shares')
+        self.baseincdir = self.basedir.child('incoming')
+        self.sharedirfinalname = self.basedir.child('or').child('orsxg5dtorxxeylhmvpws3temv4a')
+        self.sharedirincomingname = self.baseincdir.child('or').child('orsxg5dtorxxeylhmvpws3temv4a')
+        self.shareincomingname = self.sharedirincomingname.child('0')
+        self.sharefinalname = self.sharedirfinalname.child('0')
+
+        # FIXME: these patches won't work; disk_backend no longer imports FilePath, BucketCountingCrawler,
+        # or LeaseCheckingCrawler.
+
+        self.FilePathFake = mock.patch('allmydata.storage.backends.disk.disk_backend.FilePath', new=MockFilePath)
+        self.FilePathFake.__enter__()
+
+        self.BCountingCrawler = mock.patch('allmydata.storage.backends.disk.disk_backend.BucketCountingCrawler')
+        FakeBCC = self.BCountingCrawler.__enter__()
+        FakeBCC.side_effect = self.call_FakeBCC
+
+        self.LeaseCheckingCrawler = mock.patch('allmydata.storage.backends.disk.disk_backend.LeaseCheckingCrawler')
+        FakeLCC = self.LeaseCheckingCrawler.__enter__()
+        FakeLCC.side_effect = self.call_FakeLCC
+
+        self.get_available_space = mock.patch('allmydata.util.fileutil.get_available_space')
+        GetSpace = self.get_available_space.__enter__()
+        GetSpace.side_effect = self.call_get_available_space
+
+        self.statforsize = mock.patch('allmydata.storage.backends.disk.core.filepath.stat')
+        getsize = self.statforsize.__enter__()
+        getsize.side_effect = self.call_statforsize
+
+    def call_FakeBCC(self, StateFile):
+        return MockBCC()
+
+    def call_FakeLCC(self, StateFile, HistoryFile, ExpirationPolicy):
+        return MockLCC()
+
+    def call_get_available_space(self, storedir, reservedspace):
+        # The input vector has an input size of 85.
+        return 85 - reservedspace
+
+    def call_statforsize(self, fakefpname):
+        return self.mockedfilepaths[fakefpname].fileobject.size()
+
+    def tearDown(self):
+        msg("%s.tearDown()" % (self,))
+        self.FilePathFake.__exit__()
+        self.mockedfilepaths = {}
+
+
+class MockFilePath:
+    def __init__(self, pathstring, ffpathsenvironment, existence=False):
+        #  I can't just make the values MockFileObjects because they may be directories.
+        self.mockedfilepaths = ffpathsenvironment
+        self.path = pathstring
+        self.existence = existence
+        if not self.mockedfilepaths.has_key(self.path):
+            #  The first MockFilePath object is special
+            self.mockedfilepaths[self.path] = self
+            self.fileobject = None
+        else:
+            self.fileobject = self.mockedfilepaths[self.path].fileobject
+        self.spawn = {}
+        self.antecedent = os.path.dirname(self.path)
+
+    def setContent(self, contentstring):
+        # This method rewrites the data in the file that corresponds to its path
+        # name whether it preexisted or not.
+        self.fileobject = MockFileObject(contentstring)
+        self.existence = True
+        self.mockedfilepaths[self.path].fileobject = self.fileobject
+        self.mockedfilepaths[self.path].existence = self.existence
+        self.setparents()
+
+    def create(self):
+        # This method chokes if there's a pre-existing file!
+        if self.mockedfilepaths[self.path].fileobject:
+            raise OSError
+        else:
+            self.existence = True
+            self.mockedfilepaths[self.path].fileobject = self.fileobject
+            self.mockedfilepaths[self.path].existence = self.existence
+            self.setparents()
+
+    def open(self, mode='r'):
+        # XXX Makes no use of mode.
+        if not self.mockedfilepaths[self.path].fileobject:
+            # If there's no fileobject there already then make one and put it there.
+            self.fileobject = MockFileObject()
+            self.existence = True
+            self.mockedfilepaths[self.path].fileobject = self.fileobject
+            self.mockedfilepaths[self.path].existence = self.existence
+        else:
+            # Otherwise get a ref to it.
+            self.fileobject = self.mockedfilepaths[self.path].fileobject
+            self.existence = self.mockedfilepaths[self.path].existence
+        return self.fileobject.open(mode)
+
+    def child(self, childstring):
+        arg2child = os.path.join(self.path, childstring)
+        child = MockFilePath(arg2child, self.mockedfilepaths)
+        return child
+
+    def children(self):
+        childrenfromffs = [ffp for ffp in self.mockedfilepaths.values() if ffp.path.startswith(self.path)]
+        childrenfromffs = [ffp for ffp in childrenfromffs if not ffp.path.endswith(self.path)]
+        childrenfromffs = [ffp for ffp in childrenfromffs if ffp.exists()]
+        self.spawn = frozenset(childrenfromffs)
+        return self.spawn
+
+    def parent(self):
+        if self.mockedfilepaths.has_key(self.antecedent):
+            parent = self.mockedfilepaths[self.antecedent]
+        else:
+            parent = MockFilePath(self.antecedent, self.mockedfilepaths)
+        return parent
+
+    def parents(self):
+        antecedents = []
+        def f(fps, antecedents):
+            newfps = os.path.split(fps)[0]
+            if newfps:
+                antecedents.append(newfps)
+                f(newfps, antecedents)
+        f(self.path, antecedents)
+        return antecedents
+
+    def setparents(self):
+        for fps in self.parents():
+            if not self.mockedfilepaths.has_key(fps):
+                self.mockedfilepaths[fps] = MockFilePath(fps, self.mockedfilepaths, existence=True)
+
+    def basename(self):
+        return os.path.split(self.path)[1]
+
+    def moveTo(self, newffp):
+        # XXX Makes no distinction between file and directory arguments, this is deviation from filepath.moveTo
+        if self.mockedfilepaths[newffp.path].exists():
+            raise OSError
+        else:
+            self.mockedfilepaths[newffp.path] = self
+            self.path = newffp.path
+
+    def getsize(self):
+        return self.fileobject.getsize()
+
+    def exists(self):
+        return self.existence
+
+    def isdir(self):
+        return True
+
+    def makedirs(self):
+        # XXX These methods assume that fp_<FOO> functions in fileutil will be tested elsewhere!
+        pass
+
+    def remove(self):
+        pass
+
+
+class MockFileObject:
+    def __init__(self, contentstring=''):
+        self.buffer = contentstring
+        self.pos = 0
+    def open(self, mode='r'):
+        return self
+    def write(self, instring):
+        begin = self.pos
+        padlen = begin - len(self.buffer)
+        if padlen > 0:
+            self.buffer += '\x00' * padlen
+        end = self.pos + len(instring)
+        self.buffer = self.buffer[:begin]+instring+self.buffer[end:]
+        self.pos = end
+    def close(self):
+        self.pos = 0
+    def seek(self, pos):
+        self.pos = pos
+    def read(self, numberbytes):
+        return self.buffer[self.pos:self.pos+numberbytes]
+    def tell(self):
+        return self.pos
+    def size(self):
+        # XXX This method A: Is not to be found in a real file B: Is part of a wild-mung-up of filepath.stat!
+        # XXX Finally we shall hopefully use a getsize method soon, must consult first though.
+        # Hmmm... perhaps we need to sometimes stat the address when there's not a mockfileobject present?
+        return {stat.ST_SIZE:len(self.buffer)}
+    def getsize(self):
+        return len(self.buffer)
+
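
A quick round-trip shows how the mock filesystem above is meant to be used: paths are registered in a shared dictionary, and content lives in MockFileObject buffers rather than on disk. A sketch (the names here are local to this illustration):

    fps = {}                       # the shared mockedfilepaths environment
    fp = MockFilePath('teststoredir', fps).child('shares').child('0')
    fp.setContent('abc')           # creates the file and its parent entries
    f = fp.open('rb')
    assert f.read(3) == 'abc'
    assert fp.getsize() == 3
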
+class MockBCC:
+    def setServiceParent(self, Parent):
+        pass
+
+
+class MockLCC:
+    def setServiceParent(self, Parent):
+        pass
+
+
+class TestServerWithNullBackend(unittest.TestCase, ReallyEqualMixin):
+    """ NullBackend is just for testing and executable documentation, so
+    this test is actually a test of StorageServer in which we're using
+    NullBackend as helper code for the test, rather than a test of
+    NullBackend. """
+    def setUp(self):
+        self.ss = StorageServer(testnodeid, NullBackend())
+
+    @mock.patch('os.mkdir')
+    @mock.patch('__builtin__.open')
+    @mock.patch('os.listdir')
+    @mock.patch('os.path.isdir')
+    def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir):
+        """
+        Write a new share. This tests that StorageServer's remote_allocate_buckets
+        generates the correct return types when given test-vector arguments. That
+        bs is of the correct type is verified by attempting to invoke remote_write
+        on bs[0].
+        """
+        alreadygot, bs = self.ss.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
+        bs[0].remote_write(0, 'a')
+        self.failIf(mockisdir.called)
+        self.failIf(mocklistdir.called)
+        self.failIf(mockopen.called)
+        self.failIf(mockmkdir.called)
+
+
+class TestServerConstruction(MockFileSystem, ReallyEqualMixin):
+    def test_create_server_disk_backend(self):
+        """ This tests whether a server instance can be constructed with a
+        filesystem backend. To pass the test, it mustn't use the filesystem
+        outside of its configured storedir. """
+        StorageServer(testnodeid, DiskBackend(self.storedir))
+
+
+class TestServerAndDiskBackend(MockFileSystem, ReallyEqualMixin):
+    """ This tests both the StorageServer and the Disk backend together. """
+    def setUp(self):
+        MockFileSystem.setUp(self)
+        try:
+            self.backend = DiskBackend(self.storedir)
+            self.ss = StorageServer(testnodeid, self.backend)
+
+            self.backendwithreserve = DiskBackend(self.storedir, reserved_space=1)
+            self.sswithreserve = StorageServer(testnodeid, self.backendwithreserve)
+        except:
+            MockFileSystem.tearDown(self)
+            raise
+
+    @mock.patch('time.time')
+    @mock.patch('allmydata.util.fileutil.get_available_space')
+    def test_out_of_space(self, mockget_available_space, mocktime):
+        mocktime.return_value = 0
+
+        def call_get_available_space(dir, reserve):
+            return 0
+
+        mockget_available_space.side_effect = call_get_available_space
+        alreadygotc, bsc = self.sswithreserve.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
+        self.failUnlessReallyEqual(bsc, {})
+
+    @mock.patch('time.time')
+    def test_write_and_read_share(self, mocktime):
+        """
+        Write a new share, read it, and test the server's (and disk backend's)
+        handling of simultaneous and successive attempts to write the same
+        share.
+        """
+        mocktime.return_value = 0
+        # Inspect incoming and fail unless it's empty.
+        incomingset = self.ss.backend.get_incoming_shnums('teststorage_index')
+
+        self.failUnlessReallyEqual(incomingset, frozenset())
+
+        # Populate incoming with the sharenum: 0.
+        alreadygot, bs = self.ss.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, frozenset((0,)), 1, mock.Mock())
+
+        # This is a transparent-box test: Inspect incoming and fail unless the sharenum: 0 is listed there.
+        self.failUnlessReallyEqual(self.ss.backend.get_incoming_shnums('teststorage_index'), frozenset((0,)))
+
+        # Attempt to create a second share writer with the same sharenum.
+        alreadygota, bsa = self.ss.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, frozenset((0,)), 1, mock.Mock())
+
+        # Show that no sharewriter results from a remote_allocate_buckets
+        # with the same si and sharenum, until BucketWriter.remote_close()
+        # has been called.
+        self.failIf(bsa)
+
+        # Test allocated size.
+        spaceint = self.ss.allocated_size()
+        self.failUnlessReallyEqual(spaceint, 1)
+
+        # Write 'a' to shnum 0. Only tested together with close and read.
+        bs[0].remote_write(0, 'a')
+
+        # Preclose: Inspect final, failUnless nothing there.
+        self.failUnlessReallyEqual(len(list(self.backend.get_shares('teststorage_index'))), 0)
+        bs[0].remote_close()
+
+        # Postclose: (Omnibus) failUnless written data is in final.
+        sharesinfinal = list(self.backend.get_shares('teststorage_index'))
+        self.failUnlessReallyEqual(len(sharesinfinal), 1)
+        contents = sharesinfinal[0].read_share_data(0, 73)
+        self.failUnlessReallyEqual(contents, client_data)
+
+        # Exercise the case that the share we're asking to allocate is
+        # already (completely) uploaded.
+        self.ss.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
+
+    def test_read_old_share(self):
+        """ This tests whether the code correctly finds and reads
+        shares written out by old (Tahoe-LAFS <= v1.8.2)
+        servers. There is a similar test in test_download, but that one
+        is from the perspective of the client and exercises a deeper
+        stack of code. This one is for exercising just the
+        StorageServer object. """
+        # Construct a file with the appropriate contents in the mockfilesystem.
---|
2340 | + datalen = len(share_data) |
---|
2341 | + finalhome = si_si2dir(self.basedir, 'teststorage_index').child(str(0)) |
---|
2342 | + finalhome.setContent(share_data) |
---|
2343 | + |
---|
2344 | + # Now begin the test. |
---|
2345 | + bs = self.ss.remote_get_buckets('teststorage_index') |
---|
2346 | + |
---|
2347 | + self.failUnlessEqual(len(bs), 1) |
---|
2348 | + b = bs[0] |
---|
2349 | + # These should match by definition; the next two cases cover behaviors that are not completely unambiguous. |
---|
2350 | + self.failUnlessReallyEqual(b.remote_read(0, datalen), client_data) |
---|
2351 | + # If you try to read past the end, you get as much data as is there. |
---|
2352 | + self.failUnlessReallyEqual(b.remote_read(0, datalen+20), client_data) |
---|
2353 | + # If you start reading past the end of the file, you get the empty string. |
---|
2354 | + self.failUnlessReallyEqual(b.remote_read(datalen+1, 3), '') |
---|
2355 | } |
---|
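The test above pins down the allocation contract: a second remote_allocate_buckets for a share number that is still incoming yields no new writer, and once the writer has been closed the share is reported as already present. The following is a minimal, self-contained model of that contract (illustrative only, not Tahoe code; ModelServer and its methods are hypothetical stand-ins):

    class ModelServer(object):
        def __init__(self):
            self.incoming = {}   # shnum -> list of data chunks being written
            self.final = {}      # shnum -> completed share data

        def allocate_buckets(self, sharenums):
            alreadygot = set(self.final) & set(sharenums)
            writers = {}
            for shnum in sharenums:
                if shnum not in self.final and shnum not in self.incoming:
                    self.incoming[shnum] = []
                    writers[shnum] = shnum   # stands in for a BucketWriter
            return alreadygot, writers

        def write(self, shnum, data):
            self.incoming[shnum].append(data)

        def close(self, shnum):
            # move the share from incoming to final, like remote_close()
            self.final[shnum] = "".join(self.incoming.pop(shnum))

    srv = ModelServer()
    _, writers = srv.allocate_buckets([0])
    assert writers != {}                       # first attempt gets a writer
    _, writers2 = srv.allocate_buckets([0])
    assert writers2 == {}                      # simultaneous attempt gets none
    srv.write(0, "a")
    srv.close(0)
    alreadygot, _ = srv.allocate_buckets([0])
    assert alreadygot == set([0])              # successive attempt: already got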
2356 | [Pluggable backends -- all other changes. refs #999 |
---|
2357 | david-sarah@jacaranda.org**20110919233256 |
---|
2358 | Ignore-this: 1a77b6b5d178b32a9b914b699ba7e957 |
---|
2359 | ] { |
---|
2360 | hunk ./src/allmydata/client.py 245 |
---|
2361 | sharetypes.append("immutable") |
---|
2362 | if self.get_config("storage", "expire.mutable", True, boolean=True): |
---|
2363 | sharetypes.append("mutable") |
---|
2364 | - expiration_sharetypes = tuple(sharetypes) |
---|
2365 | |
---|
2366 | hunk ./src/allmydata/client.py 246 |
---|
2367 | + expiration_policy = { |
---|
2368 | + 'enabled': expire, |
---|
2369 | + 'mode': mode, |
---|
2370 | + 'override_lease_duration': o_l_d, |
---|
2371 | + 'cutoff_date': cutoff_date, |
---|
2372 | + 'sharetypes': tuple(sharetypes), |
---|
2373 | + } |
---|
2374 | ss = StorageServer(storedir, self.nodeid, |
---|
2375 | reserved_space=reserved, |
---|
2376 | discard_storage=discard, |
---|
2377 | hunk ./src/allmydata/client.py 258 |
---|
2378 | readonly_storage=readonly, |
---|
2379 | stats_provider=self.stats_provider, |
---|
2380 | - expiration_enabled=expire, |
---|
2381 | - expiration_mode=mode, |
---|
2382 | - expiration_override_lease_duration=o_l_d, |
---|
2383 | - expiration_cutoff_date=cutoff_date, |
---|
2384 | - expiration_sharetypes=expiration_sharetypes) |
---|
2385 | + expiration_policy=expiration_policy) |
---|
2386 | self.add_service(ss) |
---|
2387 | |
---|
2388 | d = self.when_tub_ready() |
---|
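The client.py hunks above collapse the five expiration_* keyword arguments into a single expiration_policy dict. As an illustration only (the values shown are plausible defaults, not taken from this patch; the real values are parsed from the [storage]expire.* settings in tahoe.cfg by the code above), the dict that reaches StorageServer has this shape:

    expiration_policy = {
        'enabled': True,
        'mode': 'age',                    # or 'cutoff-date'
        'override_lease_duration': None,  # e.g. 31*24*60*60 seconds
        'cutoff_date': None,              # used when mode == 'cutoff-date'
        'sharetypes': ('immutable', 'mutable'),
    }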
2389 | hunk ./src/allmydata/immutable/offloaded.py 306 |
---|
2390 | if os.path.exists(self._encoding_file): |
---|
2391 | self.log("ciphertext already present, bypassing fetch", |
---|
2392 | level=log.UNUSUAL) |
---|
2393 | + # XXX the following comment is probably stale, since |
---|
2394 | + # LocalCiphertextReader.get_plaintext_hashtree_leaves does not exist. |
---|
2395 | + # |
---|
2396 | # we'll still need the plaintext hashes (when |
---|
2397 | # LocalCiphertextReader.get_plaintext_hashtree_leaves() is |
---|
2398 | # called), and currently the easiest way to get them is to ask |
---|
2399 | hunk ./src/allmydata/immutable/upload.py 765 |
---|
2400 | self._status.set_progress(1, progress) |
---|
2401 | return cryptdata |
---|
2402 | |
---|
2403 | - |
---|
2404 | def get_plaintext_hashtree_leaves(self, first, last, num_segments): |
---|
2405 | hunk ./src/allmydata/immutable/upload.py 766 |
---|
2406 | + """OBSOLETE; Get the leaf nodes of a merkle hash tree over the |
---|
2407 | + plaintext segments, i.e. get the tagged hashes of the given segments. |
---|
2408 | + The segment size is expected to be generated by the |
---|
2409 | + IEncryptedUploadable before any plaintext is read or ciphertext |
---|
2410 | + produced, so that the segment hashes can be generated with only a |
---|
2411 | + single pass. |
---|
2412 | + |
---|
2413 | + This returns a Deferred that fires with a sequence of hashes, using: |
---|
2414 | + |
---|
2415 | + tuple(segment_hashes[first:last]) |
---|
2416 | + |
---|
2417 | + 'num_segments' is used to assert that the number of segments that the |
---|
2418 | + IEncryptedUploadable handled matches the number of segments that the |
---|
2419 | + encoder was expecting. |
---|
2420 | + |
---|
2421 | + This method must not be called until the final byte has been read |
---|
2422 | + from read_encrypted(). Once this method is called, read_encrypted() |
---|
2423 | + can never be called again. |
---|
2424 | + """ |
---|
2425 | # this is currently unused, but will live again when we fix #453 |
---|
2426 | if len(self._plaintext_segment_hashes) < num_segments: |
---|
2427 | # close out the last one |
---|
2428 | hunk ./src/allmydata/immutable/upload.py 803 |
---|
2429 | return defer.succeed(tuple(self._plaintext_segment_hashes[first:last])) |
---|
2430 | |
---|
2431 | def get_plaintext_hash(self): |
---|
2432 | + """OBSOLETE; Get the hash of the whole plaintext. |
---|
2433 | + |
---|
2434 | + This returns a Deferred that fires with a tagged SHA-256 hash of the |
---|
2435 | + whole plaintext, obtained from hashutil.plaintext_hash(data). |
---|
2436 | + """ |
---|
2437 | + # this is currently unused, but will live again when we fix #453 |
---|
2438 | h = self._plaintext_hasher.digest() |
---|
2439 | return defer.succeed(h) |
---|
2440 | |
---|
2441 | hunk ./src/allmydata/interfaces.py 29 |
---|
2442 | Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes |
---|
2443 | Offset = Number |
---|
2444 | ReadSize = int # the 'int' constraint is 2**31 == 2Gib -- large files are processed in not-so-large increments |
---|
2445 | -WriteEnablerSecret = Hash # used to protect mutable bucket modifications |
---|
2446 | -LeaseRenewSecret = Hash # used to protect bucket lease renewal requests |
---|
2447 | -LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests |
---|
2448 | +WriteEnablerSecret = Hash # used to protect mutable share modifications |
---|
2449 | +LeaseRenewSecret = Hash # used to protect lease renewal requests |
---|
2450 | +LeaseCancelSecret = Hash # used to protect lease cancellation requests |
---|
2451 | |
---|
2452 | class RIStubClient(RemoteInterface): |
---|
2453 | """Each client publishes a service announcement for a dummy object called |
---|
2454 | hunk ./src/allmydata/interfaces.py 106 |
---|
2455 | sharenums=SetOf(int, maxLength=MAX_BUCKETS), |
---|
2456 | allocated_size=Offset, canary=Referenceable): |
---|
2457 | """ |
---|
2458 | - @param storage_index: the index of the bucket to be created or |
---|
2459 | + @param storage_index: the index of the shareset to be created or |
---|
2460 | increfed. |
---|
2461 | @param sharenums: these are the share numbers (probably between 0 and |
---|
2462 | 99) that the sender is proposing to store on this |
---|
2463 | hunk ./src/allmydata/interfaces.py 111 |
---|
2464 | server. |
---|
2465 | - @param renew_secret: This is the secret used to protect bucket refresh |
---|
2466 | + @param renew_secret: This is the secret used to protect lease renewal. |
---|
2467 | This secret is generated by the client and |
---|
2468 | stored for later comparison by the server. Each |
---|
2469 | server is given a different secret. |
---|
2470 | hunk ./src/allmydata/interfaces.py 115 |
---|
2471 | - @param cancel_secret: Like renew_secret, but protects bucket decref. |
---|
2472 | - @param canary: If the canary is lost before close(), the bucket is |
---|
2473 | + @param cancel_secret: ignored |
---|
2474 | + @param canary: If the canary is lost before close(), the allocation is |
---|
2475 | deleted. |
---|
2476 | @return: tuple of (alreadygot, allocated), where alreadygot is what we |
---|
2477 | already have and allocated is what we hereby agree to accept. |
---|
2478 | hunk ./src/allmydata/interfaces.py 129 |
---|
2479 | renew_secret=LeaseRenewSecret, |
---|
2480 | cancel_secret=LeaseCancelSecret): |
---|
2481 | """ |
---|
2482 | - Add a new lease on the given bucket. If the renew_secret matches an |
---|
2483 | + Add a new lease on the given shareset. If the renew_secret matches an |
---|
2484 | existing lease, that lease will be renewed instead. If there is no |
---|
2485 | hunk ./src/allmydata/interfaces.py 131 |
---|
2486 | - bucket for the given storage_index, return silently. (note that in |
---|
2487 | + shareset for the given storage_index, return silently. (Note that in |
---|
2488 | tahoe-1.3.0 and earlier, IndexError was raised if there was no |
---|
2489 | hunk ./src/allmydata/interfaces.py 133 |
---|
2490 | - bucket) |
---|
2491 | + shareset.) |
---|
2492 | """ |
---|
2493 | return Any() # returns None now, but future versions might change |
---|
2494 | |
---|
2495 | hunk ./src/allmydata/interfaces.py 139 |
---|
2496 | def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret): |
---|
2497 | """ |
---|
2498 | - Renew the lease on a given bucket, resetting the timer to 31 days. |
---|
2499 | - Some networks will use this, some will not. If there is no bucket for |
---|
2500 | + Renew the lease on a given shareset, resetting the timer to 31 days. |
---|
2501 | + Some networks will use this, some will not. If there is no shareset for |
---|
2502 | the given storage_index, IndexError will be raised. |
---|
2503 | |
---|
2504 | For mutable shares, if the given renew_secret does not match an |
---|
2505 | hunk ./src/allmydata/interfaces.py 146 |
---|
2506 | existing lease, IndexError will be raised with a note listing the |
---|
2507 | server-nodeids on the existing leases, so leases on migrated shares |
---|
2508 | - can be renewed or cancelled. For immutable shares, IndexError |
---|
2509 | - (without the note) will be raised. |
---|
2510 | + can be renewed. For immutable shares, IndexError (without the note) |
---|
2511 | + will be raised. |
---|
2512 | """ |
---|
2513 | return Any() |
---|
2514 | |
---|
2515 | hunk ./src/allmydata/interfaces.py 154 |
---|
2516 | def get_buckets(storage_index=StorageIndex): |
---|
2517 | return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS) |
---|
2518 | |
---|
2519 | - |
---|
2520 | - |
---|
2521 | def slot_readv(storage_index=StorageIndex, |
---|
2522 | shares=ListOf(int), readv=ReadVector): |
---|
2523 | """Read a vector from the numbered shares associated with the given |
---|
2524 | hunk ./src/allmydata/interfaces.py 163 |
---|
2525 | |
---|
2526 | def slot_testv_and_readv_and_writev(storage_index=StorageIndex, |
---|
2527 | secrets=TupleOf(WriteEnablerSecret, |
---|
2528 | - LeaseRenewSecret, |
---|
2529 | - LeaseCancelSecret), |
---|
2530 | + LeaseRenewSecret), |
---|
2531 | tw_vectors=TestAndWriteVectorsForShares, |
---|
2532 | r_vector=ReadVector, |
---|
2533 | ): |
---|
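The signature change above drops LeaseCancelSecret from the secrets tuple, since lease cancellation is no longer supported. A caller-side sketch of the change (placeholder secret values, for illustration only):

    write_enabler = "we" * 16   # placeholder 32-byte secrets
    renew_secret = "rs" * 16
    cancel_secret = "cs" * 16

    old_secrets = (write_enabler, renew_secret, cancel_secret)  # before this patch
    new_secrets = (write_enabler, renew_secret)                 # after it
    assert new_secrets == old_secrets[:2]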
2534 | hunk ./src/allmydata/interfaces.py 167 |
---|
2535 | - """General-purpose test-and-set operation for mutable slots. Perform |
---|
2536 | - a bunch of comparisons against the existing shares. If they all pass, |
---|
2537 | - then apply a bunch of write vectors to those shares. Then use the |
---|
2538 | - read vectors to extract data from all the shares and return the data. |
---|
2539 | + """ |
---|
2540 | + General-purpose atomic test-read-and-set operation for mutable slots. |
---|
2541 | + Perform a bunch of comparisons against the existing shares. If they |
---|
2542 | + all pass: use the read vectors to extract data from all the shares, |
---|
2543 | + then apply a bunch of write vectors to those shares. Return the read |
---|
2544 | + data, which does not include any modifications made by the writes. |
---|
2545 | |
---|
2546 | This method is, um, large. The goal is to allow clients to update all |
---|
2547 | the shares associated with a mutable file in a single round trip. |
---|
2548 | hunk ./src/allmydata/interfaces.py 177 |
---|
2549 | |
---|
2550 | - @param storage_index: the index of the bucket to be created or |
---|
2551 | + @param storage_index: the index of the shareset to be created or |
---|
2552 | increfed. |
---|
2553 | @param write_enabler: a secret that is stored along with the slot. |
---|
2554 | Writes are accepted from any caller who can |
---|
2555 | hunk ./src/allmydata/interfaces.py 183 |
---|
2556 | present the matching secret. A different secret |
---|
2557 | should be used for each slot*server pair. |
---|
2558 | - @param renew_secret: This is the secret used to protect bucket refresh |
---|
2559 | + @param renew_secret: This is the secret used to protect lease renewal. |
---|
2560 | This secret is generated by the client and |
---|
2561 | stored for later comparison by the server. Each |
---|
2562 | server is given a different secret. |
---|
2563 | hunk ./src/allmydata/interfaces.py 187 |
---|
2564 | - @param cancel_secret: Like renew_secret, but protects bucket decref. |
---|
2565 | + @param cancel_secret: ignored |
---|
2566 | |
---|
2567 | hunk ./src/allmydata/interfaces.py 189 |
---|
2568 | - The 'secrets' argument is a tuple of (write_enabler, renew_secret, |
---|
2569 | - cancel_secret). The first is required to perform any write. The |
---|
2570 | - latter two are used when allocating new shares. To simply acquire a |
---|
2571 | - new lease on existing shares, use an empty testv and an empty writev. |
---|
2572 | + The 'secrets' argument is a tuple with (write_enabler, renew_secret). |
---|
2573 | + The write_enabler is required to perform any write. The renew_secret |
---|
2574 | + is used when allocating new shares. |
---|
2575 | |
---|
2576 | Each share can have a separate test vector (i.e. a list of |
---|
2577 | comparisons to perform). If all vectors for all shares pass, then all |
---|
2578 | hunk ./src/allmydata/interfaces.py 280 |
---|
2579 | store that on disk. |
---|
2580 | """ |
---|
2581 | |
---|
2582 | -class IStorageBucketWriter(Interface): |
---|
2583 | + |
---|
2584 | +class IStorageBackend(Interface): |
---|
2585 | """ |
---|
2586 | hunk ./src/allmydata/interfaces.py 283 |
---|
2587 | - Objects of this kind live on the client side. |
---|
2588 | + Objects of this kind live on the server side and are used by the |
---|
2589 | + storage server object. |
---|
2590 | """ |
---|
2591 | hunk ./src/allmydata/interfaces.py 286 |
---|
2592 | - def put_block(segmentnum=int, data=ShareData): |
---|
2593 | - """@param data: For most segments, this data will be 'blocksize' |
---|
2594 | - bytes in length. The last segment might be shorter. |
---|
2595 | - @return: a Deferred that fires (with None) when the operation completes |
---|
2596 | + def get_available_space(): |
---|
2597 | + """ |
---|
2598 | + Returns available space for share storage in bytes, or |
---|
2599 | + None if this information is not available or if the available |
---|
2600 | + space is unlimited. |
---|
2601 | + |
---|
2602 | + If the backend is configured for read-only mode then this will |
---|
2603 | + return 0. |
---|
2604 | + """ |
---|
2605 | + |
---|
2606 | + def get_sharesets_for_prefix(prefix): |
---|
2607 | + """ |
---|
2608 | + Generates IShareSet objects for all storage indices matching the |
---|
2609 | + given prefix for which this backend holds shares. |
---|
2610 | + """ |
---|
2611 | + |
---|
2612 | + def get_shareset(storageindex): |
---|
2613 | + """ |
---|
2614 | + Get an IShareSet object for the given storage index. |
---|
2615 | + """ |
---|
2616 | + |
---|
2617 | + def advise_corrupt_share(storageindex, sharetype, shnum, reason): |
---|
2618 | + """ |
---|
2619 | + Clients who discover hash failures in shares that they have |
---|
2620 | + downloaded from me will use this method to inform me about the |
---|
2621 | + failures. I will record their concern so that my operator can |
---|
2622 | + manually inspect the shares in question. |
---|
2623 | + |
---|
2624 | + 'sharetype' is either 'mutable' or 'immutable'. 'shnum' is the integer |
---|
2625 | + share number. 'reason' is a human-readable explanation of the problem, |
---|
2626 | + probably including some expected hash values and the computed ones |
---|
2627 | + that did not match. Corruption advisories for mutable shares should |
---|
2628 | + include a hash of the public key (the same value that appears in the |
---|
2629 | + mutable-file verify-cap), since the current share format does not |
---|
2630 | + store that on disk. |
---|
2631 | + |
---|
2632 | + @param storageindex=str |
---|
2633 | + @param sharetype=str |
---|
2634 | + @param shnum=int |
---|
2635 | + @param reason=str |
---|
2636 | + """ |
---|
2637 | + |
---|
2638 | + |
---|
2639 | +class IShareSet(Interface): |
---|
2640 | + def get_storage_index(): |
---|
2641 | + """ |
---|
2642 | + Returns the storage index for this shareset. |
---|
2643 | + """ |
---|
2644 | + |
---|
2645 | + def get_storage_index_string(): |
---|
2646 | + """ |
---|
2647 | + Returns the base32-encoded storage index for this shareset. |
---|
2648 | + """ |
---|
2649 | + |
---|
2650 | + def get_overhead(): |
---|
2651 | + """ |
---|
2652 | + Returns the storage overhead, in bytes, of this shareset (exclusive |
---|
2653 | + of the space used by its shares). |
---|
2654 | + """ |
---|
2655 | + |
---|
2656 | + def get_shares(): |
---|
2657 | + """ |
---|
2658 | + Generates the IStoredShare objects held in this shareset. |
---|
2659 | + """ |
---|
2660 | + |
---|
2661 | + def has_incoming(shnum): |
---|
2662 | + """ |
---|
2663 | + Returns True if this shareset has an incoming (partial) share with this number, otherwise False. |
---|
2664 | + """ |
---|
2665 | + |
---|
2666 | + def make_bucket_writer(storageserver, shnum, max_space_per_bucket, lease_info, canary): |
---|
2667 | + """ |
---|
2668 | + Create a bucket writer that can be used to write data to a given share. |
---|
2669 | + |
---|
2670 | + @param storageserver=RIStorageServer |
---|
2671 | + @param shnum=int: A share number in this shareset |
---|
2672 | + @param max_space_per_bucket=int: The maximum space allocated for the |
---|
2673 | + share, in bytes |
---|
2674 | + @param lease_info=LeaseInfo: The initial lease information |
---|
2675 | + @param canary=Referenceable: If the canary is lost before close(), the |
---|
2676 | + bucket is deleted. |
---|
2677 | + @return an IStorageBucketWriter for the given share |
---|
2678 | + """ |
---|
2679 | + |
---|
2680 | + def make_bucket_reader(storageserver, share): |
---|
2681 | + """ |
---|
2682 | + Create a bucket reader that can be used to read data from a given share. |
---|
2683 | + |
---|
2684 | + @param storageserver=RIStorageServer |
---|
2685 | + @param share=IStoredShare |
---|
2686 | + @return an IStorageBucketReader for the given share |
---|
2687 | + """ |
---|
2688 | + |
---|
2689 | + def readv(wanted_shnums, read_vector): |
---|
2690 | + """ |
---|
2691 | + Read a vector from the numbered shares in this shareset. An empty |
---|
2692 | + wanted_shnums list means to return data from all known shares. |
---|
2693 | + |
---|
2694 | + @param wanted_shnums=ListOf(int) |
---|
2695 | + @param read_vector=ReadVector |
---|
2696 | + @return DictOf(int, ReadData): shnum -> results, with one key per share |
---|
2697 | + """ |
---|
2698 | + |
---|
2699 | + def testv_and_readv_and_writev(storageserver, secrets, test_and_write_vectors, read_vector, expiration_time): |
---|
2700 | + """ |
---|
2701 | + General-purpose atomic test-read-and-set operation for mutable slots. |
---|
2702 | + Perform a bunch of comparisons against the existing shares in this |
---|
2703 | + shareset. If they all pass: use the read vectors to extract data from |
---|
2704 | + all the shares, then apply a bunch of write vectors to those shares. |
---|
2705 | + Return the read data, which does not include any modifications made by |
---|
2706 | + the writes. |
---|
2707 | + |
---|
2708 | + See the similar method in RIStorageServer for more detail. |
---|
2709 | + |
---|
2710 | + @param storageserver=RIStorageServer |
---|
2711 | + @param secrets=TupleOf(WriteEnablerSecret, LeaseRenewSecret[, ...]) |
---|
2712 | + @param test_and_write_vectors=TestAndWriteVectorsForShares |
---|
2713 | + @param read_vector=ReadVector |
---|
2714 | + @param expiration_time=int |
---|
2715 | + @return TupleOf(bool, DictOf(int, ReadData)) |
---|
2716 | + """ |
---|
2717 | + |
---|
2718 | + def add_or_renew_lease(lease_info): |
---|
2719 | + """ |
---|
2720 | + Add a new lease on the shares in this shareset. If the renew_secret |
---|
2721 | + matches an existing lease, that lease will be renewed instead. If |
---|
2722 | + there are no shares in this shareset, return silently. |
---|
2723 | + |
---|
2724 | + @param lease_info=LeaseInfo |
---|
2725 | + """ |
---|
2726 | + |
---|
2727 | + def renew_lease(renew_secret, new_expiration_time): |
---|
2728 | + """ |
---|
2729 | + Renew a lease on the shares in this shareset, resetting the timer |
---|
2730 | + to 31 days. Some grids will use this, some will not. If there are no |
---|
2731 | + shares in this shareset, IndexError will be raised. |
---|
2732 | + |
---|
2733 | + For mutable shares, if the given renew_secret does not match an |
---|
2734 | + existing lease, IndexError will be raised with a note listing the |
---|
2735 | + server-nodeids on the existing leases, so leases on migrated shares |
---|
2736 | + can be renewed. For immutable shares, IndexError (without the note) |
---|
2737 | + will be raised. |
---|
2738 | + |
---|
2739 | + @param renew_secret=LeaseRenewSecret |
---|
2740 | + """ |
---|
2741 | + |
---|
2742 | + |
---|
2743 | +class IStoredShare(Interface): |
---|
2744 | + """ |
---|
2745 | + This object can contain as much as all of the share data, but it is |
---|
2746 | + intended for lazy evaluation, such that in many use cases substantially |
---|
2747 | + less than all of the share data will be accessed. |
---|
2748 | + """ |
---|
2749 | + def close(): |
---|
2750 | + """ |
---|
2751 | + Complete writing to this share. |
---|
2752 | + """ |
---|
2753 | + |
---|
2754 | + def get_storage_index(): |
---|
2755 | + """ |
---|
2756 | + Returns the storage index. |
---|
2757 | + """ |
---|
2758 | + |
---|
2759 | + def get_shnum(): |
---|
2760 | + """ |
---|
2761 | + Returns the share number. |
---|
2762 | + """ |
---|
2763 | + |
---|
2764 | + def get_data_length(): |
---|
2765 | + """ |
---|
2766 | + Returns the data length in bytes. |
---|
2767 | + """ |
---|
2768 | + |
---|
2769 | + def get_size(): |
---|
2770 | + """ |
---|
2771 | + Returns the size of the share in bytes. |
---|
2772 | + """ |
---|
2773 | + |
---|
2774 | + def get_used_space(): |
---|
2775 | + """ |
---|
2776 | + Returns the amount of backend storage including overhead, in bytes, used |
---|
2777 | + by this share. |
---|
2778 | + """ |
---|
2779 | + |
---|
2780 | + def unlink(): |
---|
2781 | + """ |
---|
2782 | + Signal that this share can be removed from the backend storage. This does |
---|
2783 | + not guarantee that the share data will be immediately inaccessible, or |
---|
2784 | + that it will be securely erased. |
---|
2785 | + """ |
---|
2786 | + |
---|
2787 | + def readv(read_vector): |
---|
2788 | + """ |
---|
2789 | + Read a vector of (offset, length) ranges from this share, returning a list of the corresponding data strings. |
---|
2790 | + """ |
---|
2791 | + |
---|
2792 | + |
---|
2793 | +class IStoredMutableShare(IStoredShare): |
---|
2794 | + def check_write_enabler(write_enabler, si_s): |
---|
2795 | + """ |
---|
2796 | + Raise BadWriteEnablerError if write_enabler does not match this share's write enabler; si_s (the base32 storage index) is used in the error message. |
---|
2797 | """ |
---|
2798 | |
---|
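To make the division of labour between IStorageBackend and IShareSet concrete, here is a minimal in-memory sketch of the two shapes (illustrative only; the real disk and S3 backends live under src/allmydata/storage/backends/ and implement the full interfaces, including bucket writers and leases):

    class MemoryShareSet(object):
        """Sketch of the IShareSet shape for one storage index."""
        def __init__(self, storageindex):
            self._storageindex = storageindex
            self._shares = {}        # shnum -> share object
            self._incoming = set()   # shnums with partial uploads

        def get_storage_index(self):
            return self._storageindex

        def get_shares(self):
            for shnum in sorted(self._shares):
                yield self._shares[shnum]

        def has_incoming(self, shnum):
            return shnum in self._incoming

    class MemoryBackend(object):
        """Sketch of the IStorageBackend shape."""
        def __init__(self):
            self._sharesets = {}     # storageindex -> MemoryShareSet

        def get_available_space(self):
            return None              # unknown/unlimited

        def get_shareset(self, storageindex):
            # created lazily, matching "created or increfed" above
            if storageindex not in self._sharesets:
                self._sharesets[storageindex] = MemoryShareSet(storageindex)
            return self._sharesets[storageindex]

    backend = MemoryBackend()
    assert not backend.get_shareset('si1').has_incoming(0)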
2799 | hunk ./src/allmydata/interfaces.py 489 |
---|
2800 | - def put_plaintext_hashes(hashes=ListOf(Hash)): |
---|
2801 | + def check_testv(test_vector): |
---|
2802 | + """ |
---|
2803 | + Return True if this share's data passes the given test vector, otherwise False. |
---|
2804 | + """ |
---|
2805 | + |
---|
2806 | + def writev(datav, new_length): |
---|
2807 | + """ |
---|
2808 | + Apply the given write vector of (offset, data) pairs to this share, then truncate the data to new_length if new_length is not None. |
---|
2809 | + """ |
---|
2810 | + |
---|
2811 | + |
---|
2812 | +class IStorageBucketWriter(Interface): |
---|
2813 | + """ |
---|
2814 | + Objects of this kind live on the client side. |
---|
2815 | + """ |
---|
2816 | + def put_block(segmentnum, data): |
---|
2817 | """ |
---|
2818 | hunk ./src/allmydata/interfaces.py 506 |
---|
2819 | + @param segmentnum=int |
---|
2820 | + @param data=ShareData: For most segments, this data will be 'blocksize' |
---|
2821 | + bytes in length. The last segment might be shorter. |
---|
2822 | @return: a Deferred that fires (with None) when the operation completes |
---|
2823 | """ |
---|
2824 | |
---|
2825 | hunk ./src/allmydata/interfaces.py 512 |
---|
2826 | - def put_crypttext_hashes(hashes=ListOf(Hash)): |
---|
2827 | + def put_crypttext_hashes(hashes): |
---|
2828 | """ |
---|
2829 | hunk ./src/allmydata/interfaces.py 514 |
---|
2830 | + @param hashes=ListOf(Hash) |
---|
2831 | @return: a Deferred that fires (with None) when the operation completes |
---|
2832 | """ |
---|
2833 | |
---|
2834 | hunk ./src/allmydata/interfaces.py 518 |
---|
2835 | - def put_block_hashes(blockhashes=ListOf(Hash)): |
---|
2836 | + def put_block_hashes(blockhashes): |
---|
2837 | """ |
---|
2838 | hunk ./src/allmydata/interfaces.py 520 |
---|
2839 | + @param blockhashes=ListOf(Hash) |
---|
2840 | @return: a Deferred that fires (with None) when the operation completes |
---|
2841 | """ |
---|
2842 | |
---|
2843 | hunk ./src/allmydata/interfaces.py 524 |
---|
2844 | - def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash))): |
---|
2845 | + def put_share_hashes(sharehashes): |
---|
2846 | """ |
---|
2847 | hunk ./src/allmydata/interfaces.py 526 |
---|
2848 | + @param sharehashes=ListOf(TupleOf(int, Hash)) |
---|
2849 | @return: a Deferred that fires (with None) when the operation completes |
---|
2850 | """ |
---|
2851 | |
---|
2852 | hunk ./src/allmydata/interfaces.py 530 |
---|
2853 | - def put_uri_extension(data=URIExtensionData): |
---|
2854 | + def put_uri_extension(data): |
---|
2855 | """This block of data contains integrity-checking information (hashes |
---|
2856 | of plaintext, crypttext, and shares), as well as encoding parameters |
---|
2857 | that are necessary to recover the data. This is a serialized dict |
---|
2858 | hunk ./src/allmydata/interfaces.py 535 |
---|
2859 | mapping strings to other strings. The hash of this data is kept in |
---|
2860 | - the URI and verified before any of the data is used. All buckets for |
---|
2861 | - a given file contain identical copies of this data. |
---|
2862 | + the URI and verified before any of the data is used. All share |
---|
2863 | + containers for a given file contain identical copies of this data. |
---|
2864 | |
---|
2865 | The serialization format is specified with the following pseudocode: |
---|
2866 | for k in sorted(dict.keys()): |
---|
2867 | hunk ./src/allmydata/interfaces.py 543 |
---|
2868 | assert re.match(r'^[a-zA-Z_\-]+$', k) |
---|
2869 | write(k + ':' + netstring(dict[k])) |
---|
2870 | |
---|
2871 | + @param data=URIExtensionData |
---|
2872 | @return: a Deferred that fires (with None) when the operation completes |
---|
2873 | """ |
---|
2874 | |
---|
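The serialization pseudocode above can be made concrete. Assuming netstring() is the usual length-prefixed encoding (as in allmydata.util.netstring), a worked example:

    import re

    def netstring(s):
        return "%d:%s," % (len(s), s)

    def serialize_uri_extension(d):
        out = []
        for k in sorted(d.keys()):
            assert re.match(r'^[a-zA-Z_\-]+$', k)
            out.append(k + ':' + netstring(d[k]))
        return ''.join(out)

    # 'codec_name' sorts before 'size', so this prints:
    #   codec_name:3:crs,size:3:100,
    print serialize_uri_extension({'codec_name': 'crs', 'size': '100'})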
2875 | hunk ./src/allmydata/interfaces.py 558 |
---|
2876 | |
---|
2877 | class IStorageBucketReader(Interface): |
---|
2878 | |
---|
2879 | - def get_block_data(blocknum=int, blocksize=int, size=int): |
---|
2880 | + def get_block_data(blocknum, blocksize, size): |
---|
2881 | """Most blocks will be the same size. The last block might be shorter |
---|
2882 | than the others. |
---|
2883 | |
---|
2884 | hunk ./src/allmydata/interfaces.py 562 |
---|
2885 | + @param blocknum=int |
---|
2886 | + @param blocksize=int |
---|
2887 | + @param size=int |
---|
2888 | @return: ShareData |
---|
2889 | """ |
---|
2890 | |
---|
2891 | hunk ./src/allmydata/interfaces.py 573 |
---|
2892 | @return: ListOf(Hash) |
---|
2893 | """ |
---|
2894 | |
---|
2895 | - def get_block_hashes(at_least_these=SetOf(int)): |
---|
2896 | + def get_block_hashes(at_least_these=()): |
---|
2897 | """ |
---|
2898 | hunk ./src/allmydata/interfaces.py 575 |
---|
2899 | + @param at_least_these=SetOf(int) |
---|
2900 | @return: ListOf(Hash) |
---|
2901 | """ |
---|
2902 | |
---|
2903 | hunk ./src/allmydata/interfaces.py 579 |
---|
2904 | - def get_share_hashes(at_least_these=SetOf(int)): |
---|
2905 | + def get_share_hashes(): |
---|
2906 | """ |
---|
2907 | @return: ListOf(TupleOf(int, Hash)) |
---|
2908 | """ |
---|
2909 | hunk ./src/allmydata/interfaces.py 611 |
---|
2910 | @return: unicode nickname, or None |
---|
2911 | """ |
---|
2912 | |
---|
2913 | - # methods moved from IntroducerClient, need review |
---|
2914 | - def get_all_connections(): |
---|
2915 | - """Return a frozenset of (nodeid, service_name, rref) tuples, one for |
---|
2916 | - each active connection we've established to a remote service. This is |
---|
2917 | - mostly useful for unit tests that need to wait until a certain number |
---|
2918 | - of connections have been made.""" |
---|
2919 | - |
---|
2920 | - def get_all_connectors(): |
---|
2921 | - """Return a dict that maps from (nodeid, service_name) to a |
---|
2922 | - RemoteServiceConnector instance for all services that we are actively |
---|
2923 | - trying to connect to. Each RemoteServiceConnector has the following |
---|
2924 | - public attributes:: |
---|
2925 | - |
---|
2926 | - service_name: the type of service provided, like 'storage' |
---|
2927 | - announcement_time: when we first heard about this service |
---|
2928 | - last_connect_time: when we last established a connection |
---|
2929 | - last_loss_time: when we last lost a connection |
---|
2930 | - |
---|
2931 | - version: the peer's version, from the most recent connection |
---|
2932 | - oldest_supported: the peer's oldest supported version, same |
---|
2933 | - |
---|
2934 | - rref: the RemoteReference, if connected, otherwise None |
---|
2935 | - remote_host: the IAddress, if connected, otherwise None |
---|
2936 | - |
---|
2937 | - This method is intended for monitoring interfaces, such as a web page |
---|
2938 | - that describes connecting and connected peers. |
---|
2939 | - """ |
---|
2940 | - |
---|
2941 | - def get_all_peerids(): |
---|
2942 | - """Return a frozenset of all peerids to whom we have a connection (to |
---|
2943 | - one or more services) established. Mostly useful for unit tests.""" |
---|
2944 | - |
---|
2945 | - def get_all_connections_for(service_name): |
---|
2946 | - """Return a frozenset of (nodeid, service_name, rref) tuples, one |
---|
2947 | - for each active connection that provides the given SERVICE_NAME.""" |
---|
2948 | - |
---|
2949 | - def get_permuted_peers(service_name, key): |
---|
2950 | - """Returns an ordered list of (peerid, rref) tuples, selecting from |
---|
2951 | - the connections that provide SERVICE_NAME, using a hash-based |
---|
2952 | - permutation keyed by KEY. This randomizes the service list in a |
---|
2953 | - repeatable way, to distribute load over many peers. |
---|
2954 | - """ |
---|
2955 | - |
---|
2956 | |
---|
2957 | class IMutableSlotWriter(Interface): |
---|
2958 | """ |
---|
2959 | hunk ./src/allmydata/interfaces.py 616 |
---|
2960 | The interface for a writer around a mutable slot on a remote server. |
---|
2961 | """ |
---|
2962 | - def set_checkstring(checkstring, *args): |
---|
2963 | + def set_checkstring(seqnum_or_checkstring, root_hash=None, salt=None): |
---|
2964 | """ |
---|
2965 | Set the checkstring that I will pass to the remote server when |
---|
2966 | writing. |
---|
2967 | hunk ./src/allmydata/interfaces.py 640 |
---|
2968 | Add a block and salt to the share. |
---|
2969 | """ |
---|
2970 | |
---|
2971 | - def put_encprivey(encprivkey): |
---|
2972 | + def put_encprivkey(encprivkey): |
---|
2973 | """ |
---|
2974 | Add the encrypted private key to the share. |
---|
2975 | """ |
---|
2976 | hunk ./src/allmydata/interfaces.py 645 |
---|
2977 | |
---|
2978 | - def put_blockhashes(blockhashes=list): |
---|
2979 | + def put_blockhashes(blockhashes): |
---|
2980 | """ |
---|
2981 | hunk ./src/allmydata/interfaces.py 647 |
---|
2982 | + @param blockhashes=list |
---|
2983 | Add the block hash tree to the share. |
---|
2984 | """ |
---|
2985 | |
---|
2986 | hunk ./src/allmydata/interfaces.py 651 |
---|
2987 | - def put_sharehashes(sharehashes=dict): |
---|
2988 | + def put_sharehashes(sharehashes): |
---|
2989 | """ |
---|
2990 | hunk ./src/allmydata/interfaces.py 653 |
---|
2991 | + @param sharehashes=dict |
---|
2992 | Add the share hash chain to the share. |
---|
2993 | """ |
---|
2994 | |
---|
2995 | hunk ./src/allmydata/interfaces.py 739 |
---|
2996 | def get_extension_params(): |
---|
2997 | """Return the extension parameters in the URI""" |
---|
2998 | |
---|
2999 | - def set_extension_params(): |
---|
3000 | + def set_extension_params(params): |
---|
3001 | """Set the extension parameters that should be in the URI""" |
---|
3002 | |
---|
3003 | class IDirectoryURI(Interface): |
---|
3004 | hunk ./src/allmydata/interfaces.py 879 |
---|
3005 | writer-visible data using this writekey. |
---|
3006 | """ |
---|
3007 | |
---|
3008 | - # TODO: Can this be overwrite instead of replace? |
---|
3009 | - def replace(new_contents): |
---|
3010 | - """Replace the contents of the mutable file, provided that no other |
---|
3011 | + def overwrite(new_contents): |
---|
3012 | + """Overwrite the contents of the mutable file, provided that no other |
---|
3013 | node has published (or is attempting to publish, concurrently) a |
---|
3014 | newer version of the file than this one. |
---|
3015 | |
---|
3016 | hunk ./src/allmydata/interfaces.py 1346 |
---|
3017 | is empty, the metadata will be an empty dictionary. |
---|
3018 | """ |
---|
3019 | |
---|
3020 | - def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True): |
---|
3021 | + def set_uri(name, writecap, readcap, metadata=None, overwrite=True): |
---|
3022 | """I add a child (by writecap+readcap) at the specific name. I return |
---|
3023 | a Deferred that fires when the operation finishes. If overwrite= is |
---|
3024 | True, I will replace any existing child of the same name, otherwise |
---|
3025 | hunk ./src/allmydata/interfaces.py 1745 |
---|
3026 | Block Hash, and the encoding parameters, both of which must be included |
---|
3027 | in the URI. |
---|
3028 | |
---|
3029 | - I do not choose shareholders, that is left to the IUploader. I must be |
---|
3030 | - given a dict of RemoteReferences to storage buckets that are ready and |
---|
3031 | - willing to receive data. |
---|
3032 | + I do not choose shareholders, that is left to the IUploader. |
---|
3033 | """ |
---|
3034 | |
---|
3035 | def set_size(size): |
---|
3036 | hunk ./src/allmydata/interfaces.py 1752 |
---|
3037 | """Specify the number of bytes that will be encoded. This must be |
---|
3038 | performed before get_serialized_params() can be called. |
---|
3039 | """ |
---|
3040 | + |
---|
3041 | def set_params(params): |
---|
3042 | """Override the default encoding parameters. 'params' is a tuple of |
---|
3043 | (k,d,n), where 'k' is the number of required shares, 'd' is the |
---|
3044 | hunk ./src/allmydata/interfaces.py 1848 |
---|
3045 | download, validate, decode, and decrypt data from them, writing the |
---|
3046 | results to an output file. |
---|
3047 | |
---|
3048 | - I do not locate the shareholders, that is left to the IDownloader. I must |
---|
3049 | - be given a dict of RemoteReferences to storage buckets that are ready to |
---|
3050 | - send data. |
---|
3051 | + I do not locate the shareholders, that is left to the IDownloader. |
---|
3052 | """ |
---|
3053 | |
---|
3054 | def setup(outfile): |
---|
3055 | hunk ./src/allmydata/interfaces.py 1950 |
---|
3056 | resuming an interrupted upload (where we need to compute the |
---|
3057 | plaintext hashes, but don't need the redundant encrypted data).""" |
---|
3058 | |
---|
3059 | - def get_plaintext_hashtree_leaves(first, last, num_segments): |
---|
3060 | - """OBSOLETE; Get the leaf nodes of a merkle hash tree over the |
---|
3061 | - plaintext segments, i.e. get the tagged hashes of the given segments. |
---|
3062 | - The segment size is expected to be generated by the |
---|
3063 | - IEncryptedUploadable before any plaintext is read or ciphertext |
---|
3064 | - produced, so that the segment hashes can be generated with only a |
---|
3065 | - single pass. |
---|
3066 | - |
---|
3067 | - This returns a Deferred that fires with a sequence of hashes, using: |
---|
3068 | - |
---|
3069 | - tuple(segment_hashes[first:last]) |
---|
3070 | - |
---|
3071 | - 'num_segments' is used to assert that the number of segments that the |
---|
3072 | - IEncryptedUploadable handled matches the number of segments that the |
---|
3073 | - encoder was expecting. |
---|
3074 | - |
---|
3075 | - This method must not be called until the final byte has been read |
---|
3076 | - from read_encrypted(). Once this method is called, read_encrypted() |
---|
3077 | - can never be called again. |
---|
3078 | - """ |
---|
3079 | - |
---|
3080 | - def get_plaintext_hash(): |
---|
3081 | - """OBSOLETE; Get the hash of the whole plaintext. |
---|
3082 | - |
---|
3083 | - This returns a Deferred that fires with a tagged SHA-256 hash of the |
---|
3084 | - whole plaintext, obtained from hashutil.plaintext_hash(data). |
---|
3085 | - """ |
---|
3086 | - |
---|
3087 | def close(): |
---|
3088 | """Just like IUploadable.close().""" |
---|
3089 | |
---|
3090 | hunk ./src/allmydata/interfaces.py 2144 |
---|
3091 | returns a Deferred that fires with an IUploadResults instance, from |
---|
3092 | which the URI of the file can be obtained as results.uri .""" |
---|
3093 | |
---|
3094 | - def upload_ssk(write_capability, new_version, uploadable): |
---|
3095 | - """TODO: how should this work?""" |
---|
3096 | - |
---|
3097 | class ICheckable(Interface): |
---|
3098 | def check(monitor, verify=False, add_lease=False): |
---|
3099 | """Check up on my health, optionally repairing any problems. |
---|
3100 | hunk ./src/allmydata/interfaces.py 2505 |
---|
3101 | |
---|
3102 | class IRepairResults(Interface): |
---|
3103 | """I contain the results of a repair operation.""" |
---|
3104 | - def get_successful(self): |
---|
3105 | + def get_successful(): |
---|
3106 | """Returns a boolean: True if the repair made the file healthy, False |
---|
3107 | if not. Repair failure generally indicates a file that has been |
---|
3108 | damaged beyond repair.""" |
---|
3109 | hunk ./src/allmydata/interfaces.py 2577 |
---|
3110 | Tahoe process will typically have a single NodeMaker, but unit tests may |
---|
3111 | create simplified/mocked forms for testing purposes. |
---|
3112 | """ |
---|
3113 | - def create_from_cap(writecap, readcap=None, **kwargs): |
---|
3114 | + def create_from_cap(writecap, readcap=None, deep_immutable=False, name=u"<unknown name>"): |
---|
3115 | """I create an IFilesystemNode from the given writecap/readcap. I can |
---|
3116 | only provide nodes for existing file/directory objects: use my other |
---|
3117 | methods to create new objects. I return synchronously.""" |
---|
3118 | hunk ./src/allmydata/monitor.py 30 |
---|
3119 | |
---|
3120 | # the following methods are provided for the operation code |
---|
3121 | |
---|
3122 | - def is_cancelled(self): |
---|
3123 | + def is_cancelled(): |
---|
3124 | """Returns True if the operation has been cancelled. If True, |
---|
3125 | operation code should stop creating new work, and attempt to stop any |
---|
3126 | work already in progress.""" |
---|
3127 | hunk ./src/allmydata/monitor.py 35 |
---|
3128 | |
---|
3129 | - def raise_if_cancelled(self): |
---|
3130 | + def raise_if_cancelled(): |
---|
3131 | """Raise OperationCancelledError if the operation has been cancelled. |
---|
3132 | Operation code that has a robust error-handling path can simply call |
---|
3133 | this periodically.""" |
---|
3134 | hunk ./src/allmydata/monitor.py 40 |
---|
3135 | |
---|
3136 | - def set_status(self, status): |
---|
3137 | + def set_status(status): |
---|
3138 | """Sets the Monitor's 'status' object to an arbitrary value. |
---|
3139 | Different operations will store different sorts of status information |
---|
3140 | here. Operation code should use get+modify+set sequences to update |
---|
3141 | hunk ./src/allmydata/monitor.py 46 |
---|
3142 | this.""" |
---|
3143 | |
---|
3144 | - def get_status(self): |
---|
3145 | + def get_status(): |
---|
3146 | """Return the status object. If the operation failed, this will be a |
---|
3147 | Failure instance.""" |
---|
3148 | |
---|
3149 | hunk ./src/allmydata/monitor.py 50 |
---|
3150 | - def finish(self, status): |
---|
3151 | + def finish(status): |
---|
3152 | """Call this when the operation is done, successful or not. The |
---|
3153 | Monitor's lifetime is influenced by the completion of the operation |
---|
3154 | it is monitoring. The Monitor's 'status' value will be set with the |
---|
3155 | hunk ./src/allmydata/monitor.py 63 |
---|
3156 | |
---|
3157 | # the following methods are provided for the initiator of the operation |
---|
3158 | |
---|
3159 | - def is_finished(self): |
---|
3160 | + def is_finished(): |
---|
3161 | """Return a boolean, True if the operation is done (whether |
---|
3162 | successful or failed), False if it is still running.""" |
---|
3163 | |
---|
3164 | hunk ./src/allmydata/monitor.py 67 |
---|
3165 | - def when_done(self): |
---|
3166 | + def when_done(): |
---|
3167 | """Return a Deferred that fires when the operation is complete. It |
---|
3168 | will fire with the operation status, the same value as returned by |
---|
3169 | get_status().""" |
---|
3170 | hunk ./src/allmydata/monitor.py 72 |
---|
3171 | |
---|
3172 | - def cancel(self): |
---|
3173 | + def cancel(): |
---|
3174 | """Cancel the operation as soon as possible. is_cancelled() will |
---|
3175 | start returning True after this is called.""" |
---|
3176 | |
---|
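The monitor.py hunks above drop self from the method declarations because zope.interface Interface methods are declared with the signature a caller sees; self belongs only in the implementing class. A small sketch of the convention:

    from zope.interface import Interface, implements

    class IMonitor(Interface):
        def is_cancelled():            # declared without 'self'
            """Return True if the operation has been cancelled."""

    class Monitor(object):
        implements(IMonitor)           # (pre-@implementer style, as in this era)
        def __init__(self):
            self._cancelled = False
        def is_cancelled(self):        # 'self' reappears in the implementation
            return self._cancelled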
3177 | hunk ./src/allmydata/mutable/filenode.py 753 |
---|
3178 | self._writekey = writekey |
---|
3179 | self._serializer = defer.succeed(None) |
---|
3180 | |
---|
3181 | - |
---|
3182 | def get_sequence_number(self): |
---|
3183 | """ |
---|
3184 | Get the sequence number of the mutable version that I represent. |
---|
3185 | hunk ./src/allmydata/mutable/filenode.py 759 |
---|
3186 | """ |
---|
3187 | return self._version[0] # verinfo[0] == the sequence number |
---|
3188 | |
---|
3189 | + def get_servermap(self): |
---|
3190 | + return self._servermap |
---|
3191 | |
---|
3192 | hunk ./src/allmydata/mutable/filenode.py 762 |
---|
3193 | - # TODO: Terminology? |
---|
3194 | def get_writekey(self): |
---|
3195 | """ |
---|
3196 | I return a writekey or None if I don't have a writekey. |
---|
3197 | hunk ./src/allmydata/mutable/filenode.py 768 |
---|
3198 | """ |
---|
3199 | return self._writekey |
---|
3200 | |
---|
3201 | - |
---|
3202 | def set_downloader_hints(self, hints): |
---|
3203 | """ |
---|
3204 | I set the downloader hints. |
---|
3205 | hunk ./src/allmydata/mutable/filenode.py 776 |
---|
3206 | |
---|
3207 | self._downloader_hints = hints |
---|
3208 | |
---|
3209 | - |
---|
3210 | def get_downloader_hints(self): |
---|
3211 | """ |
---|
3212 | I return the downloader hints. |
---|
3213 | hunk ./src/allmydata/mutable/filenode.py 782 |
---|
3214 | """ |
---|
3215 | return self._downloader_hints |
---|
3216 | |
---|
3217 | - |
---|
3218 | def overwrite(self, new_contents): |
---|
3219 | """ |
---|
3220 | I overwrite the contents of this mutable file version with the |
---|
3221 | hunk ./src/allmydata/mutable/filenode.py 791 |
---|
3222 | |
---|
3223 | return self._do_serialized(self._overwrite, new_contents) |
---|
3224 | |
---|
3225 | - |
---|
3226 | def _overwrite(self, new_contents): |
---|
3227 | assert IMutableUploadable.providedBy(new_contents) |
---|
3228 | assert self._servermap.last_update_mode == MODE_WRITE |
---|
3229 | hunk ./src/allmydata/mutable/filenode.py 797 |
---|
3230 | |
---|
3231 | return self._upload(new_contents) |
---|
3232 | |
---|
3233 | - |
---|
3234 | def modify(self, modifier, backoffer=None): |
---|
3235 | """I use a modifier callback to apply a change to the mutable file. |
---|
3236 | I implement the following pseudocode:: |
---|
3237 | hunk ./src/allmydata/mutable/filenode.py 841 |
---|
3238 | |
---|
3239 | return self._do_serialized(self._modify, modifier, backoffer) |
---|
3240 | |
---|
3241 | - |
---|
3242 | def _modify(self, modifier, backoffer): |
---|
3243 | if backoffer is None: |
---|
3244 | backoffer = BackoffAgent().delay |
---|
3245 | hunk ./src/allmydata/mutable/filenode.py 846 |
---|
3246 | return self._modify_and_retry(modifier, backoffer, True) |
---|
3247 | |
---|
3248 | - |
---|
3249 | def _modify_and_retry(self, modifier, backoffer, first_time): |
---|
3250 | """ |
---|
3251 | I try to apply modifier to the contents of this version of the |
---|
3252 | hunk ./src/allmydata/mutable/filenode.py 878 |
---|
3253 | d.addErrback(_retry) |
---|
3254 | return d |
---|
3255 | |
---|
3256 | - |
---|
3257 | def _modify_once(self, modifier, first_time): |
---|
3258 | """ |
---|
3259 | I attempt to apply a modifier to the contents of the mutable |
---|
3260 | hunk ./src/allmydata/mutable/filenode.py 913 |
---|
3261 | d.addCallback(_apply) |
---|
3262 | return d |
---|
3263 | |
---|
3264 | - |
---|
3265 | def is_readonly(self): |
---|
3266 | """ |
---|
3267 | I return True if this MutableFileVersion provides no write |
---|
3268 | hunk ./src/allmydata/mutable/filenode.py 921 |
---|
3269 | """ |
---|
3270 | return self._writekey is None |
---|
3271 | |
---|
3272 | - |
---|
3273 | def is_mutable(self): |
---|
3274 | """ |
---|
3275 | I return True, since mutable files are always mutable by |
---|
3276 | hunk ./src/allmydata/mutable/filenode.py 928 |
---|
3277 | """ |
---|
3278 | return True |
---|
3279 | |
---|
3280 | - |
---|
3281 | def get_storage_index(self): |
---|
3282 | """ |
---|
3283 | I return the storage index of the reference that I encapsulate. |
---|
3284 | hunk ./src/allmydata/mutable/filenode.py 934 |
---|
3285 | """ |
---|
3286 | return self._storage_index |
---|
3287 | |
---|
3288 | - |
---|
3289 | def get_size(self): |
---|
3290 | """ |
---|
3291 | I return the length, in bytes, of this readable object. |
---|
3292 | hunk ./src/allmydata/mutable/filenode.py 940 |
---|
3293 | """ |
---|
3294 | return self._servermap.size_of_version(self._version) |
---|
3295 | |
---|
3296 | - |
---|
3297 | def download_to_data(self, fetch_privkey=False): |
---|
3298 | """ |
---|
3299 | I return a Deferred that fires with the contents of this |
---|
3300 | hunk ./src/allmydata/mutable/filenode.py 951 |
---|
3301 | d.addCallback(lambda mc: "".join(mc.chunks)) |
---|
3302 | return d |
---|
3303 | |
---|
3304 | - |
---|
3305 | def _try_to_download_data(self): |
---|
3306 | """ |
---|
3307 | I am an unserialized cousin of download_to_data; I am called |
---|
3308 | hunk ./src/allmydata/mutable/filenode.py 963 |
---|
3309 | d.addCallback(lambda mc: "".join(mc.chunks)) |
---|
3310 | return d |
---|
3311 | |
---|
3312 | - |
---|
3313 | def read(self, consumer, offset=0, size=None, fetch_privkey=False): |
---|
3314 | """ |
---|
3315 | I read a portion (possibly all) of the mutable file that I |
---|
3316 | hunk ./src/allmydata/mutable/filenode.py 971 |
---|
3317 | return self._do_serialized(self._read, consumer, offset, size, |
---|
3318 | fetch_privkey) |
---|
3319 | |
---|
3320 | - |
---|
3321 | def _read(self, consumer, offset=0, size=None, fetch_privkey=False): |
---|
3322 | """ |
---|
3323 | I am the serialized companion of read. |
---|
3324 | hunk ./src/allmydata/mutable/filenode.py 981 |
---|
3325 | d = r.download(consumer, offset, size) |
---|
3326 | return d |
---|
3327 | |
---|
3328 | - |
---|
3329 | def _do_serialized(self, cb, *args, **kwargs): |
---|
3330 | # note: to avoid deadlock, this callable is *not* allowed to invoke |
---|
3331 | # other serialized methods within this (or any other) |
---|
3332 | hunk ./src/allmydata/mutable/filenode.py 999 |
---|
3333 | self._serializer.addErrback(log.err) |
---|
3334 | return d |
---|
3335 | |
---|
3336 | - |
---|
3337 | def _upload(self, new_contents): |
---|
3338 | #assert self._pubkey, "update_servermap must be called before publish" |
---|
3339 | p = Publish(self._node, self._storage_broker, self._servermap) |
---|
3340 | hunk ./src/allmydata/mutable/filenode.py 1009 |
---|
3341 | d.addCallback(self._did_upload, new_contents.get_size()) |
---|
3342 | return d |
---|
3343 | |
---|
3344 | - |
---|
3345 | def _did_upload(self, res, size): |
---|
3346 | self._most_recent_size = size |
---|
3347 | return res |
---|
3348 | hunk ./src/allmydata/mutable/filenode.py 1029 |
---|
3349 | """ |
---|
3350 | return self._do_serialized(self._update, data, offset) |
---|
3351 | |
---|
3352 | - |
---|
3353 | def _update(self, data, offset): |
---|
3354 | """ |
---|
3355 | I update the mutable file version represented by this particular |
---|
3356 | hunk ./src/allmydata/mutable/filenode.py 1058 |
---|
3357 | d.addCallback(self._build_uploadable_and_finish, data, offset) |
---|
3358 | return d |
---|
3359 | |
---|
3360 | - |
---|
3361 | def _do_modify_update(self, data, offset): |
---|
3362 | """ |
---|
3363 | I perform a file update by modifying the contents of the file |
---|
3364 | hunk ./src/allmydata/mutable/filenode.py 1073 |
---|
3365 | return new |
---|
3366 | return self._modify(m, None) |
---|
3367 | |
---|
3368 | - |
---|
3369 | def _do_update_update(self, data, offset): |
---|
3370 | """ |
---|
3371 | I start the Servermap update that gets us the data we need to |
---|
3372 | hunk ./src/allmydata/mutable/filenode.py 1108 |
---|
3373 | return self._update_servermap(update_range=(start_segment, |
---|
3374 | end_segment)) |
---|
3375 | |
---|
3376 | - |
---|
3377 | def _decode_and_decrypt_segments(self, ignored, data, offset): |
---|
3378 | """ |
---|
3379 | After the servermap update, I take the encrypted and encoded |
---|
3380 | hunk ./src/allmydata/mutable/filenode.py 1148 |
---|
3381 | d3 = defer.succeed(blockhashes) |
---|
3382 | return deferredutil.gatherResults([d1, d2, d3]) |
---|
3383 | |
---|
3384 | - |
---|
3385 | def _build_uploadable_and_finish(self, segments_and_bht, data, offset): |
---|
3386 | """ |
---|
3387 | After the process has the plaintext segments, I build the |
---|
3388 | hunk ./src/allmydata/mutable/filenode.py 1163 |
---|
3389 | p = Publish(self._node, self._storage_broker, self._servermap) |
---|
3390 | return p.update(u, offset, segments_and_bht[2], self._version) |
---|
3391 | |
---|
3392 | - |
---|
3393 | def _update_servermap(self, mode=MODE_WRITE, update_range=None): |
---|
3394 | """ |
---|
3395 | I update the servermap. I return a Deferred that fires when the |
---|
3396 | hunk ./src/allmydata/storage/common.py 1 |
---|
3397 | - |
---|
3398 | -import os.path |
---|
3399 | from allmydata.util import base32 |
---|
3400 | |
---|
3401 | class DataTooLargeError(Exception): |
---|
3402 | hunk ./src/allmydata/storage/common.py 5 |
---|
3403 | pass |
---|
3404 | + |
---|
3405 | class UnknownMutableContainerVersionError(Exception): |
---|
3406 | pass |
---|
3407 | hunk ./src/allmydata/storage/common.py 8 |
---|
3408 | + |
---|
3409 | class UnknownImmutableContainerVersionError(Exception): |
---|
3410 | pass |
---|
3411 | |
---|
3412 | hunk ./src/allmydata/storage/common.py 18 |
---|
3413 | |
---|
3414 | def si_a2b(ascii_storageindex): |
---|
3415 | return base32.a2b(ascii_storageindex) |
---|
3416 | - |
---|
3417 | -def storage_index_to_dir(storageindex): |
---|
3418 | - sia = si_b2a(storageindex) |
---|
3419 | - return os.path.join(sia[:2], sia) |
---|
3420 | hunk ./src/allmydata/storage/crawler.py 2 |
---|
3421 | |
---|
3422 | -import os, time, struct |
---|
3423 | +import time, struct |
---|
3424 | import cPickle as pickle |
---|
3425 | from twisted.internet import reactor |
---|
3426 | from twisted.application import service |
---|
3427 | hunk ./src/allmydata/storage/crawler.py 6 |
---|
3428 | + |
---|
3429 | +from allmydata.util.assertutil import precondition |
---|
3430 | +from allmydata.interfaces import IStorageBackend |
---|
3431 | from allmydata.storage.common import si_b2a |
---|
3432 | hunk ./src/allmydata/storage/crawler.py 10 |
---|
3433 | -from allmydata.util import fileutil |
---|
3434 | + |
---|
3435 | |
---|
3436 | class TimeSliceExceeded(Exception): |
---|
3437 | pass |
---|
3438 | hunk ./src/allmydata/storage/crawler.py 15 |
---|
3439 | |
---|
3440 | + |
---|
3441 | class ShareCrawler(service.MultiService): |
---|
3442 | hunk ./src/allmydata/storage/crawler.py 17 |
---|
3443 | - """A ShareCrawler subclass is attached to a StorageServer, and |
---|
3444 | - periodically walks all of its shares, processing each one in some |
---|
3445 | - fashion. This crawl is rate-limited, to reduce the IO burden on the host, |
---|
3446 | - since large servers can easily have a terabyte of shares, in several |
---|
3447 | - million files, which can take hours or days to read. |
---|
3448 | + """ |
---|
3449 | + An instance of a subclass of ShareCrawler is attached to a storage |
---|
3450 | + backend, and periodically walks the backend's shares, processing them |
---|
3451 | + in some fashion. This crawl is rate-limited to reduce the I/O burden on |
---|
3452 | + the host, since large servers can easily have a terabyte of shares in |
---|
3453 | + several million files, which can take hours or days to read. |
---|
3454 | |
---|
3455 | Once the crawler starts a cycle, it will proceed at a rate limited by the |
---|
3456 | allowed_cpu_percentage= and cpu_slice= parameters: yielding the reactor |
---|
3457 | hunk ./src/allmydata/storage/crawler.py 33 |
---|
3458 | long enough to ensure that 'minimum_cycle_time' elapses between the start |
---|
3459 | of two consecutive cycles. |
---|
3460 | |
---|
3461 | - We assume that the normal upload/download/get_buckets traffic of a tahoe |
---|
3462 | + We assume that the normal upload/download/DYHB traffic of a Tahoe-LAFS |
---|
3463 | grid will cause the prefixdir contents to be mostly cached in the kernel, |
---|
3464 | hunk ./src/allmydata/storage/crawler.py 35 |
---|
3465 | - or that the number of buckets in each prefixdir will be small enough to |
---|
3466 | - load quickly. A 1TB allmydata.com server was measured to have 2.56M |
---|
3467 | - buckets, spread into the 1024 prefixdirs, with about 2500 buckets per |
---|
3468 | + or that the number of sharesets in each prefixdir will be small enough to |
---|
3469 | + load quickly. A 1TB allmydata.com server was measured to have 2.56 million |
---|
3470 | + sharesets, spread into the 1024 prefixdirs, with about 2500 sharesets per |
---|
3471 | prefix. On this server, each prefixdir took 130ms-200ms to list the first |
---|
3472 | time, and 17ms to list the second time. |
---|
3473 | |
---|
3474 | hunk ./src/allmydata/storage/crawler.py 41 |
---|
3475 | - To use a crawler, create a subclass which implements the process_bucket() |
---|
3476 | - method. It will be called with a prefixdir and a base32 storage index |
---|
3477 | - string. process_bucket() must run synchronously. Any keys added to |
---|
3478 | - self.state will be preserved. Override add_initial_state() to set up |
---|
3479 | - initial state keys. Override finished_cycle() to perform additional |
---|
3480 | - processing when the cycle is complete. Any status that the crawler |
---|
3481 | - produces should be put in the self.state dictionary. Status renderers |
---|
3482 | - (like a web page which describes the accomplishments of your crawler) |
---|
3483 | - will use crawler.get_state() to retrieve this dictionary; they can |
---|
3484 | - present the contents as they see fit. |
---|
3485 | + To implement a crawler, create a subclass that implements the |
---|
3486 | + process_shareset() method. It will be called with a prefixdir and an |
---|
3487 | + object providing the IShareSet interface. process_shareset() must run |
---|
3488 | + synchronously. Any keys added to self.state will be preserved. Override |
---|
3489 | + add_initial_state() to set up initial state keys. Override |
---|
3490 | + finished_cycle() to perform additional processing when the cycle is |
---|
3491 | + complete. Any status that the crawler produces should be put in the |
---|
3492 | + self.state dictionary. Status renderers (like a web page describing the |
---|
3493 | + accomplishments of your crawler) will use crawler.get_state() to retrieve |
---|
3494 | + this dictionary; they can present the contents as they see fit. |
---|
3495 | |
---|
3496 | hunk ./src/allmydata/storage/crawler.py 52 |
---|
3497 | - Then create an instance, with a reference to a StorageServer and a |
---|
3498 | - filename where it can store persistent state. The statefile is used to |
---|
3499 | - keep track of how far around the ring the process has travelled, as well |
---|
3500 | - as timing history to allow the pace to be predicted and controlled. The |
---|
3501 | - statefile will be updated and written to disk after each time slice (just |
---|
3502 | - before the crawler yields to the reactor), and also after each cycle is |
---|
3503 | - finished, and also when stopService() is called. Note that this means |
---|
3504 | - that a crawler which is interrupted with SIGKILL while it is in the |
---|
3505 | - middle of a time slice will lose progress: the next time the node is |
---|
3506 | - started, the crawler will repeat some unknown amount of work. |
---|
3507 | + Then create an instance, with a reference to a backend object providing |
---|
3508 | + the IStorageBackend interface, and a filename where it can store |
---|
3509 | + persistent state. The statefile is used to keep track of how far around |
---|
3510 | + the ring the process has travelled, as well as timing history to allow |
---|
3511 | + the pace to be predicted and controlled. The statefile will be updated |
---|
3512 | + and written to disk after each time slice (just before the crawler yields |
---|
3513 | + to the reactor), and also after each cycle is finished, and also when |
---|
3514 | + stopService() is called. Note that this means that a crawler that is |
---|
3515 | + interrupted with SIGKILL while it is in the middle of a time slice will |
---|
3516 | + lose progress: the next time the node is started, the crawler will repeat |
---|
3517 | + some unknown amount of work. |
---|
3518 | |
---|
3519 | The crawler instance must be started with startService() before it will |
---|
3520 | hunk ./src/allmydata/storage/crawler.py 65 |
---|
3521 | - do any work. To make it stop doing work, call stopService(). |
---|
3522 | + do any work. To make it stop doing work, call stopService(). A crawler |
---|
3523 | + is usually a child service of a StorageServer, although it should not |
---|
3524 | + depend on that. |
---|
3525 | + |
---|
3526 | + For historical reasons, some dictionary key names use the term "bucket" |
---|
3527 | + for what is now preferably called a "shareset" (the set of shares that a |
---|
3528 | + server holds under a given storage index). |
---|
3529 | """ |
---|
3530 | |
---|
3531 | slow_start = 300 # don't start crawling for 5 minutes after startup |
---|
3532 | hunk ./src/allmydata/storage/crawler.py 80 |
---|
3533 | cpu_slice = 1.0 # use up to 1.0 seconds before yielding |
---|
3534 | minimum_cycle_time = 300 # don't run a cycle faster than this |
---|
3535 | |
---|
3536 | - def __init__(self, server, statefile, allowed_cpu_percentage=None): |
---|
3537 | + def __init__(self, backend, statefp, allowed_cpu_percentage=None): |
---|
3538 | + precondition(IStorageBackend.providedBy(backend), backend) |
---|
3539 | service.MultiService.__init__(self) |
---|
3540 | hunk ./src/allmydata/storage/crawler.py 83 |
---|
3541 | + self.backend = backend |
---|
3542 | + self.statefp = statefp |
---|
3543 | if allowed_cpu_percentage is not None: |
---|
3544 | self.allowed_cpu_percentage = allowed_cpu_percentage |
---|
3545 | hunk ./src/allmydata/storage/crawler.py 87 |
---|
3546 | - self.server = server |
---|
3547 | - self.sharedir = server.sharedir |
---|
3548 | - self.statefile = statefile |
---|
3549 | self.prefixes = [si_b2a(struct.pack(">H", i << (16-10)))[:2] |
---|
3550 | for i in range(2**10)] |
---|
3551 | self.prefixes.sort() |
---|
3552 | hunk ./src/allmydata/storage/crawler.py 91 |
---|
3553 | self.timer = None |
---|
3554 | - self.bucket_cache = (None, []) |
---|
3555 | + self.shareset_cache = (None, []) |
---|
3556 | self.current_sleep_time = None |
---|
3557 | self.next_wake_time = None |
---|
3558 | self.last_prefix_finished_time = None |
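A minimal sketch of wiring such a crawler up, assuming an object 'backend' that provides IStorageBackend and a Twisted FilePath for the statefile (the path and variable names are illustrative):

    from twisted.python.filepath import FilePath

    statefp = FilePath("/var/tahoe/storage/crawler.state")  # hypothetical path
    crawler = ShareCountingCrawler(backend, statefp)
    crawler.setServiceParent(storage_server)  # usually a child of the StorageServer
    # No crawling happens until startService() runs (normally via the parent).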
---|
3559 | hunk ./src/allmydata/storage/crawler.py 154 |
---|
3560 | left = len(self.prefixes) - self.last_complete_prefix_index |
---|
3561 | remaining = left * self.last_prefix_elapsed_time |
---|
3562 | # TODO: remainder of this prefix: we need to estimate the |
---|
3563 | - # per-bucket time, probably by measuring the time spent on |
---|
3564 | - # this prefix so far, divided by the number of buckets we've |
---|
3565 | + # per-shareset time, probably by measuring the time spent on |
---|
3566 | + # this prefix so far, divided by the number of sharesets we've |
---|
3567 | # processed. |
---|
3568 | d["estimated-cycle-complete-time-left"] = remaining |
---|
3569 | # it's possible to call get_progress() from inside a crawler's |
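The TODO above asks for a per-shareset estimate. A self-contained sketch of that calculation (the function and parameter names are hypothetical):

    def estimate_prefix_time_left(elapsed, processed, total):
        # elapsed:   seconds spent on the current prefix so far
        # processed: sharesets handled so far in this prefix
        # total:     sharesets in this prefix
        if processed == 0:
            return None  # no data yet; keep the whole-prefix estimate
        return (total - processed) * (elapsed / float(processed))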
---|
3570 | hunk ./src/allmydata/storage/crawler.py 175 |
---|
3571 | state dictionary. |
---|
3572 | |
---|
3573 | If we are not currently sleeping (i.e. get_state() was called from |
---|
3574 | - inside the process_prefixdir, process_bucket, or finished_cycle() |
---|
3575 | + inside the process_prefixdir, process_shareset, or finished_cycle() |
---|
3576 | methods, or if startService has not yet been called on this crawler), |
---|
3577 | these two keys will be None. |
---|
3578 | |
---|
3579 | hunk ./src/allmydata/storage/crawler.py 188 |
---|
3580 | def load_state(self): |
---|
3581 | # we use this to store state for both the crawler's internals and |
---|
3582 | # anything the subclass-specific code needs. The state is stored |
---|
3583 | - # after each bucket is processed, after each prefixdir is processed, |
---|
3584 | + # after each shareset is processed, after each prefixdir is processed, |
---|
3585 | # and after a cycle is complete. The internal keys we use are: |
---|
3586 | # ["version"]: int, always 1 |
---|
3587 | # ["last-cycle-finished"]: int, or None if we have not yet finished |
---|
3588 | hunk ./src/allmydata/storage/crawler.py 202 |
---|
3589 | # are sleeping between cycles, or if we |
---|
3590 | # have not yet finished any prefixdir since |
---|
3591 | # a cycle was started |
---|
3592 | - # ["last-complete-bucket"]: str, base32 storage index bucket name |
---|
3593 | - # of the last bucket to be processed, or |
---|
3594 | - # None if we are sleeping between cycles |
---|
3595 | + # ["last-complete-bucket"]: str, base32 storage index of the last |
---|
3596 | + # shareset to be processed, or None if we |
---|
3597 | + # are sleeping between cycles |
---|
3598 | try: |
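Putting the documented keys together, a freshly initialized state dictionary looks roughly like this (a sketch showing only the keys named in the comments above):

    state = {
        "version": 1,
        "last-cycle-finished": None,
        "last-complete-prefix": None,
        "last-complete-bucket": None,
    }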
---|
3599 | hunk ./src/allmydata/storage/crawler.py 206 |
---|
3600 | - f = open(self.statefile, "rb") |
---|
3601 | - state = pickle.load(f) |
---|
3602 | - f.close() |
---|
3603 | + state = pickle.loads(self.statefp.getContent()) |
---|
3604 | except EnvironmentError: |
---|
3605 | state = {"version": 1, |
---|
3606 | "last-cycle-finished": None, |
---|
3607 | hunk ./src/allmydata/storage/crawler.py 242 |
---|
3608 | else: |
---|
3609 | last_complete_prefix = self.prefixes[lcpi] |
---|
3610 | self.state["last-complete-prefix"] = last_complete_prefix |
---|
3611 | - tmpfile = self.statefile + ".tmp" |
---|
3612 | - f = open(tmpfile, "wb") |
---|
3613 | - pickle.dump(self.state, f) |
---|
3614 | - f.close() |
---|
3615 | - fileutil.move_into_place(tmpfile, self.statefile) |
---|
3616 | + self.statefp.setContent(pickle.dumps(self.state)) |
---|
3617 | |
---|
3618 | def startService(self): |
---|
3619 | # arrange things to look like we were just sleeping, so |
---|
3620 | hunk ./src/allmydata/storage/crawler.py 284 |
---|
3621 | sleep_time = (this_slice / self.allowed_cpu_percentage) - this_slice |
---|
3622 | # if the math gets weird, or a timequake happens, don't sleep |
---|
3623 | # forever. Note that this means that, while a cycle is running, we |
---|
3624 | - # will process at least one bucket every 5 minutes, no matter how |
---|
3625 | - # long that bucket takes. |
---|
3626 | + # will process at least one shareset every 5 minutes, no matter how |
---|
3627 | + # long that shareset takes. |
---|
3628 | sleep_time = max(0.0, min(sleep_time, 299)) |
---|
3629 | if finished_cycle: |
---|
3630 | # how long should we sleep between cycles? Don't run faster than |
---|
3631 | hunk ./src/allmydata/storage/crawler.py 315 |
---|
3632 | for i in range(self.last_complete_prefix_index+1, len(self.prefixes)): |
---|
3633 | # if we want to yield earlier, just raise TimeSliceExceeded() |
---|
3634 | prefix = self.prefixes[i] |
---|
3635 | - prefixdir = os.path.join(self.sharedir, prefix) |
---|
3636 | - if i == self.bucket_cache[0]: |
---|
3637 | - buckets = self.bucket_cache[1] |
---|
3638 | + if i == self.shareset_cache[0]: |
---|
3639 | + sharesets = self.shareset_cache[1] |
---|
3640 | else: |
---|
3641 | hunk ./src/allmydata/storage/crawler.py 318 |
---|
3642 | - try: |
---|
3643 | - buckets = os.listdir(prefixdir) |
---|
3644 | - buckets.sort() |
---|
3645 | - except EnvironmentError: |
---|
3646 | - buckets = [] |
---|
3647 | - self.bucket_cache = (i, buckets) |
---|
3648 | - self.process_prefixdir(cycle, prefix, prefixdir, |
---|
3649 | - buckets, start_slice) |
---|
3650 | + sharesets = self.backend.get_sharesets_for_prefix(prefix) |
---|
3651 | + self.shareset_cache = (i, sharesets) |
---|
3652 | + self.process_prefixdir(cycle, prefix, sharesets, start_slice) |
---|
3653 | self.last_complete_prefix_index = i |
---|
3654 | |
---|
3655 | now = time.time() |
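A sketch of the backend call being cached above; get_sharesets_for_prefix() and get_storage_index_string() come from these patches, while the prefix value is illustrative:

    sharesets = backend.get_sharesets_for_prefix("aa")  # two-char base32 prefix
    for shareset in sharesets:
        si_s = shareset.get_storage_index_string()  # base32 storage index

Caching the result matters because listing a prefix is the expensive step; memoizing it lets a cycle resume within the same prefix after yielding to the reactor.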
---|
3656 | hunk ./src/allmydata/storage/crawler.py 345 |
---|
3657 | self.finished_cycle(cycle) |
---|
3658 | self.save_state() |
---|
3659 | |
---|
3660 | - def process_prefixdir(self, cycle, prefix, prefixdir, buckets, start_slice): |
---|
3661 | - """This gets a list of bucket names (i.e. storage index strings, |
---|
3662 | + def process_prefixdir(self, cycle, prefix, sharesets, start_slice): |
---|
3663 | + """ |
---|
3664 | + This gets a list of sharesets (whose storage index strings are |
---|
3665 | base32-encoded) in sorted order. |
---|
3666 | |
---|
3667 | You can override this if your crawler doesn't care about the actual |
---|
3668 | hunk ./src/allmydata/storage/crawler.py 352 |
---|
3669 | shares, for example a crawler which merely keeps track of how many |
---|
3670 | - buckets are being managed by this server. |
---|
3671 | + sharesets are being managed by this server. |
---|
3672 | |
---|
3673 | hunk ./src/allmydata/storage/crawler.py 354 |
---|
3674 | - Subclasses which *do* care about actual bucket should leave this |
---|
3675 | - method along, and implement process_bucket() instead. |
---|
3676 | + Subclasses that *do* care about the actual sharesets should leave this |
---|
3677 | + method alone, and implement process_shareset() instead. |
---|
3678 | """ |
---|
3679 | |
---|
3680 | hunk ./src/allmydata/storage/crawler.py 358 |
---|
3681 | - for bucket in buckets: |
---|
3682 | - if bucket <= self.state["last-complete-bucket"]: |
---|
3683 | + for shareset in sharesets: |
---|
3684 | + base32si = shareset.get_storage_index_string() |
---|
3685 | + if base32si <= self.state["last-complete-bucket"]: |
---|
3686 | continue |
---|
3687 | hunk ./src/allmydata/storage/crawler.py 362 |
---|
3688 | - self.process_bucket(cycle, prefix, prefixdir, bucket) |
---|
3689 | - self.state["last-complete-bucket"] = bucket |
---|
3690 | + self.process_shareset(cycle, prefix, shareset) |
---|
3691 | + self.state["last-complete-bucket"] = base32si |
---|
3692 | if time.time() >= start_slice + self.cpu_slice: |
---|
3693 | raise TimeSliceExceeded() |
---|
3694 | |
---|
3695 | hunk ./src/allmydata/storage/crawler.py 370 |
---|
3696 | # the remaining methods are explictly for subclasses to implement. |
---|
3697 | |
---|
3698 | def started_cycle(self, cycle): |
---|
3699 | - """Notify a subclass that the crawler is about to start a cycle. |
---|
3700 | + """ |
---|
3701 | + Notify a subclass that the crawler is about to start a cycle. |
---|
3702 | |
---|
3703 | This method is for subclasses to override. No upcall is necessary. |
---|
3704 | """ |
---|
3705 | hunk ./src/allmydata/storage/crawler.py 377 |
---|
3706 | pass |
---|
3707 | |
---|
3708 | - def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32): |
---|
3709 | - """Examine a single bucket. Subclasses should do whatever they want |
---|
3710 | + def process_shareset(self, cycle, prefix, shareset): |
---|
3711 | + """ |
---|
3712 | + Examine a single shareset. Subclasses should do whatever they want |
---|
3713 | to do to the shares therein, then update self.state as necessary. |
---|
3714 | |
---|
3715 | If the crawler is never interrupted by SIGKILL, this method will be |
---|
3716 | hunk ./src/allmydata/storage/crawler.py 383 |
---|
3717 | - called exactly once per share (per cycle). If it *is* interrupted, |
---|
3718 | + called exactly once per shareset (per cycle). If it *is* interrupted, |
---|
3719 | then the next time the node is started, some amount of work will be |
---|
3720 | duplicated, according to when self.save_state() was last called. By |
---|
3721 | default, save_state() is called at the end of each timeslice, and |
---|
3722 | hunk ./src/allmydata/storage/crawler.py 391 |
---|
3723 | |
---|
3724 | To reduce the chance of duplicate work (i.e. to avoid adding multiple |
---|
3725 | records to a database), you can call save_state() at the end of your |
---|
3726 | - process_bucket() method. This will reduce the maximum duplicated work |
---|
3727 | - to one bucket per SIGKILL. It will also add overhead, probably 1-20ms |
---|
3728 | - per bucket (and some disk writes), which will count against your |
---|
3729 | - allowed_cpu_percentage, and which may be considerable if |
---|
3730 | - process_bucket() runs quickly. |
---|
3731 | + process_shareset() method. This will reduce the maximum duplicated |
---|
3732 | + work to one shareset per SIGKILL. It will also add overhead, probably |
---|
3733 | + 1-20ms per shareset (and some disk writes), which will count against |
---|
3734 | + your allowed_cpu_percentage, and which may be considerable if |
---|
3735 | + process_shareset() runs quickly. |
---|
3736 | |
---|
3737 | This method is for subclasses to override. No upcall is necessary. |
---|
3738 | """ |
---|
3739 | hunk ./src/allmydata/storage/crawler.py 402 |
---|
3740 | pass |
---|
3741 | |
---|
3742 | def finished_prefix(self, cycle, prefix): |
---|
3743 | - """Notify a subclass that the crawler has just finished processing a |
---|
3744 | - prefix directory (all buckets with the same two-character/10bit |
---|
3745 | + """ |
---|
3746 | + Notify a subclass that the crawler has just finished processing a |
---|
3747 | + prefix directory (all sharesets with the same two-character/10-bit |
---|
3748 | prefix). To impose a limit on how much work might be duplicated by a |
---|
3749 | SIGKILL that occurs during a timeslice, you can call |
---|
3750 | self.save_state() here, but be aware that it may represent a |
---|
3751 | hunk ./src/allmydata/storage/crawler.py 415 |
---|
3752 | pass |
---|
3753 | |
---|
3754 | def finished_cycle(self, cycle): |
---|
3755 | - """Notify subclass that a cycle (one complete traversal of all |
---|
3756 | + """ |
---|
3757 | + Notify subclass that a cycle (one complete traversal of all |
---|
3758 | prefixdirs) has just finished. 'cycle' is the number of the cycle |
---|
3759 | that just finished. This method should perform summary work and |
---|
3760 | update self.state to publish information to status displays. |
---|
3761 | hunk ./src/allmydata/storage/crawler.py 433 |
---|
3762 | pass |
---|
3763 | |
---|
3764 | def yielding(self, sleep_time): |
---|
3765 | - """The crawler is about to sleep for 'sleep_time' seconds. This |
---|
3766 | + """ |
---|
3767 | + The crawler is about to sleep for 'sleep_time' seconds. This |
---|
3768 | method is mostly for the convenience of unit tests. |
---|
3769 | |
---|
3770 | This method is for subclasses to override. No upcall is necessary. |
---|
3771 | hunk ./src/allmydata/storage/crawler.py 443 |
---|
3772 | |
---|
3773 | |
---|
3774 | class BucketCountingCrawler(ShareCrawler): |
---|
3775 | - """I keep track of how many buckets are being managed by this server. |
---|
3776 | - This is equivalent to the number of distributed files and directories for |
---|
3777 | - which I am providing storage. The actual number of files+directories in |
---|
3778 | - the full grid is probably higher (especially when there are more servers |
---|
3779 | - than 'N', the number of generated shares), because some files+directories |
---|
3780 | - will have shares on other servers instead of me. Also note that the |
---|
3781 | - number of buckets will differ from the number of shares in small grids, |
---|
3782 | - when more than one share is placed on a single server. |
---|
3783 | + """ |
---|
3784 | + I keep track of how many sharesets, each corresponding to a storage index, |
---|
3785 | + are being managed by this server. This is equivalent to the number of |
---|
3786 | + distributed files and directories for which I am providing storage. The |
---|
3787 | + actual number of files and directories in the full grid is probably higher |
---|
3788 | + (especially when there are more servers than 'N', the number of generated |
---|
3789 | + shares), because some files and directories will have shares on other |
---|
3790 | + servers instead of me. Also note that the number of sharesets will differ |
---|
3791 | + from the number of shares in small grids, when more than one share is |
---|
3792 | + placed on a single server. |
---|
3793 | """ |
---|
3794 | |
---|
3795 | minimum_cycle_time = 60*60 # we don't need this more than once an hour |
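To make the distinction in the docstring above concrete with illustrative numbers: a file encoded with N=10 on a grid of 5 servers places 2 shares per server; each server then holds one shareset for that file, so its shareset count equals its count of files and directories while its share count is twice that.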
---|
3796 | hunk ./src/allmydata/storage/crawler.py 457 |
---|
3797 | |
---|
3798 | - def __init__(self, server, statefile, num_sample_prefixes=1): |
---|
3799 | - ShareCrawler.__init__(self, server, statefile) |
---|
3800 | + def __init__(self, backend, statefp, num_sample_prefixes=1): |
---|
3801 | + ShareCrawler.__init__(self, backend, statefp) |
---|
3802 | self.num_sample_prefixes = num_sample_prefixes |
---|
3803 | |
---|
3804 | def add_initial_state(self): |
---|
3805 | hunk ./src/allmydata/storage/crawler.py 471 |
---|
3806 | self.state.setdefault("last-complete-bucket-count", None) |
---|
3807 | self.state.setdefault("storage-index-samples", {}) |
---|
3808 | |
---|
3809 | - def process_prefixdir(self, cycle, prefix, prefixdir, buckets, start_slice): |
---|
3810 | + def process_prefixdir(self, cycle, prefix, sharesets, start_slice): |
---|
3811 | # we override process_prefixdir() because we don't want to look at |
---|
3812 | hunk ./src/allmydata/storage/crawler.py 473 |
---|
3813 | - # the individual buckets. We'll save state after each one. On my |
---|
3814 | + # the individual sharesets. We'll save state after each one. On my |
---|
3815 | # laptop, a mostly-empty storage server can process about 70 |
---|
3816 | # prefixdirs in a 1.0s slice. |
---|
3817 | if cycle not in self.state["bucket-counts"]: |
---|
3818 | hunk ./src/allmydata/storage/crawler.py 478 |
---|
3819 | self.state["bucket-counts"][cycle] = {} |
---|
3820 | - self.state["bucket-counts"][cycle][prefix] = len(buckets) |
---|
3821 | + self.state["bucket-counts"][cycle][prefix] = len(sharesets) |
---|
3822 | if prefix in self.prefixes[:self.num_sample_prefixes]: |
---|
3823 | hunk ./src/allmydata/storage/crawler.py 480 |
---|
3824 | - self.state["storage-index-samples"][prefix] = (cycle, buckets) |
---|
3825 | + self.state["storage-index-samples"][prefix] = (cycle, sharesets) |
---|
3826 | |
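After a prefix is processed, the counts and samples written by the code above nest as follows (a sketch):

    # self.state["bucket-counts"] = { cycle: { "aa": 2500, "ab": 2498, ... } }
    # self.state["storage-index-samples"]["aa"] = (cycle, sharesets)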
---|
3827 | def finished_cycle(self, cycle): |
---|
3828 | last_counts = self.state["bucket-counts"].get(cycle, []) |
---|
3829 | hunk ./src/allmydata/storage/crawler.py 486 |
---|
3830 | if len(last_counts) == len(self.prefixes): |
---|
3831 | # great, we have a whole cycle. |
---|
3832 | - num_buckets = sum(last_counts.values()) |
---|
3833 | - self.state["last-complete-bucket-count"] = num_buckets |
---|
3834 | + num_sharesets = sum(last_counts.values()) |
---|
3835 | + self.state["last-complete-bucket-count"] = num_sharesets |
---|
3836 | # get rid of old counts |
---|
3837 | for old_cycle in list(self.state["bucket-counts"].keys()): |
---|
3838 | if old_cycle != cycle: |
---|
3839 | hunk ./src/allmydata/storage/crawler.py 494 |
---|
3840 | del self.state["bucket-counts"][old_cycle] |
---|
3841 | # get rid of old samples too |
---|
3842 | for prefix in list(self.state["storage-index-samples"].keys()): |
---|
3843 | - old_cycle,buckets = self.state["storage-index-samples"][prefix] |
---|
3844 | + old_cycle, storage_indices = self.state["storage-index-samples"][prefix] |
---|
3845 | if old_cycle != cycle: |
---|
3846 | del self.state["storage-index-samples"][prefix] |
---|
3847 | hunk ./src/allmydata/storage/crawler.py 497 |
---|
3848 | - |
---|
3849 | hunk ./src/allmydata/storage/expirer.py 1 |
---|
3850 | -import time, os, pickle, struct |
---|
3851 | + |
---|
3852 | +import time, pickle, struct |
---|
3853 | +from twisted.python import log as twlog |
---|
3854 | + |
---|
3855 | from allmydata.storage.crawler import ShareCrawler |
---|
3856 | hunk ./src/allmydata/storage/expirer.py 6 |
---|
3857 | -from allmydata.storage.shares import get_share_file |
---|
3858 | -from allmydata.storage.common import UnknownMutableContainerVersionError, \ |
---|
3859 | +from allmydata.storage.common import si_b2a, UnknownMutableContainerVersionError, \ |
---|
3860 | UnknownImmutableContainerVersionError |
---|
3861 | hunk ./src/allmydata/storage/expirer.py 8 |
---|
3862 | -from twisted.python import log as twlog |
---|
3863 | + |
---|
3864 | |
---|
3865 | class LeaseCheckingCrawler(ShareCrawler): |
---|
3866 | """I examine the leases on all shares, determining which are still valid |
---|
3867 | hunk ./src/allmydata/storage/expirer.py 17 |
---|
3868 | removed. |
---|
3869 | |
---|
3870 | I collect statistics on the leases and make these available to a web |
---|
3871 | - status page, including:: |
---|
3872 | + status page, including: |
---|
3873 | |
---|
3874 | Space recovered during this cycle-so-far: |
---|
3875 | actual (only if expiration_enabled=True): |
---|
3876 | hunk ./src/allmydata/storage/expirer.py 21 |
---|
3877 | - num-buckets, num-shares, sum of share sizes, real disk usage |
---|
3878 | + num-storage-indices, num-shares, sum of share sizes, real disk usage |
---|
3879 | ('real disk usage' means we use stat(fn).st_blocks*512 and include any |
---|
3880 | space used by the directory) |
---|
3881 | what it would have been with the original lease expiration time |
---|
3882 | hunk ./src/allmydata/storage/expirer.py 32 |
---|
3883 | |
---|
3884 | Space recovered during the last 10 cycles <-- saved in separate pickle |
---|
3885 | |
---|
3886 | - Shares/buckets examined: |
---|
3887 | + Shares/storage-indices examined: |
---|
3888 | this cycle-so-far |
---|
3889 | prediction of rest of cycle |
---|
3890 | during last 10 cycles <-- separate pickle |
---|
3891 | hunk ./src/allmydata/storage/expirer.py 42 |
---|
3892 | Histogram of leases-per-share: |
---|
3893 | this-cycle-to-date |
---|
3894 | last 10 cycles <-- separate pickle |
---|
3895 | - Histogram of lease ages, buckets = 1day |
---|
3896 | + Histogram of lease ages, with one-day bins |
---|
3897 | cycle-to-date |
---|
3898 | last 10 cycles <-- separate pickle |
---|
3899 | |
---|
3900 | hunk ./src/allmydata/storage/expirer.py 53 |
---|
3901 | slow_start = 360 # wait 6 minutes after startup |
---|
3902 | minimum_cycle_time = 12*60*60 # not more than twice per day |
---|
3903 | |
---|
3904 | - def __init__(self, server, statefile, historyfile, |
---|
3905 | - expiration_enabled, mode, |
---|
3906 | - override_lease_duration, # used if expiration_mode=="age" |
---|
3907 | - cutoff_date, # used if expiration_mode=="cutoff-date" |
---|
3908 | - sharetypes): |
---|
3909 | - self.historyfile = historyfile |
---|
3910 | - self.expiration_enabled = expiration_enabled |
---|
3911 | - self.mode = mode |
---|
3912 | + def __init__(self, backend, statefp, historyfp, expiration_policy): |
---|
3913 | + # ShareCrawler.__init__ will call add_initial_state, so self.historyfp has to be set first. |
---|
3914 | + self.historyfp = historyfp |
---|
3915 | + ShareCrawler.__init__(self, backend, statefp) |
---|
3916 | + |
---|
3917 | + self.expiration_enabled = expiration_policy['enabled'] |
---|
3918 | + self.mode = expiration_policy['mode'] |
---|
3919 | self.override_lease_duration = None |
---|
3920 | self.cutoff_date = None |
---|
3921 | if self.mode == "age": |
---|
3922 | hunk ./src/allmydata/storage/expirer.py 63 |
---|
3923 | - assert isinstance(override_lease_duration, (int, type(None))) |
---|
3924 | - self.override_lease_duration = override_lease_duration # seconds |
---|
3925 | + assert isinstance(expiration_policy['override_lease_duration'], (int, type(None))) |
---|
3926 | + self.override_lease_duration = expiration_policy['override_lease_duration'] # seconds |
---|
3927 | elif self.mode == "cutoff-date": |
---|
3928 | hunk ./src/allmydata/storage/expirer.py 66 |
---|
3929 | - assert isinstance(cutoff_date, int) # seconds-since-epoch |
---|
3930 | - assert cutoff_date is not None |
---|
3931 | - self.cutoff_date = cutoff_date |
---|
3932 | + assert isinstance(expiration_policy['cutoff_date'], int) # seconds-since-epoch |
---|
3933 | + self.cutoff_date = expiration_policy['cutoff_date'] |
---|
3934 | else: |
---|
3935 | hunk ./src/allmydata/storage/expirer.py 69 |
---|
3936 | - raise ValueError("GC mode '%s' must be 'age' or 'cutoff-date'" % mode) |
---|
3937 | - self.sharetypes_to_expire = sharetypes |
---|
3938 | - ShareCrawler.__init__(self, server, statefile) |
---|
3939 | + raise ValueError("GC mode '%s' must be 'age' or 'cutoff-date'" % expiration_policy['mode']) |
---|
3940 | + self.sharetypes_to_expire = expiration_policy['sharetypes'] |
---|
3941 | |
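A minimal sketch of an expiration_policy dict accepted by this constructor; the keys come from the code above (and from DEFAULT_EXPIRATION_POLICY later in this bundle), while the values are illustrative:

    expiration_policy = {
        'enabled': True,
        'mode': 'cutoff-date',
        'override_lease_duration': None,  # consulted only in 'age' mode
        'cutoff_date': 1316995200,        # seconds since epoch (hypothetical)
        'sharetypes': ('mutable', 'immutable'),
    }
    checker = LeaseCheckingCrawler(backend, statefp, historyfp, expiration_policy)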
---|
3942 | def add_initial_state(self): |
---|
3943 | # we fill ["cycle-to-date"] here (even though they will be reset in |
---|
3944 | hunk ./src/allmydata/storage/expirer.py 84 |
---|
3945 | self.state["cycle-to-date"].setdefault(k, so_far[k]) |
---|
3946 | |
---|
3947 | # initialize history |
---|
3948 | - if not os.path.exists(self.historyfile): |
---|
3949 | + if not self.historyfp.exists(): |
---|
3950 | history = {} # cyclenum -> dict |
---|
3951 | hunk ./src/allmydata/storage/expirer.py 86 |
---|
3952 | - f = open(self.historyfile, "wb") |
---|
3953 | - pickle.dump(history, f) |
---|
3954 | - f.close() |
---|
3955 | + self.historyfp.setContent(pickle.dumps(history)) |
---|
3956 | |
---|
3957 | def create_empty_cycle_dict(self): |
---|
3958 | recovered = self.create_empty_recovered_dict() |
---|
3959 | hunk ./src/allmydata/storage/expirer.py 99 |
---|
3960 | |
---|
3961 | def create_empty_recovered_dict(self): |
---|
3962 | recovered = {} |
---|
3963 | + # "buckets" is ambiguous; here it means the number of sharesets (one per storage index per server) |
---|
3964 | for a in ("actual", "original", "configured", "examined"): |
---|
3965 | for b in ("buckets", "shares", "sharebytes", "diskbytes"): |
---|
3966 | recovered[a+"-"+b] = 0 |
---|
3967 | hunk ./src/allmydata/storage/expirer.py 110 |
---|
3968 | def started_cycle(self, cycle): |
---|
3969 | self.state["cycle-to-date"] = self.create_empty_cycle_dict() |
---|
3970 | |
---|
3971 | - def stat(self, fn): |
---|
3972 | - return os.stat(fn) |
---|
3973 | - |
---|
3974 | - def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32): |
---|
3975 | - bucketdir = os.path.join(prefixdir, storage_index_b32) |
---|
3976 | - s = self.stat(bucketdir) |
---|
3977 | + def process_shareset(self, cycle, prefix, container): |
---|
3978 | would_keep_shares = [] |
---|
3979 | wks = None |
---|
3980 | hunk ./src/allmydata/storage/expirer.py 113 |
---|
3981 | + sharetype = None |
---|
3982 | |
---|
3983 | hunk ./src/allmydata/storage/expirer.py 115 |
---|
3984 | - for fn in os.listdir(bucketdir): |
---|
3985 | - try: |
---|
3986 | - shnum = int(fn) |
---|
3987 | - except ValueError: |
---|
3988 | - continue # non-numeric means not a sharefile |
---|
3989 | - sharefile = os.path.join(bucketdir, fn) |
---|
3990 | + for share in container.get_shares(): |
---|
3991 | + sharetype = share.sharetype |
---|
3992 | try: |
---|
3993 | hunk ./src/allmydata/storage/expirer.py 118 |
---|
3994 | - wks = self.process_share(sharefile) |
---|
3995 | + wks = self.process_share(share) |
---|
3996 | except (UnknownMutableContainerVersionError, |
---|
3997 | UnknownImmutableContainerVersionError, |
---|
3998 | struct.error): |
---|
3999 | hunk ./src/allmydata/storage/expirer.py 122 |
---|
4000 | - twlog.msg("lease-checker error processing %s" % sharefile) |
---|
4001 | + twlog.msg("lease-checker error processing %r" % (share,)) |
---|
4002 | twlog.err() |
---|
4003 | hunk ./src/allmydata/storage/expirer.py 124 |
---|
4004 | - which = (storage_index_b32, shnum) |
---|
4005 | + which = (si_b2a(share.storageindex), share.get_shnum()) |
---|
4006 | self.state["cycle-to-date"]["corrupt-shares"].append(which) |
---|
4007 | wks = (1, 1, 1, "unknown") |
---|
4008 | would_keep_shares.append(wks) |
---|
4009 | hunk ./src/allmydata/storage/expirer.py 129 |
---|
4010 | |
---|
4011 | - sharetype = None |
---|
4012 | + container_type = None |
---|
4013 | if wks: |
---|
4014 | hunk ./src/allmydata/storage/expirer.py 131 |
---|
4015 | - # use the last share's sharetype as the buckettype |
---|
4016 | - sharetype = wks[3] |
---|
4017 | + # use the last share's sharetype as the container type |
---|
4018 | + container_type = wks[3] |
---|
4019 | rec = self.state["cycle-to-date"]["space-recovered"] |
---|
4020 | self.increment(rec, "examined-buckets", 1) |
---|
4021 | if sharetype: |
---|
4022 | hunk ./src/allmydata/storage/expirer.py 136 |
---|
4023 | - self.increment(rec, "examined-buckets-"+sharetype, 1) |
---|
4024 | + self.increment(rec, "examined-buckets-"+container_type, 1) |
---|
4025 | + |
---|
4026 | + container_diskbytes = container.get_overhead() |
---|
4027 | |
---|
4028 | hunk ./src/allmydata/storage/expirer.py 140 |
---|
4029 | - try: |
---|
4030 | - bucket_diskbytes = s.st_blocks * 512 |
---|
4031 | - except AttributeError: |
---|
4032 | - bucket_diskbytes = 0 # no stat().st_blocks on windows |
---|
4033 | if sum([wks[0] for wks in would_keep_shares]) == 0: |
---|
4034 | hunk ./src/allmydata/storage/expirer.py 141 |
---|
4035 | - self.increment_bucketspace("original", bucket_diskbytes, sharetype) |
---|
4036 | + self.increment_container_space("original", container_diskbytes, sharetype) |
---|
4037 | if sum([wks[1] for wks in would_keep_shares]) == 0: |
---|
4038 | hunk ./src/allmydata/storage/expirer.py 143 |
---|
4039 | - self.increment_bucketspace("configured", bucket_diskbytes, sharetype) |
---|
4040 | + self.increment_container_space("configured", container_diskbytes, sharetype) |
---|
4041 | if sum([wks[2] for wks in would_keep_shares]) == 0: |
---|
4042 | hunk ./src/allmydata/storage/expirer.py 145 |
---|
4043 | - self.increment_bucketspace("actual", bucket_diskbytes, sharetype) |
---|
4044 | + self.increment_container_space("actual", container_diskbytes, sharetype) |
---|
4045 | |
---|
4046 | hunk ./src/allmydata/storage/expirer.py 147 |
---|
4047 | - def process_share(self, sharefilename): |
---|
4048 | - # first, find out what kind of a share it is |
---|
4049 | - sf = get_share_file(sharefilename) |
---|
4050 | - sharetype = sf.sharetype |
---|
4051 | + def process_share(self, share): |
---|
4052 | + sharetype = share.sharetype |
---|
4053 | now = time.time() |
---|
4054 | hunk ./src/allmydata/storage/expirer.py 150 |
---|
4055 | - s = self.stat(sharefilename) |
---|
4056 | + sharebytes = share.get_size() |
---|
4057 | + diskbytes = share.get_used_space() |
---|
4058 | |
---|
4059 | num_leases = 0 |
---|
4060 | num_valid_leases_original = 0 |
---|
4061 | hunk ./src/allmydata/storage/expirer.py 158 |
---|
4062 | num_valid_leases_configured = 0 |
---|
4063 | expired_leases_configured = [] |
---|
4064 | |
---|
4065 | - for li in sf.get_leases(): |
---|
4066 | + for li in share.get_leases(): |
---|
4067 | num_leases += 1 |
---|
4068 | original_expiration_time = li.get_expiration_time() |
---|
4069 | grant_renew_time = li.get_grant_renew_time_time() |
---|
4070 | hunk ./src/allmydata/storage/expirer.py 171 |
---|
4071 | |
---|
4072 | # expired-or-not according to our configured age limit |
---|
4073 | expired = False |
---|
4074 | - if self.mode == "age": |
---|
4075 | - age_limit = original_expiration_time |
---|
4076 | - if self.override_lease_duration is not None: |
---|
4077 | - age_limit = self.override_lease_duration |
---|
4078 | - if age > age_limit: |
---|
4079 | - expired = True |
---|
4080 | - else: |
---|
4081 | - assert self.mode == "cutoff-date" |
---|
4082 | - if grant_renew_time < self.cutoff_date: |
---|
4083 | - expired = True |
---|
4084 | - if sharetype not in self.sharetypes_to_expire: |
---|
4085 | - expired = False |
---|
4086 | + if sharetype in self.sharetypes_to_expire: |
---|
4087 | + if self.mode == "age": |
---|
4088 | + age_limit = original_expiration_time |
---|
4089 | + if self.override_lease_duration is not None: |
---|
4090 | + age_limit = self.override_lease_duration |
---|
4091 | + if age > age_limit: |
---|
4092 | + expired = True |
---|
4093 | + else: |
---|
4094 | + assert self.mode == "cutoff-date" |
---|
4095 | + if grant_renew_time < self.cutoff_date: |
---|
4096 | + expired = True |
---|
4097 | |
---|
4098 | if expired: |
---|
4099 | expired_leases_configured.append(li) |
---|
4100 | hunk ./src/allmydata/storage/expirer.py 190 |
---|
4101 | |
---|
4102 | so_far = self.state["cycle-to-date"] |
---|
4103 | self.increment(so_far["leases-per-share-histogram"], num_leases, 1) |
---|
4104 | - self.increment_space("examined", s, sharetype) |
---|
4105 | + self.increment_space("examined", diskbytes, sharetype) |
---|
4106 | |
---|
4107 | would_keep_share = [1, 1, 1, sharetype] |
---|
4108 | |
---|
4109 | hunk ./src/allmydata/storage/expirer.py 196 |
---|
4110 | if self.expiration_enabled: |
---|
4111 | for li in expired_leases_configured: |
---|
4112 | - sf.cancel_lease(li.cancel_secret) |
---|
4113 | + share.cancel_lease(li.cancel_secret) |
---|
4114 | |
---|
4115 | if num_valid_leases_original == 0: |
---|
4116 | would_keep_share[0] = 0 |
---|
4117 | hunk ./src/allmydata/storage/expirer.py 200 |
---|
4118 | - self.increment_space("original", s, sharetype) |
---|
4119 | + self.increment_space("original", sharebytes, diskbytes, sharetype) |
---|
4120 | |
---|
4121 | if num_valid_leases_configured == 0: |
---|
4122 | would_keep_share[1] = 0 |
---|
4123 | hunk ./src/allmydata/storage/expirer.py 204 |
---|
4124 | - self.increment_space("configured", s, sharetype) |
---|
4125 | + self.increment_space("configured", sharebytes, diskbytes, sharetype) |
---|
4126 | if self.expiration_enabled: |
---|
4127 | would_keep_share[2] = 0 |
---|
4128 | hunk ./src/allmydata/storage/expirer.py 207 |
---|
4129 | - self.increment_space("actual", s, sharetype) |
---|
4130 | + self.increment_space("actual", sharebytes, diskbytes, sharetype) |
---|
4131 | |
---|
4132 | return would_keep_share |
---|
4133 | |
---|
4134 | hunk ./src/allmydata/storage/expirer.py 211 |
---|
4135 | - def increment_space(self, a, s, sharetype): |
---|
4136 | - sharebytes = s.st_size |
---|
4137 | - try: |
---|
4138 | - # note that stat(2) says that st_blocks is 512 bytes, and that |
---|
4139 | - # st_blksize is "optimal file sys I/O ops blocksize", which is |
---|
4140 | - # independent of the block-size that st_blocks uses. |
---|
4141 | - diskbytes = s.st_blocks * 512 |
---|
4142 | - except AttributeError: |
---|
4143 | - # the docs say that st_blocks is only on linux. I also see it on |
---|
4144 | - # MacOS. But it isn't available on windows. |
---|
4145 | - diskbytes = sharebytes |
---|
4146 | + def increment_space(self, a, sharebytes, diskbytes, sharetype): |
---|
4147 | so_far_sr = self.state["cycle-to-date"]["space-recovered"] |
---|
4148 | self.increment(so_far_sr, a+"-shares", 1) |
---|
4149 | self.increment(so_far_sr, a+"-sharebytes", sharebytes) |
---|
4150 | hunk ./src/allmydata/storage/expirer.py 221 |
---|
4151 | self.increment(so_far_sr, a+"-sharebytes-"+sharetype, sharebytes) |
---|
4152 | self.increment(so_far_sr, a+"-diskbytes-"+sharetype, diskbytes) |
---|
4153 | |
---|
4154 | - def increment_bucketspace(self, a, bucket_diskbytes, sharetype): |
---|
4155 | + def increment_container_space(self, a, container_diskbytes, container_type): |
---|
4156 | rec = self.state["cycle-to-date"]["space-recovered"] |
---|
4157 | hunk ./src/allmydata/storage/expirer.py 223 |
---|
4158 | - self.increment(rec, a+"-diskbytes", bucket_diskbytes) |
---|
4159 | + self.increment(rec, a+"-diskbytes", container_diskbytes) |
---|
4160 | self.increment(rec, a+"-buckets", 1) |
---|
4161 | hunk ./src/allmydata/storage/expirer.py 225 |
---|
4162 | - if sharetype: |
---|
4163 | - self.increment(rec, a+"-diskbytes-"+sharetype, bucket_diskbytes) |
---|
4164 | - self.increment(rec, a+"-buckets-"+sharetype, 1) |
---|
4165 | + if container_type: |
---|
4166 | + self.increment(rec, a+"-diskbytes-"+container_type, container_diskbytes) |
---|
4167 | + self.increment(rec, a+"-buckets-"+container_type, 1) |
---|
4168 | |
---|
4169 | def increment(self, d, k, delta=1): |
---|
4170 | if k not in d: |
---|
4171 | hunk ./src/allmydata/storage/expirer.py 281 |
---|
4172 | # copy() needs to become a deepcopy |
---|
4173 | h["space-recovered"] = s["space-recovered"].copy() |
---|
4174 | |
---|
4175 | - history = pickle.load(open(self.historyfile, "rb")) |
---|
4176 | + history = pickle.loads(self.historyfp.getContent()) |
---|
4177 | history[cycle] = h |
---|
4178 | while len(history) > 10: |
---|
4179 | oldcycles = sorted(history.keys()) |
---|
4180 | hunk ./src/allmydata/storage/expirer.py 286 |
---|
4181 | del history[oldcycles[0]] |
---|
4182 | - f = open(self.historyfile, "wb") |
---|
4183 | - pickle.dump(history, f) |
---|
4184 | - f.close() |
---|
4185 | + self.historyfp.setContent(pickle.dumps(history)) |
---|
4186 | |
---|
4187 | def get_state(self): |
---|
4188 | """In addition to the crawler state described in |
---|
4189 | hunk ./src/allmydata/storage/expirer.py 355 |
---|
4190 | progress = self.get_progress() |
---|
4191 | |
---|
4192 | state = ShareCrawler.get_state(self) # does a shallow copy |
---|
4193 | - history = pickle.load(open(self.historyfile, "rb")) |
---|
4194 | + history = pickle.loads(self.historyfp.getContent()) |
---|
4195 | state["history"] = history |
---|
4196 | |
---|
4197 | if not progress["cycle-in-progress"]: |
---|
4198 | hunk ./src/allmydata/storage/lease.py 3 |
---|
4199 | import struct, time |
---|
4200 | |
---|
4201 | + |
---|
4202 | +class NonExistentLeaseError(Exception): |
---|
4203 | + pass |
---|
4204 | + |
---|
4205 | class LeaseInfo: |
---|
4206 | def __init__(self, owner_num=None, renew_secret=None, cancel_secret=None, |
---|
4207 | expiration_time=None, nodeid=None): |
---|
4208 | hunk ./src/allmydata/storage/lease.py 21 |
---|
4209 | |
---|
4210 | def get_expiration_time(self): |
---|
4211 | return self.expiration_time |
---|
4212 | + |
---|
4213 | def get_grant_renew_time_time(self): |
---|
4214 | # hack, based upon fixed 31day expiration period |
---|
4215 | return self.expiration_time - 31*24*60*60 |
---|
4216 | hunk ./src/allmydata/storage/lease.py 25 |
---|
4217 | + |
---|
4218 | def get_age(self): |
---|
4219 | return time.time() - self.get_grant_renew_time_time() |
---|
4220 | |
---|
4221 | hunk ./src/allmydata/storage/lease.py 36 |
---|
4222 | self.expiration_time) = struct.unpack(">L32s32sL", data) |
---|
4223 | self.nodeid = None |
---|
4224 | return self |
---|
4225 | + |
---|
4226 | def to_immutable_data(self): |
---|
4227 | return struct.pack(">L32s32sL", |
---|
4228 | self.owner_num, |
---|
4229 | hunk ./src/allmydata/storage/lease.py 49 |
---|
4230 | int(self.expiration_time), |
---|
4231 | self.renew_secret, self.cancel_secret, |
---|
4232 | self.nodeid) |
---|
4233 | + |
---|
4234 | def from_mutable_data(self, data): |
---|
4235 | (self.owner_num, |
---|
4236 | self.expiration_time, |
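As a quick sanity check on the ">L32s32sL" layout used for immutable lease records above: the record is fixed-size at 4 + 32 + 32 + 4 = 72 bytes.

    import struct
    assert struct.calcsize(">L32s32sL") == 72  # owner num, two 32-byte secrets, expiry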
---|
4237 | hunk ./src/allmydata/storage/server.py 1 |
---|
4238 | -import os, re, weakref, struct, time |
---|
4239 | +import weakref, time |
---|
4240 | |
---|
4241 | from foolscap.api import Referenceable |
---|
4242 | from twisted.application import service |
---|
4243 | hunk ./src/allmydata/storage/server.py 7 |
---|
4244 | |
---|
4245 | from zope.interface import implements |
---|
4246 | -from allmydata.interfaces import RIStorageServer, IStatsProducer |
---|
4247 | -from allmydata.util import fileutil, idlib, log, time_format |
---|
4248 | +from allmydata.interfaces import RIStorageServer, IStatsProducer, IStorageBackend |
---|
4249 | +from allmydata.util.assertutil import precondition |
---|
4250 | +from allmydata.util import idlib, log |
---|
4251 | import allmydata # for __full_version__ |
---|
4252 | |
---|
4253 | hunk ./src/allmydata/storage/server.py 12 |
---|
4254 | -from allmydata.storage.common import si_b2a, si_a2b, storage_index_to_dir |
---|
4255 | -_pyflakes_hush = [si_b2a, si_a2b, storage_index_to_dir] # re-exported |
---|
4256 | +from allmydata.storage.common import si_a2b, si_b2a |
---|
4257 | +[si_a2b] # hush pyflakes |
---|
4258 | from allmydata.storage.lease import LeaseInfo |
---|
4259 | hunk ./src/allmydata/storage/server.py 15 |
---|
4260 | -from allmydata.storage.mutable import MutableShareFile, EmptyShare, \ |
---|
4261 | - create_mutable_sharefile |
---|
4262 | -from allmydata.storage.immutable import ShareFile, BucketWriter, BucketReader |
---|
4263 | -from allmydata.storage.crawler import BucketCountingCrawler |
---|
4264 | from allmydata.storage.expirer import LeaseCheckingCrawler |
---|
4265 | hunk ./src/allmydata/storage/server.py 16 |
---|
4266 | - |
---|
4267 | -# storage/ |
---|
4268 | -# storage/shares/incoming |
---|
4269 | -# incoming/ holds temp dirs named $START/$STORAGEINDEX/$SHARENUM which will |
---|
4270 | -# be moved to storage/shares/$START/$STORAGEINDEX/$SHARENUM upon success |
---|
4271 | -# storage/shares/$START/$STORAGEINDEX |
---|
4272 | -# storage/shares/$START/$STORAGEINDEX/$SHARENUM |
---|
4273 | - |
---|
4274 | -# Where "$START" denotes the first 10 bits worth of $STORAGEINDEX (that's 2 |
---|
4275 | -# base-32 chars). |
---|
4276 | - |
---|
4277 | -# $SHARENUM matches this regex: |
---|
4278 | -NUM_RE=re.compile("^[0-9]+$") |
---|
4279 | - |
---|
4280 | +from allmydata.storage.crawler import BucketCountingCrawler |
---|
4281 | |
---|
4282 | |
---|
4283 | class StorageServer(service.MultiService, Referenceable): |
---|
4284 | hunk ./src/allmydata/storage/server.py 21 |
---|
4285 | implements(RIStorageServer, IStatsProducer) |
---|
4286 | + |
---|
4287 | name = 'storage' |
---|
4288 | LeaseCheckerClass = LeaseCheckingCrawler |
---|
4289 | hunk ./src/allmydata/storage/server.py 24 |
---|
4290 | + DEFAULT_EXPIRATION_POLICY = { |
---|
4291 | + 'enabled': False, |
---|
4292 | + 'mode': 'age', |
---|
4293 | + 'override_lease_duration': None, |
---|
4294 | + 'cutoff_date': None, |
---|
4295 | + 'sharetypes': ('mutable', 'immutable'), |
---|
4296 | + } |
---|
4297 | |
---|
4298 | hunk ./src/allmydata/storage/server.py 32 |
---|
4299 | - def __init__(self, storedir, nodeid, reserved_space=0, |
---|
4300 | - discard_storage=False, readonly_storage=False, |
---|
4301 | + def __init__(self, serverid, backend, statedir, |
---|
4302 | stats_provider=None, |
---|
4303 | hunk ./src/allmydata/storage/server.py 34 |
---|
4304 | - expiration_enabled=False, |
---|
4305 | - expiration_mode="age", |
---|
4306 | - expiration_override_lease_duration=None, |
---|
4307 | - expiration_cutoff_date=None, |
---|
4308 | - expiration_sharetypes=("mutable", "immutable")): |
---|
4309 | + expiration_policy=None): |
---|
4310 | service.MultiService.__init__(self) |
---|
4311 | hunk ./src/allmydata/storage/server.py 36 |
---|
4312 | - assert isinstance(nodeid, str) |
---|
4313 | - assert len(nodeid) == 20 |
---|
4314 | - self.my_nodeid = nodeid |
---|
4315 | - self.storedir = storedir |
---|
4316 | - sharedir = os.path.join(storedir, "shares") |
---|
4317 | - fileutil.make_dirs(sharedir) |
---|
4318 | - self.sharedir = sharedir |
---|
4319 | - # we don't actually create the corruption-advisory dir until necessary |
---|
4320 | - self.corruption_advisory_dir = os.path.join(storedir, |
---|
4321 | - "corruption-advisories") |
---|
4322 | - self.reserved_space = int(reserved_space) |
---|
4323 | - self.no_storage = discard_storage |
---|
4324 | - self.readonly_storage = readonly_storage |
---|
4325 | + precondition(IStorageBackend.providedBy(backend), backend) |
---|
4326 | + precondition(isinstance(serverid, str), serverid) |
---|
4327 | + precondition(len(serverid) == 20, serverid) |
---|
4328 | + |
---|
4329 | + self._serverid = serverid |
---|
4330 | self.stats_provider = stats_provider |
---|
4331 | if self.stats_provider: |
---|
4332 | self.stats_provider.register_producer(self) |
---|
4333 | hunk ./src/allmydata/storage/server.py 44 |
---|
4334 | - self.incomingdir = os.path.join(sharedir, 'incoming') |
---|
4335 | - self._clean_incomplete() |
---|
4336 | - fileutil.make_dirs(self.incomingdir) |
---|
4337 | self._active_writers = weakref.WeakKeyDictionary() |
---|
4338 | hunk ./src/allmydata/storage/server.py 45 |
---|
4339 | + self.backend = backend |
---|
4340 | + self.backend.setServiceParent(self) |
---|
4341 | + self._statedir = statedir |
---|
4342 | log.msg("StorageServer created", facility="tahoe.storage") |
---|
4343 | |
---|
4344 | hunk ./src/allmydata/storage/server.py 50 |
---|
4345 | - if reserved_space: |
---|
4346 | - if self.get_available_space() is None: |
---|
4347 | - log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored", |
---|
4348 | - umin="0wZ27w", level=log.UNUSUAL) |
---|
4349 | - |
---|
4350 | self.latencies = {"allocate": [], # immutable |
---|
4351 | "write": [], |
---|
4352 | "close": [], |
---|
4353 | hunk ./src/allmydata/storage/server.py 61 |
---|
4354 | "renew": [], |
---|
4355 | "cancel": [], |
---|
4356 | } |
---|
4357 | - self.add_bucket_counter() |
---|
4358 | - |
---|
4359 | - statefile = os.path.join(self.storedir, "lease_checker.state") |
---|
4360 | - historyfile = os.path.join(self.storedir, "lease_checker.history") |
---|
4361 | - klass = self.LeaseCheckerClass |
---|
4362 | - self.lease_checker = klass(self, statefile, historyfile, |
---|
4363 | - expiration_enabled, expiration_mode, |
---|
4364 | - expiration_override_lease_duration, |
---|
4365 | - expiration_cutoff_date, |
---|
4366 | - expiration_sharetypes) |
---|
4367 | - self.lease_checker.setServiceParent(self) |
---|
4368 | + self._setup_bucket_counter() |
---|
4369 | + self._setup_lease_checker(expiration_policy or self.DEFAULT_EXPIRATION_POLICY) |
---|
4370 | |
---|
4371 | def __repr__(self): |
---|
4372 | hunk ./src/allmydata/storage/server.py 65 |
---|
4373 | - return "<StorageServer %s>" % (idlib.shortnodeid_b2a(self.my_nodeid),) |
---|
4374 | + return "<StorageServer %s>" % (idlib.shortnodeid_b2a(self._serverid),) |
---|
4375 | |
---|
4376 | hunk ./src/allmydata/storage/server.py 67 |
---|
4377 | - def add_bucket_counter(self): |
---|
4378 | - statefile = os.path.join(self.storedir, "bucket_counter.state") |
---|
4379 | - self.bucket_counter = BucketCountingCrawler(self, statefile) |
---|
4380 | + def _setup_bucket_counter(self): |
---|
4381 | + statefp = self._statedir.child("bucket_counter.state") |
---|
4382 | + self.bucket_counter = BucketCountingCrawler(self.backend, statefp) |
---|
4383 | self.bucket_counter.setServiceParent(self) |
---|
4384 | |
---|
4385 | hunk ./src/allmydata/storage/server.py 72 |
---|
4386 | + def _setup_lease_checker(self, expiration_policy): |
---|
4387 | + statefp = self._statedir.child("lease_checker.state") |
---|
4388 | + historyfp = self._statedir.child("lease_checker.history") |
---|
4389 | + self.lease_checker = self.LeaseCheckerClass(self.backend, statefp, historyfp, expiration_policy) |
---|
4390 | + self.lease_checker.setServiceParent(self) |
---|
4391 | + |
---|
4392 | def count(self, name, delta=1): |
---|
4393 | if self.stats_provider: |
---|
4394 | self.stats_provider.count("storage_server." + name, delta) |
---|
4395 | hunk ./src/allmydata/storage/server.py 92 |
---|
4396 | """Return a dict, indexed by category, that contains a dict of |
---|
4397 | latency numbers for each category. If there are sufficient samples |
---|
4398 | for unambiguous interpretation, each dict will contain the |
---|
4399 | - following keys: mean, 01_0_percentile, 10_0_percentile, |
---|
4400 | + following keys: samplesize, mean, 01_0_percentile, 10_0_percentile, |
---|
4401 | 50_0_percentile (median), 90_0_percentile, 95_0_percentile, |
---|
4402 | 99_0_percentile, 99_9_percentile. If there are insufficient |
---|
4403 | samples for a given percentile to be interpreted unambiguously |
---|
4404 | hunk ./src/allmydata/storage/server.py 114 |
---|
4405 | else: |
---|
4406 | stats["mean"] = None |
---|
4407 | |
---|
4408 | - orderstatlist = [(0.01, "01_0_percentile", 100), (0.1, "10_0_percentile", 10),\ |
---|
4409 | - (0.50, "50_0_percentile", 10), (0.90, "90_0_percentile", 10),\ |
---|
4410 | - (0.95, "95_0_percentile", 20), (0.99, "99_0_percentile", 100),\ |
---|
4411 | + orderstatlist = [(0.1, "10_0_percentile", 10), (0.5, "50_0_percentile", 10), |
---|
4412 | + (0.9, "90_0_percentile", 10), (0.95, "95_0_percentile", 20), |
---|
4413 | + (0.01, "01_0_percentile", 100), (0.99, "99_0_percentile", 100), |
---|
4414 | (0.999, "99_9_percentile", 1000)] |
---|
4415 | |
---|
4416 | for percentile, percentilestring, minnumtoobserve in orderstatlist: |
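A sketch of how each (percentile, key, minnumtoobserve) tuple behaves: the key is reported only once enough samples exist to make that percentile unambiguous (the helper name and exact selection rule here are illustrative):

    def percentile_or_none(samples, fraction, minnum):
        if len(samples) < minnum:
            return None  # insufficient samples for an unambiguous value
        return sorted(samples)[int(fraction * len(samples))]

For example, "99_0_percentile" requires at least 100 samples, since distinguishing the top 1% needs roughly that many observations.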
---|
4417 | hunk ./src/allmydata/storage/server.py 133 |
---|
4418 | kwargs["facility"] = "tahoe.storage" |
---|
4419 | return log.msg(*args, **kwargs) |
---|
4420 | |
---|
4421 | - def _clean_incomplete(self): |
---|
4422 | - fileutil.rm_dir(self.incomingdir) |
---|
4423 | + def get_serverid(self): |
---|
4424 | + return self._serverid |
---|
4425 | |
---|
4426 | def get_stats(self): |
---|
4427 | # remember: RIStatsProvider requires that our return dict |
---|
4428 | hunk ./src/allmydata/storage/server.py 138 |
---|
4429 | - # contains numeric values. |
---|
4430 | + # contains numeric or None values. |
---|
4431 | stats = { 'storage_server.allocated': self.allocated_size(), } |
---|
4432 | hunk ./src/allmydata/storage/server.py 140 |
---|
4433 | - stats['storage_server.reserved_space'] = self.reserved_space |
---|
4434 | for category,ld in self.get_latencies().items(): |
---|
4435 | for name,v in ld.items(): |
---|
4436 | stats['storage_server.latencies.%s.%s' % (category, name)] = v |
---|
4437 | hunk ./src/allmydata/storage/server.py 144 |
---|
4438 | |
---|
4439 | - try: |
---|
4440 | - disk = fileutil.get_disk_stats(self.sharedir, self.reserved_space) |
---|
4441 | - writeable = disk['avail'] > 0 |
---|
4442 | - |
---|
4443 | - # spacetime predictors should use disk_avail / (d(disk_used)/dt) |
---|
4444 | - stats['storage_server.disk_total'] = disk['total'] |
---|
4445 | - stats['storage_server.disk_used'] = disk['used'] |
---|
4446 | - stats['storage_server.disk_free_for_root'] = disk['free_for_root'] |
---|
4447 | - stats['storage_server.disk_free_for_nonroot'] = disk['free_for_nonroot'] |
---|
4448 | - stats['storage_server.disk_avail'] = disk['avail'] |
---|
4449 | - except AttributeError: |
---|
4450 | - writeable = True |
---|
4451 | - except EnvironmentError: |
---|
4452 | - log.msg("OS call to get disk statistics failed", level=log.UNUSUAL) |
---|
4453 | - writeable = False |
---|
4454 | - |
---|
4455 | - if self.readonly_storage: |
---|
4456 | - stats['storage_server.disk_avail'] = 0 |
---|
4457 | - writeable = False |
---|
4458 | + self.backend.fill_in_space_stats(stats) |
---|
4459 | |
---|
4460 | hunk ./src/allmydata/storage/server.py 146 |
---|
4461 | - stats['storage_server.accepting_immutable_shares'] = int(writeable) |
---|
4462 | s = self.bucket_counter.get_state() |
---|
4463 | bucket_count = s.get("last-complete-bucket-count") |
---|
4464 | if bucket_count: |
---|
4465 | hunk ./src/allmydata/storage/server.py 153 |
---|
4466 | return stats |
---|
4467 | |
---|
4468 | def get_available_space(self): |
---|
4469 | - """Returns available space for share storage in bytes, or None if no |
---|
4470 | - API to get this information is available.""" |
---|
4471 | - |
---|
4472 | - if self.readonly_storage: |
---|
4473 | - return 0 |
---|
4474 | - return fileutil.get_available_space(self.sharedir, self.reserved_space) |
---|
4475 | + return self.backend.get_available_space() |
---|
4476 | |
---|
4477 | def allocated_size(self): |
---|
4478 | space = 0 |
---|
4479 | hunk ./src/allmydata/storage/server.py 162 |
---|
4480 | return space |
---|
4481 | |
---|
4482 | def remote_get_version(self): |
---|
4483 | - remaining_space = self.get_available_space() |
---|
4484 | + remaining_space = self.backend.get_available_space() |
---|
4485 | if remaining_space is None: |
---|
4486 | # We're on a platform that has no API to get disk stats. |
---|
4487 | remaining_space = 2**64 |
---|
4488 | hunk ./src/allmydata/storage/server.py 178 |
---|
4489 | } |
---|
4490 | return version |
---|
4491 | |
---|
4492 | - def remote_allocate_buckets(self, storage_index, |
---|
4493 | + def remote_allocate_buckets(self, storageindex, |
---|
4494 | renew_secret, cancel_secret, |
---|
4495 | sharenums, allocated_size, |
---|
4496 | canary, owner_num=0): |
---|
4497 | hunk ./src/allmydata/storage/server.py 182 |
---|
4498 | + # cancel_secret is no longer used. |
---|
4499 | # owner_num is not for clients to set, but rather it should be |
---|
4500 | hunk ./src/allmydata/storage/server.py 184 |
---|
4501 | - # curried into the PersonalStorageServer instance that is dedicated |
---|
4502 | - # to a particular owner. |
---|
4503 | + # curried into a StorageServer instance dedicated to a particular |
---|
4504 | + # owner. |
---|
4505 | start = time.time() |
---|
4506 | self.count("allocate") |
---|
4507 | hunk ./src/allmydata/storage/server.py 188 |
---|
4508 | - alreadygot = set() |
---|
4509 | bucketwriters = {} # k: shnum, v: BucketWriter |
---|
4510 | hunk ./src/allmydata/storage/server.py 189 |
---|
4511 | - si_dir = storage_index_to_dir(storage_index) |
---|
4512 | - si_s = si_b2a(storage_index) |
---|
4513 | |
---|
4514 | hunk ./src/allmydata/storage/server.py 190 |
---|
4515 | + si_s = si_b2a(storageindex) |
---|
4516 | log.msg("storage: allocate_buckets %s" % si_s) |
---|
4517 | |
---|
4518 | hunk ./src/allmydata/storage/server.py 193 |
---|
4519 | - # in this implementation, the lease information (including secrets) |
---|
4520 | - # goes into the share files themselves. It could also be put into a |
---|
4521 | - # separate database. Note that the lease should not be added until |
---|
4522 | - # the BucketWriter has been closed. |
---|
4523 | + # Note that the lease should not be added until the BucketWriter |
---|
4524 | + # has been closed. |
---|
4525 | expire_time = time.time() + 31*24*60*60 |
---|
4526 | hunk ./src/allmydata/storage/server.py 196 |
---|
4527 | - lease_info = LeaseInfo(owner_num, |
---|
4528 | - renew_secret, cancel_secret, |
---|
4529 | - expire_time, self.my_nodeid) |
---|
4530 | + lease_info = LeaseInfo(owner_num, renew_secret, |
---|
4531 | + expiration_time=expire_time, nodeid=self._serverid) |
---|
4532 | |
---|
4533 | max_space_per_bucket = allocated_size |
---|
4534 | |
---|
4535 | hunk ./src/allmydata/storage/server.py 201 |
---|
4536 | - remaining_space = self.get_available_space() |
---|
4537 | + remaining_space = self.backend.get_available_space() |
---|
4538 | limited = remaining_space is not None |
---|
4539 | if limited: |
---|
4540 | hunk ./src/allmydata/storage/server.py 204 |
---|
4541 | - # this is a bit conservative, since some of this allocated_size() |
---|
4542 | - # has already been written to disk, where it will show up in |
---|
4543 | + # This is a bit conservative, since some of this allocated_size() |
---|
4544 | + # has already been written to the backend, where it will show up in |
---|
4545 | # get_available_space. |
---|
4546 | remaining_space -= self.allocated_size() |
---|
4547 | hunk ./src/allmydata/storage/server.py 208 |
---|
4548 | - # self.readonly_storage causes remaining_space <= 0 |
---|
4549 | + # If the backend is read-only, remaining_space will be <= 0. |
---|
4550 | + |
---|
4551 | + shareset = self.backend.get_shareset(storageindex) |
---|
4552 | |
---|
4553 | hunk ./src/allmydata/storage/server.py 212 |
---|
4554 | - # fill alreadygot with all shares that we have, not just the ones |
---|
4555 | + # Fill alreadygot with all shares that we have, not just the ones |
---|
4556 | # they asked about: this will save them a lot of work. Add or update |
---|
4557 | # leases for all of them: if they want us to hold shares for this |
---|
4558 | hunk ./src/allmydata/storage/server.py 215 |
---|
4559 | - # file, they'll want us to hold leases for this file. |
---|
4560 | - for (shnum, fn) in self._get_bucket_shares(storage_index): |
---|
4561 | - alreadygot.add(shnum) |
---|
4562 | - sf = ShareFile(fn) |
---|
4563 | - sf.add_or_renew_lease(lease_info) |
---|
4564 | + # file, they'll want us to hold leases for all the shares of it. |
---|
4565 | + # |
---|
4566 | + # XXX should we be making the assumption here that lease info is |
---|
4567 | + # duplicated in all shares? |
---|
4568 | + alreadygot = set() |
---|
4569 | + for share in shareset.get_shares(): |
---|
4570 | + share.add_or_renew_lease(lease_info) |
---|
4571 | + alreadygot.add(share.shnum) |
---|
4572 | |
---|
4573 | hunk ./src/allmydata/storage/server.py 224 |
---|
4574 | - for shnum in sharenums: |
---|
4575 | - incominghome = os.path.join(self.incomingdir, si_dir, "%d" % shnum) |
---|
4576 | - finalhome = os.path.join(self.sharedir, si_dir, "%d" % shnum) |
---|
4577 | - if os.path.exists(finalhome): |
---|
4578 | - # great! we already have it. easy. |
---|
4579 | - pass |
---|
4580 | - elif os.path.exists(incominghome): |
---|
4581 | + for shnum in sharenums - alreadygot: |
---|
4582 | + if shareset.has_incoming(shnum): |
---|
4583 | # Note that we don't create BucketWriters for shnums that |
---|
4584 | # have a partial share (in incoming/), so if a second upload |
---|
4585 | # occurs while the first is still in progress, the second |
---|
4586 | hunk ./src/allmydata/storage/server.py 232 |
---|
4587 | # uploader will use different storage servers. |
---|
4588 | pass |
---|
4589 | elif (not limited) or (remaining_space >= max_space_per_bucket): |
---|
4590 | - # ok! we need to create the new share file. |
---|
4591 | - bw = BucketWriter(self, incominghome, finalhome, |
---|
4592 | - max_space_per_bucket, lease_info, canary) |
---|
4593 | - if self.no_storage: |
---|
4594 | - bw.throw_out_all_data = True |
---|
4595 | + bw = shareset.make_bucket_writer(self, shnum, max_space_per_bucket, |
---|
4596 | + lease_info, canary) |
---|
4597 | bucketwriters[shnum] = bw |
---|
4598 | self._active_writers[bw] = 1 |
---|
4599 | if limited: |
---|
4600 | hunk ./src/allmydata/storage/server.py 239 |
---|
4601 | remaining_space -= max_space_per_bucket |
---|
4602 | else: |
---|
4603 | - # bummer! not enough space to accept this bucket |
---|
4604 | + # Bummer: not enough space to accept this share. |
---|
4605 | pass |
---|
4606 | |
---|
4607 | hunk ./src/allmydata/storage/server.py 242 |
---|
4608 | - if bucketwriters: |
---|
4609 | - fileutil.make_dirs(os.path.join(self.sharedir, si_dir)) |
---|
4610 | - |
---|
4611 | self.add_latency("allocate", time.time() - start) |
---|
4612 | return alreadygot, bucketwriters |
---|
4613 | |
---|
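
For orientation, the rewritten allocate_buckets above reduces to the following control flow. This is a minimal sketch, not part of the patch: it uses only calls that appear in the hunks above (get_shareset, get_shares, add_or_renew_lease, has_incoming, make_bucket_writer), the helper name is invented, and the space accounting is simplified.

    def allocate_buckets_sketch(server, storageindex, sharenums,
                                allocated_size, lease_info, canary):
        shareset = server.backend.get_shareset(storageindex)

        # Renew the lease on every share already held, remembering which.
        alreadygot = set()
        for share in shareset.get_shares():
            share.add_or_renew_lease(lease_info)
            alreadygot.add(share.shnum)

        bucketwriters = {}
        remaining = server.backend.get_available_space()
        for shnum in sharenums - alreadygot:
            if shareset.has_incoming(shnum):
                continue  # a partial upload is in progress; let it finish
            if remaining is None or remaining >= allocated_size:
                bucketwriters[shnum] = shareset.make_bucket_writer(
                    server, shnum, allocated_size, lease_info, canary)
                if remaining is not None:
                    remaining -= allocated_size
        return (alreadygot, bucketwriters)
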
4614 | hunk ./src/allmydata/storage/server.py 245 |
---|
4615 | - def _iter_share_files(self, storage_index): |
---|
4616 | - for shnum, filename in self._get_bucket_shares(storage_index): |
---|
4617 | - f = open(filename, 'rb') |
---|
4618 | - header = f.read(32) |
---|
4619 | - f.close() |
---|
4620 | - if header[:32] == MutableShareFile.MAGIC: |
---|
4621 | - sf = MutableShareFile(filename, self) |
---|
4622 | - # note: if the share has been migrated, the renew_lease() |
---|
4623 | - # call will throw an exception, with information to help the |
---|
4624 | - # client update the lease. |
---|
4625 | - elif header[:4] == struct.pack(">L", 1): |
---|
4626 | - sf = ShareFile(filename) |
---|
4627 | - else: |
---|
4628 | - continue # non-sharefile |
---|
4629 | - yield sf |
---|
4630 | - |
---|
4631 | - def remote_add_lease(self, storage_index, renew_secret, cancel_secret, |
---|
4632 | + def remote_add_lease(self, storageindex, renew_secret, cancel_secret, |
---|
4633 | owner_num=1): |
---|
4634 | hunk ./src/allmydata/storage/server.py 247 |
---|
4635 | + # cancel_secret is no longer used. |
---|
4636 | start = time.time() |
---|
4637 | self.count("add-lease") |
---|
4638 | new_expire_time = time.time() + 31*24*60*60 |
---|
4639 | hunk ./src/allmydata/storage/server.py 251 |
---|
4640 | - lease_info = LeaseInfo(owner_num, |
---|
4641 | - renew_secret, cancel_secret, |
---|
4642 | - new_expire_time, self.my_nodeid) |
---|
4643 | - for sf in self._iter_share_files(storage_index): |
---|
4644 | - sf.add_or_renew_lease(lease_info) |
---|
4645 | - self.add_latency("add-lease", time.time() - start) |
---|
4646 | - return None |
---|
4647 | + lease_info = LeaseInfo(owner_num, renew_secret, |
---|
4648 | + new_expire_time, self._serverid) |
---|
4649 | |
---|
4650 | hunk ./src/allmydata/storage/server.py 254 |
---|
4651 | - def remote_renew_lease(self, storage_index, renew_secret): |
---|
4652 | + try: |
---|
4653 | + self.backend.add_or_renew_lease(lease_info) |
---|
4654 | + finally: |
---|
4655 | + self.add_latency("add-lease", time.time() - start) |
---|
4656 | + |
---|
4657 | + def remote_renew_lease(self, storageindex, renew_secret): |
---|
4658 | start = time.time() |
---|
4659 | self.count("renew") |
---|
4660 | hunk ./src/allmydata/storage/server.py 262 |
---|
4661 | - new_expire_time = time.time() + 31*24*60*60 |
---|
4662 | - found_buckets = False |
---|
4663 | - for sf in self._iter_share_files(storage_index): |
---|
4664 | - found_buckets = True |
---|
4665 | - sf.renew_lease(renew_secret, new_expire_time) |
---|
4666 | - self.add_latency("renew", time.time() - start) |
---|
4667 | - if not found_buckets: |
---|
4668 | - raise IndexError("no such lease to renew") |
---|
4669 | + |
---|
4670 | + try: |
---|
4671 | + shareset = self.backend.get_shareset(storageindex) |
---|
4672 | + new_expiration_time = start + 31*24*60*60 # one month from now |
---|
4673 | + shareset.renew_lease(renew_secret, new_expiration_time) |
---|
4674 | + finally: |
---|
4675 | + self.add_latency("renew", time.time() - start) |
---|
4676 | |
---|
4677 | def bucket_writer_closed(self, bw, consumed_size): |
---|
4678 | if self.stats_provider: |
---|
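
The lease methods above (and remote_get_buckets, readv, and writev below) all adopt the same try/finally shape, so that per-operation latency is recorded even when the backend raises. A hedged sketch of the idiom (the helper name is invented; count and add_latency are the existing StorageServer methods):

    import time

    def timed_remote_call(server, name, operation):
        start = time.time()
        server.count(name)
        try:
            return operation()   # may raise; latency is still recorded
        finally:
            server.add_latency(name, time.time() - start)
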
4679 | hunk ./src/allmydata/storage/server.py 275 |
---|
4680 | self.stats_provider.count('storage_server.bytes_added', consumed_size) |
---|
4681 | del self._active_writers[bw] |
---|
4682 | |
---|
4683 | - def _get_bucket_shares(self, storage_index): |
---|
4684 | - """Return a list of (shnum, pathname) tuples for files that hold |
---|
4685 | - shares for this storage_index. In each tuple, 'shnum' will always be |
---|
4686 | - the integer form of the last component of 'pathname'.""" |
---|
4687 | - storagedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index)) |
---|
4688 | - try: |
---|
4689 | - for f in os.listdir(storagedir): |
---|
4690 | - if NUM_RE.match(f): |
---|
4691 | - filename = os.path.join(storagedir, f) |
---|
4692 | - yield (int(f), filename) |
---|
4693 | - except OSError: |
---|
4694 | - # Commonly caused by there being no buckets at all. |
---|
4695 | - pass |
---|
4696 | - |
---|
4697 | - def remote_get_buckets(self, storage_index): |
---|
4698 | + def remote_get_buckets(self, storageindex): |
---|
4699 | start = time.time() |
---|
4700 | self.count("get") |
---|
4701 | hunk ./src/allmydata/storage/server.py 278 |
---|
4702 | - si_s = si_b2a(storage_index) |
---|
4703 | + si_s = si_b2a(storageindex) |
---|
4704 | log.msg("storage: get_buckets %s" % si_s) |
---|
4705 | bucketreaders = {} # k: sharenum, v: BucketReader |
---|
4706 | hunk ./src/allmydata/storage/server.py 281 |
---|
4707 | - for shnum, filename in self._get_bucket_shares(storage_index): |
---|
4708 | - bucketreaders[shnum] = BucketReader(self, filename, |
---|
4709 | - storage_index, shnum) |
---|
4710 | - self.add_latency("get", time.time() - start) |
---|
4711 | - return bucketreaders |
---|
4712 | |
---|
4713 | hunk ./src/allmydata/storage/server.py 282 |
---|
4714 | - def get_leases(self, storage_index): |
---|
4715 | - """Provide an iterator that yields all of the leases attached to this |
---|
4716 | - bucket. Each lease is returned as a LeaseInfo instance. |
---|
4717 | + try: |
---|
4718 | + shareset = self.backend.get_shareset(storageindex) |
---|
4719 | + for share in shareset.get_shares(): |
---|
4720 | + bucketreaders[share.get_shnum()] = shareset.make_bucket_reader(self, share) |
---|
4721 | + return bucketreaders |
---|
4722 | + finally: |
---|
4723 | + self.add_latency("get", time.time() - start) |
---|
4724 | |
---|
4725 | hunk ./src/allmydata/storage/server.py 290 |
---|
4726 | - This method is not for client use. |
---|
4727 | + def get_leases(self, storageindex): |
---|
4728 | """ |
---|
4729 | hunk ./src/allmydata/storage/server.py 292 |
---|
4730 | + Provide an iterator that yields all of the leases attached to this |
---|
4731 | + bucket. Each lease is returned as a LeaseInfo instance. |
---|
4732 | |
---|
4733 | hunk ./src/allmydata/storage/server.py 295 |
---|
4734 | - # since all shares get the same lease data, we just grab the leases |
---|
4735 | - # from the first share |
---|
4736 | - try: |
---|
4737 | - shnum, filename = self._get_bucket_shares(storage_index).next() |
---|
4738 | - sf = ShareFile(filename) |
---|
4739 | - return sf.get_leases() |
---|
4740 | - except StopIteration: |
---|
4741 | - return iter([]) |
---|
4742 | + This method is not for client use. XXX do we need it at all? |
---|
4743 | + """ |
---|
4744 | + return self.backend.get_shareset(storageindex).get_leases() |
---|
4745 | |
---|
4746 | hunk ./src/allmydata/storage/server.py 299 |
---|
4747 | - def remote_slot_testv_and_readv_and_writev(self, storage_index, |
---|
4748 | + def remote_slot_testv_and_readv_and_writev(self, storageindex, |
---|
4749 | secrets, |
---|
4750 | test_and_write_vectors, |
---|
4751 | read_vector): |
---|
4752 | hunk ./src/allmydata/storage/server.py 305 |
---|
4753 | start = time.time() |
---|
4754 | self.count("writev") |
---|
4755 | - si_s = si_b2a(storage_index) |
---|
4756 | + si_s = si_b2a(storageindex) |
---|
4757 | log.msg("storage: slot_writev %s" % si_s) |
---|
4758 | hunk ./src/allmydata/storage/server.py 307 |
---|
4759 | - si_dir = storage_index_to_dir(storage_index) |
---|
4760 | - (write_enabler, renew_secret, cancel_secret) = secrets |
---|
4761 | - # shares exist if there is a file for them |
---|
4762 | - bucketdir = os.path.join(self.sharedir, si_dir) |
---|
4763 | - shares = {} |
---|
4764 | - if os.path.isdir(bucketdir): |
---|
4765 | - for sharenum_s in os.listdir(bucketdir): |
---|
4766 | - try: |
---|
4767 | - sharenum = int(sharenum_s) |
---|
4768 | - except ValueError: |
---|
4769 | - continue |
---|
4770 | - filename = os.path.join(bucketdir, sharenum_s) |
---|
4771 | - msf = MutableShareFile(filename, self) |
---|
4772 | - msf.check_write_enabler(write_enabler, si_s) |
---|
4773 | - shares[sharenum] = msf |
---|
4774 | - # write_enabler is good for all existing shares. |
---|
4775 | - |
---|
4776 | - # Now evaluate test vectors. |
---|
4777 | - testv_is_good = True |
---|
4778 | - for sharenum in test_and_write_vectors: |
---|
4779 | - (testv, datav, new_length) = test_and_write_vectors[sharenum] |
---|
4780 | - if sharenum in shares: |
---|
4781 | - if not shares[sharenum].check_testv(testv): |
---|
4782 | - self.log("testv failed: [%d]: %r" % (sharenum, testv)) |
---|
4783 | - testv_is_good = False |
---|
4784 | - break |
---|
4785 | - else: |
---|
4786 | - # compare the vectors against an empty share, in which all |
---|
4787 | - # reads return empty strings. |
---|
4788 | - if not EmptyShare().check_testv(testv): |
---|
4789 | - self.log("testv failed (empty): [%d] %r" % (sharenum, |
---|
4790 | - testv)) |
---|
4791 | - testv_is_good = False |
---|
4792 | - break |
---|
4793 | - |
---|
4794 | - # now gather the read vectors, before we do any writes |
---|
4795 | - read_data = {} |
---|
4796 | - for sharenum, share in shares.items(): |
---|
4797 | - read_data[sharenum] = share.readv(read_vector) |
---|
4798 | - |
---|
4799 | - ownerid = 1 # TODO |
---|
4800 | - expire_time = time.time() + 31*24*60*60 # one month |
---|
4801 | - lease_info = LeaseInfo(ownerid, |
---|
4802 | - renew_secret, cancel_secret, |
---|
4803 | - expire_time, self.my_nodeid) |
---|
4804 | - |
---|
4805 | - if testv_is_good: |
---|
4806 | - # now apply the write vectors |
---|
4807 | - for sharenum in test_and_write_vectors: |
---|
4808 | - (testv, datav, new_length) = test_and_write_vectors[sharenum] |
---|
4809 | - if new_length == 0: |
---|
4810 | - if sharenum in shares: |
---|
4811 | - shares[sharenum].unlink() |
---|
4812 | - else: |
---|
4813 | - if sharenum not in shares: |
---|
4814 | - # allocate a new share |
---|
4815 | - allocated_size = 2000 # arbitrary, really |
---|
4816 | - share = self._allocate_slot_share(bucketdir, secrets, |
---|
4817 | - sharenum, |
---|
4818 | - allocated_size, |
---|
4819 | - owner_num=0) |
---|
4820 | - shares[sharenum] = share |
---|
4821 | - shares[sharenum].writev(datav, new_length) |
---|
4822 | - # and update the lease |
---|
4823 | - shares[sharenum].add_or_renew_lease(lease_info) |
---|
4824 | - |
---|
4825 | - if new_length == 0: |
---|
4826 | - # delete empty bucket directories |
---|
4827 | - if not os.listdir(bucketdir): |
---|
4828 | - os.rmdir(bucketdir) |
---|
4829 | |
---|
4830 | hunk ./src/allmydata/storage/server.py 308 |
---|
4831 | + try: |
---|
4832 | + shareset = self.backend.get_shareset(storageindex) |
---|
4833 | + expiration_time = start + 31*24*60*60 # one month from now |
---|
4834 | + return shareset.testv_and_readv_and_writev(self, secrets, test_and_write_vectors, |
---|
4835 | + read_vector, expiration_time) |
---|
4836 | + finally: |
---|
4837 | + self.add_latency("writev", time.time() - start) |
---|
4838 | |
---|
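
remote_slot_testv_and_readv_and_writev now delegates wholesale to the shareset. Reconstructed from the inline code deleted above, the ordering contract the shareset must honour looks roughly like this; it is a sketch only (missing shares are tested against an empty share, and share creation, write-enabler checks, and lease renewal are elided):

    def apply_test_and_write_sketch(shares, test_and_write_vectors, read_vector):
        # 1. Every test vector must pass before any write is applied.
        testv_is_good = True
        for shnum, (testv, datav, new_length) in test_and_write_vectors.items():
            if shnum in shares and not shares[shnum].check_testv(testv):
                testv_is_good = False
                break

        # 2. Read vectors are gathered before any writes.
        read_data = {}
        for shnum, share in shares.items():
            read_data[shnum] = share.readv(read_vector)

        # 3. Writes are applied only if all tests passed; a write with
        #    new_length == 0 deletes the share.
        if testv_is_good:
            for shnum, (testv, datav, new_length) in test_and_write_vectors.items():
                if new_length == 0 and shnum in shares:
                    shares[shnum].unlink()
                elif shnum in shares:
                    shares[shnum].writev(datav, new_length)
        return (testv_is_good, read_data)
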
4839 | hunk ./src/allmydata/storage/server.py 316 |
---|
4840 | - # all done |
---|
4841 | - self.add_latency("writev", time.time() - start) |
---|
4842 | - return (testv_is_good, read_data) |
---|
4843 | - |
---|
4844 | - def _allocate_slot_share(self, bucketdir, secrets, sharenum, |
---|
4845 | - allocated_size, owner_num=0): |
---|
4846 | - (write_enabler, renew_secret, cancel_secret) = secrets |
---|
4847 | - my_nodeid = self.my_nodeid |
---|
4848 | - fileutil.make_dirs(bucketdir) |
---|
4849 | - filename = os.path.join(bucketdir, "%d" % sharenum) |
---|
4850 | - share = create_mutable_sharefile(filename, my_nodeid, write_enabler, |
---|
4851 | - self) |
---|
4852 | - return share |
---|
4853 | - |
---|
4854 | - def remote_slot_readv(self, storage_index, shares, readv): |
---|
4855 | + def remote_slot_readv(self, storageindex, shares, readv): |
---|
4856 | start = time.time() |
---|
4857 | self.count("readv") |
---|
4858 | hunk ./src/allmydata/storage/server.py 319 |
---|
4859 | - si_s = si_b2a(storage_index) |
---|
4860 | - lp = log.msg("storage: slot_readv %s %s" % (si_s, shares), |
---|
4861 | - facility="tahoe.storage", level=log.OPERATIONAL) |
---|
4862 | - si_dir = storage_index_to_dir(storage_index) |
---|
4863 | - # shares exist if there is a file for them |
---|
4864 | - bucketdir = os.path.join(self.sharedir, si_dir) |
---|
4865 | - if not os.path.isdir(bucketdir): |
---|
4866 | + si_s = si_b2a(storageindex) |
---|
4867 | + log.msg("storage: slot_readv %s %s" % (si_s, shares), |
---|
4868 | + facility="tahoe.storage", level=log.OPERATIONAL) |
---|
4869 | + |
---|
4870 | + try: |
---|
4871 | + shareset = self.backend.get_shareset(storageindex) |
---|
4872 | + return shareset.readv(self, shares, readv) |
---|
4873 | + finally: |
---|
4874 | self.add_latency("readv", time.time() - start) |
---|
4875 | hunk ./src/allmydata/storage/server.py 328 |
---|
4876 | - return {} |
---|
4877 | - datavs = {} |
---|
4878 | - for sharenum_s in os.listdir(bucketdir): |
---|
4879 | - try: |
---|
4880 | - sharenum = int(sharenum_s) |
---|
4881 | - except ValueError: |
---|
4882 | - continue |
---|
4883 | - if sharenum in shares or not shares: |
---|
4884 | - filename = os.path.join(bucketdir, sharenum_s) |
---|
4885 | - msf = MutableShareFile(filename, self) |
---|
4886 | - datavs[sharenum] = msf.readv(readv) |
---|
4887 | - log.msg("returning shares %s" % (datavs.keys(),), |
---|
4888 | - facility="tahoe.storage", level=log.NOISY, parent=lp) |
---|
4889 | - self.add_latency("readv", time.time() - start) |
---|
4890 | - return datavs |
---|
4891 | |
---|
4892 | hunk ./src/allmydata/storage/server.py 329 |
---|
4893 | - def remote_advise_corrupt_share(self, share_type, storage_index, shnum, |
---|
4894 | - reason): |
---|
4895 | - fileutil.make_dirs(self.corruption_advisory_dir) |
---|
4896 | - now = time_format.iso_utc(sep="T") |
---|
4897 | - si_s = si_b2a(storage_index) |
---|
4898 | - # windows can't handle colons in the filename |
---|
4899 | - fn = os.path.join(self.corruption_advisory_dir, |
---|
4900 | - "%s--%s-%d" % (now, si_s, shnum)).replace(":","") |
---|
4901 | - f = open(fn, "w") |
---|
4902 | - f.write("report: Share Corruption\n") |
---|
4903 | - f.write("type: %s\n" % share_type) |
---|
4904 | - f.write("storage_index: %s\n" % si_s) |
---|
4905 | - f.write("share_number: %d\n" % shnum) |
---|
4906 | - f.write("\n") |
---|
4907 | - f.write(reason) |
---|
4908 | - f.write("\n") |
---|
4909 | - f.close() |
---|
4910 | - log.msg(format=("client claims corruption in (%(share_type)s) " + |
---|
4911 | - "%(si)s-%(shnum)d: %(reason)s"), |
---|
4912 | - share_type=share_type, si=si_s, shnum=shnum, reason=reason, |
---|
4913 | - level=log.SCARY, umid="SGx2fA") |
---|
4914 | - return None |
---|
4915 | + def remote_advise_corrupt_share(self, share_type, storage_index, shnum, reason): |
---|
4916 | + self.backend.advise_corrupt_share(share_type, storage_index, shnum, reason) |
---|
4917 | hunk ./src/allmydata/test/common.py 20 |
---|
4918 | from allmydata.mutable.common import CorruptShareError |
---|
4919 | from allmydata.mutable.layout import unpack_header |
---|
4920 | from allmydata.mutable.publish import MutableData |
---|
4921 | -from allmydata.storage.mutable import MutableShareFile |
---|
4922 | +from allmydata.storage.backends.disk.mutable import MutableDiskShare |
---|
4923 | from allmydata.util import hashutil, log, fileutil, pollmixin |
---|
4924 | from allmydata.util.assertutil import precondition |
---|
4925 | from allmydata.util.consumer import download_to_data |
---|
4926 | hunk ./src/allmydata/test/common.py 1297 |
---|
4927 | |
---|
4928 | def _corrupt_mutable_share_data(data, debug=False): |
---|
4929 | prefix = data[:32] |
---|
4930 | - assert prefix == MutableShareFile.MAGIC, "This function is designed to corrupt mutable shares of v1, and the magic number doesn't look right: %r vs %r" % (prefix, MutableShareFile.MAGIC) |
---|
4931 | - data_offset = MutableShareFile.DATA_OFFSET |
---|
4932 | + assert prefix == MutableDiskShare.MAGIC, "This function is designed to corrupt mutable shares of v1, and the magic number doesn't look right: %r vs %r" % (prefix, MutableDiskShare.MAGIC) |
---|
4933 | + data_offset = MutableDiskShare.DATA_OFFSET |
---|
4934 | sharetype = data[data_offset:data_offset+1] |
---|
4935 | assert sharetype == "\x00", "non-SDMF mutable shares not supported" |
---|
4936 | (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize, |
---|
4937 | hunk ./src/allmydata/test/no_network.py 21 |
---|
4938 | from twisted.application import service |
---|
4939 | from twisted.internet import defer, reactor |
---|
4940 | from twisted.python.failure import Failure |
---|
4941 | +from twisted.python.filepath import FilePath |
---|
4942 | from foolscap.api import Referenceable, fireEventually, RemoteException |
---|
4943 | from base64 import b32encode |
---|
4944 | hunk ./src/allmydata/test/no_network.py 24 |
---|
4945 | + |
---|
4946 | from allmydata import uri as tahoe_uri |
---|
4947 | from allmydata.client import Client |
---|
4948 | hunk ./src/allmydata/test/no_network.py 27 |
---|
4949 | -from allmydata.storage.server import StorageServer, storage_index_to_dir |
---|
4950 | +from allmydata.storage.server import StorageServer |
---|
4951 | +from allmydata.storage.backends.disk.disk_backend import DiskBackend |
---|
4952 | from allmydata.util import fileutil, idlib, hashutil |
---|
4953 | from allmydata.util.hashutil import sha1 |
---|
4954 | from allmydata.test.common_web import HTTPClientGETFactory |
---|
4955 | hunk ./src/allmydata/test/no_network.py 155 |
---|
4956 | seed = server.get_permutation_seed() |
---|
4957 | return sha1(peer_selection_index + seed).digest() |
---|
4958 | return sorted(self.get_connected_servers(), key=_permuted) |
---|
4959 | + |
---|
4960 | def get_connected_servers(self): |
---|
4961 | return self.client._servers |
---|
4962 | hunk ./src/allmydata/test/no_network.py 158 |
---|
4963 | + |
---|
4964 | def get_nickname_for_serverid(self, serverid): |
---|
4965 | return None |
---|
4966 | |
---|
4967 | hunk ./src/allmydata/test/no_network.py 162 |
---|
4968 | + def get_known_servers(self): |
---|
4969 | + return self.get_connected_servers() |
---|
4970 | + |
---|
4971 | + def get_all_serverids(self): |
---|
4972 | + return self.client.get_all_serverids() |
---|
4973 | + |
---|
4974 | + |
---|
4975 | class NoNetworkClient(Client): |
---|
4976 | def create_tub(self): |
---|
4977 | pass |
---|
4978 | hunk ./src/allmydata/test/no_network.py 262 |
---|
4979 | |
---|
4980 | def make_server(self, i, readonly=False): |
---|
4981 | serverid = hashutil.tagged_hash("serverid", str(i))[:20] |
---|
4982 | - serverdir = os.path.join(self.basedir, "servers", |
---|
4983 | - idlib.shortnodeid_b2a(serverid), "storage") |
---|
4984 | - fileutil.make_dirs(serverdir) |
---|
4985 | - ss = StorageServer(serverdir, serverid, stats_provider=SimpleStats(), |
---|
4986 | - readonly_storage=readonly) |
---|
4987 | + storagedir = FilePath(self.basedir).child("servers").child(idlib.shortnodeid_b2a(serverid)).child("storage") |
---|
4988 | + |
---|
4989 | + # The backend will make the storage directory and any necessary parents. |
---|
4990 | + backend = DiskBackend(storagedir, readonly=readonly) |
---|
4991 | + ss = StorageServer(serverid, backend, storagedir, stats_provider=SimpleStats()) |
---|
4992 | ss._no_network_server_number = i |
---|
4993 | return ss |
---|
4994 | |
---|
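
Test servers are now built by handing the StorageServer a backend object rather than a bare directory path. A sketch of the construction, assuming only the signatures visible in this hunk (the serverid and directory names are illustrative):

    from twisted.python.filepath import FilePath
    from allmydata.storage.server import StorageServer
    from allmydata.storage.backends.disk.disk_backend import DiskBackend

    serverid = "\x00" * 20   # any 20-byte id
    storagedir = FilePath("basedir").child("servers").child("xyz").child("storage")
    backend = DiskBackend(storagedir, readonly=False)  # creates the directory tree
    ss = StorageServer(serverid, backend, storagedir)
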
4995 | hunk ./src/allmydata/test/no_network.py 276 |
---|
4996 | middleman = service.MultiService() |
---|
4997 | middleman.setServiceParent(self) |
---|
4998 | ss.setServiceParent(middleman) |
---|
4999 | - serverid = ss.my_nodeid |
---|
5000 | + serverid = ss.get_serverid() |
---|
5001 | self.servers_by_number[i] = ss |
---|
5002 | wrapper = wrap_storage_server(ss) |
---|
5003 | self.wrappers_by_id[serverid] = wrapper |
---|
5004 | hunk ./src/allmydata/test/no_network.py 295 |
---|
5005 | # it's enough to remove the server from c._servers (we don't actually |
---|
5006 | # have to detach and stopService it) |
---|
5007 | for i,ss in self.servers_by_number.items(): |
---|
5008 | - if ss.my_nodeid == serverid: |
---|
5009 | + if ss.get_serverid() == serverid: |
---|
5010 | del self.servers_by_number[i] |
---|
5011 | break |
---|
5012 | del self.wrappers_by_id[serverid] |
---|
5013 | hunk ./src/allmydata/test/no_network.py 345 |
---|
5014 | def get_clientdir(self, i=0): |
---|
5015 | return self.g.clients[i].basedir |
---|
5016 | |
---|
5017 | + def get_server(self, i): |
---|
5018 | + return self.g.servers_by_number[i] |
---|
5019 | + |
---|
5020 | def get_serverdir(self, i): |
---|
5021 | hunk ./src/allmydata/test/no_network.py 349 |
---|
5022 | - return self.g.servers_by_number[i].storedir |
---|
5023 | + return self.g.servers_by_number[i].backend.storedir |
---|
5024 | + |
---|
5025 | + def remove_server(self, i): |
---|
5026 | + self.g.remove_server(self.g.servers_by_number[i].get_serverid()) |
---|
5027 | |
---|
5028 | def iterate_servers(self): |
---|
5029 | for i in sorted(self.g.servers_by_number.keys()): |
---|
5030 | hunk ./src/allmydata/test/no_network.py 357 |
---|
5031 | ss = self.g.servers_by_number[i] |
---|
5032 | - yield (i, ss, ss.storedir) |
---|
5033 | + yield (i, ss, ss.backend.storedir) |
---|
5034 | |
---|
5035 | def find_uri_shares(self, uri): |
---|
5036 | si = tahoe_uri.from_string(uri).get_storage_index() |
---|
5037 | hunk ./src/allmydata/test/no_network.py 361 |
---|
5038 | - prefixdir = storage_index_to_dir(si) |
---|
5039 | shares = [] |
---|
5040 | for i,ss in self.g.servers_by_number.items(): |
---|
5041 | hunk ./src/allmydata/test/no_network.py 363 |
---|
5042 | - serverid = ss.my_nodeid |
---|
5043 | - basedir = os.path.join(ss.sharedir, prefixdir) |
---|
5044 | - if not os.path.exists(basedir): |
---|
5045 | - continue |
---|
5046 | - for f in os.listdir(basedir): |
---|
5047 | - try: |
---|
5048 | - shnum = int(f) |
---|
5049 | - shares.append((shnum, serverid, os.path.join(basedir, f))) |
---|
5050 | - except ValueError: |
---|
5051 | - pass |
---|
5052 | + for share in ss.backend.get_shareset(si).get_shares(): |
---|
5053 | + shares.append((share.get_shnum(), ss.get_serverid(), share._home)) |
---|
5054 | return sorted(shares) |
---|
5055 | |
---|
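
find_uri_shares now returns (shnum, serverid, FilePath) triples instead of filesystem path strings, so callers manipulate shares through FilePath methods. An illustrative, hypothetical use (self and some_uri stand for a test case and a cap):

    for (shnum, serverid, sharefp) in self.find_uri_shares(some_uri):
        data = sharefp.getContent()     # replaces open(path, "rb").read()
        if shnum == 0:
            sharefp.remove()            # replaces os.unlink(path)
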
5056 | hunk ./src/allmydata/test/no_network.py 367 |
---|
5057 | + def count_leases(self, uri): |
---|
5058 | + """Return (filename, leasecount) pairs in arbitrary order.""" |
---|
5059 | + si = tahoe_uri.from_string(uri).get_storage_index() |
---|
5060 | + lease_counts = [] |
---|
5061 | + for i,ss in self.g.servers_by_number.items(): |
---|
5062 | + for share in ss.backend.get_shareset(si).get_shares(): |
---|
5063 | + num_leases = len(list(share.get_leases())) |
---|
5064 | + lease_counts.append( (share._home.path, num_leases) ) |
---|
5065 | + return lease_counts |
---|
5066 | + |
---|
5067 | def copy_shares(self, uri): |
---|
5068 | shares = {} |
---|
5069 | hunk ./src/allmydata/test/no_network.py 379 |
---|
5070 | - for (shnum, serverid, sharefile) in self.find_uri_shares(uri): |
---|
5071 | - shares[sharefile] = open(sharefile, "rb").read() |
---|
5072 | + for (shnum, serverid, sharefp) in self.find_uri_shares(uri): |
---|
5073 | + shares[sharefp.path] = sharefp.getContent() |
---|
5074 | return shares |
---|
5075 | |
---|
5076 | hunk ./src/allmydata/test/no_network.py 383 |
---|
5077 | + def copy_share(self, from_share, uri, to_server): |
---|
5078 | + si = tahoe_uri.from_string(uri).get_storage_index() |
---|
5079 | + (i_shnum, i_serverid, i_sharefp) = from_share |
---|
5080 | + shares_dir = to_server.backend.get_shareset(si)._sharehomedir |
---|
5081 | + i_sharefp.copyTo(shares_dir.child(str(i_shnum))) |
---|
5082 | + |
---|
5083 | def restore_all_shares(self, shares): |
---|
5084 | hunk ./src/allmydata/test/no_network.py 390 |
---|
5085 | - for sharefile, data in shares.items(): |
---|
5086 | - open(sharefile, "wb").write(data) |
---|
5087 | + for sharepath, data in shares.items(): |
---|
5088 | + FilePath(sharepath).setContent(data) |
---|
5089 | |
---|
5090 | hunk ./src/allmydata/test/no_network.py 393 |
---|
5091 | - def delete_share(self, (shnum, serverid, sharefile)): |
---|
5092 | - os.unlink(sharefile) |
---|
5093 | + def delete_share(self, (shnum, serverid, sharefp)): |
---|
5094 | + sharefp.remove() |
---|
5095 | |
---|
5096 | def delete_shares_numbered(self, uri, shnums): |
---|
5097 | hunk ./src/allmydata/test/no_network.py 397 |
---|
5098 | - for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri): |
---|
5099 | + for (i_shnum, i_serverid, i_sharefp) in self.find_uri_shares(uri): |
---|
5100 | if i_shnum in shnums: |
---|
5101 | hunk ./src/allmydata/test/no_network.py 399 |
---|
5102 | - os.unlink(i_sharefile) |
---|
5103 | + i_sharefp.remove() |
---|
5104 | |
---|
5105 | hunk ./src/allmydata/test/no_network.py 401 |
---|
5106 | - def corrupt_share(self, (shnum, serverid, sharefile), corruptor_function): |
---|
5107 | - sharedata = open(sharefile, "rb").read() |
---|
5108 | - corruptdata = corruptor_function(sharedata) |
---|
5109 | - open(sharefile, "wb").write(corruptdata) |
---|
5110 | + def corrupt_share(self, (shnum, serverid, sharefp), corruptor_function, debug=False): |
---|
5111 | + sharedata = sharefp.getContent() |
---|
5112 | + corruptdata = corruptor_function(sharedata, debug=debug) |
---|
5113 | + sharefp.setContent(corruptdata) |
---|
5114 | |
---|
5115 | def corrupt_shares_numbered(self, uri, shnums, corruptor, debug=False): |
---|
5116 | hunk ./src/allmydata/test/no_network.py 407 |
---|
5117 | - for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri): |
---|
5118 | + for (i_shnum, i_serverid, i_sharefp) in self.find_uri_shares(uri): |
---|
5119 | if i_shnum in shnums: |
---|
5120 | hunk ./src/allmydata/test/no_network.py 409 |
---|
5121 | - sharedata = open(i_sharefile, "rb").read() |
---|
5122 | - corruptdata = corruptor(sharedata, debug=debug) |
---|
5123 | - open(i_sharefile, "wb").write(corruptdata) |
---|
5124 | + self.corrupt_share((i_shnum, i_serverid, i_sharefp), corruptor, debug=debug) |
---|
5125 | |
---|
5126 | def corrupt_all_shares(self, uri, corruptor, debug=False): |
---|
5127 | hunk ./src/allmydata/test/no_network.py 412 |
---|
5128 | - for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri): |
---|
5129 | - sharedata = open(i_sharefile, "rb").read() |
---|
5130 | - corruptdata = corruptor(sharedata, debug=debug) |
---|
5131 | - open(i_sharefile, "wb").write(corruptdata) |
---|
5132 | + for (i_shnum, i_serverid, i_sharefp) in self.find_uri_shares(uri): |
---|
5133 | + self.corrupt_share((i_shnum, i_serverid, i_sharefp), corruptor, debug=debug) |
---|
5134 | |
---|
5135 | def GET(self, urlpath, followRedirect=False, return_response=False, |
---|
5136 | method="GET", clientnum=0, **kwargs): |
---|
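
The no_network.py hunks above consistently trade os.path/open idioms for their twisted.python.filepath equivalents. The correspondence, summarized as a sketch (the example path is illustrative):

    from twisted.python.filepath import FilePath

    fp = FilePath("/tmp/example")          # was: a path string
    child = fp.child("0")                  # was: os.path.join(path, "0")
    data = child.getContent()              # was: open(fn, "rb").read()
    child.setContent("new bytes")          # was: open(fn, "wb").write(...)
    child.remove()                         # was: os.unlink(fn)
    child.exists()                         # was: os.path.exists(fn)
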
5137 | hunk ./src/allmydata/test/test_download.py 6 |
---|
5138 | # a previous run. This asserts that the current code is capable of decoding |
---|
5139 | # shares from a previous version. |
---|
5140 | |
---|
5141 | -import os |
---|
5142 | from twisted.trial import unittest |
---|
5143 | from twisted.internet import defer, reactor |
---|
5144 | from allmydata import uri |
---|
5145 | hunk ./src/allmydata/test/test_download.py 9 |
---|
5146 | -from allmydata.storage.server import storage_index_to_dir |
---|
5147 | from allmydata.util import base32, fileutil, spans, log, hashutil |
---|
5148 | from allmydata.util.consumer import download_to_data, MemoryConsumer |
---|
5149 | from allmydata.immutable import upload, layout |
---|
5150 | hunk ./src/allmydata/test/test_download.py 85 |
---|
5151 | u = upload.Data(plaintext, None) |
---|
5152 | d = self.c0.upload(u) |
---|
5153 | f = open("stored_shares.py", "w") |
---|
5154 | - def _created_immutable(ur): |
---|
5155 | - # write the generated shares and URI to a file, which can then be |
---|
5156 | - # incorporated into this one next time. |
---|
5157 | - f.write('immutable_uri = "%s"\n' % ur.uri) |
---|
5158 | - f.write('immutable_shares = {\n') |
---|
5159 | - si = uri.from_string(ur.uri).get_storage_index() |
---|
5160 | - si_dir = storage_index_to_dir(si) |
---|
5161 | + |
---|
5162 | + def _write_py(cap): |
---|
5163 | + si = uri.from_string(cap).get_storage_index() |
---|
5164 | for (i,ss,ssdir) in self.iterate_servers(): |
---|
5165 | hunk ./src/allmydata/test/test_download.py 89 |
---|
5166 | - sharedir = os.path.join(ssdir, "shares", si_dir) |
---|
5167 | shares = {} |
---|
5168 | hunk ./src/allmydata/test/test_download.py 90 |
---|
5169 | - for fn in os.listdir(sharedir): |
---|
5170 | - shnum = int(fn) |
---|
5171 | - sharedata = open(os.path.join(sharedir, fn), "rb").read() |
---|
5172 | - shares[shnum] = sharedata |
---|
5173 | - fileutil.rm_dir(sharedir) |
---|
5174 | + shareset = ss.backend.get_shareset(si) |
---|
5175 | + for share in shareset.get_shares(): |
---|
5176 | + sharedata = share._home.getContent() |
---|
5177 | + shares[share.get_shnum()] = sharedata |
---|
5178 | + |
---|
5179 | + fileutil.fp_remove(shareset._sharehomedir) |
---|
5180 | if shares: |
---|
5181 | f.write(' %d: { # client[%d]\n' % (i, i)) |
---|
5182 | for shnum in sorted(shares.keys()): |
---|
5183 | hunk ./src/allmydata/test/test_download.py 103 |
---|
5184 | (shnum, base32.b2a(shares[shnum]))) |
---|
5185 | f.write(' },\n') |
---|
5186 | f.write('}\n') |
---|
5187 | - f.write('\n') |
---|
5188 | |
---|
5189 | hunk ./src/allmydata/test/test_download.py 104 |
---|
5190 | + def _created_immutable(ur): |
---|
5191 | + # write the generated shares and URI to a file, which can then be |
---|
5192 | + # incorporated into this one next time. |
---|
5193 | + f.write('immutable_uri = "%s"\n' % ur.uri) |
---|
5194 | + f.write('immutable_shares = {\n') |
---|
5195 | + _write_py(ur.uri) |
---|
5196 | + f.write('\n') |
---|
5197 | d.addCallback(_created_immutable) |
---|
5198 | |
---|
5199 | d.addCallback(lambda ignored: |
---|
5200 | hunk ./src/allmydata/test/test_download.py 118 |
---|
5201 | def _created_mutable(n): |
---|
5202 | f.write('mutable_uri = "%s"\n' % n.get_uri()) |
---|
5203 | f.write('mutable_shares = {\n') |
---|
5204 | - si = uri.from_string(n.get_uri()).get_storage_index() |
---|
5205 | - si_dir = storage_index_to_dir(si) |
---|
5206 | - for (i,ss,ssdir) in self.iterate_servers(): |
---|
5207 | - sharedir = os.path.join(ssdir, "shares", si_dir) |
---|
5208 | - shares = {} |
---|
5209 | - for fn in os.listdir(sharedir): |
---|
5210 | - shnum = int(fn) |
---|
5211 | - sharedata = open(os.path.join(sharedir, fn), "rb").read() |
---|
5212 | - shares[shnum] = sharedata |
---|
5213 | - fileutil.rm_dir(sharedir) |
---|
5214 | - if shares: |
---|
5215 | - f.write(' %d: { # client[%d]\n' % (i, i)) |
---|
5216 | - for shnum in sorted(shares.keys()): |
---|
5217 | - f.write(' %d: base32.a2b("%s"),\n' % |
---|
5218 | - (shnum, base32.b2a(shares[shnum]))) |
---|
5219 | - f.write(' },\n') |
---|
5220 | - f.write('}\n') |
---|
5221 | - |
---|
5222 | - f.close() |
---|
5223 | + _write_py(n.get_uri()) |
---|
5224 | d.addCallback(_created_mutable) |
---|
5225 | |
---|
5226 | def _done(ignored): |
---|
5227 | hunk ./src/allmydata/test/test_download.py 123 |
---|
5228 | f.close() |
---|
5229 | - d.addCallback(_done) |
---|
5230 | + d.addBoth(_done) |
---|
5231 | |
---|
5232 | return d |
---|
5233 | |
---|
5234 | hunk ./src/allmydata/test/test_download.py 127 |
---|
5235 | + def _write_shares(self, cap, shares): |
---|
5236 | + si = uri.from_string(cap).get_storage_index() |
---|
5237 | + for i in shares: |
---|
5238 | + shares_for_server = shares[i] |
---|
5239 | + for shnum in shares_for_server: |
---|
5240 | + share_dir = self.get_server(i).backend.get_shareset(si)._sharehomedir |
---|
5241 | + fileutil.fp_make_dirs(share_dir) |
---|
5242 | + share_dir.child(str(shnum)).setContent(shares_for_server[shnum]) |
---|
5243 | + |
---|
5244 | def load_shares(self, ignored=None): |
---|
5245 | # this uses the data generated by create_shares() to populate the |
---|
5246 | # storage servers with pre-generated shares |
---|
5247 | hunk ./src/allmydata/test/test_download.py 139 |
---|
5248 | - si = uri.from_string(immutable_uri).get_storage_index() |
---|
5249 | - si_dir = storage_index_to_dir(si) |
---|
5250 | - for i in immutable_shares: |
---|
5251 | - shares = immutable_shares[i] |
---|
5252 | - for shnum in shares: |
---|
5253 | - dn = os.path.join(self.get_serverdir(i), "shares", si_dir) |
---|
5254 | - fileutil.make_dirs(dn) |
---|
5255 | - fn = os.path.join(dn, str(shnum)) |
---|
5256 | - f = open(fn, "wb") |
---|
5257 | - f.write(shares[shnum]) |
---|
5258 | - f.close() |
---|
5259 | - |
---|
5260 | - si = uri.from_string(mutable_uri).get_storage_index() |
---|
5261 | - si_dir = storage_index_to_dir(si) |
---|
5262 | - for i in mutable_shares: |
---|
5263 | - shares = mutable_shares[i] |
---|
5264 | - for shnum in shares: |
---|
5265 | - dn = os.path.join(self.get_serverdir(i), "shares", si_dir) |
---|
5266 | - fileutil.make_dirs(dn) |
---|
5267 | - fn = os.path.join(dn, str(shnum)) |
---|
5268 | - f = open(fn, "wb") |
---|
5269 | - f.write(shares[shnum]) |
---|
5270 | - f.close() |
---|
5271 | + self._write_shares(immutable_uri, immutable_shares) |
---|
5272 | + self._write_shares(mutable_uri, mutable_shares) |
---|
5273 | |
---|
5274 | def download_immutable(self, ignored=None): |
---|
5275 | n = self.c0.create_node_from_uri(immutable_uri) |
---|
5276 | hunk ./src/allmydata/test/test_download.py 183 |
---|
5277 | |
---|
5278 | self.load_shares() |
---|
5279 | si = uri.from_string(immutable_uri).get_storage_index() |
---|
5280 | - si_dir = storage_index_to_dir(si) |
---|
5281 | |
---|
5282 | n = self.c0.create_node_from_uri(immutable_uri) |
---|
5283 | d = download_to_data(n) |
---|
5284 | hunk ./src/allmydata/test/test_download.py 198 |
---|
5285 | for clientnum in immutable_shares: |
---|
5286 | for shnum in immutable_shares[clientnum]: |
---|
5287 | if s._shnum == shnum: |
---|
5288 | - fn = os.path.join(self.get_serverdir(clientnum), |
---|
5289 | - "shares", si_dir, str(shnum)) |
---|
5290 | - os.unlink(fn) |
---|
5291 | + share_dir = self.get_server(clientnum).backend.get_shareset(si)._sharehomedir |
---|
5292 | + share_dir.child(str(shnum)).remove() |
---|
5293 | d.addCallback(_clobber_some_shares) |
---|
5294 | d.addCallback(lambda ign: download_to_data(n)) |
---|
5295 | d.addCallback(_got_data) |
---|
5296 | hunk ./src/allmydata/test/test_download.py 212 |
---|
5297 | for shnum in immutable_shares[clientnum]: |
---|
5298 | if shnum == save_me: |
---|
5299 | continue |
---|
5300 | - fn = os.path.join(self.get_serverdir(clientnum), |
---|
5301 | - "shares", si_dir, str(shnum)) |
---|
5302 | - if os.path.exists(fn): |
---|
5303 | - os.unlink(fn) |
---|
5304 | + share_dir = self.get_server(clientnum).backend.get_shareset(si)._sharehomedir |
---|
5305 | + fileutil.fp_remove(share_dir.child(str(shnum))) |
---|
5306 | # now the download should fail with NotEnoughSharesError |
---|
5307 | return self.shouldFail(NotEnoughSharesError, "1shares", None, |
---|
5308 | download_to_data, n) |
---|
5309 | hunk ./src/allmydata/test/test_download.py 223 |
---|
5310 | # delete the last remaining share |
---|
5311 | for clientnum in immutable_shares: |
---|
5312 | for shnum in immutable_shares[clientnum]: |
---|
5313 | - fn = os.path.join(self.get_serverdir(clientnum), |
---|
5314 | - "shares", si_dir, str(shnum)) |
---|
5315 | - if os.path.exists(fn): |
---|
5316 | - os.unlink(fn) |
---|
5317 | + share_dir = self.get_server(clientnum).backend.get_shareset(si)._sharehomedir |
---|
5318 | + share_dir.child(str(shnum)).remove() |
---|
5319 | # now a new download should fail with NoSharesError. We want a |
---|
5320 | # new ImmutableFileNode so it will forget about the old shares. |
---|
5321 | # If we merely called create_node_from_uri() without first |
---|
5322 | hunk ./src/allmydata/test/test_download.py 801 |
---|
5323 | # will report two shares, and the ShareFinder will handle the |
---|
5324 | # duplicate by attaching both to the same CommonShare instance. |
---|
5325 | si = uri.from_string(immutable_uri).get_storage_index() |
---|
5326 | - si_dir = storage_index_to_dir(si) |
---|
5327 | - sh0_file = [sharefile |
---|
5328 | - for (shnum, serverid, sharefile) |
---|
5329 | - in self.find_uri_shares(immutable_uri) |
---|
5330 | - if shnum == 0][0] |
---|
5331 | - sh0_data = open(sh0_file, "rb").read() |
---|
5332 | + sh0_fp = [sharefp for (shnum, serverid, sharefp) |
---|
5333 | + in self.find_uri_shares(immutable_uri) |
---|
5334 | + if shnum == 0][0] |
---|
5335 | + sh0_data = sh0_fp.getContent() |
---|
5336 | for clientnum in immutable_shares: |
---|
5337 | if 0 in immutable_shares[clientnum]: |
---|
5338 | continue |
---|
5339 | hunk ./src/allmydata/test/test_download.py 808 |
---|
5340 | - cdir = self.get_serverdir(clientnum) |
---|
5341 | - target = os.path.join(cdir, "shares", si_dir, "0") |
---|
5342 | - outf = open(target, "wb") |
---|
5343 | - outf.write(sh0_data) |
---|
5344 | - outf.close() |
---|
5345 | + cdir = self.get_server(clientnum).backend.get_shareset(si)._sharehomedir |
---|
5346 | + fileutil.fp_make_dirs(cdir) |
---|
5347 | + cdir.child("0").setContent(sh0_data) |
---|
5348 | |
---|
5349 | d = self.download_immutable() |
---|
5350 | return d |
---|
5351 | hunk ./src/allmydata/test/test_encode.py 134 |
---|
5352 | d.addCallback(_try) |
---|
5353 | return d |
---|
5354 | |
---|
5355 | - def get_share_hashes(self, at_least_these=()): |
---|
5356 | + def get_share_hashes(self): |
---|
5357 | d = self._start() |
---|
5358 | def _try(unused=None): |
---|
5359 | if self.mode == "bad sharehash": |
---|
5360 | hunk ./src/allmydata/test/test_hung_server.py 3 |
---|
5361 | # -*- coding: utf-8 -*- |
---|
5362 | |
---|
5363 | -import os, shutil |
---|
5364 | from twisted.trial import unittest |
---|
5365 | from twisted.internet import defer |
---|
5366 | hunk ./src/allmydata/test/test_hung_server.py 5 |
---|
5367 | -from allmydata import uri |
---|
5368 | + |
---|
5369 | from allmydata.util.consumer import download_to_data |
---|
5370 | from allmydata.immutable import upload |
---|
5371 | from allmydata.mutable.common import UnrecoverableFileError |
---|
5372 | hunk ./src/allmydata/test/test_hung_server.py 10 |
---|
5373 | from allmydata.mutable.publish import MutableData |
---|
5374 | -from allmydata.storage.common import storage_index_to_dir |
---|
5375 | from allmydata.test.no_network import GridTestMixin |
---|
5376 | from allmydata.test.common import ShouldFailMixin |
---|
5377 | from allmydata.util.pollmixin import PollMixin |
---|
5378 | hunk ./src/allmydata/test/test_hung_server.py 18 |
---|
5379 | immutable_plaintext = "data" * 10000 |
---|
5380 | mutable_plaintext = "muta" * 10000 |
---|
5381 | |
---|
5382 | + |
---|
5383 | class HungServerDownloadTest(GridTestMixin, ShouldFailMixin, PollMixin, |
---|
5384 | unittest.TestCase): |
---|
5385 | # Many of these tests take around 60 seconds on François's ARM buildslave: |
---|
5386 | hunk ./src/allmydata/test/test_hung_server.py 31 |
---|
5387 | timeout = 240 |
---|
5388 | |
---|
5389 | def _break(self, servers): |
---|
5390 | - for (id, ss) in servers: |
---|
5391 | - self.g.break_server(id) |
---|
5392 | + for ss in servers: |
---|
5393 | + self.g.break_server(ss.get_serverid()) |
---|
5394 | |
---|
5395 | def _hang(self, servers, **kwargs): |
---|
5396 | hunk ./src/allmydata/test/test_hung_server.py 35 |
---|
5397 | - for (id, ss) in servers: |
---|
5398 | - self.g.hang_server(id, **kwargs) |
---|
5399 | + for ss in servers: |
---|
5400 | + self.g.hang_server(ss.get_serverid(), **kwargs) |
---|
5401 | |
---|
5402 | def _unhang(self, servers, **kwargs): |
---|
5403 | hunk ./src/allmydata/test/test_hung_server.py 39 |
---|
5404 | - for (id, ss) in servers: |
---|
5405 | - self.g.unhang_server(id, **kwargs) |
---|
5406 | + for ss in servers: |
---|
5407 | + self.g.unhang_server(ss.get_serverid(), **kwargs) |
---|
5408 | |
---|
5409 | def _hang_shares(self, shnums, **kwargs): |
---|
5410 | # hang all servers who are holding the given shares |
---|
5411 | hunk ./src/allmydata/test/test_hung_server.py 52 |
---|
5412 | hung_serverids.add(i_serverid) |
---|
5413 | |
---|
5414 | def _delete_all_shares_from(self, servers): |
---|
5415 | - serverids = [id for (id, ss) in servers] |
---|
5416 | - for (i_shnum, i_serverid, i_sharefile) in self.shares: |
---|
5417 | + serverids = [ss.get_serverid() for ss in servers] |
---|
5418 | + for (i_shnum, i_serverid, i_sharefp) in self.shares: |
---|
5419 | if i_serverid in serverids: |
---|
5420 | hunk ./src/allmydata/test/test_hung_server.py 55 |
---|
5421 | - os.unlink(i_sharefile) |
---|
5422 | + i_sharefp.remove() |
---|
5423 | |
---|
5424 | def _corrupt_all_shares_in(self, servers, corruptor_func): |
---|
5425 | hunk ./src/allmydata/test/test_hung_server.py 58 |
---|
5426 | - serverids = [id for (id, ss) in servers] |
---|
5427 | - for (i_shnum, i_serverid, i_sharefile) in self.shares: |
---|
5428 | + serverids = [ss.get_serverid() for ss in servers] |
---|
5429 | + for (i_shnum, i_serverid, i_sharefp) in self.shares: |
---|
5430 | if i_serverid in serverids: |
---|
5431 | hunk ./src/allmydata/test/test_hung_server.py 61 |
---|
5432 | - self._corrupt_share((i_shnum, i_sharefile), corruptor_func) |
---|
5433 | + self.corrupt_share((i_shnum, i_serverid, i_sharefp), corruptor_func) |
---|
5434 | |
---|
5435 | def _copy_all_shares_from(self, from_servers, to_server): |
---|
5436 | hunk ./src/allmydata/test/test_hung_server.py 64 |
---|
5437 | - serverids = [id for (id, ss) in from_servers] |
---|
5438 | - for (i_shnum, i_serverid, i_sharefile) in self.shares: |
---|
5439 | + serverids = [ss.get_serverid() for ss in from_servers] |
---|
5440 | + for (i_shnum, i_serverid, i_sharefp) in self.shares: |
---|
5441 | if i_serverid in serverids: |
---|
5442 | hunk ./src/allmydata/test/test_hung_server.py 67 |
---|
5443 | - self._copy_share((i_shnum, i_sharefile), to_server) |
---|
5444 | + self.copy_share((i_shnum, i_serverid, i_sharefp), self.uri, to_server) |
---|
5445 | |
---|
5446 | hunk ./src/allmydata/test/test_hung_server.py 69 |
---|
5447 | - def _copy_share(self, share, to_server): |
---|
5448 | - (sharenum, sharefile) = share |
---|
5449 | - (id, ss) = to_server |
---|
5450 | - shares_dir = os.path.join(ss.original.storedir, "shares") |
---|
5451 | - si = uri.from_string(self.uri).get_storage_index() |
---|
5452 | - si_dir = os.path.join(shares_dir, storage_index_to_dir(si)) |
---|
5453 | - if not os.path.exists(si_dir): |
---|
5454 | - os.makedirs(si_dir) |
---|
5455 | - new_sharefile = os.path.join(si_dir, str(sharenum)) |
---|
5456 | - shutil.copy(sharefile, new_sharefile) |
---|
5457 | self.shares = self.find_uri_shares(self.uri) |
---|
5458 | hunk ./src/allmydata/test/test_hung_server.py 70 |
---|
5459 | - # Make sure that the storage server has the share. |
---|
5460 | - self.failUnless((sharenum, ss.original.my_nodeid, new_sharefile) |
---|
5461 | - in self.shares) |
---|
5462 | - |
---|
5463 | - def _corrupt_share(self, share, corruptor_func): |
---|
5464 | - (sharenum, sharefile) = share |
---|
5465 | - data = open(sharefile, "rb").read() |
---|
5466 | - newdata = corruptor_func(data) |
---|
5467 | - os.unlink(sharefile) |
---|
5468 | - wf = open(sharefile, "wb") |
---|
5469 | - wf.write(newdata) |
---|
5470 | - wf.close() |
---|
5471 | |
---|
5472 | def _set_up(self, mutable, testdir, num_clients=1, num_servers=10): |
---|
5473 | self.mutable = mutable |
---|
5474 | hunk ./src/allmydata/test/test_hung_server.py 82 |
---|
5475 | |
---|
5476 | self.c0 = self.g.clients[0] |
---|
5477 | nm = self.c0.nodemaker |
---|
5478 | - self.servers = sorted([(s.get_serverid(), s.get_rref()) |
---|
5479 | - for s in nm.storage_broker.get_connected_servers()]) |
---|
5480 | + unsorted = [(s.get_serverid(), s.get_rref()) for s in nm.storage_broker.get_connected_servers()] |
---|
5481 | + self.servers = [ss for (id, ss) in sorted(unsorted)] |
---|
5482 | self.servers = self.servers[5:] + self.servers[:5] |
---|
5483 | |
---|
5484 | if mutable: |
---|
5485 | hunk ./src/allmydata/test/test_hung_server.py 244 |
---|
5486 | # stuck-but-not-overdue, and 4 live requests. All 4 live requests |
---|
5487 | # will retire before the download is complete and the ShareFinder |
---|
5488 | # is shut off. That will leave 4 OVERDUE and 1 |
---|
5489 | - # stuck-but-not-overdue, for a total of 5 requests in in |
---|
5490 | + # stuck-but-not-overdue, for a total of 5 requests in |
---|
5491 | # _sf.pending_requests |
---|
5492 | for t in self._sf.overdue_timers.values()[:4]: |
---|
5493 | t.reset(-1.0) |
---|
5494 | hunk ./src/allmydata/test/test_mutable.py 21 |
---|
5495 | from foolscap.api import eventually, fireEventually |
---|
5496 | from foolscap.logging import log |
---|
5497 | from allmydata.storage_client import StorageFarmBroker |
---|
5498 | -from allmydata.storage.common import storage_index_to_dir |
---|
5499 | from allmydata.scripts import debug |
---|
5500 | |
---|
5501 | from allmydata.mutable.filenode import MutableFileNode, BackoffAgent |
---|
5502 | hunk ./src/allmydata/test/test_mutable.py 3670 |
---|
5503 | # Now execute each assignment by writing the storage. |
---|
5504 | for (share, servernum) in assignments: |
---|
5505 | sharedata = base64.b64decode(self.sdmf_old_shares[share]) |
---|
5506 | - storedir = self.get_serverdir(servernum) |
---|
5507 | - storage_path = os.path.join(storedir, "shares", |
---|
5508 | - storage_index_to_dir(si)) |
---|
5509 | - fileutil.make_dirs(storage_path) |
---|
5510 | - fileutil.write(os.path.join(storage_path, "%d" % share), |
---|
5511 | - sharedata) |
---|
5512 | + storage_dir = self.get_server(servernum).backend.get_shareset(si).sharehomedir |
---|
5513 | + fileutil.fp_make_dirs(storage_dir) |
---|
5514 | + storage_dir.child("%d" % share).setContent(sharedata) |
---|
5515 | # ...and verify that the shares are there. |
---|
5516 | shares = self.find_uri_shares(self.sdmf_old_cap) |
---|
5517 | assert len(shares) == 10 |
---|
5518 | hunk ./src/allmydata/test/test_provisioning.py 13 |
---|
5519 | from nevow import inevow |
---|
5520 | from zope.interface import implements |
---|
5521 | |
---|
5522 | -class MyRequest: |
---|
5523 | +class MockRequest: |
---|
5524 | implements(inevow.IRequest) |
---|
5525 | pass |
---|
5526 | |
---|
5527 | hunk ./src/allmydata/test/test_provisioning.py 26 |
---|
5528 | def test_load(self): |
---|
5529 | pt = provisioning.ProvisioningTool() |
---|
5530 | self.fields = {} |
---|
5531 | - #r = MyRequest() |
---|
5532 | + #r = MockRequest() |
---|
5533 | #r.fields = self.fields |
---|
5534 | #ctx = RequestContext() |
---|
5535 | #unfilled = pt.renderSynchronously(ctx) |
---|
5536 | hunk ./src/allmydata/test/test_repairer.py 537 |
---|
5537 | # happiness setting. |
---|
5538 | def _delete_some_servers(ignored): |
---|
5539 | for i in xrange(7): |
---|
5540 | - self.g.remove_server(self.g.servers_by_number[i].my_nodeid) |
---|
5541 | + self.remove_server(i) |
---|
5542 | |
---|
5543 | assert len(self.g.servers_by_number) == 3 |
---|
5544 | |
---|
5545 | hunk ./src/allmydata/test/test_storage.py 14 |
---|
5546 | from allmydata import interfaces |
---|
5547 | from allmydata.util import fileutil, hashutil, base32, pollmixin, time_format |
---|
5548 | from allmydata.storage.server import StorageServer |
---|
5549 | -from allmydata.storage.mutable import MutableShareFile |
---|
5550 | -from allmydata.storage.immutable import BucketWriter, BucketReader |
---|
5551 | -from allmydata.storage.common import DataTooLargeError, storage_index_to_dir, \ |
---|
5552 | +from allmydata.storage.backends.disk.mutable import MutableDiskShare |
---|
5553 | +from allmydata.storage.bucket import BucketWriter, BucketReader |
---|
5554 | +from allmydata.storage.common import DataTooLargeError, \ |
---|
5555 | UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError |
---|
5556 | from allmydata.storage.lease import LeaseInfo |
---|
5557 | from allmydata.storage.crawler import BucketCountingCrawler |
---|
5558 | hunk ./src/allmydata/test/test_storage.py 474 |
---|
5559 | w[0].remote_write(0, "\xff"*10) |
---|
5560 | w[0].remote_close() |
---|
5561 | |
---|
5562 | - fn = os.path.join(ss.sharedir, storage_index_to_dir("si1"), "0") |
---|
5563 | - f = open(fn, "rb+") |
---|
5564 | + fp = ss.backend.get_shareset("si1").sharehomedir.child("0") |
---|
5565 | + f = fp.open("rb+") |
---|
5566 | f.seek(0) |
---|
5567 | f.write(struct.pack(">L", 0)) # this is invalid: minimum used is v1 |
---|
5568 | f.close() |
---|
5569 | hunk ./src/allmydata/test/test_storage.py 814 |
---|
5570 | def test_bad_magic(self): |
---|
5571 | ss = self.create("test_bad_magic") |
---|
5572 | self.allocate(ss, "si1", "we1", self._lease_secret.next(), set([0]), 10) |
---|
5573 | - fn = os.path.join(ss.sharedir, storage_index_to_dir("si1"), "0") |
---|
5574 | - f = open(fn, "rb+") |
---|
5575 | + fp = ss.backend.get_shareset("si1").sharehomedir.child("0") |
---|
5576 | + f = fp.open("rb+") |
---|
5577 | f.seek(0) |
---|
5578 | f.write("BAD MAGIC") |
---|
5579 | f.close() |
---|
5580 | hunk ./src/allmydata/test/test_storage.py 842 |
---|
5581 | |
---|
5582 | # Trying to make the container too large (by sending a write vector |
---|
5583 | # whose offset is too high) will raise an exception. |
---|
5584 | - TOOBIG = MutableShareFile.MAX_SIZE + 10 |
---|
5585 | + TOOBIG = MutableDiskShare.MAX_SIZE + 10 |
---|
5586 | self.failUnlessRaises(DataTooLargeError, |
---|
5587 | rstaraw, "si1", secrets, |
---|
5588 | {0: ([], [(TOOBIG,data)], None)}, |
---|
5589 | hunk ./src/allmydata/test/test_storage.py 1229 |
---|
5590 | |
---|
5591 | # create a random non-numeric file in the bucket directory, to |
---|
5592 | # exercise the code that's supposed to ignore those. |
---|
5593 | - bucket_dir = os.path.join(self.workdir("test_leases"), |
---|
5594 | - "shares", storage_index_to_dir("si1")) |
---|
5595 | - f = open(os.path.join(bucket_dir, "ignore_me.txt"), "w") |
---|
5596 | - f.write("you ought to be ignoring me\n") |
---|
5597 | - f.close() |
---|
5598 | + bucket_dir = ss.backend.get_shareset("si1").sharehomedir |
---|
5599 | + bucket_dir.child("ignore_me.txt").setContent("you ought to be ignoring me\n") |
---|
5600 | |
---|
5601 | hunk ./src/allmydata/test/test_storage.py 1232 |
---|
5602 | - s0 = MutableShareFile(os.path.join(bucket_dir, "0")) |
---|
5603 | + s0 = MutableDiskShare(bucket_dir.child("0")) |
---|
5604 | self.failUnlessEqual(len(list(s0.get_leases())), 1) |
---|
5605 | |
---|
5606 | # add-lease on a missing storage index is silently ignored |
---|
5607 | hunk ./src/allmydata/test/test_storage.py 3118 |
---|
5608 | [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis |
---|
5609 | |
---|
5610 | # add a non-sharefile to exercise another code path |
---|
5611 | - fn = os.path.join(ss.sharedir, |
---|
5612 | - storage_index_to_dir(immutable_si_0), |
---|
5613 | - "not-a-share") |
---|
5614 | - f = open(fn, "wb") |
---|
5615 | - f.write("I am not a share.\n") |
---|
5616 | - f.close() |
---|
5617 | + fp = ss.backend.get_shareset(immutable_si_0).sharehomedir.child("not-a-share") |
---|
5618 | + fp.setContent("I am not a share.\n") |
---|
5619 | |
---|
5620 | # this is before the crawl has started, so we're not in a cycle yet |
---|
5621 | initial_state = lc.get_state() |
---|
5622 | hunk ./src/allmydata/test/test_storage.py 3282 |
---|
5623 | def test_expire_age(self): |
---|
5624 | basedir = "storage/LeaseCrawler/expire_age" |
---|
5625 | fileutil.make_dirs(basedir) |
---|
5626 | - # setting expiration_time to 2000 means that any lease which is more |
---|
5627 | - # than 2000s old will be expired. |
---|
5628 | - ss = InstrumentedStorageServer(basedir, "\x00" * 20, |
---|
5629 | - expiration_enabled=True, |
---|
5630 | - expiration_mode="age", |
---|
5631 | - expiration_override_lease_duration=2000) |
---|
5632 | + # setting 'override_lease_duration' to 2000 means that any lease that |
---|
5633 | + # is more than 2000 seconds old will be expired. |
---|
5634 | + expiration_policy = { |
---|
5635 | + 'enabled': True, |
---|
5636 | + 'mode': 'age', |
---|
5637 | + 'override_lease_duration': 2000, |
---|
5638 | + 'sharetypes': ('mutable', 'immutable'), |
---|
5639 | + } |
---|
5640 | + ss = InstrumentedStorageServer(basedir, "\x00" * 20, expiration_policy) |
---|
5641 | # make it start sooner than usual. |
---|
5642 | lc = ss.lease_checker |
---|
5643 | lc.slow_start = 0 |
---|
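
The expiration keyword arguments collapse into a single expiration_policy dict here and in the tests below. A hedged summary of the keys these tests exercise (the defaults shown are illustrative assumptions, and the constructor call mirrors the older-style signature still used in these test hunks):

    from allmydata.storage.server import StorageServer

    basedir = "storage/LeaseCrawler/example"     # illustrative
    expiration_policy = {
        'enabled': True,                         # turn lease expiry on
        'mode': 'age',                           # or 'cutoff-date'
        'override_lease_duration': 2000,         # seconds; used in 'age' mode
        'cutoff_date': None,                     # epoch seconds; 'cutoff-date' mode
        'sharetypes': ('mutable', 'immutable'),  # which share types to expire
    }
    ss = StorageServer(basedir, "\x00" * 20, expiration_policy)
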
5644 | hunk ./src/allmydata/test/test_storage.py 3423 |
---|
5645 | def test_expire_cutoff_date(self): |
---|
5646 | basedir = "storage/LeaseCrawler/expire_cutoff_date" |
---|
5647 | fileutil.make_dirs(basedir) |
---|
5648 | - # setting cutoff-date to 2000 seconds ago means that any lease which |
---|
5649 | - # is more than 2000s old will be expired. |
---|
5650 | + # setting 'cutoff_date' to 2000 seconds ago means that any lease that |
---|
5651 | + # is more than 2000 seconds old will be expired. |
---|
5652 | now = time.time() |
---|
5653 | then = int(now - 2000) |
---|
5654 | hunk ./src/allmydata/test/test_storage.py 3427 |
---|
5655 | - ss = InstrumentedStorageServer(basedir, "\x00" * 20, |
---|
5656 | - expiration_enabled=True, |
---|
5657 | - expiration_mode="cutoff-date", |
---|
5658 | - expiration_cutoff_date=then) |
---|
5659 | + expiration_policy = { |
---|
5660 | + 'enabled': True, |
---|
5661 | + 'mode': 'cutoff-date', |
---|
5662 | + 'cutoff_date': then, |
---|
5663 | + 'sharetypes': ('mutable', 'immutable'), |
---|
5664 | + } |
---|
5665 | + ss = InstrumentedStorageServer(basedir, "\x00" * 20, expiration_policy) |
---|
5666 | # make it start sooner than usual. |
---|
5667 | lc = ss.lease_checker |
---|
5668 | lc.slow_start = 0 |
---|
5669 | hunk ./src/allmydata/test/test_storage.py 3575 |
---|
5670 | def test_only_immutable(self): |
---|
5671 | basedir = "storage/LeaseCrawler/only_immutable" |
---|
5672 | fileutil.make_dirs(basedir) |
---|
5673 | + # setting 'cutoff_date' to 2000 seconds ago means that any lease that |
---|
5674 | + # is more than 2000 seconds old will be expired. |
---|
5675 | now = time.time() |
---|
5676 | then = int(now - 2000) |
---|
5677 | hunk ./src/allmydata/test/test_storage.py 3579 |
---|
5678 | - ss = StorageServer(basedir, "\x00" * 20, |
---|
5679 | - expiration_enabled=True, |
---|
5680 | - expiration_mode="cutoff-date", |
---|
5681 | - expiration_cutoff_date=then, |
---|
5682 | - expiration_sharetypes=("immutable",)) |
---|
5683 | + expiration_policy = { |
---|
5684 | + 'enabled': True, |
---|
5685 | + 'mode': 'cutoff-date', |
---|
5686 | + 'cutoff_date': then, |
---|
5687 | + 'sharetypes': ('immutable',), |
---|
5688 | + } |
---|
5689 | + ss = StorageServer(basedir, "\x00" * 20, expiration_policy) |
---|
5690 | lc = ss.lease_checker |
---|
5691 | lc.slow_start = 0 |
---|
5692 | webstatus = StorageStatus(ss) |
---|
5693 | hunk ./src/allmydata/test/test_storage.py 3636 |
---|
5694 | def test_only_mutable(self): |
---|
5695 | basedir = "storage/LeaseCrawler/only_mutable" |
---|
5696 | fileutil.make_dirs(basedir) |
---|
5697 | + # setting 'cutoff_date' to 2000 seconds ago means that any lease that |
---|
5698 | + # is more than 2000 seconds old will be expired. |
---|
5699 | now = time.time() |
---|
5700 | then = int(now - 2000) |
---|
5701 | hunk ./src/allmydata/test/test_storage.py 3640 |
---|
5702 | - ss = StorageServer(basedir, "\x00" * 20, |
---|
5703 | - expiration_enabled=True, |
---|
5704 | - expiration_mode="cutoff-date", |
---|
5705 | - expiration_cutoff_date=then, |
---|
5706 | - expiration_sharetypes=("mutable",)) |
---|
5707 | + expiration_policy = { |
---|
5708 | + 'enabled': True, |
---|
5709 | + 'mode': 'cutoff-date', |
---|
5710 | + 'cutoff_date': then, |
---|
5711 | + 'sharetypes': ('mutable',), |
---|
5712 | + } |
---|
5713 | + ss = StorageServer(basedir, "\x00" * 20, expiration_policy) |
---|
5714 | lc = ss.lease_checker |
---|
5715 | lc.slow_start = 0 |
---|
5716 | webstatus = StorageStatus(ss) |
---|
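Note: the hunks above replace the server's old expiration_* keyword arguments
with a single expiration_policy dict. As a minimal sketch, the two policy
shapes these tests exercise look like this (key names as in the hunks above;
the values are illustrative and 'time' is assumed to be imported):

    age_policy = {
        'enabled': True,
        'mode': 'age',
        'override_lease_duration': 2000,   # expire leases older than 2000s
        'sharetypes': ('mutable', 'immutable'),
    }
    cutoff_policy = {
        'enabled': True,
        'mode': 'cutoff-date',
        'cutoff_date': int(time.time() - 2000),  # expire leases granted before this time
        'sharetypes': ('immutable',),      # apply the policy to one share type only
    }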
5717 | hunk ./src/allmydata/test/test_storage.py 3819 |
---|
5718 | def test_no_st_blocks(self): |
---|
5719 | basedir = "storage/LeaseCrawler/no_st_blocks" |
---|
5720 | fileutil.make_dirs(basedir) |
---|
5721 | - ss = No_ST_BLOCKS_StorageServer(basedir, "\x00" * 20, |
---|
5722 | - expiration_mode="age", |
---|
5723 | - expiration_override_lease_duration=-1000) |
---|
5724 | - # a negative expiration_time= means the "configured-" |
---|
5725 | + # A negative 'override_lease_duration' means that the "configured-" |
---|
5726 | # space-recovered counts will be non-zero, since all shares will have |
---|
5727 | hunk ./src/allmydata/test/test_storage.py 3821 |
---|
5728 | - # expired by then |
---|
5729 | + # expired by then. |
---|
5730 | + expiration_policy = { |
---|
5731 | + 'enabled': True, |
---|
5732 | + 'mode': 'age', |
---|
5733 | + 'override_lease_duration': -1000, |
---|
5734 | + 'sharetypes': ('mutable', 'immutable'), |
---|
5735 | + } |
---|
5736 | + ss = No_ST_BLOCKS_StorageServer(basedir, "\x00" * 20, expiration_policy) |
---|
5737 | |
---|
5738 | # make it start sooner than usual. |
---|
5739 | lc = ss.lease_checker |
---|
5740 | hunk ./src/allmydata/test/test_storage.py 3877 |
---|
5741 | [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis |
---|
5742 | first = min(self.sis) |
---|
5743 | first_b32 = base32.b2a(first) |
---|
5744 | - fn = os.path.join(ss.sharedir, storage_index_to_dir(first), "0") |
---|
5745 | - f = open(fn, "rb+") |
---|
5746 | + fp = ss.backend.get_shareset(first).sharehomedir.child("0") |
---|
5747 | + f = fp.open("rb+") |
---|
5748 | f.seek(0) |
---|
5749 | f.write("BAD MAGIC") |
---|
5750 | f.close() |
---|
5751 | hunk ./src/allmydata/test/test_storage.py 3890 |
---|
5752 | |
---|
5753 | # also create an empty bucket |
---|
5754 | empty_si = base32.b2a("\x04"*16) |
---|
5755 | - empty_bucket_dir = os.path.join(ss.sharedir, |
---|
5756 | - storage_index_to_dir(empty_si)) |
---|
5757 | - fileutil.make_dirs(empty_bucket_dir) |
---|
5758 | + empty_bucket_dir = ss.backend.get_shareset(empty_si).sharehomedir |
---|
5759 | + fileutil.fp_make_dirs(empty_bucket_dir) |
---|
5760 | |
---|
5761 | ss.setServiceParent(self.s) |
---|
5762 | |
---|
5763 | hunk ./src/allmydata/test/test_system.py 10 |
---|
5764 | |
---|
5765 | import allmydata |
---|
5766 | from allmydata import uri |
---|
5767 | -from allmydata.storage.mutable import MutableShareFile |
---|
5768 | +from allmydata.storage.backends.disk.mutable import MutableDiskShare |
---|
5769 | from allmydata.storage.server import si_a2b |
---|
5770 | from allmydata.immutable import offloaded, upload |
---|
5771 | from allmydata.immutable.literal import LiteralFileNode |
---|
5772 | hunk ./src/allmydata/test/test_system.py 421 |
---|
5773 | return shares |
---|
5774 | |
---|
5775 | def _corrupt_mutable_share(self, filename, which): |
---|
5776 | - msf = MutableShareFile(filename) |
---|
5777 | + msf = MutableDiskShare(filename) |
---|
5778 | datav = msf.readv([ (0, 1000000) ]) |
---|
5779 | final_share = datav[0] |
---|
5780 | assert len(final_share) < 1000000 # ought to be truncated |
---|
5781 | hunk ./src/allmydata/test/test_upload.py 22 |
---|
5782 | from allmydata.util.happinessutil import servers_of_happiness, \ |
---|
5783 | shares_by_server, merge_servers |
---|
5784 | from allmydata.storage_client import StorageFarmBroker |
---|
5785 | -from allmydata.storage.server import storage_index_to_dir |
---|
5786 | |
---|
5787 | MiB = 1024*1024 |
---|
5788 | |
---|
5789 | hunk ./src/allmydata/test/test_upload.py 821 |
---|
5790 | |
---|
5791 | def _copy_share_to_server(self, share_number, server_number): |
---|
5792 | ss = self.g.servers_by_number[server_number] |
---|
5793 | - # Copy share i from the directory associated with the first |
---|
5794 | - # storage server to the directory associated with this one. |
---|
5795 | - assert self.g, "I tried to find a grid at self.g, but failed" |
---|
5796 | - assert self.shares, "I tried to find shares at self.shares, but failed" |
---|
5797 | - old_share_location = self.shares[share_number][2] |
---|
5798 | - new_share_location = os.path.join(ss.storedir, "shares") |
---|
5799 | - si = uri.from_string(self.uri).get_storage_index() |
---|
5800 | - new_share_location = os.path.join(new_share_location, |
---|
5801 | - storage_index_to_dir(si)) |
---|
5802 | - if not os.path.exists(new_share_location): |
---|
5803 | - os.makedirs(new_share_location) |
---|
5804 | - new_share_location = os.path.join(new_share_location, |
---|
5805 | - str(share_number)) |
---|
5806 | - if old_share_location != new_share_location: |
---|
5807 | - shutil.copy(old_share_location, new_share_location) |
---|
5808 | - shares = self.find_uri_shares(self.uri) |
---|
5809 | - # Make sure that the storage server has the share. |
---|
5810 | - self.failUnless((share_number, ss.my_nodeid, new_share_location) |
---|
5811 | - in shares) |
---|
5812 | + self.copy_share(self.shares[share_number], ss) |
---|
5813 | |
---|
5814 | def _setup_grid(self): |
---|
5815 | """ |
---|
5816 | hunk ./src/allmydata/test/test_upload.py 1103 |
---|
5817 | self._copy_share_to_server(i, 2) |
---|
5818 | d.addCallback(_copy_shares) |
---|
5819 | # Remove the first server, and add a placeholder with share 0 |
---|
5820 | - d.addCallback(lambda ign: |
---|
5821 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) |
---|
5822 | + d.addCallback(lambda ign: self.remove_server(0)) |
---|
5823 | d.addCallback(lambda ign: |
---|
5824 | self._add_server_with_share(server_number=4, share_number=0)) |
---|
5825 | # Now try uploading. |
---|
5826 | hunk ./src/allmydata/test/test_upload.py 1134 |
---|
5827 | d.addCallback(lambda ign: |
---|
5828 | self._add_server(server_number=4)) |
---|
5829 | d.addCallback(_copy_shares) |
---|
5830 | - d.addCallback(lambda ign: |
---|
5831 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) |
---|
5832 | + d.addCallback(lambda ign: self.remove_server(0)) |
---|
5833 | d.addCallback(_reset_encoding_parameters) |
---|
5834 | d.addCallback(lambda client: |
---|
5835 | client.upload(upload.Data("data" * 10000, convergence=""))) |
---|
5836 | hunk ./src/allmydata/test/test_upload.py 1196 |
---|
5837 | self._copy_share_to_server(i, 2) |
---|
5838 | d.addCallback(_copy_shares) |
---|
5839 | # Remove server 0, and add another in its place |
---|
5840 | - d.addCallback(lambda ign: |
---|
5841 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) |
---|
5842 | + d.addCallback(lambda ign: self.remove_server(0)) |
---|
5843 | d.addCallback(lambda ign: |
---|
5844 | self._add_server_with_share(server_number=4, share_number=0, |
---|
5845 | readonly=True)) |
---|
5846 | hunk ./src/allmydata/test/test_upload.py 1237 |
---|
5847 | for i in xrange(1, 10): |
---|
5848 | self._copy_share_to_server(i, 2) |
---|
5849 | d.addCallback(_copy_shares) |
---|
5850 | - d.addCallback(lambda ign: |
---|
5851 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) |
---|
5852 | + d.addCallback(lambda ign: self.remove_server(0)) |
---|
5853 | def _reset_encoding_parameters(ign, happy=4): |
---|
5854 | client = self.g.clients[0] |
---|
5855 | client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy |
---|
5856 | hunk ./src/allmydata/test/test_upload.py 1273 |
---|
5857 | # remove the original server |
---|
5858 | # (necessary to ensure that the Tahoe2ServerSelector will distribute |
---|
5859 | # all the shares) |
---|
5860 | - def _remove_server(ign): |
---|
5861 | - server = self.g.servers_by_number[0] |
---|
5862 | - self.g.remove_server(server.my_nodeid) |
---|
5863 | - d.addCallback(_remove_server) |
---|
5864 | + d.addCallback(lambda ign: self.remove_server(0)) |
---|
5865 | # This should succeed; we still have 4 servers, and the |
---|
5866 | # happiness of the upload is 4. |
---|
5867 | d.addCallback(lambda ign: |
---|
5868 | hunk ./src/allmydata/test/test_upload.py 1285 |
---|
5869 | d.addCallback(lambda ign: |
---|
5870 | self._setup_and_upload()) |
---|
5871 | d.addCallback(_do_server_setup) |
---|
5872 | - d.addCallback(_remove_server) |
---|
5873 | + d.addCallback(lambda ign: self.remove_server(0)) |
---|
5874 | d.addCallback(lambda ign: |
---|
5875 | self.shouldFail(UploadUnhappinessError, |
---|
5876 | "test_dropped_servers_in_encoder", |
---|
5877 | hunk ./src/allmydata/test/test_upload.py 1307 |
---|
5878 | self._add_server_with_share(4, 7, readonly=True) |
---|
5879 | self._add_server_with_share(5, 8, readonly=True) |
---|
5880 | d.addCallback(_do_server_setup_2) |
---|
5881 | - d.addCallback(_remove_server) |
---|
5882 | + d.addCallback(lambda ign: self.remove_server(0)) |
---|
5883 | d.addCallback(lambda ign: |
---|
5884 | self._do_upload_with_broken_servers(1)) |
---|
5885 | d.addCallback(_set_basedir) |
---|
5886 | hunk ./src/allmydata/test/test_upload.py 1314 |
---|
5887 | d.addCallback(lambda ign: |
---|
5888 | self._setup_and_upload()) |
---|
5889 | d.addCallback(_do_server_setup_2) |
---|
5890 | - d.addCallback(_remove_server) |
---|
5891 | + d.addCallback(lambda ign: self.remove_server(0)) |
---|
5892 | d.addCallback(lambda ign: |
---|
5893 | self.shouldFail(UploadUnhappinessError, |
---|
5894 | "test_dropped_servers_in_encoder", |
---|
5895 | hunk ./src/allmydata/test/test_upload.py 1528 |
---|
5896 | for i in xrange(1, 10): |
---|
5897 | self._copy_share_to_server(i, 1) |
---|
5898 | d.addCallback(_copy_shares) |
---|
5899 | - d.addCallback(lambda ign: |
---|
5900 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) |
---|
5901 | + d.addCallback(lambda ign: self.remove_server(0)) |
---|
5902 | def _prepare_client(ign): |
---|
5903 | client = self.g.clients[0] |
---|
5904 | client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4 |
---|
5905 | hunk ./src/allmydata/test/test_upload.py 1550 |
---|
5906 | def _setup(ign): |
---|
5907 | for i in xrange(1, 11): |
---|
5908 | self._add_server(server_number=i) |
---|
5909 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid) |
---|
5910 | + self.remove_server(0) |
---|
5911 | c = self.g.clients[0] |
---|
5912 | # We set happy to an unsatisfiable value so that we can check the |
---|
5913 | # counting in the exception message. The same progress message |
---|
5914 | hunk ./src/allmydata/test/test_upload.py 1577 |
---|
5915 | self._add_server(server_number=i) |
---|
5916 | self._add_server(server_number=11, readonly=True) |
---|
5917 | self._add_server(server_number=12, readonly=True) |
---|
5918 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid) |
---|
5919 | + self.remove_server(0) |
---|
5920 | c = self.g.clients[0] |
---|
5921 | c.DEFAULT_ENCODING_PARAMETERS['happy'] = 45 |
---|
5922 | return c |
---|
5923 | hunk ./src/allmydata/test/test_upload.py 1605 |
---|
5924 | # the first one that the selector sees. |
---|
5925 | for i in xrange(10): |
---|
5926 | self._copy_share_to_server(i, 9) |
---|
5927 | - # Remove server 0, and its contents |
---|
5928 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid) |
---|
5929 | + self.remove_server(0) |
---|
5930 | # Make happiness unsatisfiable |
---|
5931 | c = self.g.clients[0] |
---|
5932 | c.DEFAULT_ENCODING_PARAMETERS['happy'] = 45 |
---|
5933 | hunk ./src/allmydata/test/test_upload.py 1625 |
---|
5934 | def _then(ign): |
---|
5935 | for i in xrange(1, 11): |
---|
5936 | self._add_server(server_number=i, readonly=True) |
---|
5937 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid) |
---|
5938 | + self.remove_server(0) |
---|
5939 | c = self.g.clients[0] |
---|
5940 | c.DEFAULT_ENCODING_PARAMETERS['k'] = 2 |
---|
5941 | c.DEFAULT_ENCODING_PARAMETERS['happy'] = 4 |
---|
5942 | hunk ./src/allmydata/test/test_upload.py 1661 |
---|
5943 | self._add_server(server_number=4, readonly=True)) |
---|
5944 | d.addCallback(lambda ign: |
---|
5945 | self._add_server(server_number=5, readonly=True)) |
---|
5946 | - d.addCallback(lambda ign: |
---|
5947 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) |
---|
5948 | + d.addCallback(lambda ign: self.remove_server(0)) |
---|
5949 | def _reset_encoding_parameters(ign, happy=4): |
---|
5950 | client = self.g.clients[0] |
---|
5951 | client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy |
---|
5952 | hunk ./src/allmydata/test/test_upload.py 1696 |
---|
5953 | d.addCallback(lambda ign: |
---|
5954 | self._add_server(server_number=2)) |
---|
5955 | def _break_server_2(ign): |
---|
5956 | - serverid = self.g.servers_by_number[2].my_nodeid |
---|
5957 | + serverid = self.get_server(2).get_serverid() |
---|
5958 | self.g.break_server(serverid) |
---|
5959 | d.addCallback(_break_server_2) |
---|
5960 | d.addCallback(lambda ign: |
---|
5961 | hunk ./src/allmydata/test/test_upload.py 1705 |
---|
5962 | self._add_server(server_number=4, readonly=True)) |
---|
5963 | d.addCallback(lambda ign: |
---|
5964 | self._add_server(server_number=5, readonly=True)) |
---|
5965 | - d.addCallback(lambda ign: |
---|
5966 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) |
---|
5967 | + d.addCallback(lambda ign: self.remove_server(0)) |
---|
5968 | d.addCallback(_reset_encoding_parameters) |
---|
5969 | d.addCallback(lambda client: |
---|
5970 | self.shouldFail(UploadUnhappinessError, "test_selection_exceptions", |
---|
5971 | hunk ./src/allmydata/test/test_upload.py 1816 |
---|
5972 | # Copy shares |
---|
5973 | self._copy_share_to_server(1, 1) |
---|
5974 | self._copy_share_to_server(2, 1) |
---|
5975 | - # Remove server 0 |
---|
5976 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid) |
---|
5977 | + self.remove_server(0) |
---|
5978 | client = self.g.clients[0] |
---|
5979 | client.DEFAULT_ENCODING_PARAMETERS['happy'] = 3 |
---|
5980 | return client |
---|
5981 | hunk ./src/allmydata/test/test_upload.py 1930 |
---|
5982 | readonly=True) |
---|
5983 | self._add_server_with_share(server_number=4, share_number=3, |
---|
5984 | readonly=True) |
---|
5985 | - # Remove server 0. |
---|
5986 | - self.g.remove_server(self.g.servers_by_number[0].my_nodeid) |
---|
5987 | + self.remove_server(0) |
---|
5988 | # Set the client appropriately |
---|
5989 | c = self.g.clients[0] |
---|
5990 | c.DEFAULT_ENCODING_PARAMETERS['happy'] = 4 |
---|
5991 | hunk ./src/allmydata/test/test_util.py 9 |
---|
5992 | from twisted.trial import unittest |
---|
5993 | from twisted.internet import defer, reactor |
---|
5994 | from twisted.python.failure import Failure |
---|
5995 | +from twisted.python.filepath import FilePath |
---|
5996 | from twisted.python import log |
---|
5997 | from pycryptopp.hash.sha256 import SHA256 as _hash |
---|
5998 | |
---|
5999 | hunk ./src/allmydata/test/test_util.py 508 |
---|
6000 | os.chdir(saved_cwd) |
---|
6001 | |
---|
6002 | def test_disk_stats(self): |
---|
6003 | - avail = fileutil.get_available_space('.', 2**14) |
---|
6004 | + avail = fileutil.get_available_space(FilePath('.'), 2**14) |
---|
6005 | if avail == 0: |
---|
6006 | raise unittest.SkipTest("This test will spuriously fail if there is no disk space left.") |
---|
6007 | |
---|
6008 | hunk ./src/allmydata/test/test_util.py 512 |
---|
6009 | - disk = fileutil.get_disk_stats('.', 2**13) |
---|
6010 | + disk = fileutil.get_disk_stats(FilePath('.'), 2**13) |
---|
6011 | self.failUnless(disk['total'] > 0, disk['total']) |
---|
6012 | self.failUnless(disk['used'] > 0, disk['used']) |
---|
6013 | self.failUnless(disk['free_for_root'] > 0, disk['free_for_root']) |
---|
6014 | hunk ./src/allmydata/test/test_util.py 521 |
---|
6015 | |
---|
6016 | def test_disk_stats_avail_nonnegative(self): |
---|
6017 | # This test will spuriously fail if you have more than 2^128 |
---|
6018 | - # bytes of available space on your filesystem. |
---|
6019 | - disk = fileutil.get_disk_stats('.', 2**128) |
---|
6020 | + # bytes of available space on your filesystem (lucky you). |
---|
6021 | + disk = fileutil.get_disk_stats(FilePath('.'), 2**128) |
---|
6022 | self.failUnlessEqual(disk['avail'], 0) |
---|
6023 | |
---|
6024 | class PollMixinTests(unittest.TestCase): |
---|
6025 | hunk ./src/allmydata/test/test_web.py 12 |
---|
6026 | from twisted.python import failure, log |
---|
6027 | from nevow import rend |
---|
6028 | from allmydata import interfaces, uri, webish, dirnode |
---|
6029 | -from allmydata.storage.shares import get_share_file |
---|
6030 | from allmydata.storage_client import StorageFarmBroker |
---|
6031 | from allmydata.immutable import upload |
---|
6032 | from allmydata.immutable.downloader.status import DownloadStatus |
---|
6033 | hunk ./src/allmydata/test/test_web.py 4111 |
---|
6034 | good_shares = self.find_uri_shares(self.uris["good"]) |
---|
6035 | self.failUnlessReallyEqual(len(good_shares), 10) |
---|
6036 | sick_shares = self.find_uri_shares(self.uris["sick"]) |
---|
6037 | - os.unlink(sick_shares[0][2]) |
---|
6038 | + sick_shares[0][2].remove() |
---|
6039 | dead_shares = self.find_uri_shares(self.uris["dead"]) |
---|
6040 | for i in range(1, 10): |
---|
6041 | hunk ./src/allmydata/test/test_web.py 4114 |
---|
6042 | - os.unlink(dead_shares[i][2]) |
---|
6043 | + dead_shares[i][2].remove() |
---|
6044 | c_shares = self.find_uri_shares(self.uris["corrupt"]) |
---|
6045 | cso = CorruptShareOptions() |
---|
6046 | cso.stdout = StringIO() |
---|
6047 | hunk ./src/allmydata/test/test_web.py 4118 |
---|
6048 | - cso.parseOptions([c_shares[0][2]]) |
---|
6049 | + cso.parseOptions([c_shares[0][2].path]) |
---|
6050 | corrupt_share(cso) |
---|
6051 | d.addCallback(_clobber_shares) |
---|
6052 | |
---|
6053 | hunk ./src/allmydata/test/test_web.py 4253 |
---|
6054 | good_shares = self.find_uri_shares(self.uris["good"]) |
---|
6055 | self.failUnlessReallyEqual(len(good_shares), 10) |
---|
6056 | sick_shares = self.find_uri_shares(self.uris["sick"]) |
---|
6057 | - os.unlink(sick_shares[0][2]) |
---|
6058 | + sick_shares[0][2].remove() |
---|
6059 | dead_shares = self.find_uri_shares(self.uris["dead"]) |
---|
6060 | for i in range(1, 10): |
---|
6061 | hunk ./src/allmydata/test/test_web.py 4256 |
---|
6062 | - os.unlink(dead_shares[i][2]) |
---|
6063 | + dead_shares[i][2].remove() |
---|
6064 | c_shares = self.find_uri_shares(self.uris["corrupt"]) |
---|
6065 | cso = CorruptShareOptions() |
---|
6066 | cso.stdout = StringIO() |
---|
6067 | hunk ./src/allmydata/test/test_web.py 4260 |
---|
6068 | - cso.parseOptions([c_shares[0][2]]) |
---|
6069 | + cso.parseOptions([c_shares[0][2].path]) |
---|
6070 | corrupt_share(cso) |
---|
6071 | d.addCallback(_clobber_shares) |
---|
6072 | |
---|
6073 | hunk ./src/allmydata/test/test_web.py 4319 |
---|
6074 | |
---|
6075 | def _clobber_shares(ignored): |
---|
6076 | sick_shares = self.find_uri_shares(self.uris["sick"]) |
---|
6077 | - os.unlink(sick_shares[0][2]) |
---|
6078 | + sick_shares[0][2].remove() |
---|
6079 | d.addCallback(_clobber_shares) |
---|
6080 | |
---|
6081 | d.addCallback(self.CHECK, "sick", "t=check&repair=true&output=json") |
---|
6082 | hunk ./src/allmydata/test/test_web.py 4811 |
---|
6083 | good_shares = self.find_uri_shares(self.uris["good"]) |
---|
6084 | self.failUnlessReallyEqual(len(good_shares), 10) |
---|
6085 | sick_shares = self.find_uri_shares(self.uris["sick"]) |
---|
6086 | - os.unlink(sick_shares[0][2]) |
---|
6087 | + sick_shares[0][2].remove() |
---|
6088 | #dead_shares = self.find_uri_shares(self.uris["dead"]) |
---|
6089 | #for i in range(1, 10): |
---|
6090 | hunk ./src/allmydata/test/test_web.py 4814 |
---|
6091 | - # os.unlink(dead_shares[i][2]) |
---|
6092 | + # dead_shares[i][2].remove() |
---|
6093 | |
---|
6094 | #c_shares = self.find_uri_shares(self.uris["corrupt"]) |
---|
6095 | #cso = CorruptShareOptions() |
---|
6096 | hunk ./src/allmydata/test/test_web.py 4819 |
---|
6097 | #cso.stdout = StringIO() |
---|
6098 | - #cso.parseOptions([c_shares[0][2]]) |
---|
6099 | + #cso.parseOptions([c_shares[0][2].path]) |
---|
6100 | #corrupt_share(cso) |
---|
6101 | d.addCallback(_clobber_shares) |
---|
6102 | |
---|
6103 | hunk ./src/allmydata/test/test_web.py 4870 |
---|
6104 | d.addErrback(self.explain_web_error) |
---|
6105 | return d |
---|
6106 | |
---|
6107 | - def _count_leases(self, ignored, which): |
---|
6108 | - u = self.uris[which] |
---|
6109 | - shares = self.find_uri_shares(u) |
---|
6110 | - lease_counts = [] |
---|
6111 | - for shnum, serverid, fn in shares: |
---|
6112 | - sf = get_share_file(fn) |
---|
6113 | - num_leases = len(list(sf.get_leases())) |
---|
6114 | - lease_counts.append( (fn, num_leases) ) |
---|
6115 | - return lease_counts |
---|
6116 | - |
---|
6117 | - def _assert_leasecount(self, lease_counts, expected): |
---|
6118 | + def _assert_leasecount(self, ignored, which, expected): |
---|
6119 | + lease_counts = self.count_leases(self.uris[which]) |
---|
6120 | for (fn, num_leases) in lease_counts: |
---|
6121 | if num_leases != expected: |
---|
6122 | self.fail("expected %d leases, have %d, on %s" % |
---|
6123 | hunk ./src/allmydata/test/test_web.py 4903 |
---|
6124 | self.fileurls[which] = "uri/" + urllib.quote(self.uris[which]) |
---|
6125 | d.addCallback(_compute_fileurls) |
---|
6126 | |
---|
6127 | - d.addCallback(self._count_leases, "one") |
---|
6128 | - d.addCallback(self._assert_leasecount, 1) |
---|
6129 | - d.addCallback(self._count_leases, "two") |
---|
6130 | - d.addCallback(self._assert_leasecount, 1) |
---|
6131 | - d.addCallback(self._count_leases, "mutable") |
---|
6132 | - d.addCallback(self._assert_leasecount, 1) |
---|
6133 | + d.addCallback(self._assert_leasecount, "one", 1) |
---|
6134 | + d.addCallback(self._assert_leasecount, "two", 1) |
---|
6135 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
6136 | |
---|
6137 | d.addCallback(self.CHECK, "one", "t=check") # no add-lease |
---|
6138 | def _got_html_good(res): |
---|
6139 | hunk ./src/allmydata/test/test_web.py 4913 |
---|
6140 | self.failIf("Not Healthy" in res, res) |
---|
6141 | d.addCallback(_got_html_good) |
---|
6142 | |
---|
6143 | - d.addCallback(self._count_leases, "one") |
---|
6144 | - d.addCallback(self._assert_leasecount, 1) |
---|
6145 | - d.addCallback(self._count_leases, "two") |
---|
6146 | - d.addCallback(self._assert_leasecount, 1) |
---|
6147 | - d.addCallback(self._count_leases, "mutable") |
---|
6148 | - d.addCallback(self._assert_leasecount, 1) |
---|
6149 | + d.addCallback(self._assert_leasecount, "one", 1) |
---|
6150 | + d.addCallback(self._assert_leasecount, "two", 1) |
---|
6151 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
6152 | |
---|
6153 | # this CHECK uses the original client, which uses the same |
---|
6154 | # lease-secrets, so it will just renew the original lease |
---|
6155 | hunk ./src/allmydata/test/test_web.py 4922 |
---|
6156 | d.addCallback(self.CHECK, "one", "t=check&add-lease=true") |
---|
6157 | d.addCallback(_got_html_good) |
---|
6158 | |
---|
6159 | - d.addCallback(self._count_leases, "one") |
---|
6160 | - d.addCallback(self._assert_leasecount, 1) |
---|
6161 | - d.addCallback(self._count_leases, "two") |
---|
6162 | - d.addCallback(self._assert_leasecount, 1) |
---|
6163 | - d.addCallback(self._count_leases, "mutable") |
---|
6164 | - d.addCallback(self._assert_leasecount, 1) |
---|
6165 | + d.addCallback(self._assert_leasecount, "one", 1) |
---|
6166 | + d.addCallback(self._assert_leasecount, "two", 1) |
---|
6167 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
6168 | |
---|
6169 | # this CHECK uses an alternate client, which adds a second lease |
---|
6170 | d.addCallback(self.CHECK, "one", "t=check&add-lease=true", clientnum=1) |
---|
6171 | hunk ./src/allmydata/test/test_web.py 4930 |
---|
6172 | d.addCallback(_got_html_good) |
---|
6173 | |
---|
6174 | - d.addCallback(self._count_leases, "one") |
---|
6175 | - d.addCallback(self._assert_leasecount, 2) |
---|
6176 | - d.addCallback(self._count_leases, "two") |
---|
6177 | - d.addCallback(self._assert_leasecount, 1) |
---|
6178 | - d.addCallback(self._count_leases, "mutable") |
---|
6179 | - d.addCallback(self._assert_leasecount, 1) |
---|
6180 | + d.addCallback(self._assert_leasecount, "one", 2) |
---|
6181 | + d.addCallback(self._assert_leasecount, "two", 1) |
---|
6182 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
6183 | |
---|
6184 | d.addCallback(self.CHECK, "mutable", "t=check&add-lease=true") |
---|
6185 | d.addCallback(_got_html_good) |
---|
6186 | hunk ./src/allmydata/test/test_web.py 4937 |
---|
6187 | |
---|
6188 | - d.addCallback(self._count_leases, "one") |
---|
6189 | - d.addCallback(self._assert_leasecount, 2) |
---|
6190 | - d.addCallback(self._count_leases, "two") |
---|
6191 | - d.addCallback(self._assert_leasecount, 1) |
---|
6192 | - d.addCallback(self._count_leases, "mutable") |
---|
6193 | - d.addCallback(self._assert_leasecount, 1) |
---|
6194 | + d.addCallback(self._assert_leasecount, "one", 2) |
---|
6195 | + d.addCallback(self._assert_leasecount, "two", 1) |
---|
6196 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
6197 | |
---|
6198 | d.addCallback(self.CHECK, "mutable", "t=check&add-lease=true", |
---|
6199 | clientnum=1) |
---|
6200 | hunk ./src/allmydata/test/test_web.py 4945 |
---|
6201 | d.addCallback(_got_html_good) |
---|
6202 | |
---|
6203 | - d.addCallback(self._count_leases, "one") |
---|
6204 | - d.addCallback(self._assert_leasecount, 2) |
---|
6205 | - d.addCallback(self._count_leases, "two") |
---|
6206 | - d.addCallback(self._assert_leasecount, 1) |
---|
6207 | - d.addCallback(self._count_leases, "mutable") |
---|
6208 | - d.addCallback(self._assert_leasecount, 2) |
---|
6209 | + d.addCallback(self._assert_leasecount, "one", 2) |
---|
6210 | + d.addCallback(self._assert_leasecount, "two", 1) |
---|
6211 | + d.addCallback(self._assert_leasecount, "mutable", 2) |
---|
6212 | |
---|
6213 | d.addErrback(self.explain_web_error) |
---|
6214 | return d |
---|
6215 | hunk ./src/allmydata/test/test_web.py 4989 |
---|
6216 | self.failUnlessReallyEqual(len(units), 4+1) |
---|
6217 | d.addCallback(_done) |
---|
6218 | |
---|
6219 | - d.addCallback(self._count_leases, "root") |
---|
6220 | - d.addCallback(self._assert_leasecount, 1) |
---|
6221 | - d.addCallback(self._count_leases, "one") |
---|
6222 | - d.addCallback(self._assert_leasecount, 1) |
---|
6223 | - d.addCallback(self._count_leases, "mutable") |
---|
6224 | - d.addCallback(self._assert_leasecount, 1) |
---|
6225 | + d.addCallback(self._assert_leasecount, "root", 1) |
---|
6226 | + d.addCallback(self._assert_leasecount, "one", 1) |
---|
6227 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
6228 | |
---|
6229 | d.addCallback(self.CHECK, "root", "t=stream-deep-check&add-lease=true") |
---|
6230 | d.addCallback(_done) |
---|
6231 | hunk ./src/allmydata/test/test_web.py 4996 |
---|
6232 | |
---|
6233 | - d.addCallback(self._count_leases, "root") |
---|
6234 | - d.addCallback(self._assert_leasecount, 1) |
---|
6235 | - d.addCallback(self._count_leases, "one") |
---|
6236 | - d.addCallback(self._assert_leasecount, 1) |
---|
6237 | - d.addCallback(self._count_leases, "mutable") |
---|
6238 | - d.addCallback(self._assert_leasecount, 1) |
---|
6239 | + d.addCallback(self._assert_leasecount, "root", 1) |
---|
6240 | + d.addCallback(self._assert_leasecount, "one", 1) |
---|
6241 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
6242 | |
---|
6243 | d.addCallback(self.CHECK, "root", "t=stream-deep-check&add-lease=true", |
---|
6244 | clientnum=1) |
---|
6245 | hunk ./src/allmydata/test/test_web.py 5004 |
---|
6246 | d.addCallback(_done) |
---|
6247 | |
---|
6248 | - d.addCallback(self._count_leases, "root") |
---|
6249 | - d.addCallback(self._assert_leasecount, 2) |
---|
6250 | - d.addCallback(self._count_leases, "one") |
---|
6251 | - d.addCallback(self._assert_leasecount, 2) |
---|
6252 | - d.addCallback(self._count_leases, "mutable") |
---|
6253 | - d.addCallback(self._assert_leasecount, 2) |
---|
6254 | + d.addCallback(self._assert_leasecount, "root", 2) |
---|
6255 | + d.addCallback(self._assert_leasecount, "one", 2) |
---|
6256 | + d.addCallback(self._assert_leasecount, "mutable", 2) |
---|
6257 | |
---|
6258 | d.addErrback(self.explain_web_error) |
---|
6259 | return d |
---|
6260 | merger 0.0 ( |
---|
6261 | hunk ./src/allmydata/uri.py 829 |
---|
6262 | + def is_readonly(self): |
---|
6263 | + return True |
---|
6264 | + |
---|
6265 | + def get_readonly(self): |
---|
6266 | + return self |
---|
6267 | + |
---|
6268 | + |
---|
6269 | hunk ./src/allmydata/uri.py 829 |
---|
6270 | + def is_readonly(self): |
---|
6271 | + return True |
---|
6272 | + |
---|
6273 | + def get_readonly(self): |
---|
6274 | + return self |
---|
6275 | + |
---|
6276 | + |
---|
6277 | ) |
---|
6278 | merger 0.0 ( |
---|
6279 | hunk ./src/allmydata/uri.py 848 |
---|
6280 | + def is_readonly(self): |
---|
6281 | + return True |
---|
6282 | + |
---|
6283 | + def get_readonly(self): |
---|
6284 | + return self |
---|
6285 | + |
---|
6286 | hunk ./src/allmydata/uri.py 848 |
---|
6287 | + def is_readonly(self): |
---|
6288 | + return True |
---|
6289 | + |
---|
6290 | + def get_readonly(self): |
---|
6291 | + return self |
---|
6292 | + |
---|
6293 | ) |
---|
6294 | hunk ./src/allmydata/util/encodingutil.py 221 |
---|
6295 | def quote_path(path, quotemarks=True): |
---|
6296 | return quote_output("/".join(map(to_str, path)), quotemarks=quotemarks) |
---|
6297 | |
---|
6298 | +def quote_filepath(fp, quotemarks=True, encoding=None): |
---|
6299 | + path = fp.path |
---|
6300 | + if isinstance(path, str): |
---|
6301 | + try: |
---|
6302 | + path = path.decode(filesystem_encoding) |
---|
6303 | + except UnicodeDecodeError: |
---|
6304 | + return 'b"%s"' % (ESCAPABLE_8BIT.sub(_str_escape, path),) |
---|
6305 | + |
---|
6306 | + return quote_output(path, quotemarks=quotemarks, encoding=encoding) |
---|
6307 | + |
---|
6308 | |
---|
6309 | def unicode_platform(): |
---|
6310 | """ |
---|
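Note: a hedged usage sketch for the quote_filepath helper added above (the
path is hypothetical; the exact output depends on the local filesystem
encoding):

    from twisted.python.filepath import FilePath
    fp = FilePath("/tmp/caf\xc3\xa9")   # a UTF-8 encoded name
    print quote_filepath(fp)            # e.g. u'"/tmp/café"' on a UTF-8 system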
6311 | hunk ./src/allmydata/util/fileutil.py 5 |
---|
6312 | Futz with files like a pro. |
---|
6313 | """ |
---|
6314 | |
---|
6315 | -import sys, exceptions, os, stat, tempfile, time, binascii |
---|
6316 | +import errno, sys, exceptions, os, stat, tempfile, time, binascii |
---|
6317 | + |
---|
6318 | +from allmydata.util.assertutil import precondition |
---|
6319 | |
---|
6320 | from twisted.python import log |
---|
6321 | hunk ./src/allmydata/util/fileutil.py 10 |
---|
6322 | +from twisted.python.filepath import FilePath, UnlistableError |
---|
6323 | |
---|
6324 | from pycryptopp.cipher.aes import AES |
---|
6325 | |
---|
6326 | hunk ./src/allmydata/util/fileutil.py 189 |
---|
6327 | raise tx |
---|
6328 | raise exceptions.IOError, "unknown error prevented creation of directory, or deleted the directory immediately after creation: %s" % dirname # careful not to construct an IOError with a 2-tuple, as that has a special meaning... |
---|
6329 | |
---|
6330 | -def rm_dir(dirname): |
---|
6331 | +def fp_make_dirs(dirfp): |
---|
6332 | + """ |
---|
6333 | + An idempotent version of FilePath.makedirs(). If the dir already |
---|
6334 | + exists, do nothing and return without raising an exception. If this |
---|
6335 | + call creates the dir, return without raising an exception. If there is |
---|
6336 | + an error that prevents creation or if the directory gets deleted after |
---|
6337 | + fp_make_dirs() creates it and before fp_make_dirs() checks that it |
---|
6338 | + exists, raise an exception. |
---|
6339 | + """ |
---|
6341 | + tx = None |
---|
6342 | + try: |
---|
6343 | + dirfp.makedirs() |
---|
6344 | + except OSError, x: |
---|
6345 | + tx = x |
---|
6346 | + |
---|
6347 | + if not dirfp.isdir(): |
---|
6348 | + if tx: |
---|
6349 | + raise tx |
---|
6350 | + raise exceptions.IOError, "unknown error prevented creation of directory, or deleted the directory immediately after creation: %s" % dirfp # careful not to construct an IOError with a 2-tuple, as that has a special meaning... |
---|
6351 | + |
---|
6352 | +def fp_rmdir_if_empty(dirfp): |
---|
6353 | + """ Remove the directory if it is empty. """ |
---|
6354 | + try: |
---|
6355 | + os.rmdir(dirfp.path) |
---|
6356 | + except OSError, e: |
---|
6357 | + if e.errno != errno.ENOTEMPTY: |
---|
6358 | + raise |
---|
6359 | + else: |
---|
6360 | + dirfp.changed() |
---|
6361 | + |
---|
6362 | +def rmtree(dirname): |
---|
6363 | """ |
---|
6364 | A threadsafe and idempotent version of shutil.rmtree(). If the dir is |
---|
6365 | already gone, do nothing and return without raising an exception. If this |
---|
6366 | hunk ./src/allmydata/util/fileutil.py 239 |
---|
6367 | else: |
---|
6368 | remove(fullname) |
---|
6369 | os.rmdir(dirname) |
---|
6370 | - except Exception, le: |
---|
6371 | - # Ignore "No such file or directory" |
---|
6372 | - if (not isinstance(le, OSError)) or le.args[0] != 2: |
---|
6373 | + except EnvironmentError, le: |
---|
6374 | + # Ignore "No such file or directory", collect any other exception. |
---|
6375 | + if le.args[0] not in (2, 3): # 2 == errno.ENOENT |
---|
6376 | excs.append(le) |
---|
6377 | hunk ./src/allmydata/util/fileutil.py 243 |
---|
6378 | + except Exception, le: |
---|
6379 | + excs.append(le) |
---|
6380 | |
---|
6381 | # Okay, now we've recursively removed everything, ignoring any "No |
---|
6382 | # such file or directory" errors, and collecting any other errors. |
---|
6383 | hunk ./src/allmydata/util/fileutil.py 256 |
---|
6384 | raise OSError, "Failed to remove dir for unknown reason." |
---|
6385 | raise OSError, excs |
---|
6386 | |
---|
6387 | +def fp_remove(fp): |
---|
6388 | + """ |
---|
6389 | + An idempotent version of shutil.rmtree(). If the file/dir is already |
---|
6390 | + gone, do nothing and return without raising an exception. If this call |
---|
6391 | + removes the file/dir, return without raising an exception. If there is |
---|
6392 | + an error that prevents removal, or if a file or directory at the same |
---|
6393 | + path gets created again by someone else after this deletes it and before |
---|
6394 | + this checks that it is gone, raise an exception. |
---|
6395 | + """ |
---|
6396 | + try: |
---|
6397 | + fp.remove() |
---|
6398 | + except UnlistableError, e: |
---|
6399 | + if e.originalException.errno != errno.ENOENT: |
---|
6400 | + raise |
---|
6401 | + except OSError, e: |
---|
6402 | + if e.errno != errno.ENOENT: |
---|
6403 | + raise |
---|
6404 | + |
---|
6405 | +def rm_dir(dirname): |
---|
6406 | + # Renamed to be like shutil.rmtree and unlike rmdir. |
---|
6407 | + return rmtree(dirname) |
---|
6408 | |
---|
6409 | def remove_if_possible(f): |
---|
6410 | try: |
---|
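Note: a usage sketch for the new idempotent FilePath helpers defined above
(the path is illustrative):

    from twisted.python.filepath import FilePath
    d = FilePath("/tmp/example/ab/cd")
    fp_make_dirs(d)         # succeeds whether or not the directories already exist
    fp_rmdir_if_empty(d)    # removes .../cd only if it contains nothing
    fp_remove(d.parent())   # recursive remove; no error if already gone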
6411 | hunk ./src/allmydata/util/fileutil.py 387 |
---|
6412 | import traceback |
---|
6413 | traceback.print_exc() |
---|
6414 | |
---|
6415 | -def get_disk_stats(whichdir, reserved_space=0): |
---|
6416 | +def get_disk_stats(whichdirfp, reserved_space=0): |
---|
6417 | """Return disk statistics for the storage disk, in the form of a dict |
---|
6418 | with the following fields. |
---|
6419 | total: total bytes on disk |
---|
6420 | hunk ./src/allmydata/util/fileutil.py 408 |
---|
6421 | you can pass how many bytes you would like to leave unused on this |
---|
6422 | filesystem as reserved_space. |
---|
6423 | """ |
---|
6424 | + precondition(isinstance(whichdirfp, FilePath), whichdirfp) |
---|
6425 | |
---|
6426 | if have_GetDiskFreeSpaceExW: |
---|
6427 | # If this is a Windows system and GetDiskFreeSpaceExW is available, use it. |
---|
6428 | hunk ./src/allmydata/util/fileutil.py 419 |
---|
6429 | n_free_for_nonroot = c_ulonglong(0) |
---|
6430 | n_total = c_ulonglong(0) |
---|
6431 | n_free_for_root = c_ulonglong(0) |
---|
6432 | - retval = GetDiskFreeSpaceExW(whichdir, byref(n_free_for_nonroot), |
---|
6433 | - byref(n_total), |
---|
6434 | - byref(n_free_for_root)) |
---|
6435 | + retval = GetDiskFreeSpaceExW(whichdirfp.path, byref(n_free_for_nonroot), |
---|
6436 | + byref(n_total), |
---|
6437 | + byref(n_free_for_root)) |
---|
6438 | if retval == 0: |
---|
6439 | raise OSError("Windows error %d attempting to get disk statistics for %r" |
---|
6440 | hunk ./src/allmydata/util/fileutil.py 424 |
---|
6441 | - % (GetLastError(), whichdir)) |
---|
6442 | + % (GetLastError(), whichdirfp.path)) |
---|
6443 | free_for_nonroot = n_free_for_nonroot.value |
---|
6444 | total = n_total.value |
---|
6445 | free_for_root = n_free_for_root.value |
---|
6446 | hunk ./src/allmydata/util/fileutil.py 433 |
---|
6447 | # <http://docs.python.org/library/os.html#os.statvfs> |
---|
6448 | # <http://opengroup.org/onlinepubs/7990989799/xsh/fstatvfs.html> |
---|
6449 | # <http://opengroup.org/onlinepubs/7990989799/xsh/sysstatvfs.h.html> |
---|
6450 | - s = os.statvfs(whichdir) |
---|
6451 | + s = os.statvfs(whichdirfp.path) |
---|
6452 | |
---|
6453 | # on my mac laptop: |
---|
6454 | # statvfs(2) is a wrapper around statfs(2). |
---|
6455 | hunk ./src/allmydata/util/fileutil.py 460 |
---|
6456 | 'avail': avail, |
---|
6457 | } |
---|
6458 | |
---|
6459 | -def get_available_space(whichdir, reserved_space): |
---|
6460 | +def get_available_space(whichdirfp, reserved_space): |
---|
6461 | """Returns available space for share storage in bytes, or None if no |
---|
6462 | API to get this information is available. |
---|
6463 | |
---|
6464 | hunk ./src/allmydata/util/fileutil.py 472 |
---|
6465 | you can pass how many bytes you would like to leave unused on this |
---|
6466 | filesystem as reserved_space. |
---|
6467 | """ |
---|
6468 | + precondition(isinstance(whichdirfp, FilePath), whichdirfp) |
---|
6469 | try: |
---|
6470 | hunk ./src/allmydata/util/fileutil.py 474 |
---|
6471 | - return get_disk_stats(whichdir, reserved_space)['avail'] |
---|
6472 | + return get_disk_stats(whichdirfp, reserved_space)['avail'] |
---|
6473 | except AttributeError: |
---|
6474 | return None |
---|
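Note: an illustrative call under the new FilePath-based signatures (the
storage directory is hypothetical):

    from twisted.python.filepath import FilePath
    storedir = FilePath('/var/tahoe/storage')
    stats = get_disk_stats(storedir, reserved_space=2**20)
    print stats['total'], stats['avail']
    avail = get_available_space(storedir, 2**20)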
6475 | hunk ./src/allmydata/util/fileutil.py 477 |
---|
6476 | - except EnvironmentError: |
---|
6477 | - log.msg("OS call to get disk statistics failed") |
---|
6478 | + |
---|
6479 | + |
---|
6480 | +def get_used_space(fp): |
---|
6481 | + if fp is None: |
---|
6482 | return 0 |
---|
6483 | hunk ./src/allmydata/util/fileutil.py 482 |
---|
6484 | + try: |
---|
6485 | + s = os.stat(fp.path) |
---|
6486 | + except EnvironmentError: |
---|
6487 | + if not fp.exists(): |
---|
6488 | + return 0 |
---|
6489 | + raise |
---|
6490 | + else: |
---|
6491 | + # POSIX defines st_blocks (originally a BSDism): |
---|
6492 | + # <http://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/stat.h.html> |
---|
6493 | + # but does not require stat() to give it a "meaningful value" |
---|
6494 | + # <http://pubs.opengroup.org/onlinepubs/009695399/functions/stat.html> |
---|
6495 | + # and says: |
---|
6496 | + # "The unit for the st_blocks member of the stat structure is not defined |
---|
6497 | + # within IEEE Std 1003.1-2001. In some implementations it is 512 bytes. |
---|
6498 | + # It may differ on a file system basis. There is no correlation between |
---|
6499 | + # values of the st_blocks and st_blksize, and the f_bsize (from <sys/statvfs.h>) |
---|
6500 | + # structure members." |
---|
6501 | + # |
---|
6502 | + # The Linux docs define it as "the number of blocks allocated to the file, |
---|
6503 | + # [in] 512-byte units." It is also defined that way on MacOS X. Python does |
---|
6504 | + # not set the attribute on Windows. |
---|
6505 | + # |
---|
6506 | + # We consider platforms that define st_blocks but give it a wrong value, or |
---|
6507 | + # measure it in a unit other than 512 bytes, to be broken. See also |
---|
6508 | + # <http://bugs.python.org/issue12350>. |
---|
6509 | + |
---|
6510 | + if hasattr(s, 'st_blocks'): |
---|
6511 | + return s.st_blocks * 512 |
---|
6512 | + else: |
---|
6513 | + return s.st_size |
---|
6514 | } |
---|
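Note: the st_blocks discussion in get_used_space above can be seen concretely
with a sparse file on a POSIX system. This is a throwaway demonstration, not
part of the patch:

    import os, tempfile
    fd, path = tempfile.mkstemp()
    os.ftruncate(fd, 10*1024*1024)   # a 10 MiB hole; no data blocks written
    os.close(fd)
    s = os.stat(path)
    print s.st_size                  # 10485760 (apparent size)
    if hasattr(s, 'st_blocks'):
        print s.st_blocks * 512      # allocated size; typically far smaller
    os.remove(path)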
6515 | [Work-in-progress, includes fix to bug involving BucketWriter. refs #999 |
---|
6516 | david-sarah@jacaranda.org**20110920033803 |
---|
6517 | Ignore-this: 64e9e019421454e4d08141d10b6e4eed |
---|
6518 | ] { |
---|
6519 | hunk ./src/allmydata/client.py 9 |
---|
6520 | from twisted.internet import reactor, defer |
---|
6521 | from twisted.application import service |
---|
6522 | from twisted.application.internet import TimerService |
---|
6523 | +from twisted.python.filepath import FilePath |
---|
6524 | from foolscap.api import Referenceable |
---|
6525 | from pycryptopp.publickey import rsa |
---|
6526 | |
---|
6527 | hunk ./src/allmydata/client.py 15 |
---|
6528 | import allmydata |
---|
6529 | from allmydata.storage.server import StorageServer |
---|
6530 | +from allmydata.storage.backends.disk.disk_backend import DiskBackend |
---|
6531 | from allmydata import storage_client |
---|
6532 | from allmydata.immutable.upload import Uploader |
---|
6533 | from allmydata.immutable.offloaded import Helper |
---|
6534 | hunk ./src/allmydata/client.py 213 |
---|
6535 | return |
---|
6536 | readonly = self.get_config("storage", "readonly", False, boolean=True) |
---|
6537 | |
---|
6538 | - storedir = os.path.join(self.basedir, self.STOREDIR) |
---|
6539 | + storedir = FilePath(self.basedir).child(self.STOREDIR) |
---|
6540 | |
---|
6541 | data = self.get_config("storage", "reserved_space", None) |
---|
6542 | reserved = None |
---|
6543 | hunk ./src/allmydata/client.py 255 |
---|
6544 | 'cutoff_date': cutoff_date, |
---|
6545 | 'sharetypes': tuple(sharetypes), |
---|
6546 | } |
---|
6547 | - ss = StorageServer(storedir, self.nodeid, |
---|
6548 | - reserved_space=reserved, |
---|
6549 | - discard_storage=discard, |
---|
6550 | - readonly_storage=readonly, |
---|
6551 | + |
---|
6552 | + backend = DiskBackend(storedir, readonly=readonly, reserved_space=reserved, |
---|
6553 | + discard_storage=discard) |
---|
6554 | + ss = StorageServer(nodeid, backend, storedir, |
---|
6555 | stats_provider=self.stats_provider, |
---|
6556 | expiration_policy=expiration_policy) |
---|
6557 | self.add_service(ss) |
---|
6558 | hunk ./src/allmydata/interfaces.py 348 |
---|
6559 | |
---|
6560 | def get_shares(): |
---|
6561 | """ |
---|
6562 | - Generates the IStoredShare objects held in this shareset. |
---|
6563 | + Generates IStoredShare objects for all completed shares in this shareset. |
---|
6564 | """ |
---|
6565 | |
---|
6566 | def has_incoming(shnum): |
---|
6567 | hunk ./src/allmydata/storage/backends/base.py 69 |
---|
6568 | # def _create_mutable_share(self, storageserver, shnum, write_enabler): |
---|
6569 | # """create a mutable share with the given shnum and write_enabler""" |
---|
6570 | |
---|
6571 | - # secrets might be a triple with cancel_secret in secrets[2], but if |
---|
6572 | - # so we ignore the cancel_secret. |
---|
6573 | write_enabler = secrets[0] |
---|
6574 | renew_secret = secrets[1] |
---|
6575 | hunk ./src/allmydata/storage/backends/base.py 71 |
---|
6576 | + cancel_secret = '\x00'*32 |
---|
6577 | + if len(secrets) > 2: |
---|
6578 | + cancel_secret = secrets[2] |
---|
6579 | |
---|
6580 | si_s = self.get_storage_index_string() |
---|
6581 | shares = {} |
---|
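Note: a condensed sketch of the secrets convention handled above -- the tuple
may or may not carry a third element (the cancel_secret), which defaults to
32 zero bytes when absent. The helper name is hypothetical:

    def unpack_secrets(secrets):
        write_enabler, renew_secret = secrets[0], secrets[1]
        cancel_secret = '\x00'*32
        if len(secrets) > 2:
            cancel_secret = secrets[2]
        return write_enabler, renew_secret, cancel_secret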
6582 | hunk ./src/allmydata/storage/backends/base.py 110 |
---|
6583 | read_data[shnum] = share.readv(read_vector) |
---|
6584 | |
---|
6585 | ownerid = 1 # TODO |
---|
6586 | - lease_info = LeaseInfo(ownerid, renew_secret, |
---|
6587 | + lease_info = LeaseInfo(ownerid, renew_secret, cancel_secret, |
---|
6588 | expiration_time, storageserver.get_serverid()) |
---|
6589 | |
---|
6590 | if testv_is_good: |
---|
6591 | hunk ./src/allmydata/storage/backends/disk/disk_backend.py 34 |
---|
6592 | return newfp.child(sia) |
---|
6593 | |
---|
6594 | |
---|
6595 | -def get_share(fp): |
---|
6596 | +def get_share(storageindex, shnum, fp): |
---|
6597 | f = fp.open('rb') |
---|
6598 | try: |
---|
6599 | prefix = f.read(32) |
---|
6600 | hunk ./src/allmydata/storage/backends/disk/disk_backend.py 42 |
---|
6601 | f.close() |
---|
6602 | |
---|
6603 | if prefix == MutableDiskShare.MAGIC: |
---|
6604 | - return MutableDiskShare(fp) |
---|
6605 | + return MutableDiskShare(storageindex, shnum, fp) |
---|
6606 | else: |
---|
6607 | # assume it's immutable |
---|
6608 | hunk ./src/allmydata/storage/backends/disk/disk_backend.py 45 |
---|
6609 | - return ImmutableDiskShare(fp) |
---|
6610 | + return ImmutableDiskShare(storageindex, shnum, fp) |
---|
6611 | |
---|
6612 | |
---|
6613 | class DiskBackend(Backend): |
---|
6614 | hunk ./src/allmydata/storage/backends/disk/disk_backend.py 174 |
---|
6615 | if not NUM_RE.match(shnumstr): |
---|
6616 | continue |
---|
6617 | sharehome = self._sharehomedir.child(shnumstr) |
---|
6618 | - yield self.get_share(sharehome) |
---|
6619 | + yield get_share(self.get_storage_index(), int(shnumstr), sharehome) |
---|
6620 | except UnlistableError: |
---|
6621 | # There is no shares directory at all. |
---|
6622 | pass |
---|
6623 | hunk ./src/allmydata/storage/backends/disk/disk_backend.py 185 |
---|
6624 | return self._incominghomedir.child(str(shnum)).exists() |
---|
6625 | |
---|
6626 | def make_bucket_writer(self, storageserver, shnum, max_space_per_bucket, lease_info, canary): |
---|
6627 | - sharehome = self._sharehomedir.child(str(shnum)) |
---|
6628 | + finalhome = self._sharehomedir.child(str(shnum)) |
---|
6629 | incominghome = self._incominghomedir.child(str(shnum)) |
---|
6630 | hunk ./src/allmydata/storage/backends/disk/disk_backend.py 187 |
---|
6631 | - immsh = ImmutableDiskShare(self.get_storage_index(), shnum, sharehome, incominghome, |
---|
6632 | - max_size=max_space_per_bucket, create=True) |
---|
6633 | + immsh = ImmutableDiskShare(self.get_storage_index(), shnum, incominghome, finalhome, |
---|
6634 | + max_size=max_space_per_bucket) |
---|
6635 | bw = BucketWriter(storageserver, immsh, max_space_per_bucket, lease_info, canary) |
---|
6636 | if self._discard_storage: |
---|
6637 | bw.throw_out_all_data = True |
---|
6638 | hunk ./src/allmydata/storage/backends/disk/disk_backend.py 198 |
---|
6639 | fileutil.fp_make_dirs(self._sharehomedir) |
---|
6640 | sharehome = self._sharehomedir.child(str(shnum)) |
---|
6641 | serverid = storageserver.get_serverid() |
---|
6642 | - return create_mutable_disk_share(sharehome, serverid, write_enabler, storageserver) |
---|
6643 | + return create_mutable_disk_share(self.get_storage_index(), shnum, sharehome, serverid, write_enabler, storageserver) |
---|
6644 | |
---|
6645 | def _clean_up_after_unlink(self): |
---|
6646 | fileutil.fp_rmdir_if_empty(self._sharehomedir) |
---|
6647 | hunk ./src/allmydata/storage/backends/disk/immutable.py 48 |
---|
6648 | LEASE_SIZE = struct.calcsize(">L32s32sL") |
---|
6649 | |
---|
6650 | |
---|
6651 | - def __init__(self, storageindex, shnum, finalhome=None, incominghome=None, max_size=None, create=False): |
---|
6652 | - """ If max_size is not None then I won't allow more than |
---|
6653 | - max_size to be written to me. If create=True then max_size |
---|
6654 | - must not be None. """ |
---|
6655 | - precondition((max_size is not None) or (not create), max_size, create) |
---|
6656 | + def __init__(self, storageindex, shnum, home, finalhome=None, max_size=None): |
---|
6657 | + """ |
---|
6658 | + If max_size is not None then I won't allow more than max_size to be written to me. |
---|
6659 | + If finalhome is not None (meaning that we are creating the share) then max_size |
---|
6660 | + must not be None. |
---|
6661 | + """ |
---|
6662 | + precondition((max_size is not None) or (finalhome is None), max_size, finalhome) |
---|
6663 | self._storageindex = storageindex |
---|
6664 | self._max_size = max_size |
---|
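Note: the reworked constructor above supports two modes, sketched here with
illustrative names (si, incoming_fp and final_fp are assumed FilePaths):

    # creating a share: home is the incoming path, finalhome the final
    # path, and max_size is required
    share = ImmutableDiskShare(si, 4, incoming_fp, finalhome=final_fp,
                               max_size=1000)
    # reopening an existing share: only the final path is given
    share = ImmutableDiskShare(si, 4, final_fp)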
6665 | hunk ./src/allmydata/storage/backends/disk/immutable.py 57 |
---|
6666 | - self._incominghome = incominghome |
---|
6667 | - self._home = finalhome |
---|
6668 | + |
---|
6669 | + # If we are creating the share, _finalhome refers to the final path and |
---|
6670 | + # _home to the incoming path. Otherwise, _finalhome is None. |
---|
6671 | + self._finalhome = finalhome |
---|
6672 | + self._home = home |
---|
6673 | self._shnum = shnum |
---|
6674 | hunk ./src/allmydata/storage/backends/disk/immutable.py 63 |
---|
6675 | - if create: |
---|
6676 | - # touch the file, so later callers will see that we're working on |
---|
6677 | + |
---|
6678 | + if self._finalhome is not None: |
---|
6679 | + # Touch the file, so later callers will see that we're working on |
---|
6680 | # it. Also construct the metadata. |
---|
6681 | hunk ./src/allmydata/storage/backends/disk/immutable.py 67 |
---|
6682 | - assert not finalhome.exists() |
---|
6683 | - fp_make_dirs(self._incominghome.parent()) |
---|
6684 | + assert not self._finalhome.exists() |
---|
6685 | + fp_make_dirs(self._home.parent()) |
---|
6686 | # The second field -- the four-byte share data length -- is no |
---|
6687 | # longer used as of Tahoe v1.3.0, but we continue to write it in |
---|
6688 | # there in case someone downgrades a storage server from >= |
---|
6689 | hunk ./src/allmydata/storage/backends/disk/immutable.py 78 |
---|
6690 | # the largest length that can fit into the field. That way, even |
---|
6691 | # if this does happen, the old < v1.3.0 server will still allow |
---|
6692 | # clients to read the first part of the share. |
---|
6693 | - self._incominghome.setContent(struct.pack(">LLL", 1, min(2**32-1, max_size), 0) ) |
---|
6694 | + self._home.setContent(struct.pack(">LLL", 1, min(2**32-1, max_size), 0) ) |
---|
6695 | self._lease_offset = max_size + 0x0c |
---|
6696 | self._num_leases = 0 |
---|
6697 | else: |
---|
6698 | hunk ./src/allmydata/storage/backends/disk/immutable.py 101 |
---|
6699 | % (si_b2a(self._storageindex), self._shnum, quote_filepath(self._home))) |
---|
6700 | |
---|
6701 | def close(self): |
---|
6702 | - fileutil.fp_make_dirs(self._home.parent()) |
---|
6703 | - self._incominghome.moveTo(self._home) |
---|
6704 | - try: |
---|
6705 | - # self._incominghome is like storage/shares/incoming/ab/abcde/4 . |
---|
6706 | - # We try to delete the parent (.../ab/abcde) to avoid leaving |
---|
6707 | - # these directories lying around forever, but the delete might |
---|
6708 | - # fail if we're working on another share for the same storage |
---|
6709 | - # index (like ab/abcde/5). The alternative approach would be to |
---|
6710 | - # use a hierarchy of objects (PrefixHolder, BucketHolder, |
---|
6711 | - # ShareWriter), each of which is responsible for a single |
---|
6712 | - # directory on disk, and have them use reference counting of |
---|
6713 | - # their children to know when they should do the rmdir. This |
---|
6714 | - # approach is simpler, but relies on os.rmdir refusing to delete |
---|
6715 | - # a non-empty directory. Do *not* use fileutil.fp_remove() here! |
---|
6716 | - fileutil.fp_rmdir_if_empty(self._incominghome.parent()) |
---|
6717 | - # we also delete the grandparent (prefix) directory, .../ab , |
---|
6718 | - # again to avoid leaving directories lying around. This might |
---|
6719 | - # fail if there is another bucket open that shares a prefix (like |
---|
6720 | - # ab/abfff). |
---|
6721 | - fileutil.fp_rmdir_if_empty(self._incominghome.parent().parent()) |
---|
6722 | - # we leave the great-grandparent (incoming/) directory in place. |
---|
6723 | - except EnvironmentError: |
---|
6724 | - # ignore the "can't rmdir because the directory is not empty" |
---|
6725 | - # exceptions, those are normal consequences of the |
---|
6726 | - # above-mentioned conditions. |
---|
6727 | - pass |
---|
6728 | - pass |
---|
6729 | + fileutil.fp_make_dirs(self._finalhome.parent()) |
---|
6730 | + self._home.moveTo(self._finalhome) |
---|
6731 | + |
---|
6732 | + # self._home is like storage/shares/incoming/ab/abcde/4 . |
---|
6733 | + # We try to delete the parent (.../ab/abcde) to avoid leaving |
---|
6734 | + # these directories lying around forever, but the delete might |
---|
6735 | + # fail if we're working on another share for the same storage |
---|
6736 | + # index (like ab/abcde/5). The alternative approach would be to |
---|
6737 | + # use a hierarchy of objects (PrefixHolder, BucketHolder, |
---|
6738 | + # ShareWriter), each of which is responsible for a single |
---|
6739 | + # directory on disk, and have them use reference counting of |
---|
6740 | + # their children to know when they should do the rmdir. This |
---|
6741 | + # approach is simpler, but relies on os.rmdir (used by |
---|
6742 | + # fp_rmdir_if_empty) refusing to delete a non-empty directory. |
---|
6743 | + # Do *not* use fileutil.fp_remove() here! |
---|
6744 | + parent = self._home.parent() |
---|
6745 | + fileutil.fp_rmdir_if_empty(parent) |
---|
6746 | + |
---|
6747 | + # we also delete the grandparent (prefix) directory, .../ab , |
---|
6748 | + # again to avoid leaving directories lying around. This might |
---|
6749 | + # fail if there is another bucket open that shares a prefix (like |
---|
6750 | + # ab/abfff). |
---|
6751 | + fileutil.fp_rmdir_if_empty(parent.parent()) |
---|
6752 | + |
---|
6753 | + # we leave the great-grandparent (incoming/) directory in place. |
---|
6754 | + |
---|
6755 | + # allow lease changes after closing. |
---|
6756 | + self._home = self._finalhome |
---|
6757 | + self._finalhome = None |
---|
6758 | |
---|
6759 | def get_used_space(self): |
---|
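Note: in outline, the close() path above reduces to the following sketch
(a hypothetical condensation, relying on fp_rmdir_if_empty refusing to
delete non-empty directories):

    def _finalize(incoming_fp, final_fp):
        fileutil.fp_make_dirs(final_fp.parent())
        incoming_fp.moveTo(final_fp)
        fileutil.fp_rmdir_if_empty(incoming_fp.parent())           # .../ab/abcde
        fileutil.fp_rmdir_if_empty(incoming_fp.parent().parent())  # .../ab
        return final_fp   # later lease changes use the final location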
6760 | hunk ./src/allmydata/storage/backends/disk/immutable.py 132 |
---|
6761 | - return (fileutil.get_used_space(self._home) + |
---|
6762 | - fileutil.get_used_space(self._incominghome)) |
---|
6763 | + return (fileutil.get_used_space(self._finalhome) + |
---|
6764 | + fileutil.get_used_space(self._home)) |
---|
6765 | |
---|
6766 | def get_storage_index(self): |
---|
6767 | return self._storageindex |
---|
6768 | hunk ./src/allmydata/storage/backends/disk/immutable.py 175 |
---|
6769 | precondition(offset >= 0, offset) |
---|
6770 | if self._max_size is not None and offset+length > self._max_size: |
---|
6771 | raise DataTooLargeError(self._max_size, offset, length) |
---|
6772 | - f = self._incominghome.open(mode='rb+') |
---|
6773 | + f = self._home.open(mode='rb+') |
---|
6774 | try: |
---|
6775 | real_offset = self._data_offset+offset |
---|
6776 | f.seek(real_offset) |
---|
6777 | hunk ./src/allmydata/storage/backends/disk/immutable.py 205 |
---|
6778 | |
---|
6779 | # These lease operations are intended for use by disk_backend.py. |
---|
6780 | # Other clients should not depend on the fact that the disk backend |
---|
6781 | - # stores leases in share files. |
---|
6782 | + # stores leases in share files. XXX bucket.py also relies on this. |
---|
6783 | |
---|
6784 | def get_leases(self): |
---|
6785 | """Yields a LeaseInfo instance for all leases.""" |
---|
6786 | hunk ./src/allmydata/storage/backends/disk/immutable.py 221 |
---|
6787 | f.close() |
---|
6788 | |
---|
6789 | def add_lease(self, lease_info): |
---|
6790 | - f = self._incominghome.open(mode='rb') |
---|
6791 | + f = self._home.open(mode='rb+') |
---|
6792 | try: |
---|
6793 | num_leases = self._read_num_leases(f) |
---|
6794 | hunk ./src/allmydata/storage/backends/disk/immutable.py 224 |
---|
6795 | - finally: |
---|
6796 | - f.close() |
---|
6797 | - f = self._home.open(mode='wb+') |
---|
6798 | - try: |
---|
6799 | self._write_lease_record(f, num_leases, lease_info) |
---|
6800 | self._write_num_leases(f, num_leases+1) |
---|
6801 | finally: |
---|
6802 | hunk ./src/allmydata/storage/backends/disk/mutable.py 440 |
---|
6803 | pass |
---|
6804 | |
---|
6805 | |
---|
6806 | -def create_mutable_disk_share(fp, serverid, write_enabler, parent): |
---|
6807 | - ms = MutableDiskShare(fp, parent) |
---|
6808 | +def create_mutable_disk_share(storageindex, shnum, fp, serverid, write_enabler, parent): |
---|
6809 | + ms = MutableDiskShare(storageindex, shnum, fp, parent) |
---|
6810 | ms.create(serverid, write_enabler) |
---|
6811 | del ms |
---|
6812 | hunk ./src/allmydata/storage/backends/disk/mutable.py 444 |
---|
6813 | - return MutableDiskShare(fp, parent) |
---|
6814 | + return MutableDiskShare(storageindex, shnum, fp, parent) |
---|
6815 | hunk ./src/allmydata/storage/bucket.py 44 |
---|
6816 | start = time.time() |
---|
6817 | |
---|
6818 | self._share.close() |
---|
6819 | - filelen = self._share.stat() |
---|
6820 | + # XXX should this be self._share.get_used_space() ? |
---|
6821 | + consumed_size = self._share.get_size() |
---|
6822 | self._share = None |
---|
6823 | |
---|
6824 | self.closed = True |
---|
6825 | hunk ./src/allmydata/storage/bucket.py 51 |
---|
6826 | self._canary.dontNotifyOnDisconnect(self._disconnect_marker) |
---|
6827 | |
---|
6828 | - self.ss.bucket_writer_closed(self, filelen) |
---|
6829 | + self.ss.bucket_writer_closed(self, consumed_size) |
---|
6830 | self.ss.add_latency("close", time.time() - start) |
---|
6831 | self.ss.count("close") |
---|
6832 | |
---|
6833 | hunk ./src/allmydata/storage/server.py 182 |
---|
6834 | renew_secret, cancel_secret, |
---|
6835 | sharenums, allocated_size, |
---|
6836 | canary, owner_num=0): |
---|
6837 | - # cancel_secret is no longer used. |
---|
6838 | # owner_num is not for clients to set, but rather it should be |
---|
6839 | # curried into a StorageServer instance dedicated to a particular |
---|
6840 | # owner. |
---|
6841 | hunk ./src/allmydata/storage/server.py 195 |
---|
6842 | # Note that the lease should not be added until the BucketWriter |
---|
6843 | # has been closed. |
---|
6844 | expire_time = time.time() + 31*24*60*60 |
---|
6845 | - lease_info = LeaseInfo(owner_num, renew_secret, |
---|
6846 | + lease_info = LeaseInfo(owner_num, renew_secret, cancel_secret, |
---|
6847 | expire_time, self._serverid) |
---|
6848 | |
---|
6849 | max_space_per_bucket = allocated_size |
---|
6850 | hunk ./src/allmydata/test/no_network.py 349 |
---|
6851 | return self.g.servers_by_number[i] |
---|
6852 | |
---|
6853 | def get_serverdir(self, i): |
---|
6854 | - return self.g.servers_by_number[i].backend.storedir |
---|
6855 | + return self.g.servers_by_number[i].backend._storedir |
---|
6856 | |
---|
6857 | def remove_server(self, i): |
---|
6858 | self.g.remove_server(self.g.servers_by_number[i].get_serverid()) |
---|
6859 | hunk ./src/allmydata/test/no_network.py 357 |
---|
6860 | def iterate_servers(self): |
---|
6861 | for i in sorted(self.g.servers_by_number.keys()): |
---|
6862 | ss = self.g.servers_by_number[i] |
---|
6863 | - yield (i, ss, ss.backend.storedir) |
---|
6864 | + yield (i, ss, ss.backend._storedir) |
---|
6865 | |
---|
6866 | def find_uri_shares(self, uri): |
---|
6867 | si = tahoe_uri.from_string(uri).get_storage_index() |
---|
6868 | hunk ./src/allmydata/test/no_network.py 384 |
---|
6869 | return shares |
---|
6870 | |
---|
6871 | def copy_share(self, from_share, uri, to_server): |
---|
6872 | - si = uri.from_string(self.uri).get_storage_index() |
---|
6873 | + si = tahoe_uri.from_string(uri).get_storage_index() |
---|
6874 | (i_shnum, i_serverid, i_sharefp) = from_share |
---|
6875 | shares_dir = to_server.backend.get_shareset(si)._sharehomedir |
---|
6876 | i_sharefp.copyTo(shares_dir.child(str(i_shnum))) |
---|
6877 | hunk ./src/allmydata/test/test_download.py 127 |
---|
6878 | |
---|
6879 | return d |
---|
6880 | |
---|
6881 | - def _write_shares(self, uri, shares): |
---|
6882 | - si = uri.from_string(uri).get_storage_index() |
---|
6883 | + def _write_shares(self, fileuri, shares): |
---|
6884 | + si = uri.from_string(fileuri).get_storage_index() |
---|
6885 | for i in shares: |
---|
6886 | shares_for_server = shares[i] |
---|
6887 | for shnum in shares_for_server: |
---|
6888 | hunk ./src/allmydata/test/test_hung_server.py 36 |
---|
6889 | |
---|
6890 | def _hang(self, servers, **kwargs): |
---|
6891 | for ss in servers: |
---|
6892 | - self.g.hang_server(ss.get_serverid(), **kwargs) |
---|
6893 | + self.g.hang_server(ss.original.get_serverid(), **kwargs) |
---|
6894 | |
---|
6895 | def _unhang(self, servers, **kwargs): |
---|
6896 | for ss in servers: |
---|
6897 | hunk ./src/allmydata/test/test_hung_server.py 40 |
---|
6898 | - self.g.unhang_server(ss.get_serverid(), **kwargs) |
---|
6899 | + self.g.unhang_server(ss.original.get_serverid(), **kwargs) |
---|
6900 | |
---|
6901 | def _hang_shares(self, shnums, **kwargs): |
---|
6902 | # hang all servers who are holding the given shares |
---|
6903 | hunk ./src/allmydata/test/test_hung_server.py 52 |
---|
6904 | hung_serverids.add(i_serverid) |
---|
6905 | |
---|
6906 | def _delete_all_shares_from(self, servers): |
---|
6907 | - serverids = [ss.get_serverid() for ss in servers] |
---|
6908 | + serverids = [ss.original.get_serverid() for ss in servers] |
---|
6909 | for (i_shnum, i_serverid, i_sharefp) in self.shares: |
---|
6910 | if i_serverid in serverids: |
---|
6911 | i_sharefp.remove() |
---|
6912 | hunk ./src/allmydata/test/test_hung_server.py 58 |
---|
6913 | |
---|
6914 | def _corrupt_all_shares_in(self, servers, corruptor_func): |
---|
6915 | - serverids = [ss.get_serverid() for ss in servers] |
---|
6916 | + serverids = [ss.original.get_serverid() for ss in servers] |
---|
6917 | for (i_shnum, i_serverid, i_sharefp) in self.shares: |
---|
6918 | if i_serverid in serverids: |
---|
6919 | self.corrupt_share((i_shnum, i_serverid, i_sharefp), corruptor_func) |
---|
6920 | hunk ./src/allmydata/test/test_hung_server.py 64 |
---|
6921 | |
---|
6922 | def _copy_all_shares_from(self, from_servers, to_server): |
---|
6923 | - serverids = [ss.get_serverid() for ss in from_servers] |
---|
6924 | + serverids = [ss.original.get_serverid() for ss in from_servers] |
---|
6925 | for (i_shnum, i_serverid, i_sharefp) in self.shares: |
---|
6926 | if i_serverid in serverids: |
---|
6927 | self.copy_share((i_shnum, i_serverid, i_sharefp), self.uri, to_server) |
---|
6928 | hunk ./src/allmydata/test/test_mutable.py 2991 |
---|
6929 | fso = debug.FindSharesOptions() |
---|
6930 | storage_index = base32.b2a(n.get_storage_index()) |
---|
6931 | fso.si_s = storage_index |
---|
6932 | - fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir))) |
---|
6933 | + fso.nodedirs = [unicode(storedir.parent().path) |
---|
6934 | for (i,ss,storedir) |
---|
6935 | in self.iterate_servers()] |
---|
6936 | fso.stdout = StringIO() |
---|
6937 | hunk ./src/allmydata/test/test_upload.py 818 |
---|
6938 | if share_number is not None: |
---|
6939 | self._copy_share_to_server(share_number, server_number) |
---|
6940 | |
---|
6941 | - |
---|
6942 | def _copy_share_to_server(self, share_number, server_number): |
---|
6943 | ss = self.g.servers_by_number[server_number] |
---|
6944 | hunk ./src/allmydata/test/test_upload.py 820 |
---|
6945 | - self.copy_share(self.shares[share_number], ss) |
---|
6946 | + self.copy_share(self.shares[share_number], self.uri, ss) |
---|
6947 | |
---|
6948 | def _setup_grid(self): |
---|
6949 | """ |
---|
6950 | } |
[docs/backends: document the configuration options for the pluggable backends scheme. refs #999
david-sarah@jacaranda.org**20110920171737
Ignore-this: 5947e864682a43cb04e557334cda7c19
] {
adddir ./docs/backends
addfile ./docs/backends/S3.rst
hunk ./docs/backends/S3.rst 1
+====================================================
+Storing Shares in Amazon Simple Storage Service (S3)
+====================================================
+
+S3 is a commercial storage service provided by Amazon, described at
+`<https://aws.amazon.com/s3/>`_.
+
+The Tahoe-LAFS storage server can be configured to store its shares in
+an S3 bucket, rather than on the local filesystem. To enable this, add the
+following keys to the server's ``tahoe.cfg`` file:
+
+``[storage]``
+
+``backend = s3``
+
+ This turns off the local filesystem backend and enables use of S3.
+
+``s3.access_key_id = (string, required)``
+``s3.secret_access_key = (string, required)``
+
+ These two give the storage server permission to access your Amazon
+ Web Services account, allowing it to upload and download shares
+ from S3.
+
+``s3.bucket = (string, required)``
+
+ This controls which bucket will be used to hold shares. The Tahoe-LAFS
+ storage server will only modify and access objects in the configured S3
+ bucket.
+
+``s3.url = (URL string, optional)``
+
+ This URL tells the storage server how to access the S3 service. It
+ defaults to ``http://s3.amazonaws.com``, but by setting it to something
+ else, you may be able to use some other S3-like service if it is
+ sufficiently compatible.
+
+``s3.max_space = (str, optional)``
+
+ This tells the server to limit how much space can be used in the S3
+ bucket. Before each share is uploaded, the server will ask S3 for the
+ current bucket usage, and will only accept the share if it does not cause
+ the usage to grow above this limit.
+
+ The string contains a number, with an optional case-insensitive scale
+ suffix like "K" or "M" or "G", and an optional "B" or "iB" suffix. So
+ "100MB", "100M", "100000000B", "100000000", and "100000kb" all mean the
+ same thing. Likewise, "1MiB", "1024KiB", and "1048576B" all mean the same
+ thing.
+
+ If ``s3.max_space`` is omitted, the default behavior is to allow
+ unlimited usage.
+
+
+Once configured, the WUI "storage server" page will provide information about
+how much space is being used and how many shares are being stored.
+
+
+Issues
+------
+
+Objects in an S3 bucket cannot be read for free. As a result, when Tahoe-LAFS
+is configured to store shares in S3 rather than on local disk, some common
+operations may behave differently:
+
+* Lease crawling/expiration is not yet implemented. As a result, shares will
+ be retained forever, and the Storage Server status web page will not show
+ information about the number of mutable/immutable shares present.
+
+* Enabling ``s3.max_space`` causes an extra S3 usage query to be sent for
+ each share upload, causing the upload process to run slightly slower and
+ incur more S3 request charges.
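For reference, a complete ``[storage]`` section using the S3 backend might look like the following (the credentials here are the placeholder examples from Amazon's own documentation, and the bucket name is made up, not real values):

    [storage]
    enabled = true
    backend = s3
    s3.access_key_id = AKIAIOSFODNN7EXAMPLE
    s3.secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
    s3.bucket = my-tahoe-shares
    s3.max_space = 500GB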
addfile ./docs/backends/disk.rst
hunk ./docs/backends/disk.rst 1
+====================================
+Storing Shares on a Local Filesystem
+====================================
+
+The "disk" backend stores shares on the local filesystem. Versions of
+Tahoe-LAFS <= 1.9.0 always stored shares in this way.
+
+``[storage]``
+
+``backend = disk``
+
+ This enables use of the disk backend, and is the default.
+
+``reserved_space = (str, optional)``
+
+ If provided, this value defines how much disk space is reserved: the
+ storage server will not accept any share that causes the amount of free
+ disk space to drop below this value. (The free space is measured by a
+ call to statvfs(2) on Unix, or GetDiskFreeSpaceEx on Windows, and is the
+ space available to the user account under which the storage server runs.)
+
+ This string contains a number, with an optional case-insensitive scale
+ suffix like "K" or "M" or "G", and an optional "B" or "iB" suffix. So
+ "100MB", "100M", "100000000B", "100000000", and "100000kb" all mean the
+ same thing. Likewise, "1MiB", "1024KiB", and "1048576B" all mean the same
+ thing.
+
+ "``tahoe create-node``" generates a tahoe.cfg with
+ "``reserved_space=1G``", but you may wish to raise, lower, or remove the
+ reservation to suit your needs.
+
+``expire.enabled =``
+
+``expire.mode =``
+
+``expire.override_lease_duration =``
+
+``expire.cutoff_date =``
+
+``expire.immutable =``
+
+``expire.mutable =``
+
+ These settings control garbage collection, causing the server to
+ delete shares that no longer have an up-to-date lease on them. Please
+ see `<garbage-collection.rst>`_ for full details.
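The space-quantity syntax accepted by ``reserved_space`` and ``s3.max_space`` can be modelled by a short parser along these lines (an illustrative sketch consistent with the documented examples, not the actual allmydata.util code):

    import re

    def parse_space_quantity(s):
        """Return a number of bytes for strings like '100MB', '1MiB', '78Gb'."""
        m = re.match(r"^\s*(\d+)\s*([kKmMgG]?)([iI]?[bB]?)\s*$", s)
        if not m:
            raise ValueError("unparseable space quantity %r" % (s,))
        digits, scale, suffix = m.groups()
        base = 1024 if suffix.lower() == "ib" else 1000  # "iB" means binary units
        power = {"": 0, "k": 1, "m": 2, "g": 3}[scale.lower()]
        return int(digits) * (base ** power)

    # e.g. parse_space_quantity("100MB") == parse_space_quantity("100000kb") == 100000000
    #      parse_space_quantity("1MiB") == parse_space_quantity("1024KiB") == 1048576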
hunk ./docs/configuration.rst 412
<http://tahoe-lafs.org/trac/tahoe-lafs/ticket/390>`_ for the current
status of this bug. The default value is ``False``.

-``reserved_space = (str, optional)``
+``backend = (string, optional)``

hunk ./docs/configuration.rst 414
- If provided, this value defines how much disk space is reserved: the
- storage server will not accept any share that causes the amount of free
- disk space to drop below this value. (The free space is measured by a
- call to statvfs(2) on Unix, or GetDiskFreeSpaceEx on Windows, and is the
- space available to the user account under which the storage server runs.)
+ Storage servers can store the data into different "backends". Clients
+ need not be aware of which backend is used by a server. The default
+ value is ``disk``.

hunk ./docs/configuration.rst 418
- This string contains a number, with an optional case-insensitive scale
- suffix like "K" or "M" or "G", and an optional "B" or "iB" suffix. So
- "100MB", "100M", "100000000B", "100000000", and "100000kb" all mean the
- same thing. Likewise, "1MiB", "1024KiB", and "1048576B" all mean the same
- thing.
+``backend = disk``

hunk ./docs/configuration.rst 420
- "``tahoe create-node``" generates a tahoe.cfg with
- "``reserved_space=1G``", but you may wish to raise, lower, or remove the
- reservation to suit your needs.
+ The default is to store shares on the local filesystem (in
+ BASEDIR/storage/shares/). For configuration details (including how to
+ reserve a minimum amount of free space), see `<backends/disk.rst>`_.

hunk ./docs/configuration.rst 424
-``expire.enabled =``
+``backend = S3``

hunk ./docs/configuration.rst 426
-``expire.mode =``
-
-``expire.override_lease_duration =``
-
-``expire.cutoff_date =``
-
-``expire.immutable =``
-
-``expire.mutable =``
-
- These settings control garbage collection, in which the server will
- delete shares that no longer have an up-to-date lease on them. Please see
- `<garbage-collection.rst>`_ for full details.
+ The storage server can store all shares to an Amazon Simple Storage
+ Service (S3) bucket. For configuration details, see `<backends/S3.rst>`_.


Running A Helper
}
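Schematically, the server-side dispatch on the ``backend`` option looks something like the following (a simplified sketch only; the real client.py wiring, and the S3 backend's module path and constructor arguments, may differ):

    from twisted.python.filepath import FilePath

    def create_backend(client, basedir):
        # 'backend = disk' is the default when the option is absent
        name = client.get_config("storage", "backend", "disk")
        storedir = FilePath(basedir).child("storage")
        if name == "disk":
            from allmydata.storage.backends.disk.disk_backend import DiskBackend
            return DiskBackend(storedir)
        elif name.lower() == "s3":
            # access keys, bucket name, etc. would be read from [storage] too
            from allmydata.storage.backends.s3.s3_backend import S3Backend
            return S3Backend(client.get_config("storage", "s3.bucket", None))
        else:
            raise ValueError("unknown storage backend %r" % (name,))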
[Fix some incorrect attribute accesses. refs #999
david-sarah@jacaranda.org**20110921031207
Ignore-this: f1ea4c3ea191f6d4b719afaebd2b2bcd
] {
hunk ./src/allmydata/client.py 258

backend = DiskBackend(storedir, readonly=readonly, reserved_space=reserved,
discard_storage=discard)
- ss = StorageServer(nodeid, backend, storedir,
+ ss = StorageServer(self.nodeid, backend, storedir,
stats_provider=self.stats_provider,
expiration_policy=expiration_policy)
self.add_service(ss)
hunk ./src/allmydata/interfaces.py 449
Returns the storage index.
"""

+ def get_storage_index_string():
+ """
+ Returns the base32-encoded storage index.
+ """
+
def get_shnum():
"""
Returns the share number.
hunk ./src/allmydata/storage/backends/disk/immutable.py 138
def get_storage_index(self):
return self._storageindex

+ def get_storage_index_string(self):
+ return si_b2a(self._storageindex)
+
def get_shnum(self):
return self._shnum

hunk ./src/allmydata/storage/backends/disk/mutable.py 119
def get_storage_index(self):
return self._storageindex

+ def get_storage_index_string(self):
+ return si_b2a(self._storageindex)
+
def get_shnum(self):
return self._shnum

hunk ./src/allmydata/storage/bucket.py 86
def __init__(self, ss, share):
self.ss = ss
self._share = share
- self.storageindex = share.storageindex
- self.shnum = share.shnum
+ self.storageindex = share.get_storage_index()
+ self.shnum = share.get_shnum()

def __repr__(self):
return "<%s %s %s>" % (self.__class__.__name__,
hunk ./src/allmydata/storage/expirer.py 6
from twisted.python import log as twlog

from allmydata.storage.crawler import ShareCrawler
-from allmydata.storage.common import si_b2a, UnknownMutableContainerVersionError, \
+from allmydata.storage.common import UnknownMutableContainerVersionError, \
UnknownImmutableContainerVersionError


hunk ./src/allmydata/storage/expirer.py 124
struct.error):
twlog.msg("lease-checker error processing %r" % (share,))
twlog.err()
- which = (si_b2a(share.storageindex), share.get_shnum())
+ which = (share.get_storage_index_string(), share.get_shnum())
self.state["cycle-to-date"]["corrupt-shares"].append(which)
wks = (1, 1, 1, "unknown")
would_keep_shares.append(wks)
hunk ./src/allmydata/storage/server.py 221
alreadygot = set()
for share in shareset.get_shares():
share.add_or_renew_lease(lease_info)
- alreadygot.add(share.shnum)
+ alreadygot.add(share.get_shnum())

for shnum in sharenums - alreadygot:
if shareset.has_incoming(shnum):
hunk ./src/allmydata/storage/server.py 324

try:
shareset = self.backend.get_shareset(storageindex)
- return shareset.readv(self, shares, readv)
+ return shareset.readv(shares, readv)
finally:
self.add_latency("readv", time.time() - start)

hunk ./src/allmydata/storage/shares.py 1
-#! /usr/bin/python
-
-from allmydata.storage.mutable import MutableShareFile
-from allmydata.storage.immutable import ShareFile
-
-def get_share_file(filename):
- f = open(filename, "rb")
- prefix = f.read(32)
- f.close()
- if prefix == MutableShareFile.MAGIC:
- return MutableShareFile(filename)
- # otherwise assume it's immutable
- return ShareFile(filename)
-
rmfile ./src/allmydata/storage/shares.py
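The removed get_share_file() helper's magic-prefix trick survives in the new backend-level get_share(): read the first 32 bytes of the container and compare them against the mutable container's magic. A FilePath-based sketch of just that detection step (illustrative; the real get_share also carries the storage index and share number, as the debug.py hunks below show):

    from allmydata.storage.backends.disk.mutable import MutableDiskShare

    def sniff_share_type(fp):
        """Return 'mutable' or 'immutable' for the share file at FilePath 'fp'."""
        f = fp.open("rb")
        try:
            prefix = f.read(32)
        finally:
            f.close()
        if prefix == MutableDiskShare.MAGIC:
            return "mutable"
        # otherwise assume it's immutable, as the old helper did
        return "immutable"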
hunk ./src/allmydata/test/no_network.py 387
si = tahoe_uri.from_string(uri).get_storage_index()
(i_shnum, i_serverid, i_sharefp) = from_share
shares_dir = to_server.backend.get_shareset(si)._sharehomedir
+ fileutil.fp_make_dirs(shares_dir)
i_sharefp.copyTo(shares_dir.child(str(i_shnum)))

def restore_all_shares(self, shares):
hunk ./src/allmydata/test/no_network.py 391
- for share, data in shares.items():
- share.home.setContent(data)
+ for sharepath, data in shares.items():
+ FilePath(sharepath).setContent(data)

def delete_share(self, (shnum, serverid, sharefp)):
sharefp.remove()
hunk ./src/allmydata/test/test_upload.py 744
servertoshnums = {} # k: server, v: set(shnum)

for i, c in self.g.servers_by_number.iteritems():
- for (dirp, dirns, fns) in os.walk(c.sharedir):
+ for (dirp, dirns, fns) in os.walk(c.backend._sharedir.path):
for fn in fns:
try:
sharenum = int(fn)
}
[docs/backends/S3.rst: remove Issues section. refs #999
david-sarah@jacaranda.org**20110921031625
Ignore-this: c83d8f52b790bc32488869e6ee1df8c2
] hunk ./docs/backends/S3.rst 57

Once configured, the WUI "storage server" page will provide information about
how much space is being used and how many shares are being stored.
-
-
-Issues
------
-
-Objects in an S3 bucket cannot be read for free. As a result, when Tahoe-LAFS
-is configured to store shares in S3 rather than on local disk, some common
-operations may behave differently:
-
-* Lease crawling/expiration is not yet implemented. As a result, shares will
- be retained forever, and the Storage Server status web page will not show
- information about the number of mutable/immutable shares present.
-
-* Enabling ``s3.max_space`` causes an extra S3 usage query to be sent for
- each share upload, causing the upload process to run slightly slower and
- incur more S3 request charges.
[docs/backends/S3.rst, disk.rst: describe type of space settings as 'quantity of space', not 'str'. refs #999
david-sarah@jacaranda.org**20110921031705
Ignore-this: a74ed8e01b0a1ab5f07a1487d7bf138
] {
hunk ./docs/backends/S3.rst 38
else, you may be able to use some other S3-like service if it is
sufficiently compatible.

-``s3.max_space = (str, optional)``
+``s3.max_space = (quantity of space, optional)``

This tells the server to limit how much space can be used in the S3
bucket. Before each share is uploaded, the server will ask S3 for the
hunk ./docs/backends/disk.rst 14

This enables use of the disk backend, and is the default.

-``reserved_space = (str, optional)``
+``reserved_space = (quantity of space, optional)``

If provided, this value defines how much disk space is reserved: the
storage server will not accept any share that causes the amount of free
}
[More fixes to tests needed for pluggable backends. refs #999
david-sarah@jacaranda.org**20110921184649
Ignore-this: 9be0d3a98e350fd4e17a07d2c00bb4ca
] {
hunk ./src/allmydata/scripts/debug.py 8
from twisted.python import usage, failure
from twisted.internet import defer
from twisted.scripts import trial as twisted_trial
+from twisted.python.filepath import FilePath


class DumpOptions(usage.Options):
hunk ./src/allmydata/scripts/debug.py 38
self['filename'] = argv_to_abspath(filename)

def dump_share(options):
- from allmydata.storage.mutable import MutableShareFile
+ from allmydata.storage.backends.disk.disk_backend import get_share
from allmydata.util.encodingutil import quote_output

out = options.stdout
hunk ./src/allmydata/scripts/debug.py 46
# check the version, to see if we have a mutable or immutable share
print >>out, "share filename: %s" % quote_output(options['filename'])

- f = open(options['filename'], "rb")
- prefix = f.read(32)
- f.close()
- if prefix == MutableShareFile.MAGIC:
- return dump_mutable_share(options)
- # otherwise assume it's immutable
- return dump_immutable_share(options)
-
-def dump_immutable_share(options):
- from allmydata.storage.immutable import ShareFile
+ fp = FilePath(options['filename'])
+ share = get_share("", 0, fp)
+ if share.sharetype == "mutable":
+ return dump_mutable_share(options, share)
+ else:
+ assert share.sharetype == "immutable", share.sharetype
+ return dump_immutable_share(options, share)

hunk ./src/allmydata/scripts/debug.py 53
+def dump_immutable_share(options, share):
out = options.stdout
hunk ./src/allmydata/scripts/debug.py 55
- f = ShareFile(options['filename'])
if not options["leases-only"]:
hunk ./src/allmydata/scripts/debug.py 56
- dump_immutable_chk_share(f, out, options)
- dump_immutable_lease_info(f, out)
+ dump_immutable_chk_share(share, out, options)
+ dump_immutable_lease_info(share, out)
print >>out
return 0

hunk ./src/allmydata/scripts/debug.py 166
return when


-def dump_mutable_share(options):
- from allmydata.storage.mutable import MutableShareFile
+def dump_mutable_share(options, m):
from allmydata.util import base32, idlib
out = options.stdout
hunk ./src/allmydata/scripts/debug.py 169
- m = MutableShareFile(options['filename'])
f = open(options['filename'], "rb")
WE, nodeid = m._read_write_enabler_and_nodeid(f)
num_extra_leases = m._read_num_extra_leases(f)
hunk ./src/allmydata/scripts/debug.py 641
/home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/9
/home/warner/testnet/node-2/storage/shares/44k/44kai1tui348689nrw8fjegc8c/2
"""
- from allmydata.storage.server import si_a2b, storage_index_to_dir
- from allmydata.util.encodingutil import listdir_unicode
+ from allmydata.storage.server import si_a2b
+ from allmydata.storage.backends.disk_backend import si_si2dir
+ from allmydata.util.encodingutil import quote_filepath

out = options.stdout
hunk ./src/allmydata/scripts/debug.py 646
- sharedir = storage_index_to_dir(si_a2b(options.si_s))
- for d in options.nodedirs:
- d = os.path.join(d, "storage/shares", sharedir)
- if os.path.exists(d):
- for shnum in listdir_unicode(d):
- print >>out, os.path.join(d, shnum)
+ si = si_a2b(options.si_s)
+ for nodedir in options.nodedirs:
+ sharedir = si_si2dir(nodedir.child("storage").child("shares"), si)
+ if sharedir.exists():
+ for sharefp in sharedir.children():
+ print >>out, quote_filepath(sharefp, quotemarks=False)

return 0

hunk ./src/allmydata/scripts/debug.py 878
print >>err, "Error processing %s" % quote_output(si_dir)
failure.Failure().printTraceback(err)

+
class CorruptShareOptions(usage.Options):
def getSynopsis(self):
return "Usage: tahoe debug corrupt-share SHARE_FILENAME"
hunk ./src/allmydata/scripts/debug.py 902
Obviously, this command should not be used in normal operation.
"""
return t
+
def parseArgs(self, filename):
self['filename'] = filename

hunk ./src/allmydata/scripts/debug.py 907
def corrupt_share(options):
+ do_corrupt_share(options.stdout, FilePath(options['filename']), options['offset'])
+
+def do_corrupt_share(out, fp, offset="block-random"):
import random
hunk ./src/allmydata/scripts/debug.py 911
- from allmydata.storage.mutable import MutableShareFile
- from allmydata.storage.immutable import ShareFile
+ from allmydata.storage.backends.disk.mutable import MutableDiskShare
+ from allmydata.storage.backends.disk.immutable import ImmutableDiskShare
from allmydata.mutable.layout import unpack_header
from allmydata.immutable.layout import ReadBucketProxy
hunk ./src/allmydata/scripts/debug.py 915
- out = options.stdout
- fn = options['filename']
- assert options["offset"] == "block-random", "other offsets not implemented"
+
+ assert offset == "block-random", "other offsets not implemented"
+
# first, what kind of share is it?

def flip_bit(start, end):
hunk ./src/allmydata/scripts/debug.py 924
offset = random.randrange(start, end)
bit = random.randrange(0, 8)
print >>out, "[%d..%d): %d.b%d" % (start, end, offset, bit)
- f = open(fn, "rb+")
- f.seek(offset)
- d = f.read(1)
- d = chr(ord(d) ^ 0x01)
- f.seek(offset)
- f.write(d)
- f.close()
+ f = fp.open("rb+")
+ try:
+ f.seek(offset)
+ d = f.read(1)
+ d = chr(ord(d) ^ 0x01)
+ f.seek(offset)
+ f.write(d)
+ finally:
+ f.close()

hunk ./src/allmydata/scripts/debug.py 934
- f = open(fn, "rb")
- prefix = f.read(32)
- f.close()
- if prefix == MutableShareFile.MAGIC:
- # mutable
- m = MutableShareFile(fn)
- f = open(fn, "rb")
- f.seek(m.DATA_OFFSET)
- data = f.read(2000)
- # make sure this slot contains an SMDF share
- assert data[0] == "\x00", "non-SDMF mutable shares not supported"
+ f = fp.open("rb")
+ try:
+ prefix = f.read(32)
+ finally:
f.close()
hunk ./src/allmydata/scripts/debug.py 939
+ if prefix == MutableDiskShare.MAGIC:
+ # mutable
+ m = MutableDiskShare("", 0, fp)
+ f = fp.open("rb")
+ try:
+ f.seek(m.DATA_OFFSET)
+ data = f.read(2000)
+ # make sure this slot contains an SMDF share
+ assert data[0] == "\x00", "non-SDMF mutable shares not supported"
+ finally:
+ f.close()

(version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
ig_datalen, offsets) = unpack_header(data)
hunk ./src/allmydata/scripts/debug.py 960
flip_bit(start, end)
else:
# otherwise assume it's immutable
- f = ShareFile(fn)
+ f = ImmutableDiskShare("", 0, fp)
bp = ReadBucketProxy(None, None, '')
offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
start = f._data_offset + offsets["data"]
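With do_corrupt_share() factored out as above, tests can flip a bit in a share in-process instead of shelling out to "tahoe debug corrupt-share"; for example (the share path here is hypothetical):

    from StringIO import StringIO
    from twisted.python.filepath import FilePath
    from allmydata.scripts import debug

    sharefp = FilePath("teststorage/shares/ab/abcdefgh/0")  # hypothetical share file
    # flips one random bit in the share's data block, logging which bit to the stream
    debug.do_corrupt_share(StringIO(), sharefp)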
hunk ./src/allmydata/storage/backends/base.py 92
(testv, datav, new_length) = test_and_write_vectors[sharenum]
if sharenum in shares:
if not shares[sharenum].check_testv(testv):
- self.log("testv failed: [%d]: %r" % (sharenum, testv))
+ storageserver.log("testv failed: [%d]: %r" % (sharenum, testv))
testv_is_good = False
break
else:
hunk ./src/allmydata/storage/backends/base.py 99
# compare the vectors against an empty share, in which all
# reads return empty strings
if not EmptyShare().check_testv(testv):
- self.log("testv failed (empty): [%d] %r" % (sharenum,
- testv))
+ storageserver.log("testv failed (empty): [%d] %r" % (sharenum, testv))
testv_is_good = False
break

hunk ./src/allmydata/test/test_cli.py 2892
# delete one, corrupt a second
shares = self.find_uri_shares(self.uri)
self.failUnlessReallyEqual(len(shares), 10)
- os.unlink(shares[0][2])
- cso = debug.CorruptShareOptions()
- cso.stdout = StringIO()
- cso.parseOptions([shares[1][2]])
+ shares[0][2].remove()
+ stdout = StringIO()
+ sharefile = shares[1][2]
storage_index = uri.from_string(self.uri).get_storage_index()
self._corrupt_share_line = " server %s, SI %s, shnum %d" % \
(base32.b2a(shares[1][1]),
hunk ./src/allmydata/test/test_cli.py 2900
base32.b2a(storage_index),
shares[1][0])
- debug.corrupt_share(cso)
+ debug.do_corrupt_share(stdout, sharefile)
d.addCallback(_clobber_shares)

d.addCallback(lambda ign: self.do_cli("check", "--verify", self.uri))
hunk ./src/allmydata/test/test_cli.py 3017
def _clobber_shares(ignored):
shares = self.find_uri_shares(self.uris[u"g\u00F6\u00F6d"])
self.failUnlessReallyEqual(len(shares), 10)
- os.unlink(shares[0][2])
+ shares[0][2].remove()

shares = self.find_uri_shares(self.uris["mutable"])
hunk ./src/allmydata/test/test_cli.py 3020
- cso = debug.CorruptShareOptions()
- cso.stdout = StringIO()
- cso.parseOptions([shares[1][2]])
+ stdout = StringIO()
+ sharefile = shares[1][2]
storage_index = uri.from_string(self.uris["mutable"]).get_storage_index()
self._corrupt_share_line = " corrupt: server %s, SI %s, shnum %d" % \
(base32.b2a(shares[1][1]),
hunk ./src/allmydata/test/test_cli.py 3027
base32.b2a(storage_index),
shares[1][0])
- debug.corrupt_share(cso)
+ debug.do_corrupt_share(stdout, sharefile)
d.addCallback(_clobber_shares)

# root
hunk ./src/allmydata/test/test_client.py 90
"enabled = true\n" + \
"reserved_space = 1000\n")
c = client.Client(basedir)
- self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 1000)
+ self.failUnlessEqual(c.getServiceNamed("storage").backend._reserved_space, 1000)

def test_reserved_2(self):
basedir = "client.Basic.test_reserved_2"
hunk ./src/allmydata/test/test_client.py 101
"enabled = true\n" + \
"reserved_space = 10K\n")
c = client.Client(basedir)
- self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 10*1000)
+ self.failUnlessEqual(c.getServiceNamed("storage").backend._reserved_space, 10*1000)

def test_reserved_3(self):
basedir = "client.Basic.test_reserved_3"
hunk ./src/allmydata/test/test_client.py 112
"enabled = true\n" + \
"reserved_space = 5mB\n")
c = client.Client(basedir)
- self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
+ self.failUnlessEqual(c.getServiceNamed("storage").backend._reserved_space,
5*1000*1000)

def test_reserved_4(self):
hunk ./src/allmydata/test/test_client.py 124
"enabled = true\n" + \
"reserved_space = 78Gb\n")
c = client.Client(basedir)
- self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
+ self.failUnlessEqual(c.getServiceNamed("storage").backend._reserved_space,
78*1000*1000*1000)

def test_reserved_bad(self):
hunk ./src/allmydata/test/test_client.py 136
"enabled = true\n" + \
"reserved_space = bogus\n")
c = client.Client(basedir)
- self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 0)
+ self.failUnlessEqual(c.getServiceNamed("storage").backend._reserved_space, 0)

def _permute(self, sb, key):
return [ s.get_serverid() for s in sb.get_servers_for_psi(key) ]
hunk ./src/allmydata/test/test_crawler.py 7
from twisted.trial import unittest
from twisted.application import service
from twisted.internet import defer
+from twisted.python.filepath import FilePath
from foolscap.api import eventually, fireEventually

from allmydata.util import fileutil, hashutil, pollmixin
hunk ./src/allmydata/test/test_crawler.py 13
from allmydata.storage.server import StorageServer, si_b2a
from allmydata.storage.crawler import ShareCrawler, TimeSliceExceeded
+from allmydata.storage.backends.disk.disk_backend import DiskBackend

from allmydata.test.test_storage import FakeCanary
from allmydata.test.common_util import StallMixin
hunk ./src/allmydata/test/test_crawler.py 115

def test_immediate(self):
self.basedir = "crawler/Basic/immediate"
- fileutil.make_dirs(self.basedir)
serverid = "\x00" * 20
hunk ./src/allmydata/test/test_crawler.py 116
- ss = StorageServer(self.basedir, serverid)
+ fp = FilePath(self.basedir)
+ backend = DiskBackend(fp)
+ ss = StorageServer(serverid, backend, fp)
ss.setServiceParent(self.s)

sis = [self.write(i, ss, serverid) for i in range(10)]
hunk ./src/allmydata/test/test_crawler.py 122
- statefile = os.path.join(self.basedir, "statefile")
+ statefp = fp.child("statefile")

hunk ./src/allmydata/test/test_crawler.py 124
- c = BucketEnumeratingCrawler(ss, statefile, allowed_cpu_percentage=.1)
+ c = BucketEnumeratingCrawler(backend, statefp, allowed_cpu_percentage=.1)
c.load_state()

c.start_current_prefix(time.time())
hunk ./src/allmydata/test/test_crawler.py 137
self.failUnlessEqual(sorted(sis), sorted(c.all_buckets))

# check that a new crawler picks up on the state file properly
- c2 = BucketEnumeratingCrawler(ss, statefile)
+ c2 = BucketEnumeratingCrawler(backend, statefp)
c2.load_state()

c2.start_current_prefix(time.time())
hunk ./src/allmydata/test/test_crawler.py 145

def test_service(self):
self.basedir = "crawler/Basic/service"
- fileutil.make_dirs(self.basedir)
serverid = "\x00" * 20
hunk ./src/allmydata/test/test_crawler.py 146
- ss = StorageServer(self.basedir, serverid)
+ fp = FilePath(self.basedir)
+ backend = DiskBackend(fp)
+ ss = StorageServer(serverid, backend, fp)
ss.setServiceParent(self.s)

sis = [self.write(i, ss, serverid) for i in range(10)]
hunk ./src/allmydata/test/test_crawler.py 153

- statefile = os.path.join(self.basedir, "statefile")
- c = BucketEnumeratingCrawler(ss, statefile)
+ statefp = fp.child("statefile")
+ c = BucketEnumeratingCrawler(backend, statefp)
c.setServiceParent(self.s)

# it should be legal to call get_state() and get_progress() right
hunk ./src/allmydata/test/test_crawler.py 174

def test_paced(self):
self.basedir = "crawler/Basic/paced"
- fileutil.make_dirs(self.basedir)
serverid = "\x00" * 20
hunk ./src/allmydata/test/test_crawler.py 175
- ss = StorageServer(self.basedir, serverid)
+ fp = FilePath(self.basedir)
+ backend = DiskBackend(fp)
+ ss = StorageServer(serverid, backend, fp)
ss.setServiceParent(self.s)

# put four buckets in each prefixdir
hunk ./src/allmydata/test/test_crawler.py 186
for tail in range(4):
sis.append(self.write(i, ss, serverid, tail))

- statefile = os.path.join(self.basedir, "statefile")
+ statefp = fp.child("statefile")

- c = PacedCrawler(ss, statefile)
+ c = PacedCrawler(backend, statefp)
c.load_state()
try:
c.start_current_prefix(time.time())
hunk ./src/allmydata/test/test_crawler.py 213
del c

# start a new crawler, it should start from the beginning
- c = PacedCrawler(ss, statefile)
+ c = PacedCrawler(backend, statefp)
c.load_state()
try:
c.start_current_prefix(time.time())
hunk ./src/allmydata/test/test_crawler.py 226
c.cpu_slice = PacedCrawler.cpu_slice

# a third crawler should pick up from where it left off
- c2 = PacedCrawler(ss, statefile)
+ c2 = PacedCrawler(backend, statefp)
c2.all_buckets = c.all_buckets[:]
c2.load_state()
c2.countdown = -1
hunk ./src/allmydata/test/test_crawler.py 237

# now stop it at the end of a bucket (countdown=4), to exercise a
# different place that checks the time
- c = PacedCrawler(ss, statefile)
+ c = PacedCrawler(backend, statefp)
c.load_state()
c.countdown = 4
try:
hunk ./src/allmydata/test/test_crawler.py 256

# stop it again at the end of the bucket, check that a new checker
# picks up correctly
- c = PacedCrawler(ss, statefile)
+ c = PacedCrawler(backend, statefp)
c.load_state()
c.countdown = 4
try:
hunk ./src/allmydata/test/test_crawler.py 266
# that should stop at the end of one of the buckets.
c.save_state()

- c2 = PacedCrawler(ss, statefile)
+ c2 = PacedCrawler(backend, statefp)
c2.all_buckets = c.all_buckets[:]
c2.load_state()
c2.countdown = -1
hunk ./src/allmydata/test/test_crawler.py 277

def test_paced_service(self):
self.basedir = "crawler/Basic/paced_service"
- fileutil.make_dirs(self.basedir)
serverid = "\x00" * 20
hunk ./src/allmydata/test/test_crawler.py 278
- ss = StorageServer(self.basedir, serverid)
+ fp = FilePath(self.basedir)
+ backend = DiskBackend(fp)
+ ss = StorageServer(serverid, backend, fp)
ss.setServiceParent(self.s)

sis = [self.write(i, ss, serverid) for i in range(10)]
hunk ./src/allmydata/test/test_crawler.py 285

- statefile = os.path.join(self.basedir, "statefile")
- c = PacedCrawler(ss, statefile)
+ statefp = fp.child("statefile")
+ c = PacedCrawler(backend, statefp)

did_check_progress = [False]
def check_progress():
hunk ./src/allmydata/test/test_crawler.py 345
# and read the stdout when it runs.

self.basedir = "crawler/Basic/cpu_usage"
- fileutil.make_dirs(self.basedir)
serverid = "\x00" * 20
hunk ./src/allmydata/test/test_crawler.py 346
- ss = StorageServer(self.basedir, serverid)
+ fp = FilePath(self.basedir)
+ backend = DiskBackend(fp)
+ ss = StorageServer(serverid, backend, fp)
ss.setServiceParent(self.s)

for i in range(10):
hunk ./src/allmydata/test/test_crawler.py 354
self.write(i, ss, serverid)

- statefile = os.path.join(self.basedir, "statefile")
- c = ConsumingCrawler(ss, statefile)
+ statefp = fp.child("statefile")
+ c = ConsumingCrawler(backend, statefp)
c.setServiceParent(self.s)

# this will run as fast as it can, consuming about 50ms per call to
hunk ./src/allmydata/test/test_crawler.py 391

def test_empty_subclass(self):
self.basedir = "crawler/Basic/empty_subclass"
- fileutil.make_dirs(self.basedir)
serverid = "\x00" * 20
hunk ./src/allmydata/test/test_crawler.py 392
- ss = StorageServer(self.basedir, serverid)
+ fp = FilePath(self.basedir)
+ backend = DiskBackend(fp)
+ ss = StorageServer(serverid, backend, fp)
ss.setServiceParent(self.s)

for i in range(10):
hunk ./src/allmydata/test/test_crawler.py 400
self.write(i, ss, serverid)

- statefile = os.path.join(self.basedir, "statefile")
- c = ShareCrawler(ss, statefile)
+ statefp = fp.child("statefile")
+ c = ShareCrawler(backend, statefp)
c.slow_start = 0
c.setServiceParent(self.s)


hunk ./src/allmydata/test/test_crawler.py 417
d.addCallback(_done)
return d

-
def test_oneshot(self):
self.basedir = "crawler/Basic/oneshot"
hunk ./src/allmydata/test/test_crawler.py 419
- fileutil.make_dirs(self.basedir)
serverid = "\x00" * 20
hunk ./src/allmydata/test/test_crawler.py 420
- ss = StorageServer(self.basedir, serverid)
+ fp = FilePath(self.basedir)
+ backend = DiskBackend(fp)
+ ss = StorageServer(serverid, backend, fp)
ss.setServiceParent(self.s)

for i in range(30):
hunk ./src/allmydata/test/test_crawler.py 428
self.write(i, ss, serverid)

- statefile = os.path.join(self.basedir, "statefile")
- c = OneShotCrawler(ss, statefile)
+ statefp = fp.child("statefile")
+ c = OneShotCrawler(backend, statefp)
c.setServiceParent(self.s)

d = c.finished_d
hunk ./src/allmydata/test/test_crawler.py 447
self.failUnlessEqual(s["current-cycle"], None)
d.addCallback(_check)
return d
-
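All of the converted crawler tests above follow one construction pattern: build a DiskBackend on a FilePath, pass it (with the serverid) to StorageServer, and give crawlers the backend plus a FilePath for their state. Distilled into a sketch (the directory name is arbitrary):

    from twisted.python.filepath import FilePath
    from allmydata.storage.server import StorageServer
    from allmydata.storage.crawler import ShareCrawler
    from allmydata.storage.backends.disk.disk_backend import DiskBackend

    fp = FilePath("crawler/Basic/example")        # arbitrary test directory
    backend = DiskBackend(fp)
    ss = StorageServer("\x00" * 20, backend, fp)  # serverid, backend, storedir
    c = ShareCrawler(backend, fp.child("statefile"))
    c.load_state()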
---|
7871 | hunk ./src/allmydata/test/test_deepcheck.py 23 |
---|
7872 | ShouldFailMixin |
---|
7873 | from allmydata.test.common_util import StallMixin |
---|
7874 | from allmydata.test.no_network import GridTestMixin |
---|
7875 | +from allmydata.scripts import debug |
---|
7876 | + |
---|
7877 | |
---|
7878 | timeout = 2400 # One of these took 1046.091s on Zandr's ARM box. |
---|
7879 | |
---|
7880 | hunk ./src/allmydata/test/test_deepcheck.py 905 |
---|
7881 | d.addErrback(self.explain_error) |
---|
7882 | return d |
---|
7883 | |
---|
7884 | - |
---|
7885 | - |
---|
7886 | def set_up_damaged_tree(self): |
---|
7887 | # 6.4s |
---|
7888 | |
---|
7889 | hunk ./src/allmydata/test/test_deepcheck.py 989 |
---|
7890 | |
---|
7891 | return d |
---|
7892 | |
---|
7893 | - def _run_cli(self, argv): |
---|
7894 | - stdout, stderr = StringIO(), StringIO() |
---|
7895 | - # this can only do synchronous operations |
---|
7896 | - assert argv[0] == "debug" |
---|
7897 | - runner.runner(argv, run_by_human=False, stdout=stdout, stderr=stderr) |
---|
7898 | - return stdout.getvalue() |
---|
7899 | - |
---|
7900 | def _delete_some_shares(self, node): |
---|
7901 | self.delete_shares_numbered(node.get_uri(), [0,1]) |
---|
7902 | |
---|
7903 | hunk ./src/allmydata/test/test_deepcheck.py 995 |
---|
7904 | def _corrupt_some_shares(self, node): |
---|
7905 | for (shnum, serverid, sharefile) in self.find_uri_shares(node.get_uri()): |
---|
7906 | if shnum in (0,1): |
---|
7907 | - self._run_cli(["debug", "corrupt-share", sharefile]) |
---|
7908 | + debug.do_corrupt_share(StringIO(), sharefile) |
---|
7909 | |
---|
7910 | def _delete_most_shares(self, node): |
---|
7911 | self.delete_shares_numbered(node.get_uri(), range(1,10)) |
---|
7912 | hunk ./src/allmydata/test/test_deepcheck.py 1000 |
---|
7913 | |
---|
7914 | - |
---|
7915 | def check_is_healthy(self, cr, where): |
---|
7916 | try: |
---|
7917 | self.failUnless(ICheckResults.providedBy(cr), (cr, type(cr), where)) |
---|
7918 | hunk ./src/allmydata/test/test_download.py 134 |
---|
7919 | for shnum in shares_for_server: |
---|
7920 | share_dir = self.get_server(i).backend.get_shareset(si)._sharehomedir |
---|
7921 | fileutil.fp_make_dirs(share_dir) |
---|
7922 | - share_dir.child(str(shnum)).setContent(shares[shnum]) |
---|
7923 | + share_dir.child(str(shnum)).setContent(shares_for_server[shnum]) |
---|
7924 | |
---|
7925 | def load_shares(self, ignored=None): |
---|
7926 | # this uses the data generated by create_shares() to populate the |
---|
7927 | hunk ./src/allmydata/test/test_hung_server.py 32 |
---|
7928 | |
---|
7929 | def _break(self, servers): |
---|
7930 | for ss in servers: |
---|
7931 | - self.g.break_server(ss.get_serverid()) |
---|
7932 | + self.g.break_server(ss.original.get_serverid()) |
---|
7933 | |
---|
7934 | def _hang(self, servers, **kwargs): |
---|
7935 | for ss in servers: |
---|
7936 | hunk ./src/allmydata/test/test_hung_server.py 67 |
---|
7937 | serverids = [ss.original.get_serverid() for ss in from_servers] |
---|
7938 | for (i_shnum, i_serverid, i_sharefp) in self.shares: |
---|
7939 | if i_serverid in serverids: |
---|
7940 | - self.copy_share((i_shnum, i_serverid, i_sharefp), self.uri, to_server) |
---|
7941 | + self.copy_share((i_shnum, i_serverid, i_sharefp), self.uri, to_server.original) |
---|
7942 | |
---|
7943 | self.shares = self.find_uri_shares(self.uri) |
---|
7944 | |
---|
7945 | hunk ./src/allmydata/test/test_mutable.py 3670 |
---|
7946 | # Now execute each assignment by writing the storage. |
---|
7947 | for (share, servernum) in assignments: |
---|
7948 | sharedata = base64.b64decode(self.sdmf_old_shares[share]) |
---|
7949 | - storage_dir = self.get_server(servernum).backend.get_shareset(si).sharehomedir |
---|
7950 | + storage_dir = self.get_server(servernum).backend.get_shareset(si)._sharehomedir |
---|
7951 | fileutil.fp_make_dirs(storage_dir) |
---|
7952 | storage_dir.child("%d" % share).setContent(sharedata) |
---|
7953 | # ...and verify that the shares are there. |
---|
7954 | hunk ./src/allmydata/test/test_no_network.py 10 |
---|
7955 | from allmydata.immutable.upload import Data |
---|
7956 | from allmydata.util.consumer import download_to_data |
---|
7957 | |
---|
7958 | + |
---|
7959 | class Harness(unittest.TestCase): |
---|
7960 | def setUp(self): |
---|
7961 | self.s = service.MultiService() |
---|
hunk ./src/allmydata/test/test_storage.py 1
-import time, os.path, platform, stat, re, simplejson, struct, shutil
+import time, os.path, platform, stat, re, simplejson, struct, shutil, itertools

import mock

hunk ./src/allmydata/test/test_storage.py 6
from twisted.trial import unittest
-
from twisted.internet import defer
from twisted.application import service
hunk ./src/allmydata/test/test_storage.py 8
+from twisted.python.filepath import FilePath
from foolscap.api import fireEventually
hunk ./src/allmydata/test/test_storage.py 10
-import itertools
+
from allmydata import interfaces
from allmydata.util import fileutil, hashutil, base32, pollmixin, time_format
from allmydata.storage.server import StorageServer
hunk ./src/allmydata/test/test_storage.py 14
+from allmydata.storage.backends.disk.disk_backend import DiskBackend
from allmydata.storage.backends.disk.mutable import MutableDiskShare
from allmydata.storage.bucket import BucketWriter, BucketReader
from allmydata.storage.common import DataTooLargeError, \
hunk ./src/allmydata/test/test_storage.py 310
        return self.sparent.stopService()

    def workdir(self, name):
-        basedir = os.path.join("storage", "Server", name)
-        return basedir
+        return FilePath("storage").child("Server").child(name)

    def create(self, name, reserved_space=0, klass=StorageServer):
        workdir = self.workdir(name)
hunk ./src/allmydata/test/test_storage.py 314
-        ss = klass(workdir, "\x00" * 20, reserved_space=reserved_space,
+        backend = DiskBackend(workdir, readonly=False, reserved_space=reserved_space)
+        ss = klass("\x00" * 20, backend, workdir,
                   stats_provider=FakeStatsProvider())
        ss.setServiceParent(self.sparent)
        return ss
hunk ./src/allmydata/test/test_storage.py 1386

    def tearDown(self):
        self.sparent.stopService()
-        shutil.rmtree(self.workdir("MDMFProxies storage test server"))
+        fileutil.fp_remove(self.workdir("MDMFProxies storage test server"))


    def write_enabler(self, we_tag):
hunk ./src/allmydata/test/test_storage.py 2781
        return self.sparent.stopService()

    def workdir(self, name):
-        basedir = os.path.join("storage", "Server", name)
-        return basedir
+        return FilePath("storage").child("Server").child(name)

    def create(self, name):
        workdir = self.workdir(name)
hunk ./src/allmydata/test/test_storage.py 2785
-        ss = StorageServer(workdir, "\x00" * 20)
+        backend = DiskBackend(workdir)
+        ss = StorageServer("\x00" * 20, backend, workdir)
        ss.setServiceParent(self.sparent)
        return ss

hunk ./src/allmydata/test/test_storage.py 4061
        }

        basedir = "storage/WebStatus/status_right_disk_stats"
-        fileutil.make_dirs(basedir)
-        ss = StorageServer(basedir, "\x00" * 20, reserved_space=reserved_space)
-        expecteddir = ss.sharedir
+        fp = FilePath(basedir)
+        backend = DiskBackend(fp, readonly=False, reserved_space=reserved_space)
+        ss = StorageServer("\x00" * 20, backend, fp)
+        expecteddir = backend._sharedir
        ss.setServiceParent(self.s)
        w = StorageStatus(ss)
        html = w.renderSynchronously()
hunk ./src/allmydata/test/test_storage.py 4084

    def test_readonly(self):
        basedir = "storage/WebStatus/readonly"
-        fileutil.make_dirs(basedir)
-        ss = StorageServer(basedir, "\x00" * 20, readonly_storage=True)
+        fp = FilePath(basedir)
+        backend = DiskBackend(fp, readonly=True)
+        ss = StorageServer("\x00" * 20, backend, fp)
        ss.setServiceParent(self.s)
        w = StorageStatus(ss)
        html = w.renderSynchronously()
hunk ./src/allmydata/test/test_storage.py 4096

    def test_reserved(self):
        basedir = "storage/WebStatus/reserved"
-        fileutil.make_dirs(basedir)
-        ss = StorageServer(basedir, "\x00" * 20, reserved_space=10e6)
-        ss.setServiceParent(self.s)
-        w = StorageStatus(ss)
-        html = w.renderSynchronously()
-        self.failUnlessIn("<h1>Storage Server Status</h1>", html)
-        s = remove_tags(html)
-        self.failUnlessIn("Reserved space: - 10.00 MB (10000000)", s)
-
-    def test_huge_reserved(self):
-        basedir = "storage/WebStatus/reserved"
-        fileutil.make_dirs(basedir)
-        ss = StorageServer(basedir, "\x00" * 20, reserved_space=10e6)
+        fp = FilePath(basedir)
+        backend = DiskBackend(fp, readonly=False, reserved_space=10e6)
+        ss = StorageServer("\x00" * 20, backend, fp)
        ss.setServiceParent(self.s)
        w = StorageStatus(ss)
        html = w.renderSynchronously()
hunk ./src/allmydata/test/test_upload.py 3
# -*- coding: utf-8 -*-

-import os, shutil
+import os
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.python.failure import Failure
hunk ./src/allmydata/test/test_upload.py 14
from allmydata import uri, monitor, client
from allmydata.immutable import upload, encode
from allmydata.interfaces import FileTooLargeError, UploadUnhappinessError
-from allmydata.util import log
+from allmydata.util import log, fileutil
from allmydata.util.assertutil import precondition
from allmydata.util.deferredutil import DeferredListShouldSucceed
from allmydata.test.no_network import GridTestMixin
hunk ./src/allmydata/test/test_upload.py 972
                                        readonly=True))
        # Remove the first share from server 0.
        def _remove_share_0_from_server_0():
-            share_location = self.shares[0][2]
-            os.remove(share_location)
+            self.shares[0][2].remove()
        d.addCallback(lambda ign:
            _remove_share_0_from_server_0())
        # Set happy = 4 in the client.
hunk ./src/allmydata/test/test_upload.py 1847
            self._copy_share_to_server(3, 1)
            storedir = self.get_serverdir(0)
            # remove the storedir, wiping out any existing shares
-            shutil.rmtree(storedir)
+            fileutil.fp_remove(storedir)
            # create an empty storedir to replace the one we just removed
hunk ./src/allmydata/test/test_upload.py 1849
-            os.mkdir(storedir)
+            storedir.mkdir()
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
hunk ./src/allmydata/test/test_upload.py 1888
            self._copy_share_to_server(3, 1)
            storedir = self.get_serverdir(0)
            # remove the storedir, wiping out any existing shares
-            shutil.rmtree(storedir)
+            fileutil.fp_remove(storedir)
            # create an empty storedir to replace the one we just removed
hunk ./src/allmydata/test/test_upload.py 1890
-            os.mkdir(storedir)
+            storedir.mkdir()
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
hunk ./src/allmydata/test/test_web.py 4870
        d.addErrback(self.explain_web_error)
        return d

-    def _assert_leasecount(self, ignored, which, expected):
+    def _assert_leasecount(self, which, expected):
        lease_counts = self.count_leases(self.uris[which])
        for (fn, num_leases) in lease_counts:
            if num_leases != expected:
hunk ./src/allmydata/test/test_web.py 4903
            self.fileurls[which] = "uri/" + urllib.quote(self.uris[which])
        d.addCallback(_compute_fileurls)

-        d.addCallback(self._assert_leasecount, "one", 1)
-        d.addCallback(self._assert_leasecount, "two", 1)
-        d.addCallback(self._assert_leasecount, "mutable", 1)
+        d.addCallback(lambda ign: self._assert_leasecount("one", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("two", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("mutable", 1))

        d.addCallback(self.CHECK, "one", "t=check") # no add-lease
        def _got_html_good(res):
hunk ./src/allmydata/test/test_web.py 4913
            self.failIf("Not Healthy" in res, res)
        d.addCallback(_got_html_good)

-        d.addCallback(self._assert_leasecount, "one", 1)
-        d.addCallback(self._assert_leasecount, "two", 1)
-        d.addCallback(self._assert_leasecount, "mutable", 1)
+        d.addCallback(lambda ign: self._assert_leasecount("one", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("two", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("mutable", 1))

        # this CHECK uses the original client, which uses the same
        # lease-secrets, so it will just renew the original lease
hunk ./src/allmydata/test/test_web.py 4922
        d.addCallback(self.CHECK, "one", "t=check&add-lease=true")
        d.addCallback(_got_html_good)

-        d.addCallback(self._assert_leasecount, "one", 1)
-        d.addCallback(self._assert_leasecount, "two", 1)
-        d.addCallback(self._assert_leasecount, "mutable", 1)
+        d.addCallback(lambda ign: self._assert_leasecount("one", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("two", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("mutable", 1))

        # this CHECK uses an alternate client, which adds a second lease
        d.addCallback(self.CHECK, "one", "t=check&add-lease=true", clientnum=1)
hunk ./src/allmydata/test/test_web.py 4930
        d.addCallback(_got_html_good)

-        d.addCallback(self._assert_leasecount, "one", 2)
-        d.addCallback(self._assert_leasecount, "two", 1)
-        d.addCallback(self._assert_leasecount, "mutable", 1)
+        d.addCallback(lambda ign: self._assert_leasecount("one", 2))
+        d.addCallback(lambda ign: self._assert_leasecount("two", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("mutable", 1))

        d.addCallback(self.CHECK, "mutable", "t=check&add-lease=true")
        d.addCallback(_got_html_good)
hunk ./src/allmydata/test/test_web.py 4937

-        d.addCallback(self._assert_leasecount, "one", 2)
-        d.addCallback(self._assert_leasecount, "two", 1)
-        d.addCallback(self._assert_leasecount, "mutable", 1)
+        d.addCallback(lambda ign: self._assert_leasecount("one", 2))
+        d.addCallback(lambda ign: self._assert_leasecount("two", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("mutable", 1))

        d.addCallback(self.CHECK, "mutable", "t=check&add-lease=true",
                      clientnum=1)
hunk ./src/allmydata/test/test_web.py 4945
        d.addCallback(_got_html_good)

-        d.addCallback(self._assert_leasecount, "one", 2)
-        d.addCallback(self._assert_leasecount, "two", 1)
-        d.addCallback(self._assert_leasecount, "mutable", 2)
+        d.addCallback(lambda ign: self._assert_leasecount("one", 2))
+        d.addCallback(lambda ign: self._assert_leasecount("two", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("mutable", 2))

        d.addErrback(self.explain_web_error)
        return d
hunk ./src/allmydata/test/test_web.py 4989
            self.failUnlessReallyEqual(len(units), 4+1)
        d.addCallback(_done)

-        d.addCallback(self._assert_leasecount, "root", 1)
-        d.addCallback(self._assert_leasecount, "one", 1)
-        d.addCallback(self._assert_leasecount, "mutable", 1)
+        d.addCallback(lambda ign: self._assert_leasecount("root", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("one", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("mutable", 1))

        d.addCallback(self.CHECK, "root", "t=stream-deep-check&add-lease=true")
        d.addCallback(_done)
hunk ./src/allmydata/test/test_web.py 4996

-        d.addCallback(self._assert_leasecount, "root", 1)
-        d.addCallback(self._assert_leasecount, "one", 1)
-        d.addCallback(self._assert_leasecount, "mutable", 1)
+        d.addCallback(lambda ign: self._assert_leasecount("root", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("one", 1))
+        d.addCallback(lambda ign: self._assert_leasecount("mutable", 1))

        d.addCallback(self.CHECK, "root", "t=stream-deep-check&add-lease=true",
                      clientnum=1)
hunk ./src/allmydata/test/test_web.py 5004
        d.addCallback(_done)

-        d.addCallback(self._assert_leasecount, "root", 2)
-        d.addCallback(self._assert_leasecount, "one", 2)
-        d.addCallback(self._assert_leasecount, "mutable", 2)
+        d.addCallback(lambda ign: self._assert_leasecount("root", 2))
+        d.addCallback(lambda ign: self._assert_leasecount("one", 2))
+        d.addCallback(lambda ign: self._assert_leasecount("mutable", 2))

        d.addErrback(self.explain_web_error)
        return d
}
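
(A note on the test_web.py hunks above: _assert_leasecount no longer receives the previous Deferred result as a throwaway first argument, so each call site now wraps it in a lambda that discards that result. A minimal sketch of the two calling conventions, using a hypothetical stand-in function rather than the real test class:

    from twisted.internet import defer

    def assert_leasecount(which, expected):
        # hypothetical stand-in for the test method; purely illustrative
        print "checking lease count for %r, expecting %d" % (which, expected)

    d = defer.succeed(None)
    # Old convention: addCallback passes the previous result as the first
    # positional argument, so the method needed an 'ignored' parameter:
    #     d.addCallback(self._assert_leasecount, "one", 1)
    # New convention: discard the result explicitly in a lambda:
    d.addCallback(lambda ign: assert_leasecount("one", 1))

The lambda form also makes the argument binding explicit at the call site instead of relying on addCallback's extra-argument passing.)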
[Fix more shallow bugs, mainly FilePathification. Also, remove the max_space_per_bucket parameter from BucketWriter since it can be obtained from the _max_size attribute of the share (via a new get_allocated_size() accessor). refs #999
david-sarah@jacaranda.org**20110921221421
 Ignore-this: 600e3ccef8533aa43442fa576c7d88cf
] {
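
(The BucketWriter change described in this patch name amounts to moving the size bookkeeping onto the share object. A minimal sketch of the resulting shape, using simplified stand-in classes, not the real Tahoe-LAFS ones, which also handle leases, canaries, and disk I/O:

    class ShareStub(object):
        # stand-in for ImmutableDiskShare's size bookkeeping only
        def __init__(self, max_size):
            self._max_size = max_size  # fixed when the share is created

        def get_allocated_size(self):
            return self._max_size

    class BucketWriterStub(object):
        # after this patch, BucketWriter takes no separate max_size parameter
        def __init__(self, share):
            self._share = share

        def allocated_size(self):
            # delegate to the share instead of storing a duplicate copy
            return self._share.get_allocated_size()

    assert BucketWriterStub(ShareStub(1000)).allocated_size() == 1000

The hunks below make exactly this delegation in bucket.py and add the accessor to immutable.py.)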
hunk ./src/allmydata/scripts/debug.py 642
    /home/warner/testnet/node-2/storage/shares/44k/44kai1tui348689nrw8fjegc8c/2
    """
    from allmydata.storage.server import si_a2b
-    from allmydata.storage.backends.disk_backend import si_si2dir
+    from allmydata.storage.backends.disk.disk_backend import si_si2dir
    from allmydata.util.encodingutil import quote_filepath

    out = options.stdout
hunk ./src/allmydata/scripts/debug.py 648
    si = si_a2b(options.si_s)
    for nodedir in options.nodedirs:
-        sharedir = si_si2dir(nodedir.child("storage").child("shares"), si)
+        sharedir = si_si2dir(FilePath(nodedir).child("storage").child("shares"), si)
        if sharedir.exists():
            for sharefp in sharedir.children():
                print >>out, quote_filepath(sharefp, quotemarks=False)
hunk ./src/allmydata/storage/backends/disk/disk_backend.py 189
        incominghome = self._incominghomedir.child(str(shnum))
        immsh = ImmutableDiskShare(self.get_storage_index(), shnum, incominghome, finalhome,
                                   max_size=max_space_per_bucket)
-        bw = BucketWriter(storageserver, immsh, max_space_per_bucket, lease_info, canary)
+        bw = BucketWriter(storageserver, immsh, lease_info, canary)
        if self._discard_storage:
            bw.throw_out_all_data = True
        return bw
hunk ./src/allmydata/storage/backends/disk/immutable.py 147
    def unlink(self):
        self._home.remove()

+    def get_allocated_size(self):
+        return self._max_size
+
    def get_size(self):
        return self._home.getsize()

hunk ./src/allmydata/storage/bucket.py 15
class BucketWriter(Referenceable):
    implements(RIBucketWriter)

-    def __init__(self, ss, immutableshare, max_size, lease_info, canary):
+    def __init__(self, ss, immutableshare, lease_info, canary):
        self.ss = ss
hunk ./src/allmydata/storage/bucket.py 17
-        self._max_size = max_size # don't allow the client to write more than this
        self._canary = canary
        self._disconnect_marker = canary.notifyOnDisconnect(self._disconnected)
        self.closed = False
hunk ./src/allmydata/storage/bucket.py 27
        self._share.add_lease(lease_info)

    def allocated_size(self):
-        return self._max_size
+        return self._share.get_allocated_size()

    def remote_write(self, offset, data):
        start = time.time()
hunk ./src/allmydata/storage/crawler.py 480
        self.state["bucket-counts"][cycle] = {}
        self.state["bucket-counts"][cycle][prefix] = len(sharesets)
        if prefix in self.prefixes[:self.num_sample_prefixes]:
-            self.state["storage-index-samples"][prefix] = (cycle, sharesets)
+            si_strings = [shareset.get_storage_index_string() for shareset in sharesets]
+            self.state["storage-index-samples"][prefix] = (cycle, si_strings)

    def finished_cycle(self, cycle):
        last_counts = self.state["bucket-counts"].get(cycle, [])
hunk ./src/allmydata/storage/expirer.py 281
        # copy() needs to become a deepcopy
        h["space-recovered"] = s["space-recovered"].copy()
---|