Changeset 3668cb3 in trunk
Timestamp:  2011-08-01T22:43:17Z
Branches:   master
Children:   884df6f
Parents:    550d67f
Location:   src/allmydata
Files:      4 edited
src/allmydata/immutable/downloader/share.py (r550d67f → r3668cb3)

@@ -124,7 +124,6 @@
         ht = IncompleteHashTree(N)
         num_share_hashes = len(ht.needed_hashes(0, include_leaf=True))
-        wbp = make_write_bucket_proxy(None, share_size, r["block_size"],
-                                      r["num_segments"], num_share_hashes, 0,
-                                      None)
+        wbp = make_write_bucket_proxy(None, None, share_size, r["block_size"],
+                                      r["num_segments"], num_share_hashes, 0)
         self._fieldsize = wbp.fieldsize
         self._fieldstruct = wbp.fieldstruct
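The downloader never writes through this proxy: it builds a throwaway
WriteBucketProxy with no remote reference and (after this change) no server,
purely to learn the header geometry described by fieldsize and fieldstruct.
As a minimal sketch of how that geometry could be applied, the helper below
unpacks an offset table from raw header bytes; parse_offsets and its inputs
are hypothetical, only fieldsize/fieldstruct come from the code above.

import struct

def parse_offsets(header_bytes, fieldsize, fieldstruct):
    # Decode a run of equal-width offset fields from a share header.
    # Layout v1 uses 4-byte ">L" fields; layout v2 uses wider 8-byte fields.
    n = len(header_bytes) // fieldsize
    fmt = ">" + fieldstruct[-1] * n          # e.g. ">LLLL" for layout v1
    return struct.unpack(fmt, header_bytes[:n * fieldsize])

# e.g.: offsets = parse_offsets(data, self._fieldsize, self._fieldstruct)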
src/allmydata/immutable/layout.py (r550d67f → r3668cb3)

@@ -77,6 +77,7 @@
 FORCE_V2 = False # set briefly by unit tests to make small-sized V2 shares

-def make_write_bucket_proxy(rref, data_size, block_size, num_segments,
-                            num_share_hashes, uri_extension_size_max, nodeid):
+def make_write_bucket_proxy(rref, server,
+                            data_size, block_size, num_segments,
+                            num_share_hashes, uri_extension_size_max):
     # Use layout v1 for small files, so they'll be readable by older versions
     # (<tahoe-1.3.0). Use layout v2 for large files; they'll only be readable
@@ -85,8 +86,10 @@
         if FORCE_V2:
             raise FileTooLargeError
-        wbp = WriteBucketProxy(rref, data_size, block_size, num_segments,
-                               num_share_hashes, uri_extension_size_max, nodeid)
+        wbp = WriteBucketProxy(rref, server,
+                               data_size, block_size, num_segments,
+                               num_share_hashes, uri_extension_size_max)
     except FileTooLargeError:
-        wbp = WriteBucketProxy_v2(rref, data_size, block_size, num_segments,
-                                  num_share_hashes, uri_extension_size_max, nodeid)
+        wbp = WriteBucketProxy_v2(rref, server,
+                                  data_size, block_size, num_segments,
+                                  num_share_hashes, uri_extension_size_max)
     return wbp
@@ -97,12 +100,11 @@
     fieldstruct = ">L"

-    def __init__(self, rref, data_size, block_size, num_segments,
-                 num_share_hashes, uri_extension_size_max, nodeid,
-                 pipeline_size=50000):
+    def __init__(self, rref, server, data_size, block_size, num_segments,
+                 num_share_hashes, uri_extension_size_max, pipeline_size=50000):
         self._rref = rref
+        self._server = server
         self._data_size = data_size
         self._block_size = block_size
         self._num_segments = num_segments
-        self._nodeid = nodeid

         effective_segments = mathutil.next_power_of_k(num_segments,2)
@@ -162,9 +164,5 @@

     def __repr__(self):
-        if self._nodeid:
-            nodeid_s = idlib.nodeid_b2a(self._nodeid)
-        else:
-            nodeid_s = "[None]"
-        return "<WriteBucketProxy for node %s>" % nodeid_s
+        return "<WriteBucketProxy for node %s>" % self._server.get_name()

     def put_header(self):
@@ -248,8 +246,8 @@


+    def get_servername(self):
+        return self._server.get_name()
     def get_peerid(self):
-        if self._nodeid:
-            return self._nodeid
-        return None
+        return self._server.get_serverid()

 class WriteBucketProxy_v2(WriteBucketProxy):
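The heart of the change shows in __repr__, get_servername, and get_peerid:
the proxy now delegates identity questions to a server object instead of
carrying a raw nodeid and branching on whether it is None. Judging strictly
from the methods this diff calls, a stand-in for that object only needs
get_name() and get_serverid(); the stub below is invented for illustration
(the real server abstraction is richer):

class StubServer:
    # Minimal stand-in for the server object WriteBucketProxy now holds;
    # only the two methods the diff actually calls are provided.
    def __init__(self, serverid, name):
        self._serverid = serverid   # long-form server identifier
        self._name = name           # short printable name for logs and repr()

    def get_serverid(self):
        return self._serverid

    def get_name(self):
        return self._name

Hiding identity behind one object is what lets __repr__ collapse from five
lines to one, and lets callers ask for a printable name via get_servername()
instead of reaching into a private _nodeid attribute.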
src/allmydata/immutable/upload.py (r550d67f → r3668cb3)

@@ -78,8 +78,8 @@
         self.sharesize = sharesize

-        wbp = layout.make_write_bucket_proxy(None, sharesize,
+        wbp = layout.make_write_bucket_proxy(None, None, sharesize,
                                              blocksize, num_segments,
                                              num_share_hashes,
-                                             EXTENSION_SIZE, server.get_serverid())
+                                             EXTENSION_SIZE)
         self.wbp_class = wbp.__class__ # to create more of them
         self.allocated_size = wbp.get_allocated_size()
@@ -121,10 +121,9 @@
         b = {}
         for sharenum, rref in buckets.iteritems():
-            bp = self.wbp_class(rref, self.sharesize,
+            bp = self.wbp_class(rref, self._server, self.sharesize,
                                 self.blocksize,
                                 self.num_segments,
                                 self.num_share_hashes,
-                                EXTENSION_SIZE,
-                                self._server.get_serverid())
+                                EXTENSION_SIZE)
             b[sharenum] = bp
         self.buckets.update(b)
@@ -150,5 +149,5 @@

 def str_shareloc(shnum, bucketwriter):
-    return "%s: %s" % (shnum, idlib.shortnodeid_b2a(bucketwriter._nodeid),)
+    return "%s: %s" % (shnum, bucketwriter.get_servername(),)

 class Tahoe2ServerSelector(log.PrefixingLogMixin):
@@ -206,7 +205,7 @@

         # figure out how much space to ask for
-        wbp = layout.make_write_bucket_proxy(None, share_size, 0, num_segments,
-                                             num_share_hashes, EXTENSION_SIZE,
-                                             None)
+        wbp = layout.make_write_bucket_proxy(None, None,
+                                             share_size, 0, num_segments,
+                                             num_share_hashes, EXTENSION_SIZE)
         allocated_size = wbp.get_allocated_size()
         all_servers = storage_broker.get_servers_for_psi(storage_index)
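Both upload-side call sites that pass None, None build the proxy only to
compute an allocation size before any server connection exists; the real
per-server proxies are created later via self.wbp_class(rref, self._server,
...). A sketch of that sizing pattern, assuming allmydata.immutable.layout
is importable; the numeric parameters are illustrative and the
EXTENSION_SIZE value is an assumption, not taken from the diff:

from allmydata.immutable import layout

EXTENSION_SIZE = 1000    # assumed cap on the URI-extension block
share_size = 1000000     # hypothetical ~1 MB share
num_segments = 8         # hypothetical
num_share_hashes = 4     # hypothetical

# No remote reference and no server yet: the proxy exists only to answer
# get_allocated_size(), then it is discarded.
wbp = layout.make_write_bucket_proxy(None, None,
                                     share_size, 0, num_segments,
                                     num_share_hashes, EXTENSION_SIZE)
print("ask each server to allocate %d bytes" % wbp.get_allocated_size())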
src/allmydata/test/test_storage.py (r550d67f → r3668cb3)

@@ -137,9 +137,9 @@
     def test_create(self):
         bw, rb, sharefname = self.make_bucket("test_create", 500)
-        bp = WriteBucketProxy(rb,
+        bp = WriteBucketProxy(rb, None,
                               data_size=300,
                               block_size=10,
                               num_segments=5,
                               num_share_hashes=3,
-                              uri_extension_size_max=500, nodeid=None)
+                              uri_extension_size_max=500)
         self.failUnless(interfaces.IStorageBucketWriter.providedBy(bp), bp)
@@ -168,11 +168,10 @@

         bw, rb, sharefname = self.make_bucket(name, sharesize)
-        bp = wbp_class(rb,
+        bp = wbp_class(rb, None,
                        data_size=95,
                        block_size=25,
                        num_segments=4,
                        num_share_hashes=3,
-                       uri_extension_size_max=len(uri_extension),
-                       nodeid=None)
+                       uri_extension_size_max=len(uri_extension))

         d = bp.put_header()
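The tests receive None in the new server slot, which is safe here because
nothing in them calls __repr__ or the new get_servername(); a test that did
would need a stub such as StubServer sketched above. A hypothetical addition
in the same Trial style (not part of this changeset; the serverid and name
values are made up):

from twisted.trial import unittest

class ServerDelegation(unittest.TestCase):
    def test_delegation(self):
        # rref=None is fine for construction, as the changeset's own
        # (None, None) call sites demonstrate; no remote calls are made.
        server = StubServer("\x9a" * 20, "xgru5adv")
        bp = WriteBucketProxy(None, server,
                              data_size=300,
                              block_size=10,
                              num_segments=5,
                              num_share_hashes=3,
                              uri_extension_size_max=500)
        self.failUnlessEqual(bp.get_servername(), "xgru5adv")
        self.failUnlessEqual(bp.get_peerid(), "\x9a" * 20)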