| 1 | Thu Jul 15 16:17:14 PDT 2010 Kevan Carstensen <kevan@isnotajoke.com> |
|---|
| 2 | * immutable/upload.py: abort buckets if peer selection fails |
|---|
| 3 | |
|---|
| 4 | Thu Jul 15 16:18:20 PDT 2010 Kevan Carstensen <kevan@isnotajoke.com> |
|---|
| 5 | * test/test_upload.py: changes to test plumbing for #1117 tests |
|---|
| 6 | |
|---|
| 7 | - Add a callRemoteOnly method to FakeBucketWriter. |
|---|
| 8 | - Change the abort method in FakeBucketWriter to not return a |
|---|
| 9 | RuntimeError. |
|---|
| 10 | |
|---|
| 11 | Thu Jul 15 16:21:05 PDT 2010 Kevan Carstensen <kevan@isnotajoke.com> |
|---|
| 12 | * storage/immutable.py: make remote_abort tell the storage server about aborted buckets. |
|---|
| 13 | |
|---|
| 14 | Thu Jul 15 16:21:48 PDT 2010 Kevan Carstensen <kevan@isnotajoke.com> |
|---|
| 15 | * test/test_storage.py: test for the new remote_abort semantics. |
|---|
| 16 | |
|---|
| 17 | Thu Jul 15 17:10:46 PDT 2010 Kevan Carstensen <kevan@isnotajoke.com> |
|---|
| 18 | * test/test_upload.py: test to see that aborted buckets are ignored by the storage server |
|---|
| 19 | |
|---|
| 20 | New patches: |
|---|
| 21 | |
|---|
| 22 | [immutable/upload.py: abort buckets if peer selection fails |
|---|
| 23 | Kevan Carstensen <kevan@isnotajoke.com>**20100715231714 |
|---|
| 24 | Ignore-this: 2a0b643a22284df292d8ed9d91b1fd37 |
|---|
| 25 | ] { |
|---|
| 26 | hunk ./src/allmydata/immutable/upload.py 138 |
|---|
| 27 | return (alreadygot, set(b.keys())) |
|---|
| 28 | |
|---|
| 29 | |
|---|
| 30 | + def abort(self): |
|---|
| 31 | + """ |
|---|
| 32 | + I abort the remote bucket writers for the share numbers in |
|---|
| 33 | + sharenums. This is a good idea to conserve space on the storage |
|---|
| 34 | + server. |
|---|
| 35 | + """ |
|---|
| 36 | + for writer in self.buckets.itervalues(): writer.abort() |
|---|
| 37 | + |
|---|
| 38 | + |
|---|
| 39 | class Tahoe2PeerSelector: |
|---|
| 40 | |
|---|
| 41 | def __init__(self, upload_id, logparent=None, upload_status=None): |
|---|
| 42 | hunk ./src/allmydata/immutable/upload.py 367 |
|---|
| 43 | self.needed_shares, |
|---|
| 44 | self.servers_of_happiness, |
|---|
| 45 | effective_happiness) |
|---|
| 46 | - raise UploadUnhappinessError("%s (%s)" % (msg, |
|---|
| 47 | - self._get_progress_message())) |
|---|
| 48 | + return self._failed("%s (%s)" % (msg, self._get_progress_message())) |
|---|
| 49 | |
|---|
| 50 | if self.uncontacted_peers: |
|---|
| 51 | peer = self.uncontacted_peers.pop(0) |
|---|
| 52 | hunk ./src/allmydata/immutable/upload.py 428 |
|---|
| 53 | if self.last_failure_msg: |
|---|
| 54 | msg += " (%s)" % (self.last_failure_msg,) |
|---|
| 55 | log.msg(msg, level=log.UNUSUAL, parent=self._log_parent) |
|---|
| 56 | - raise UploadUnhappinessError(msg) |
|---|
| 57 | + return self._failed(msg) |
|---|
| 58 | else: |
|---|
| 59 | # we placed enough to be happy, so we're done |
|---|
| 60 | if self._status: |
|---|
| 61 | hunk ./src/allmydata/immutable/upload.py 517 |
|---|
| 62 | return self._loop() |
|---|
| 63 | |
|---|
| 64 | |
|---|
| 65 | + def _failed(self, msg): |
|---|
| 66 | + """ |
|---|
| 67 | + I am called when peer selection fails. I first abort all of the |
|---|
| 68 | + remote buckets that I allocated during my unsuccessful attempt to |
|---|
| 69 | + place shares for this file. I then raise an |
|---|
| 70 | + UploadUnhappinessError with my msg argument. |
|---|
| 71 | + """ |
|---|
| 72 | + for peer in self.use_peers: |
|---|
| 73 | + assert isinstance(peer, PeerTracker) |
|---|
| 74 | + |
|---|
| 75 | + peer.abort() |
|---|
| 76 | + |
|---|
| 77 | + raise UploadUnhappinessError(msg) |
|---|
| 78 | + |
|---|
| 79 | + |
|---|
| 80 | class EncryptAnUploadable: |
|---|
| 81 | """This is a wrapper that takes an IUploadable and provides |
|---|
| 82 | IEncryptedUploadable.""" |
|---|
| 83 | } |
|---|
| 84 | [test/test_upload.py: changes to test plumbing for #1117 tests |
|---|
| 85 | Kevan Carstensen <kevan@isnotajoke.com>**20100715231820 |
|---|
| 86 | Ignore-this: 78a6d359d7bf8529d283e2815bf1e2de |
|---|
| 87 | |
|---|
| 88 | - Add a callRemoteOnly method to FakeBucketWriter. |
|---|
| 89 | - Change the abort method in FakeBucketWriter to not return a |
|---|
| 90 | RuntimeError. |
|---|
| 91 | ] { |
|---|
| 92 | hunk ./src/allmydata/test/test_upload.py 162 |
|---|
| 93 | d.addCallback(lambda res: _call()) |
|---|
| 94 | return d |
|---|
| 95 | |
|---|
| 96 | + |
|---|
| 97 | + def callRemoteOnly(self, methname, *args, **kwargs): |
|---|
| 98 | + d = self.callRemote(methname, *args, **kwargs) |
|---|
| 99 | + del d # callRemoteOnly ignores this |
|---|
| 100 | + return None |
|---|
| 101 | + |
|---|
| 102 | + |
|---|
| 103 | def remote_write(self, offset, data): |
|---|
| 104 | precondition(not self.closed) |
|---|
| 105 | precondition(offset >= 0) |
|---|
| 106 | hunk ./src/allmydata/test/test_upload.py 183 |
|---|
| 107 | self.closed = True |
|---|
| 108 | |
|---|
| 109 | def remote_abort(self): |
|---|
| 110 | - log.err(RuntimeError("uh oh, I was asked to abort")) |
|---|
| 111 | + pass |
|---|
| 112 | |
|---|
| 113 | class FakeClient: |
|---|
| 114 | DEFAULT_ENCODING_PARAMETERS = {"k":25, |
|---|
| 115 | } |
|---|
| 116 | [storage/immutable.py: make remote_abort btell the storage server about aborted buckets. |
|---|
| 117 | Kevan Carstensen <kevan@isnotajoke.com>**20100715232105 |
|---|
| 118 | Ignore-this: 16ab0090676355abdd5600ed44ff19c9 |
|---|
| 119 | ] { |
|---|
| 120 | hunk ./src/allmydata/storage/immutable.py 282 |
|---|
| 121 | def _abort(self): |
|---|
| 122 | if self.closed: |
|---|
| 123 | return |
|---|
| 124 | + |
|---|
| 125 | os.remove(self.incominghome) |
|---|
| 126 | # if we were the last share to be moved, remove the incoming/ |
|---|
| 127 | # directory that was our parent |
|---|
| 128 | hunk ./src/allmydata/storage/immutable.py 289 |
|---|
| 129 | parentdir = os.path.split(self.incominghome)[0] |
|---|
| 130 | if not os.listdir(parentdir): |
|---|
| 131 | os.rmdir(parentdir) |
|---|
| 132 | + self._sharefile = None |
|---|
| 133 | |
|---|
| 134 | hunk ./src/allmydata/storage/immutable.py 291 |
|---|
| 135 | + # We are now considered closed for further writing. We must tell |
|---|
| 136 | + # the storage server about this so that it stops expecting us to |
|---|
| 137 | + # use the space it allocated for us earlier. |
|---|
| 138 | + self.closed = True |
|---|
| 139 | + self.ss.bucket_writer_closed(self, 0) |
|---|
| 140 | |
|---|
| 141 | |
|---|
| 142 | class BucketReader(Referenceable): |
|---|
| 143 | } |
|---|
| 144 | [test/test_storage.py: test for the new remote_abort semantics. |
|---|
| 145 | Kevan Carstensen <kevan@isnotajoke.com>**20100715232148 |
|---|
| 146 | Ignore-this: d3d6491f17bf670e770ca4b385007515 |
|---|
| 147 | ] hunk ./src/allmydata/test/test_storage.py 324 |
|---|
| 148 | self.failIf(os.path.exists(incoming_prefix_dir), incoming_prefix_dir) |
|---|
| 149 | self.failUnless(os.path.exists(incoming_dir), incoming_dir) |
|---|
| 150 | |
|---|
| 151 | + def test_abort(self): |
|---|
| 152 | + # remote_abort, when called on a writer, should make sure that |
|---|
| 153 | + # the allocated size of the bucket is not counted by the storage |
|---|
| 154 | + # server when accounting for space. |
|---|
| 155 | + ss = self.create("test_abort") |
|---|
| 156 | + already, writers = self.allocate(ss, "allocate", [0, 1, 2], 150) |
|---|
| 157 | + self.failIfEqual(ss.allocated_size(), 0) |
|---|
| 158 | + |
|---|
| 159 | + # Now abort the writers. |
|---|
| 160 | + for writer in writers.itervalues(): |
|---|
| 161 | + writer.remote_abort() |
|---|
| 162 | + self.failUnlessEqual(ss.allocated_size(), 0) |
|---|
| 163 | + |
|---|
| 164 | + |
|---|
| 165 | def test_allocate(self): |
|---|
| 166 | ss = self.create("test_allocate") |
|---|
| 167 | |
|---|
| 168 | [test/test_upload.py: test to see that aborted buckets are ignored by the storage server |
|---|
| 169 | Kevan Carstensen <kevan@isnotajoke.com>**20100716001046 |
|---|
| 170 | Ignore-this: cc075c24b1c86d737f3199af894cc780 |
|---|
| 171 | ] hunk ./src/allmydata/test/test_upload.py 1809 |
|---|
| 172 | return d |
|---|
| 173 | |
|---|
| 174 | |
|---|
| 175 | + def test_peer_selector_bucket_abort(self): |
|---|
| 176 | + # If peer selection for an upload fails due to an unhappy |
|---|
| 177 | + # layout, the peer selection process should abort the buckets it |
|---|
| 178 | + # allocates before failing, so that the space can be re-used. |
|---|
| 179 | + self.basedir = self.mktemp() |
|---|
| 180 | + self.set_up_grid(num_servers=5) |
|---|
| 181 | + |
|---|
| 182 | + # Try to upload a file with happy=7, which is unsatisfiable with |
|---|
| 183 | + # the current grid. This will fail, but should not take up any |
|---|
| 184 | + # space on the storage servers after it fails. |
|---|
| 185 | + client = self.g.clients[0] |
|---|
| 186 | + client.DEFAULT_ENCODING_PARAMETERS['happy'] = 7 |
|---|
| 187 | + d = defer.succeed(None) |
|---|
| 188 | + d.addCallback(lambda ignored: |
|---|
| 189 | + self.shouldFail(UploadUnhappinessError, |
|---|
| 190 | + "test_peer_selection_bucket_abort", |
|---|
| 191 | + "", |
|---|
| 192 | + client.upload, upload.Data("data" * 10000, |
|---|
| 193 | + convergence=""))) |
|---|
| 194 | + # wait for the abort messages to get there. |
|---|
| 195 | + def _turn_barrier(res): |
|---|
| 196 | + return fireEventually(res) |
|---|
| 197 | + d.addCallback(_turn_barrier) |
|---|
| 198 | + def _then(ignored): |
|---|
| 199 | + for server in self.g.servers_by_number.values(): |
|---|
| 200 | + self.failUnlessEqual(server.allocated_size(), 0) |
|---|
| 201 | + d.addCallback(_then) |
|---|
| 202 | + return d |
|---|
| 203 | + |
|---|
| 204 | + |
|---|
| 205 | + def test_encoder_bucket_abort(self): |
|---|
| 206 | + # If enough servers die in the process of encoding and uploading |
|---|
| 207 | + # a file to make the layout unhappy, we should cancel the |
|---|
| 208 | + # newly-allocated buckets before dying. |
|---|
| 209 | + self.basedir = self.mktemp() |
|---|
| 210 | + self.set_up_grid(num_servers=4) |
|---|
| 211 | + |
|---|
| 212 | + client = self.g.clients[0] |
|---|
| 213 | + client.DEFAULT_ENCODING_PARAMETERS['happy'] = 7 |
|---|
| 214 | + |
|---|
| 215 | + d = defer.succeed(None) |
|---|
| 216 | + d.addCallback(lambda ignored: |
|---|
| 217 | + self.shouldFail(UploadUnhappinessError, |
|---|
| 218 | + "test_encoder_bucket_abort", |
|---|
| 219 | + "", |
|---|
| 220 | + self._do_upload_with_broken_servers, 1)) |
|---|
| 221 | + def _turn_barrier(res): |
|---|
| 222 | + return fireEventually(res) |
|---|
| 223 | + d.addCallback(_turn_barrier) |
|---|
| 224 | + def _then(ignored): |
|---|
| 225 | + for server in self.g.servers_by_number.values(): |
|---|
| 226 | + self.failUnlessEqual(server.allocated_size(), 0) |
|---|
| 227 | + d.addCallback(_then) |
|---|
| 228 | + return d |
|---|
| 229 | + |
|---|
| 230 | + |
|---|
| 231 | def _set_up_nodes_extra_config(self, clientdir): |
|---|
| 232 | cfgfn = os.path.join(clientdir, "tahoe.cfg") |
|---|
| 233 | oldcfg = open(cfgfn, "r").read() |
|---|
| 234 | |
|---|
| 235 | Context: |
|---|
| 236 | |
|---|
| 237 | [SFTP: address some of the comments in zooko's review (#1106). |
|---|
| 238 | david-sarah@jacaranda.org**20100712025537 |
|---|
| 239 | Ignore-this: c3921638a2d4f1de2a776ae78e4dc37e |
|---|
| 240 | ] |
|---|
| 241 | [docs/logging.txt: note that setting flogging vars might affect tests with race conditions. |
|---|
| 242 | david-sarah@jacaranda.org**20100712050721 |
|---|
| 243 | Ignore-this: fc1609d215fcd5561a57fd1226206f27 |
|---|
| 244 | ] |
|---|
| 245 | [test_storage.py: potential fix for failures when logging is enabled. |
|---|
| 246 | david-sarah@jacaranda.org**19700713040546 |
|---|
| 247 | Ignore-this: 5815693a0df3e64c52c3c6b7be2846c7 |
|---|
| 248 | ] |
|---|
| 249 | [upcase_since_on_welcome |
|---|
| 250 | terrellrussell@gmail.com**20100708193903] |
|---|
| 251 | [server_version_on_welcome_page.dpatch.txt |
|---|
| 252 | freestorm77@gmail.com**20100605191721 |
|---|
| 253 | Ignore-this: b450c76dc875f5ac8cca229a666cbd0a |
|---|
| 254 | |
|---|
| 255 | |
|---|
| 256 | - The storage server version is 0 for all storage nodes in the Welcome Page |
|---|
| 257 | |
|---|
| 258 | |
|---|
| 259 | ] |
|---|
| 260 | [NEWS: add NEWS snippets about two recent patches |
|---|
| 261 | zooko@zooko.com**20100708162058 |
|---|
| 262 | Ignore-this: 6c9da6a0ad7351a960bdd60f81532899 |
|---|
| 263 | ] |
|---|
| 264 | [directory_html_top_banner.dpatch |
|---|
| 265 | freestorm77@gmail.com**20100622205301 |
|---|
| 266 | Ignore-this: 1d770d975e0c414c996564774f049bca |
|---|
| 267 | |
|---|
| 268 | The div tag with the link "Return to Welcome page" on the directory.xhtml page is not correct |
|---|
| 269 | |
|---|
| 270 | ] |
|---|
| 271 | [tahoe_css_toolbar.dpatch |
|---|
| 272 | freestorm77@gmail.com**20100622210046 |
|---|
| 273 | Ignore-this: 5b3ebb2e0f52bbba718a932f80c246c0 |
|---|
| 274 | |
|---|
| 275 | CSS modification to be correctly diplayed with Internet Explorer 8 |
|---|
| 276 | |
|---|
| 277 | The links on the top of page directory.xhtml are not diplayed in the same line as display with Firefox. |
|---|
| 278 | |
|---|
| 279 | ] |
|---|
| 280 | [runnin_test_tahoe_css.dpatch |
|---|
| 281 | freestorm77@gmail.com**20100622214714 |
|---|
| 282 | Ignore-this: e0db73d68740aad09a7b9ae60a08c05c |
|---|
| 283 | |
|---|
| 284 | Runnin test for changes in tahoe.css file |
|---|
| 285 | |
|---|
| 286 | ] |
|---|
| 287 | [runnin_test_directory_xhtml.dpatch |
|---|
| 288 | freestorm77@gmail.com**20100622201403 |
|---|
| 289 | Ignore-this: f8962463fce50b9466405cb59fe11d43 |
|---|
| 290 | |
|---|
| 291 | Runnin test for diretory.xhtml top banner |
|---|
| 292 | |
|---|
| 293 | ] |
|---|
| 294 | [stringutils.py: tolerate sys.stdout having no 'encoding' attribute. |
|---|
| 295 | david-sarah@jacaranda.org**20100626040817 |
|---|
| 296 | Ignore-this: f42cad81cef645ee38ac1df4660cc850 |
|---|
| 297 | ] |
|---|
| 298 | [quickstart.html: python 2.5 -> 2.6 as recommended version |
|---|
| 299 | david-sarah@jacaranda.org**20100705175858 |
|---|
| 300 | Ignore-this: bc3a14645ea1d5435002966ae903199f |
|---|
| 301 | ] |
|---|
| 302 | [SFTP: don't call .stopProducing on the producer registered with OverwriteableFileConsumer (which breaks with warner's new downloader). |
|---|
| 303 | david-sarah@jacaranda.org**20100628231926 |
|---|
| 304 | Ignore-this: 131b7a5787bc85a9a356b5740d9d996f |
|---|
| 305 | ] |
|---|
| 306 | [docs/how_to_make_a_tahoe-lafs_release.txt: trivial correction, install.html should now be quickstart.html. |
|---|
| 307 | david-sarah@jacaranda.org**20100625223929 |
|---|
| 308 | Ignore-this: 99a5459cac51bd867cc11ad06927ff30 |
|---|
| 309 | ] |
|---|
| 310 | [setup: in the Makefile, refuse to upload tarballs unless someone has passed the environment variable "BB_BRANCH" with value "trunk" |
|---|
| 311 | zooko@zooko.com**20100619034928 |
|---|
| 312 | Ignore-this: 276ddf9b6ad7ec79e27474862e0f7d6 |
|---|
| 313 | ] |
|---|
| 314 | [trivial: tiny update to in-line comment |
|---|
| 315 | zooko@zooko.com**20100614045715 |
|---|
| 316 | Ignore-this: 10851b0ed2abfed542c97749e5d280bc |
|---|
| 317 | (I'm actually committing this patch as a test of the new eager-annotation-computation of trac-darcs.) |
|---|
| 318 | ] |
|---|
| 319 | [docs: about.html link to home page early on, and be decentralized storage instead of cloud storage this time around |
|---|
| 320 | zooko@zooko.com**20100619065318 |
|---|
| 321 | Ignore-this: dc6db03f696e5b6d2848699e754d8053 |
|---|
| 322 | ] |
|---|
| 323 | [docs: update about.html, especially to have a non-broken link to quickstart.html, and also to comment out the broken links to "for Paranoids" and "for Corporates" |
|---|
| 324 | zooko@zooko.com**20100619065124 |
|---|
| 325 | Ignore-this: e292c7f51c337a84ebfeb366fbd24d6c |
|---|
| 326 | ] |
|---|
| 327 | [TAG allmydata-tahoe-1.7.0 |
|---|
| 328 | zooko@zooko.com**20100619052631 |
|---|
| 329 | Ignore-this: d21e27afe6d85e2e3ba6a3292ba2be1 |
|---|
| 330 | ] |
|---|
| 331 | Patch bundle hash: |
|---|
| 332 | 00056b67380e13933aba18bc9d0dca86cfcf6566 |
|---|