import os, re, sys, time, simplejson
from cStringIO import StringIO

from twisted.trial import unittest
from twisted.internet import defer
from twisted.internet import threads # CLI tests use deferToThread
from twisted.application import service

import allmydata
from allmydata import client, uri
from allmydata.introducer.server import IntroducerNode
from allmydata.storage.mutable import MutableShareFile
from allmydata.storage.server import si_a2b
from allmydata.immutable import offloaded, upload
from allmydata.immutable.literal import LiteralFileNode
from allmydata.immutable.filenode import ImmutableFileNode
from allmydata.util import idlib, mathutil, pollmixin, fileutil, iputil
from allmydata.util import log, base32
from allmydata.util.encodingutil import quote_output, unicode_to_argv
from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.util.consumer import MemoryConsumer, download_to_data
from allmydata.scripts import runner
from allmydata.stats import StatsGathererService
from allmydata.interfaces import IDirectoryNode, IFileNode, \
     NoSuchChildError, NoSharesError
from allmydata.monitor import Monitor
from allmydata.mutable.common import NotWriteableError
from allmydata.mutable import layout as mutable_layout
from allmydata.mutable.publish import MutableData

from foolscap.api import DeadReferenceError, fireEventually, flushEventualQueue
from twisted.python.failure import Failure
from twisted.web.client import getPage
from twisted.web.error import Error

from .common import TEST_RSA_KEY_SIZE

# TODO: move this to common or common_util
from allmydata.test.test_runner import RunBinTahoeMixin
from . import common_util as testutil

LARGE_DATA = """
This is some data to publish to the remote grid, which needs to be large
enough to not fit inside a LIT uri.
"""

# our system test uses the same Tub certificates each time, to avoid the
# overhead of key generation
SYSTEM_TEST_CERTS = [
"""-----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQyMDVaFw0wOTA3MjUyMjQyMDVaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAxHCWajrR
2h/iurw8k93m8WUdE3xypJiiAITw7GkKlKbCLD+dEce2MXwVVYca0n/MZZsj89Cu
Ko0lLjksMseoSDoj98iEmVpaY5mc2ntpQ+FXdoEmPP234XRWEg2HQ+EaK6+WkGQg
DDXQvFJCVCQk/n1MdAwZZ6vqf2ITzSuD44kCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQBn6qPKGdFjWJy7sOOTUFfm/THhHQqAh1pBDLkjR+OtzuobCoP8n8J1LNG3Yxds
Jj7NWQL7X5TfOlfoi7e9jK0ujGgWh3yYU6PnHzJLkDiDT3LCSywQuGXCjh0tOStS
2gaCmmAK2cfxSStKzNcewl2Zs8wHMygq8TLFoZ6ozN1+xQ==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDEcJZqOtHaH+K6vDyT3ebxZR0TfHKkmKIAhPDsaQqUpsIsP50R
x7YxfBVVhxrSf8xlmyPz0K4qjSUuOSwyx6hIOiP3yISZWlpjmZzae2lD4Vd2gSY8
/bfhdFYSDYdD4Rorr5aQZCAMNdC8UkJUJCT+fUx0DBlnq+p/YhPNK4PjiQIDAQAB
AoGAZyDMdrymiyMOPwavrtlicvyohSBid3MCKc+hRBvpSB0790r2RO1aAySndp1V
QYmCXx1RhKDbrs8m49t0Dryu5T+sQrFl0E3usAP3vvXWeh4jwJ9GyiRWy4xOEuEQ
3ewjbEItHqA/bRJF0TNtbOmZTDC7v9FRPf2bTAyFfTZep5kCQQD33q1RA8WUYtmQ
IArgHqt69i421lpXlOgqotFHwTx4FiGgVzDQCDuXU6txB9EeKRM340poissav/n6
bkLZ7/VDAkEAyuIPkeI59sE5NnmW+N47NbCfdM1Smy1YxZpv942EmP9Veub5N0dw
iK5bLAgEguUIjpTsh3BRmsE9Xd+ItmnRQwJBAMZhbg19G1EbnE0BmDKv2UbcaThy
bnPSNc6J6T2opqDl9ZvCrMqTDD6dNIWOYAvni/4a556sFsoeBBAu10peBskCQE6S
cB86cuJagLLVMh/dySaI6ahNoFFSpY+ZuQUxfInYUR2Q+DFtbGqyw8JwtHaRBthZ
WqU1XZVGg2KooISsxIsCQQD1PS7//xHLumBb0jnpL7n6W8gmiTyzblT+0otaCisP
fN6rTlwV1o8VsOUAz0rmKO5RArCbkmb01WtMgPCDBYkk
-----END RSA PRIVATE KEY-----
""", # 0
"""-----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQyMDVaFw0wOTA3MjUyMjQyMDVaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAs9CALdmW
kJ6r0KPSLdGCA8rzQKxWayrMckT22ZtbRv3aw6VA96dWclpY+T2maV0LrAzmMSL8
n61ydJHM33iYDOyWbwHWN45XCjY/e20PL54XUl/DmbBHEhQVQLIfCldcRcnWEfoO
iOhDJfWpDO1dmP/aOYLdkZCZvBtPAfyUqRcCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQAN9eaCREkzzk4yPIaWYkWHg3Igs1vnOR/iDw3OjyxO/xJFP2lkA2WtrwL2RTRq
dxA8gwdPyrWgdiZElwZH8mzTJ4OdUXLSMclLOg9kvH6gtSvhLztfEDwDP1wRhikh
OeWWu2GIC+uqFCI1ftoGgU+aIa6yrHswf66rrQvBSSvJPQ==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCz0IAt2ZaQnqvQo9It0YIDyvNArFZrKsxyRPbZm1tG/drDpUD3
p1ZyWlj5PaZpXQusDOYxIvyfrXJ0kczfeJgM7JZvAdY3jlcKNj97bQ8vnhdSX8OZ
sEcSFBVAsh8KV1xFydYR+g6I6EMl9akM7V2Y/9o5gt2RkJm8G08B/JSpFwIDAQAB
AoGBAIUy5zCPpSP+FeJY6CG+t6Pdm/IFd4KtUoM3KPCrT6M3+uzApm6Ny9Crsor2
qyYTocjSSVaOxzn1fvpw4qWLrH1veUf8ozMs8Z0VuPHD1GYUGjOXaBPXb5o1fQL9
h7pS5/HrDDPN6wwDNTsxRf/fP58CnfwQUhwdoxcx8TnVmDQxAkEA6N3jBXt/Lh0z
UbXHhv3QBOcqLZA2I4tY7wQzvUvKvVmCJoW1tfhBdYQWeQv0jzjL5PzrrNY8hC4l
8+sFM3h5TwJBAMWtbFIEZfRSG1JhHK3evYHDTZnr/j+CdoWuhzP5RkjkIKsiLEH7
2ZhA7CdFQLZF14oXy+g1uVCzzfB2WELtUbkCQQDKrb1XWzrBlzbAipfkXWs9qTmj
uJ32Z+V6+0xRGPOXxJ0sDDqw7CeFMfchWg98zLFiV+SEZV78qPHtkAPR3ayvAkB+
hUMhM4N13t9x2IoclsXAOhp++9bdG0l0woHyuAdOPATUw6iECwf4NQVxFRgYEZek
4Ro3Y7taddrHn1dabr6xAkAic47OoLOROYLpljmJJO0eRe3Z5IFe+0D2LfhAW3LQ
JU+oGq5pCjfnoaDElRRZn0+GmunnWeQEYKoflTi/lI9d
-----END RSA PRIVATE KEY-----
""", # 1
"""-----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsxG7LTrz
DF+9wegOR/BRJhjSumPUbYQnNAUKtPraFsGjAJILP44AHdnHt1MONLgTeX1ynapo
q6O/q5cdKtBB7uEh7FpkLCCwpZt/m0y79cynn8AmWoQVgl8oS0567UmPeJnTzFPv
dmT5dlaQALeX5YGceAsEvhmAsdOMttaor38CAwEAATANBgkqhkiG9w0BAQQFAAOB
gQA345rxotfvh2kfgrmRzAyGewVBV4r23Go30GSZir8X2GoH3qKNwO4SekAohuSw
AiXzLUbwIdSRSqaLFxSC7Duqc9eIeFDAWjeEmpfFLBNiw3K8SLA00QrHCUXnECTD
b/Kk6OGuvPOiuuONVjEuEcRdCH3/Li30D0AhJaMynjhQJQ==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCzEbstOvMMX73B6A5H8FEmGNK6Y9RthCc0BQq0+toWwaMAkgs/
jgAd2ce3Uw40uBN5fXKdqmiro7+rlx0q0EHu4SHsWmQsILClm3+bTLv1zKefwCZa
hBWCXyhLTnrtSY94mdPMU+92ZPl2VpAAt5flgZx4CwS+GYCx04y21qivfwIDAQAB
AoGBAIlhFg/aRPL+VM9539LzHN60dp8GzceDdqwjHhbAySZiQlLCuJx2rcI4/U65
CpIJku9G/fLV9N2RkA/trDPXeGyqCTJfnNzyZcvvMscRMFqSGyc21Y0a+GS8bIxt
1R2B18epSVMsWSWWMypeEgsfv29LV7oSWG8UKaqQ9+0h63DhAkEA4i2L/rori/Fb
wpIBfA+xbXL/GmWR7xPW+3nG3LdLQpVzxz4rIsmtO9hIXzvYpcufQbwgVACyMmRf
TMABeSDM7wJBAMquEdTaVXjGfH0EJ7z95Ys2rYTiCXjBfyEOi6RXXReqV9SXNKlN
aKsO22zYecpkAjY1EdUdXWP/mNVEybjpZnECQQCcuh0JPS5RwcTo9c2rjyBOjGIz
g3B1b5UIG2FurmCrWe6pgO3ZJFEzZ/L2cvz0Hj5UCa2JKBZTDvRutZoPumfnAkAb
nSW+y1Rz1Q8m9Ub4v9rjYbq4bRd/RVWtyk6KQIDldYbr5wH8wxgsniSVKtVFFuUa
P5bDY3HS6wMGo42cTOhxAkAcdweQSQ3j7mfc5vh71HeAC1v/VAKGehGOUdeEIQNl
Sb2WuzpZkbfsrVzW6MdlgY6eE7ufRswhDPLWPC8MP0d1
-----END RSA PRIVATE KEY-----
""", # 2
"""-----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAxnH+pbOS
qlJlsHpKUQtV0oN1Mv+ESG+yUDxStFFGjkJv/UIRzpxqFqY/6nJ3D03kZsDdcXyi
CfV9hPYQaVNMn6z+puPmIagfBQ0aOyuI+nUhCttZIYD9071BjW5bCMX5NZWL/CZm
E0HdAZ77H6UrRckJ7VR8wAFpihBxD5WliZcCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQAwXqY1Sjvp9JSTHKklu7s0T6YmH/BKSXrHpS2xO69svK+ze5/+5td3jPn4Qe50
xwRNZSFmSLuJLfCO32QJSJTB7Vs5D3dNTZ2i8umsaodm97t8hit7L75nXRGHKH//
xDVWAFB9sSgCQyPMRkL4wB4YSfRhoSKVwMvaz+XRZDUU0A==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQDGcf6ls5KqUmWwekpRC1XSg3Uy/4RIb7JQPFK0UUaOQm/9QhHO
nGoWpj/qcncPTeRmwN1xfKIJ9X2E9hBpU0yfrP6m4+YhqB8FDRo7K4j6dSEK21kh
gP3TvUGNblsIxfk1lYv8JmYTQd0BnvsfpStFyQntVHzAAWmKEHEPlaWJlwIDAQAB
AoGAdHNMlXwtItm7ZrY8ihZ2xFP0IHsk60TwhHkBp2LSXoTKJvnwbSgIcUYZ18BX
8Zkp4MpoqEIU7HcssyuaMdR572huV2w0D/2gYJQLQ5JapaR3hMox3YG4wjXasN1U
1iZt7JkhKlOy+ElL5T9mKTE1jDsX2RAv4WALzMpYFo7vs4ECQQDxqrPaqRQ5uYS/
ejmIk05nM3Q1zmoLtMDrfRqrjBhaf/W3hqGihiqN2kL3PIIYcxSRWiyNlYXjElsR
2sllBTe3AkEA0jcMHVThwKt1+Ce5VcE7N6hFfbsgISTjfJ+Q3K2NkvJkmtE8ZRX5
XprssnPN8owkfF5yuKbcSZL3uvaaSGN9IQJAfTVnN9wwOXQwHhDSbDt9/KRBCnum
n+gHqDrKLaVJHOJ9SZf8eLswoww5c+UqtkYxmtlwie61Tp+9BXQosilQ4wJBAIZ1
XVNZmriBM4jR59L5MOZtxF0ilu98R+HLsn3kqLyIPF9mXCoQPxwLHkEan213xFKk
mt6PJDIPRlOZLqAEuuECQFQMCrn0VUwPg8E40pxMwgMETvVflPs/oZK1Iu+b7+WY
vBptAyhMu31fHQFnJpiUOyHqSZnOZyEn1Qu2lszNvUg=
-----END RSA PRIVATE KEY-----
""", # 3
"""-----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAnjiOwipn
jigDuNMfNG/tBJhPwYUHhSbQdvrTubhsxw1oOq5XpNqUwRtC8hktOKM3hghyqExP
62EOi0aJBkRhtwtPSLBCINptArZLfkog/nTIqVv4eLEzJ19nTi/llHHWKcgA6XTI
sU/snUhGlySA3RpETvXqIJTauQRZz0kToSUCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQCQ+u/CsX5WC5m0cLrpyIS6qZa62lrB3mj9H1aIQhisT5kRsMz3FJ1aOaS8zPRz
w0jhyRmamCcSsWf5WK539iOtsXbKMdAyjNtkQO3g+fnsLgmznAjjst24jfr+XU59
0amiy1U6TY93gtEBZHtiLldPdUMsTuFbBlqbcMBQ50x9rA==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQCeOI7CKmeOKAO40x80b+0EmE/BhQeFJtB2+tO5uGzHDWg6rlek
2pTBG0LyGS04ozeGCHKoTE/rYQ6LRokGRGG3C09IsEIg2m0Ctkt+SiD+dMipW/h4
sTMnX2dOL+WUcdYpyADpdMixT+ydSEaXJIDdGkRO9eoglNq5BFnPSROhJQIDAQAB
AoGAAPrst3s3xQOucjismtCOsVaYN+SxFTwWUoZfRWlFEz6cBLELzfOktEWM9p79
TrqEH4px22UNobGqO2amdql5yXwEFVhYQkRB8uDA8uVaqpL8NLWTGPRXxZ2DSU+n
7/FLf/TWT3ti/ZtXaPVRj6E2/Mq9AVEVOjUYzkNjM02OxcECQQDKEqmPbdZq2URU
7RbUxkq5aTp8nzAgbpUsgBGQ9PDAymhj60BDEP0q28Ssa7tU70pRnQ3AZs9txgmL
kK2g97FNAkEAyHH9cIb6qXOAJPIr/xamFGr5uuYw9TJPz/hfVkVimW/aZnBB+e6Q
oALJBDKJWeYPzdNbouJYg8MeU0qWdZ5DOQJADUk+1sxc/bd9U6wnBSRog1pU2x7I
VkmPC1b8ULCaJ8LnLDKqjf5O9wNuIfwPXB1DoKwX3F+mIcyUkhWYJO5EPQJAUj5D
KMqZSrGzYHVlC/M1Daee88rDR7fu+3wDUhiCDkbQq7tftrbl7GF4LRq3NIWq8l7I
eJq6isWiSbaO6Y+YMQJBAJFBpVhlY5Px2BX5+Hsfq6dSP3sVVc0eHkdsoZFFxq37
fksL/q2vlPczvBihgcxt+UzW/UrNkelOuX3i57PDvFs=
-----END RSA PRIVATE KEY-----
""", # 4
"""-----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQyMDZaFw0wOTA3MjUyMjQyMDZaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsCQuudDF
zgmY5tDpT0TkUo8fpJ5JcvgCkLFpSDD8REpXhLFkHWhTmTj3CAxfv4lA3sQzHZxe
4S9YCb5c/VTbFEdgwc/wlxMmJiz2jYghdmWPBb8pBEk31YihIhC+u4kex6gJBH5y
ixiZ3PPRRMaOBBo+ZfM50XIyWbFOOM/7FwcCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQB4cFURaiiUx6n8eS4j4Vxrii5PtsaNEI4acANFSYknGd0xTP4vnmoivNmo5fWE
Q4hYtGezNu4a9MnNhcQmI20KzXmvhLJtkwWCgGOVJtMem8hDWXSALV1Ih8hmVkGS
CI1elfr9eyguunGp9eMMQfKhWH52WHFA0NYa0Kpv5BY33A==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQCwJC650MXOCZjm0OlPRORSjx+knkly+AKQsWlIMPxESleEsWQd
aFOZOPcIDF+/iUDexDMdnF7hL1gJvlz9VNsUR2DBz/CXEyYmLPaNiCF2ZY8FvykE
STfViKEiEL67iR7HqAkEfnKLGJnc89FExo4EGj5l8znRcjJZsU44z/sXBwIDAQAB
AoGABA7xXKqoxBSIh1js5zypHhXaHsre2l1Igdj0mgs25MPpvE7yBZNvyan8Vx0h
36Hj8r4Gh3og3YNfvem67sNTwNwONY0ep+Xho/3vG0jFATGduSXdcT04DusgZNqg
UJqW75cqxrD6o/nya5wUoN9NL5pcd5AgVMdOYvJGbrwQuaECQQDiCs/5dsUkUkeC
Tlur1wh0wJpW4Y2ctO3ncRdnAoAA9y8dELHXMqwKE4HtlyzHY7Bxds/BDh373EVK
rsdl+v9JAkEAx3xTmsOQvWa1tf/O30sdItVpGogKDvYqkLCNthUzPaL85BWB03E2
xunHcVVlqAOE5tFuw0/UEyEkOaGlNTJTzwJAPIVel9FoCUiKYuYt/z1swy3KZRaw
/tMmm4AZHvh5Y0jLcYHFy/OCQpRkhkOitqQHWunPyEXKW2PnnY5cTv68GQJAHG7H
B88KCUTjb25nkQIGxBlA4swzCtDhXkAb4rEA3a8mdmfuWjHPyeg2ShwO4jSmM7P0
Iph1NMjLff9hKcTjlwJARpItOFkYEdtSODC7FMm7KRKQnNB27gFAizsOYWD4D2b7
w1FTEZ/kSA9wSNhyNGt7dgUo6zFhm2u973HBCUb3dg==
-----END RSA PRIVATE KEY-----
""", # 5
"""-----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAvhTRj1dA
NOfse/UBeTfMekZKxZHsNPr+qBYaveWAHDded/BMyMgaMV2n6HQdiDaRjJkzjHCF
3xBtpIJeEGUqfrF0ob8BIZXy3qk68eX/0CVUbgmjSBN44ahlo63NshyXmZtEAkRV
VE/+cRKw3N2wtuTed5xwfNcL6dg4KTOEYEkCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQCN+CLuVwLeWjSdVbdizYyrOVckqtwiIHG9BbGMlcIdm0qpvD7V7/sN2csk5LaT
BNiHi1t5628/4UHqqodYmFw8ri8ItFwB+MmTJi11CX6dIP9OUhS0qO8Z/BKtot7H
j04oNwl+WqZZfHIYwTIEL0HBn60nOvCQPDtnWG2BhpUxMA==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQC+FNGPV0A05+x79QF5N8x6RkrFkew0+v6oFhq95YAcN1538EzI
yBoxXafodB2INpGMmTOMcIXfEG2kgl4QZSp+sXShvwEhlfLeqTrx5f/QJVRuCaNI
E3jhqGWjrc2yHJeZm0QCRFVUT/5xErDc3bC25N53nHB81wvp2DgpM4RgSQIDAQAB
AoGALl2BqIdN4Bnac3oV++2CcSkIQB0SEvJOf820hDGhCEDxSCxTbn5w9S21MVxx
f7Jf2n3cNxuTbA/jzscGDtW+gXCs+WAbAr5aOqHLUPGEobhKQrQT2hrxQHyv3UFp
0tIl9eXFknOyVAaUJ3athK5tyjSiCZQQHLGzeLaDSKVAPqECQQD1GK7DkTcLaSvw
hoTJ3dBK3JoKT2HHLitfEE0QV58mkqFMjofpe+nyeKWvEb/oB4WBp/cfTvtf7DJK
zl1OSf11AkEAxomWmJeub0xpqksCmnVI1Jt1mvmcE4xpIcXq8sxzLHRc2QOv0kTw
IcFl4QcN6EQBmE+8kl7Tx8SPAVKfJMoZBQJAGsUFYYrczjxAdlba7glyFJsfn/yn
m0+poQpwwFYxpc7iGzB+G7xTAw62WfbAVSFtLYog7aR8xC9SFuWPP1vJeQJBAILo
xBj3ovgWTXIRJbVM8mnl28UFI0msgsHXK9VOw/6i93nMuYkPFbtcN14KdbwZ42dX
5EIrLr+BNr4riW4LqDUCQQCbsEEpTmj3upKUOONPt+6CH/OOMjazUzYHZ/3ORHGp
Q3Wt+I4IrR/OsiACSIQAhS4kBfk/LGggnj56DrWt+oBl
-----END RSA PRIVATE KEY-----
""", #6
"""-----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAtKhx6sEA
jn6HWc6T2klwlPn0quyHtATIw8V3ezP46v6g2rRS7dTywo4GTP4vX58l+sC9z9Je
qhQ1rWSwMK4FmnDMZCu7AVO7oMIXpXdSz7l0bgCnNjvbpkA2pOfbB1Z8oj8iebff
J33ID5DdkmCzqYVtKpII1o/5z7Jo292JYy8CAwEAATANBgkqhkiG9w0BAQQFAAOB
gQA0PYMA07wo9kEH4fv9TCfo+zz42Px6lUxrQBPxBvDiGYhk2kME/wX0IcoZPKTV
WyBGmDAYWvFaHWbrbbTOfzlLWfYrDD913hCi9cO8iF8oBqRjIlkKcxAoe7vVg5Az
ydVcrY+zqULJovWwyNmH1QNIQfMat0rj7fylwjiS1y/YsA==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQC0qHHqwQCOfodZzpPaSXCU+fSq7Ie0BMjDxXd7M/jq/qDatFLt
1PLCjgZM/i9fnyX6wL3P0l6qFDWtZLAwrgWacMxkK7sBU7ugwheld1LPuXRuAKc2
O9umQDak59sHVnyiPyJ5t98nfcgPkN2SYLOphW0qkgjWj/nPsmjb3YljLwIDAQAB
AoGAU4CYRv22mCZ7wVLunDLdyr5ODMMPZnHfqj2XoGbBYz0WdIBs5GlNXAfxeZzz
oKsbDvAPzANcphh5RxAHMDj/dT8rZOez+eJrs1GEV+crl1T9p83iUkAuOJFtgUgf
TtQBL9vHaj7DfvCEXcBPmN/teDFmAAOyUNbtuhTkRa3PbuECQQDwaqZ45Kr0natH
V312dqlf9ms8I6e873pAu+RvA3BAWczk65eGcRjEBxVpTvNEcYKFrV8O5ZYtolrr
VJl97AfdAkEAwF4w4KJ32fLPVoPnrYlgLw86NejMpAkixblm8cn51avPQmwbtahb
BZUuca22IpgDpjeEk5SpEMixKe/UjzxMewJBALy4q2cY8U3F+u6sshLtAPYQZIs3
3fNE9W2dUKsIQvRwyZMlkLN7UhqHCPq6e+HNTM0MlCMIfAPkf4Rdy4N6ZY0CQCKE
BAMaQ6TwgzFDw5sIjiCDe+9WUPmRxhJyHL1/fvtOs4Z4fVRP290ZklbFU2vLmMQH
LBuKzfb7+4XJyXrV1+cCQBqfPFQQZLr5UgccABYQ2jnWVbJPISJ5h2b0cwXt+pz/
8ODEYLjqWr9K8dtbgwdpzwbkaGhQYpyvsguMvNPMohs=
-----END RSA PRIVATE KEY-----
""", #7
"""-----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAnBfNHycn
5RnYzDN4EWTk2q1BBxA6ZYtlG1WPkj5iKeaYKzUk58zBL7mNOA0ucq+yTwh9C4IC
EutWPaKBSKY5XI+Rdebh+Efq+urtOLgfJHlfcCraEx7hYN+tqqMVgEgnO/MqIsn1
I1Fvnp89mSYbQ9tmvhSH4Hm+nbeK6iL2tIsCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQBt9zxfsKWoyyV764rRb6XThuTDMNSDaVofqePEWjudAbDu6tp0pHcrL0XpIrnT
3iPgD47pdlwQNbGJ7xXwZu2QTOq+Lv62E6PCL8FljDVoYqR3WwJFFUigNvBT2Zzu
Pxx7KUfOlm/M4XUSMu31sNJ0kQniBwpkW43YmHVNFb/R7g==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCcF80fJyflGdjMM3gRZOTarUEHEDpli2UbVY+SPmIp5pgrNSTn
zMEvuY04DS5yr7JPCH0LggIS61Y9ooFIpjlcj5F15uH4R+r66u04uB8keV9wKtoT
HuFg362qoxWASCc78yoiyfUjUW+enz2ZJhtD22a+FIfgeb6dt4rqIva0iwIDAQAB
AoGBAIHstcnWd7iUeQYPWUNxLaRvTY8pjNH04yWLZEOgNWkXDVX5mExw++RTmB4t
qpm/cLWkJSEtB7jjthb7ao0j/t2ljqfr6kAbClDv3zByAEDhOu8xB/5ne6Ioo+k2
dygC+GcVcobhv8qRU+z0fpeXSP8yS1bQQHOaa17bSGsncvHRAkEAzwsn8jBTOqaW
6Iymvr7Aql++LiwEBrqMMRVyBZlkux4hiKa2P7XXEL6/mOPR0aI2LuCqE2COrO7R
0wAFZ54bjwJBAMEAe6cs0zI3p3STHwA3LoSZB81lzLhGUnYBvOq1yoDSlJCOYpld
YM1y3eC0vwiOnEu3GG1bhkW+h6Kx0I/qyUUCQBiH9NqwORxI4rZ4+8S76y4EnA7y
biOx9KxYIyNgslutTUHYpt1TmUDFqQPfclvJQWw6eExFc4Iv5bJ/XSSSyicCQGyY
5PrwEfYTsrm5fpwUcKxTnzxHp6WYjBWybKZ0m/lYhBfCxmAdVrbDh21Exqj99Zv0
7l26PhdIWfGFtCEGrzECQQCtPyXa3ostSceR7zEKxyn9QBCNXKARfNNTBja6+VRE
qDC6jLqzu/SoOYaqa13QzCsttO2iZk8Ygfy3Yz0n37GE
-----END RSA PRIVATE KEY-----
""", #8
"""-----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA4mnLf+x0
CWKDKP5PLZ87t2ReSDE/J5QoI5VhE0bXaahdhPrQTC2wvOpT+N9nzEpI9ASh/ejV
kYGlc03nNKRL7zyVM1UyGduEwsRssFMqfyJhI1p+VmxDMWNplex7mIAheAdskPj3
pwi2CP4VIMjOj368AXvXItPzeCfAhYhEVaMCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQAEzmwq5JFI5Z0dX20m9rq7NKgwRyAH3h5aE8bdjO8nEc69qscfDRx79Lws3kK8
A0LG0DhxKB8cTNu3u+jy81tjcC4pLNQ5IKap9ksmP7RtIHfTA55G8M3fPl2ZgDYQ
ZzsWAZvTNXd/eme0SgOzD10rfntA6ZIgJTWHx3E0RkdwKw==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDiact/7HQJYoMo/k8tnzu3ZF5IMT8nlCgjlWETRtdpqF2E+tBM
LbC86lP432fMSkj0BKH96NWRgaVzTec0pEvvPJUzVTIZ24TCxGywUyp/ImEjWn5W
bEMxY2mV7HuYgCF4B2yQ+PenCLYI/hUgyM6PfrwBe9ci0/N4J8CFiERVowIDAQAB
AoGAQYTl+8XcKl8Un4dAOG6M5FwqIHAH25c3Klzu85obehrbvUCriG/sZi7VT/6u
VeLlS6APlJ+NNgczbrOLhaNJyYzjICSt8BI96PldFUzCEkVlgE+29pO7RNoZmDYB
dSGyIDrWdVYfdzpir6kC0KDcrpA16Sc+/bK6Q8ALLRpC7QECQQD7F7fhIQ03CKSk
lS4mgDuBQrB/52jXgBumtjp71ANNeaWR6+06KDPTLysM+olsh97Q7YOGORbrBnBg
Y2HPnOgjAkEA5taZaMfdFa8V1SPcX7mgCLykYIujqss0AmauZN/24oLdNE8HtTBF
OLaxE6PnQ0JWfx9KGIy3E0V3aFk5FWb0gQJBAO4KFEaXgOG1jfCBhNj3JHJseMso
5Nm4F366r0MJQYBHXNGzqphB2K/Svat2MKX1QSUspk2u/a0d05dtYCLki6UCQHWS
sChyQ+UbfF9HGKOZBC3vBzo1ZXNEdIUUj5bJjBHq3YgbCK38nAU66A482TmkvDGb
Wj4OzeB+7Ua0yyJfggECQQDVlAa8HqdAcrbEwI/YfPydFsavBJ0KtcIGK2owQ+dk
dhlDnpXDud/AtX4Ft2LaquQ15fteRrYjjwI9SFGytjtp
-----END RSA PRIVATE KEY-----
""", #9
"""-----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAueLfowPT
kXXtHeU2FZSz2mJhHmjqeyI1oMoyyggonccx65vMxaRfljnz2dOjVVYpCOn/LrdP
wVxHO8KNDsmQeWPRjnnBa2dFqqOnp/8gEJFJBW7K/gI9se6o+xe9QIWBq6d/fKVR
BURJe5TycLogzZuxQn1xHHILa3XleYuHAbMCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQBEC1lfC3XK0galQC96B7faLpnQmhn5lX2FUUoFIQQtBTetoE+gTqnLSOIZcOK4
pkT3YvxUvgOV0LOLClryo2IknMMGWRSAcXtVUBBLRHVTSSuVUyyLr5kdRU7B4E+l
OU0j8Md/dzlkm//K1bzLyUaPq204ofH8su2IEX4b3IGmAQ==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQC54t+jA9ORde0d5TYVlLPaYmEeaOp7IjWgyjLKCCidxzHrm8zF
pF+WOfPZ06NVVikI6f8ut0/BXEc7wo0OyZB5Y9GOecFrZ0Wqo6en/yAQkUkFbsr+
Aj2x7qj7F71AhYGrp398pVEFREl7lPJwuiDNm7FCfXEccgtrdeV5i4cBswIDAQAB
AoGAO4PnJHNaLs16AMNdgKVevEIZZDolMQ1v7C4w+ryH/JRFaHE2q+UH8bpWV9zK
A82VT9RTrqpkb71S1VBiB2UDyz263XdAI/N2HcIVMmfKb72oV4gCI1KOv4DfFwZv
tVVcIdVEDBOZ2TgqK4opGOgWMDqgIAl2z3PbsIoNylZHEJECQQDtQeJFhEJGH4Qz
BGpdND0j2nnnJyhOFHJqikJNdul3uBwmxTK8FPEUUH/rtpyUan3VMOyDx3kX4OQg
GDNSb32rAkEAyJIZIJ0EMRHVedyWsfqR0zTGKRQ+qsc3sCfyUhFksWms9jsSS0DT
tVeTdC3F6EIAdpKOGhSyfBTU4jxwbFc0GQJADI4L9znEeAl66Wg2aLA2/Aq3oK/F
xjv2wgSG9apxOFCZzMNqp+FD0Jth6YtEReZMuldYbLDFi6nu6HPfY2Fa+QJAdpm1
lAxk6yMxiZK/5VRWoH6HYske2Vtd+aNVbePtF992ME/z3F3kEkpL3hom+dT1cyfs
MU3l0Ot8ip7Ul6vlGQJAegNzpcfl2GFSdWQMxQ+nN3woKnPqpR1M3jgnqvo7L4Xe
JW3vRxvfdrUuzdlvZ/Pbsu/vOd+cuIa4h0yD5q3N+g==
-----END RSA PRIVATE KEY-----
""", #10
"""-----BEGIN CERTIFICATE-----
MIIBnjCCAQcCAgCEMA0GCSqGSIb3DQEBBAUAMBcxFTATBgNVBAMUDG5ld3BiX3Ro
aW5neTAeFw0wODA3MjUyMjQ3NThaFw0wOTA3MjUyMjQ3NThaMBcxFTATBgNVBAMU
DG5ld3BiX3RoaW5neTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAruBhwk+J
XdlwfKXXN8K+43JyEYCV7Fp7ZiES4t4AEJuQuBqJVMxpzeZzu2t/vVb59ThaxxtY
NGD3Xy6Og5dTv//ztWng8P7HwwvfbrUICU6zo6JAhg7kfaNa116krCYOkC/cdJWt
o5W+zsDmI1jUVGH0D73h29atc1gn6wLpAsMCAwEAATANBgkqhkiG9w0BAQQFAAOB
gQAEJ/ITGJ9lK/rk0yHcenW8SHsaSTlZMuJ4yEiIgrJ2t71Rd6mtCC/ljx9USvvK
bF500whTiZlnWgKi02boBEKa44z/DytF6pljeNPefBQSqZyUByGEb/8Mn58Idyls
q4/d9iKXMPvbpQdcesOzgOffFZevLQSWyPRaIdYBOOiYUA==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCu4GHCT4ld2XB8pdc3wr7jcnIRgJXsWntmIRLi3gAQm5C4GolU
zGnN5nO7a3+9Vvn1OFrHG1g0YPdfLo6Dl1O///O1aeDw/sfDC99utQgJTrOjokCG
DuR9o1rXXqSsJg6QL9x0la2jlb7OwOYjWNRUYfQPveHb1q1zWCfrAukCwwIDAQAB
AoGAcZAXC/dYrlBpIxkTRQu7qLqGZuVI9t7fabgqqpceFargdR4Odrn0L5jrKRer
MYrM8bjyAoC4a/NYUUBLnhrkcCQWO9q5fSQuFKFVWHY53SM63Qdqk8Y9Fmy/h/4c
UtwZ5BWkUWItvnTMgb9bFcvSiIhEcNQauypnMpgNknopu7kCQQDlSQT10LkX2IGT
bTUhPcManx92gucaKsPONKq2mP+1sIciThevRTZWZsxyIuoBBY43NcKKi8NlZCtj
hhSbtzYdAkEAw0B93CXfso8g2QIMj/HJJz/wNTLtg+rriXp6jh5HWe6lKWRVrce+
1w8Qz6OI/ZP6xuQ9HNeZxJ/W6rZPW6BGXwJAHcTuRPA1p/fvUvHh7Q/0zfcNAbkb
QlV9GL/TzmNtB+0EjpqvDo2g8XTlZIhN85YCEf8D5DMjSn3H+GMHN/SArQJBAJlW
MIGPjNoh5V4Hae4xqBOW9wIQeM880rUo5s5toQNTk4mqLk9Hquwh/MXUXGUora08
2XGpMC1midXSTwhaGmkCQQCdivptFEYl33PrVbxY9nzHynpp4Mi89vQF0cjCmaYY
N8L+bvLd4BU9g6hRS8b59lQ6GNjryx2bUnCVtLcey4Jd
-----END RSA PRIVATE KEY-----
""", #11
]

# To disable the pre-computed tub certs, uncomment this line.
#SYSTEM_TEST_CERTS = []

def flush_but_dont_ignore(res):
    # flush the foolscap eventual-send queue, then hand the original result
    # through to the next callback in the chain
    d = flushEventualQueue()
    def _done(ignored):
        return res
    d.addCallback(_done)
    return d

class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):

    # SystemTestMixin tests tend to be a lot of work, and we have a few
    # buildslaves that are pretty slow, and every once in a while these tests
    # run up against the default 120 second timeout. So increase the default
    # timeout. Individual test cases can override this, of course.
    timeout = 300

    def setUp(self):
        self.sparent = service.MultiService()
        self.sparent.startService()

        self.stats_gatherer = None
        self.stats_gatherer_furl = None

    def tearDown(self):
        log.msg("shutting down SystemTest services")
        d = self.sparent.stopService()
        d.addBoth(flush_but_dont_ignore)
        return d

    def getdir(self, subdir):
        return os.path.join(self.basedir, subdir)

    def add_service(self, s):
        s.setServiceParent(self.sparent)
        return s

    def set_up_nodes(self, NUMCLIENTS=5, use_stats_gatherer=False):
        self.numclients = NUMCLIENTS
        iv_dir = self.getdir("introducer")
        if not os.path.isdir(iv_dir):
            fileutil.make_dirs(iv_dir)
            fileutil.write(os.path.join(iv_dir, 'tahoe.cfg'),
                           "[node]\n" +
                           u"nickname = introducer \u263A\n".encode('utf-8') +
                           "web.port = tcp:0:interface=127.0.0.1\n")
            if SYSTEM_TEST_CERTS:
                os.mkdir(os.path.join(iv_dir, "private"))
                f = open(os.path.join(iv_dir, "private", "node.pem"), "w")
                f.write(SYSTEM_TEST_CERTS[0])
                f.close()
        iv = IntroducerNode(basedir=iv_dir)
        self.introducer = self.add_service(iv)
        self._get_introducer_web()
        d = defer.succeed(None)
        if use_stats_gatherer:
            d.addCallback(self._set_up_stats_gatherer)
        d.addCallback(self._set_up_nodes_2)
        if use_stats_gatherer:
            d.addCallback(self._grab_stats)
        return d

    def _get_introducer_web(self):
        f = open(os.path.join(self.getdir("introducer"), "node.url"), "r")
        self.introweb_url = f.read().strip()
        f.close()

    def _set_up_stats_gatherer(self, res):
        statsdir = self.getdir("stats_gatherer")
        fileutil.make_dirs(statsdir)
        portnum = iputil.allocate_tcp_port()
        location = "tcp:127.0.0.1:%d" % portnum
        fileutil.write(os.path.join(statsdir, "location"), location)
        port = "tcp:%d:interface=127.0.0.1" % portnum
        fileutil.write(os.path.join(statsdir, "port"), port)
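        # At this point the stats_gatherer directory holds the two files we
        # just wrote; the service started below is expected to add its own
        # furl file once it is listening (hence the poll further down).
        # Illustrative layout:
        #   stats_gatherer/location -> "tcp:127.0.0.1:<portnum>"
        #   stats_gatherer/port     -> "tcp:<portnum>:interface=127.0.0.1"
        #   stats_gatherer/stats_gatherer.furl  (appears asynchronously)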
        self.stats_gatherer_svc = StatsGathererService(statsdir)
        self.stats_gatherer = self.stats_gatherer_svc.stats_gatherer
        self.add_service(self.stats_gatherer_svc)

        d = fireEventually()
        sgf = os.path.join(statsdir, 'stats_gatherer.furl')
        def check_for_furl():
            return os.path.exists(sgf)
        d.addCallback(lambda junk: self.poll(check_for_furl, timeout=30))
        def get_furl(junk):
            self.stats_gatherer_furl = open(sgf, 'rb').read().strip()
        d.addCallback(get_furl)
        return d

    def _set_up_nodes_2(self, res):
        q = self.introducer
        self.introducer_furl = q.introducer_url
        self.clients = []
        basedirs = []
        for i in range(self.numclients):
            basedir = self.getdir("client%d" % i)
            basedirs.append(basedir)
            fileutil.make_dirs(os.path.join(basedir, "private"))
            if len(SYSTEM_TEST_CERTS) > (i+1):
                f = open(os.path.join(basedir, "private", "node.pem"), "w")
                f.write(SYSTEM_TEST_CERTS[i+1])
                f.close()

            config = "[client]\n"
            config += "introducer.furl = %s\n" % self.introducer_furl
            if self.stats_gatherer_furl:
                config += "stats_gatherer.furl = %s\n" % self.stats_gatherer_furl

            nodeconfig = "[node]\n"
            nodeconfig += (u"nickname = client %d \u263A\n" % (i,)).encode('utf-8')
            tub_port = iputil.allocate_tcp_port()
            # Don't let it use AUTO: there's no need for tests to use
            # anything other than 127.0.0.1
            nodeconfig += "tub.port = tcp:%d\n" % tub_port
            nodeconfig += "tub.location = tcp:127.0.0.1:%d\n" % tub_port

            if i == 0:
                # clients[0] runs a webserver and a helper
                config += nodeconfig
                config += "web.port = tcp:0:interface=127.0.0.1\n"
                config += "timeout.keepalive = 600\n"
                config += "[helper]\n"
                config += "enabled = True\n"
            elif i == 3:
                # clients[3] runs a webserver and uses a helper
                config += nodeconfig
                config += "web.port = tcp:0:interface=127.0.0.1\n"
                config += "timeout.disconnect = 1800\n"
            else:
                config += nodeconfig

            fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)
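            # For illustration, client0's tahoe.cfg as rendered by the above
            # would read roughly (furl values and port numbers elided):
            #   [client]
            #   introducer.furl = <introducer furl>
            #   [node]
            #   nickname = client 0 <smiley>
            #   tub.port = tcp:<tub_port>
            #   tub.location = tcp:127.0.0.1:<tub_port>
            #   web.port = tcp:0:interface=127.0.0.1
            #   timeout.keepalive = 600
            #   [helper]
            #   enabled = True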

        # give subclasses a chance to append lines to the node's tahoe.cfg
        # files before they are launched.
        self._set_up_nodes_extra_config()

        # start clients[0], wait for its tub to be ready (at which point it
        # will have registered the helper furl).
        c = self.add_service(client.Client(basedir=basedirs[0]))
        self.clients.append(c)
        c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)

        f = open(os.path.join(basedirs[0], "private", "helper.furl"), "r")
        helper_furl = f.read()
        f.close()
        self.helper_furl = helper_furl
        if self.numclients >= 4:
            f = open(os.path.join(basedirs[3], 'tahoe.cfg'), 'ab+')
            f.write(
                "[client]\n"
                "helper.furl = %s\n" % helper_furl)
            f.close()

        # this starts the rest of the clients
        for i in range(1, self.numclients):
            c = self.add_service(client.Client(basedir=basedirs[i]))
            self.clients.append(c)
            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
        log.msg("STARTING")
        d = self.wait_for_connections()
        def _connected(res):
            log.msg("CONNECTED")
            # now find out where the web port was
            self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
            if self.numclients >= 4:
                # and the helper-using webport
                self.helper_webish_url = self.clients[3].getServiceNamed("webish").getURL()
        d.addCallback(_connected)
        return d

    def _set_up_nodes_extra_config(self):
        # for overriding by subclasses
        pass

    def _grab_stats(self, res):
        d = self.stats_gatherer.poll()
        return d

    def bounce_client(self, num):
        c = self.clients[num]
        d = c.disownServiceParent()
        # I think windows requires a moment to let the connection really stop
        # and the port number made available for re-use. TODO: examine the
        # behavior, see if this is really the problem, see if we can do
        # better than blindly waiting for a second.
        d.addCallback(self.stall, 1.0)
        def _stopped(res):
            new_c = client.Client(basedir=self.getdir("client%d" % num))
            self.clients[num] = new_c
            new_c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
            self.add_service(new_c)
        d.addCallback(_stopped)
        d.addCallback(lambda res: self.wait_for_connections())
        def _maybe_get_webport(res):
            if num == 0:
                # now find out where the web port was
                self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
        d.addCallback(_maybe_get_webport)
        return d

    def add_extra_node(self, client_num, helper_furl=None,
                       add_to_sparent=False):
        # usually this node is *not* parented to our self.sparent, so we can
        # shut it down separately from the rest, to exercise the
        # connection-lost code
        basedir = self.getdir("client%d" % client_num)
        if not os.path.isdir(basedir):
            fileutil.make_dirs(basedir)
        config = "[client]\n"
        config += "introducer.furl = %s\n" % self.introducer_furl
        if helper_furl:
            config += "helper.furl = %s\n" % helper_furl
        fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)

        c = client.Client(basedir=basedir)
        self.clients.append(c)
        c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
        self.numclients += 1
        if add_to_sparent:
            c.setServiceParent(self.sparent)
        else:
            c.startService()
        d = self.wait_for_connections()
        d.addCallback(lambda res: c)
        return d

    def _check_connections(self):
        for c in self.clients:
            if not c.connected_to_introducer():
                return False
            sb = c.get_storage_broker()
            if len(sb.get_connected_servers()) != self.numclients:
                return False
            up = c.getServiceNamed("uploader")
            if up._helper_furl and not up._helper:
                return False
        return True

    def wait_for_connections(self, ignored=None):
        return self.poll(self._check_connections, timeout=200)

class CountingDataUploadable(upload.Data):
    bytes_read = 0
    interrupt_after = None
    interrupt_after_d = None

    def read(self, length):
        self.bytes_read += length
        if self.interrupt_after is not None:
            if self.bytes_read > self.interrupt_after:
                self.interrupt_after = None
                self.interrupt_after_d.callback(self)
        return upload.Data.read(self, length)
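
# A hedged usage sketch (not exercised by these tests directly) of how the
# interrupt hook above is meant to be wired up: interrupt_after_d fires once
# read() has consumed interrupt_after bytes, letting a test disrupt the grid
# mid-upload. The function name and the 5000-byte threshold here are
# illustrative only; _upload_resumable() below does the real version.
def _example_interrupting_upload(uploader, data, convergence):
    u = CountingDataUploadable(data, convergence=convergence)
    u.interrupt_after = 5000        # fire after roughly 5kB has been read
    u.interrupt_after_d = defer.Deferred()
    def _disrupt(uploadable):
        # a real test would bounce a node or sever a connection here
        return uploadable
    u.interrupt_after_d.addCallback(_disrupt)
    return uploader.upload(u)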

class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
    timeout = 3600 # It takes longer than 960 seconds on Zandr's ARM box.

    def test_connections(self):
        self.basedir = "system/SystemTest/test_connections"
        d = self.set_up_nodes()
        self.extra_node = None
        d.addCallback(lambda res: self.add_extra_node(self.numclients))
        def _check(extra_node):
            self.extra_node = extra_node
            for c in self.clients:
                all_peerids = c.get_storage_broker().get_all_serverids()
                self.failUnlessEqual(len(all_peerids), self.numclients+1)
                sb = c.storage_broker
                permuted_peers = sb.get_servers_for_psi("a")
                self.failUnlessEqual(len(permuted_peers), self.numclients+1)

        d.addCallback(_check)
        def _shutdown_extra_node(res):
            if self.extra_node:
                return self.extra_node.stopService()
            return res
        d.addBoth(_shutdown_extra_node)
        return d
    # test_connections is subsumed by test_upload_and_download, and takes
    # quite a while to run on a slow machine (because of all the TLS
    # connections that must be established). If we ever rework the introducer
    # code to such an extent that we're not sure if it works anymore, we can
    # reinstate this test until it does.
    del test_connections

    def test_upload_and_download_random_key(self):
        self.basedir = "system/SystemTest/test_upload_and_download_random_key"
        return self._test_upload_and_download(convergence=None)

    def test_upload_and_download_convergent(self):
        self.basedir = "system/SystemTest/test_upload_and_download_convergent"
        return self._test_upload_and_download(convergence="some convergence string")

    def _test_upload_and_download(self, convergence):
        # we use 4000 bytes of data, which will result in about 400k written
        # to disk among all our simulated nodes
        DATA = "Some data to upload\n" * 200
        d = self.set_up_nodes()
        def _check_connections(res):
            for c in self.clients:
                c.encoding_params['happy'] = 5
                all_peerids = c.get_storage_broker().get_all_serverids()
                self.failUnlessEqual(len(all_peerids), self.numclients)
                sb = c.storage_broker
                permuted_peers = sb.get_servers_for_psi("a")
                self.failUnlessEqual(len(permuted_peers), self.numclients)
        d.addCallback(_check_connections)

        def _do_upload(res):
            log.msg("UPLOADING")
            u = self.clients[0].getServiceNamed("uploader")
            self.uploader = u
            # we crank the max segsize down to 1024b for the duration of this
            # test, so we can exercise multiple segments. It is important
            # that the data length is not a multiple of the segment size, so
            # that the tail segment is not the same length as the others.
            # This actually gets rounded up to 1025 to be a multiple of the
            # number of required shares (since we use 25 out of 100 FEC).
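            # Worked arithmetic for that rounding (illustrative):
            #   mathutil.next_multiple(1024, 25) == 1025
            #   4000 bytes / 1025-byte segments -> 3 full segments plus a
            #   925-byte tail (4000 - 3*1025 = 925)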
            up = upload.Data(DATA, convergence=convergence)
            up.max_segment_size = 1024
            d1 = u.upload(up)
            return d1
        d.addCallback(_do_upload)
        def _upload_done(results):
            theuri = results.get_uri()
            log.msg("upload finished: uri is %s" % (theuri,))
            self.uri = theuri
            assert isinstance(self.uri, str), self.uri
            self.cap = uri.from_string(self.uri)
            self.n = self.clients[1].create_node_from_uri(self.uri)
        d.addCallback(_upload_done)

        def _upload_again(res):
            # Upload again. If using convergent encryption then this ought to be
            # short-circuited, however with the way we currently generate URIs
            # (i.e. because they include the roothash), we have to do all of the
            # encoding work, and only get to save on the upload part.
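            # (In other words: encryption and FEC encoding are repeated in
            # full; the saving comes from the storage servers reporting that
            # they already hold these shares, so no share data needs to be
            # pushed over the wire again.)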
            log.msg("UPLOADING AGAIN")
            up = upload.Data(DATA, convergence=convergence)
            up.max_segment_size = 1024
            return self.uploader.upload(up)
        d.addCallback(_upload_again)

        def _download_to_data(res):
            log.msg("DOWNLOADING")
            return download_to_data(self.n)
        d.addCallback(_download_to_data)
        def _download_to_data_done(data):
            log.msg("download finished")
            self.failUnlessEqual(data, DATA)
        d.addCallback(_download_to_data_done)

        def _test_read(res):
            n = self.clients[1].create_node_from_uri(self.uri)
            d = download_to_data(n)
            def _read_done(data):
                self.failUnlessEqual(data, DATA)
            d.addCallback(_read_done)
            d.addCallback(lambda ign:
                          n.read(MemoryConsumer(), offset=1, size=4))
            def _read_portion_done(mc):
                self.failUnlessEqual("".join(mc.chunks), DATA[1:1+4])
            d.addCallback(_read_portion_done)
            d.addCallback(lambda ign:
                          n.read(MemoryConsumer(), offset=2, size=None))
            def _read_tail_done(mc):
                self.failUnlessEqual("".join(mc.chunks), DATA[2:])
            d.addCallback(_read_tail_done)
            d.addCallback(lambda ign:
                          n.read(MemoryConsumer(), size=len(DATA)+1000))
            def _read_too_much(mc):
                self.failUnlessEqual("".join(mc.chunks), DATA)
            d.addCallback(_read_too_much)

            return d
        d.addCallback(_test_read)

        def _test_bad_read(res):
            bad_u = uri.from_string_filenode(self.uri)
            bad_u.key = self.flip_bit(bad_u.key)
            bad_n = self.clients[1].create_node_from_uri(bad_u.to_string())
            # this should cause an error during download

            d = self.shouldFail2(NoSharesError, "'download bad node'",
                                 None,
                                 bad_n.read, MemoryConsumer(), offset=2)
            return d
        d.addCallback(_test_bad_read)

        def _download_nonexistent_uri(res):
            baduri = self.mangle_uri(self.uri)
            badnode = self.clients[1].create_node_from_uri(baduri)
            log.msg("about to download non-existent URI", level=log.UNUSUAL,
                    facility="tahoe.tests")
            d1 = download_to_data(badnode)
            def _baduri_should_fail(res):
                log.msg("finished downloading non-existent URI",
                        level=log.UNUSUAL, facility="tahoe.tests")
                self.failUnless(isinstance(res, Failure))
                self.failUnless(res.check(NoSharesError),
                                "expected NoSharesError, got %s" % res)
            d1.addBoth(_baduri_should_fail)
            return d1
        d.addCallback(_download_nonexistent_uri)

        # add a new node, which doesn't accept shares, and only uses the
        # helper for upload.
        d.addCallback(lambda res: self.add_extra_node(self.numclients,
                                                      self.helper_furl,
                                                      add_to_sparent=True))
        def _added(extra_node):
            self.extra_node = extra_node
            self.extra_node.encoding_params['happy'] = 5
        d.addCallback(_added)

        def _has_helper():
            uploader = self.extra_node.getServiceNamed("uploader")
            furl, connected = uploader.get_helper_info()
            return connected
        d.addCallback(lambda ign: self.poll(_has_helper))

        HELPER_DATA = "Data that needs help to upload" * 1000
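        # (30 bytes per repetition, so 30,000 bytes total: far too large for
        # a LIT uri, and big enough to make the helper do some real work)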
        def _upload_with_helper(res):
            u = upload.Data(HELPER_DATA, convergence=convergence)
            d = self.extra_node.upload(u)
            def _uploaded(results):
                n = self.clients[1].create_node_from_uri(results.get_uri())
                return download_to_data(n)
            d.addCallback(_uploaded)
            def _check(newdata):
                self.failUnlessEqual(newdata, HELPER_DATA)
            d.addCallback(_check)
            return d
        d.addCallback(_upload_with_helper)

        def _upload_duplicate_with_helper(res):
            u = upload.Data(HELPER_DATA, convergence=convergence)
            u.debug_stash_RemoteEncryptedUploadable = True
            d = self.extra_node.upload(u)
            def _uploaded(results):
                n = self.clients[1].create_node_from_uri(results.get_uri())
                return download_to_data(n)
            d.addCallback(_uploaded)
            def _check(newdata):
                self.failUnlessEqual(newdata, HELPER_DATA)
                self.failIf(hasattr(u, "debug_RemoteEncryptedUploadable"),
                            "uploadable started uploading, should have been avoided")
            d.addCallback(_check)
            return d
        if convergence is not None:
            d.addCallback(_upload_duplicate_with_helper)

        d.addCallback(fireEventually)

        def _upload_resumable(res):
            DATA = "Data that needs help to upload and gets interrupted" * 1000
            u1 = CountingDataUploadable(DATA, convergence=convergence)
            u2 = CountingDataUploadable(DATA, convergence=convergence)

            # we interrupt the connection after about 5kB by shutting down
            # the helper, then restarting it.
            u1.interrupt_after = 5000
            u1.interrupt_after_d = defer.Deferred()
            bounced_d = defer.Deferred()
            def _do_bounce(res):
                d = self.bounce_client(0)
                d.addBoth(bounced_d.callback)
            u1.interrupt_after_d.addCallback(_do_bounce)

            # sneak into the helper and reduce its chunk size, so that our
            # debug_interrupt will sever the connection on about the fifth
            # chunk fetched. This makes sure that we've started to write the
            # new shares before we abandon them, which exercises the
            # abort/delete-partial-share code. TODO: find a cleaner way to do
            # this. I know that this will affect later uses of the helper in
            # this same test run, but I'm not currently worried about it.
            offloaded.CHKCiphertextFetcher.CHUNK_SIZE = 1000
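            # (arithmetic: the 5000-byte interrupt threshold divided by
            # 1000-byte chunks puts the break at about the fifth chunk)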

            upload_d = self.extra_node.upload(u1)
            # The upload will start, and bounce_client() will be called after
            # about 5kB. bounced_d will fire after bounce_client() finishes
            # shutting down and restarting the node.
            d = bounced_d
            def _bounced(ign):
                # By this point, the upload should have failed because of the
                # interruption. upload_d will fire in a moment
                def _should_not_finish(res):
                    self.fail("interrupted upload should have failed, not"
                              " finished with result %s" % (res,))
                def _interrupted(f):
                    f.trap(DeadReferenceError)
                    # make sure we actually interrupted it before finishing
                    # the file
                    self.failUnless(u1.bytes_read < len(DATA),
                                    "read %d out of %d total" %
                                    (u1.bytes_read, len(DATA)))
                upload_d.addCallbacks(_should_not_finish, _interrupted)
                return upload_d
            d.addCallback(_bounced)

            def _disconnected(res):
                # check to make sure the storage servers aren't still hanging
                # on to the partial share: their incoming/ directories should
                # now be empty.
                log.msg("disconnected", level=log.NOISY,
                        facility="tahoe.test.test_system")
                for i in range(self.numclients):
                    incdir = os.path.join(self.getdir("client%d" % i),
                                          "storage", "shares", "incoming")
                    self.failIf(os.path.exists(incdir) and os.listdir(incdir))
            d.addCallback(_disconnected)

            d.addCallback(lambda res:
                          log.msg("wait_for_helper", level=log.NOISY,
                                  facility="tahoe.test.test_system"))
            # then we need to wait for the extra node to reestablish its
            # connection to the helper.
            d.addCallback(lambda ign: self.poll(_has_helper))

            d.addCallback(lambda res:
                          log.msg("uploading again", level=log.NOISY,
                                  facility="tahoe.test.test_system"))
            d.addCallback(lambda res: self.extra_node.upload(u2))

            def _uploaded(results):
                cap = results.get_uri()
                log.msg("Second upload complete", level=log.NOISY,
                        facility="tahoe.test.test_system")

                # this is really bytes received rather than sent, but it's
                # convenient and basically measures the same thing
                bytes_sent = results.get_ciphertext_fetched()
                self.failUnless(isinstance(bytes_sent, (int, long)), bytes_sent)

                # We currently don't support resumption of upload if the data is
                # encrypted with a random key. (Because that would require us
                # to store the key locally and re-use it on the next upload of
                # this file, which isn't a bad thing to do, but we currently
                # don't do it.)
                if convergence is not None:
                    # Make sure we did not have to read the whole file the
                    # second time around.
                    self.failUnless(bytes_sent < len(DATA),
                                    "resumption didn't save us any work:"
                                    " read %r bytes out of %r total" %
                                    (bytes_sent, len(DATA)))
                else:
                    # Make sure we did have to read the whole file the second
                    # time around -- because the one that we partially uploaded
                    # earlier was encrypted with a different random key.
                    self.failIf(bytes_sent < len(DATA),
                                "resumption saved us some work even though we were using random keys:"
                                " read %r bytes out of %r total" %
                                (bytes_sent, len(DATA)))
                n = self.clients[1].create_node_from_uri(cap)
                return download_to_data(n)
            d.addCallback(_uploaded)

            def _check(newdata):
                self.failUnlessEqual(newdata, DATA)
                # If using convergent encryption, then also check that the
                # helper has removed the temp file from its directories.
                if convergence is not None:
                    basedir = os.path.join(self.getdir("client0"), "helper")
                    files = os.listdir(os.path.join(basedir, "CHK_encoding"))
                    self.failUnlessEqual(files, [])
                    files = os.listdir(os.path.join(basedir, "CHK_incoming"))
                    self.failUnlessEqual(files, [])
            d.addCallback(_check)
            return d
        d.addCallback(_upload_resumable)

        def _grab_stats(ignored):
            # the StatsProvider doesn't normally publish a FURL:
            # instead it passes a live reference to the StatsGatherer
            # (if and when it connects). To exercise the remote stats
            # interface, we manually publish client0's StatsProvider
            # and use client1 to query it.
            sp = self.clients[0].stats_provider
            sp_furl = self.clients[0].tub.registerReference(sp)
            d = self.clients[1].tub.getReference(sp_furl)
            d.addCallback(lambda sp_rref: sp_rref.callRemote("get_stats"))
            def _got_stats(stats):
                #print "STATS"
                #from pprint import pprint
                #pprint(stats)
                s = stats["stats"]
                self.failUnlessEqual(s["storage_server.accepting_immutable_shares"], 1)
                c = stats["counters"]
                self.failUnless("storage_server.allocate" in c)
            d.addCallback(_got_stats)
            return d
        d.addCallback(_grab_stats)

        return d

    def _find_all_shares(self, basedir):
        shares = []
        for (dirpath, dirnames, filenames) in os.walk(basedir):
            if "storage" not in dirpath:
                continue
            if not filenames:
                continue
            pieces = dirpath.split(os.sep)
            if (len(pieces) >= 5
                and pieces[-4] == "storage"
                and pieces[-3] == "shares"):
                # we're sitting in .../storage/shares/$START/$SINDEX, and there
                # are sharefiles here
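                # e.g. a concrete (illustrative) dirpath:
                #   client0/storage/shares/aa/aaaabbbbccccddddeeeeffffgggg
                # so pieces[-5] names the client, pieces[-1] is the base32
                # storage index, and each filename here is a share number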
                assert pieces[-5].startswith("client")
                client_num = int(pieces[-5][-1])
                storage_index_s = pieces[-1]
                storage_index = si_a2b(storage_index_s)
                for sharename in filenames:
                    shnum = int(sharename)
                    filename = os.path.join(dirpath, sharename)
                    data = (client_num, storage_index, filename, shnum)
                    shares.append(data)
        if not shares:
            self.fail("unable to find any share files in %s" % basedir)
        return shares

    def _corrupt_mutable_share(self, filename, which):
        msf = MutableShareFile(filename)
        datav = msf.readv([ (0, 1000000) ])
        final_share = datav[0]
        assert len(final_share) < 1000000 # ought to be truncated
        pieces = mutable_layout.unpack_share(final_share)
        (seqnum, root_hash, IV, k, N, segsize, datalen,
         verification_key, signature, share_hash_chain, block_hash_tree,
         share_data, enc_privkey) = pieces

        if which == "seqnum":
            seqnum = seqnum + 15
        elif which == "R":
            root_hash = self.flip_bit(root_hash)
        elif which == "IV":
            IV = self.flip_bit(IV)
        elif which == "segsize":
            segsize = segsize + 15
        elif which == "pubkey":
            verification_key = self.flip_bit(verification_key)
        elif which == "signature":
            signature = self.flip_bit(signature)
        elif which == "share_hash_chain":
            nodenum = share_hash_chain.keys()[0]
            share_hash_chain[nodenum] = self.flip_bit(share_hash_chain[nodenum])
        elif which == "block_hash_tree":
            block_hash_tree[-1] = self.flip_bit(block_hash_tree[-1])
        elif which == "share_data":
            share_data = self.flip_bit(share_data)
        elif which == "encprivkey":
            enc_privkey = self.flip_bit(enc_privkey)

        prefix = mutable_layout.pack_prefix(seqnum, root_hash, IV, k, N,
                                            segsize, datalen)
        final_share = mutable_layout.pack_share(prefix,
                                                verification_key,
                                                signature,
                                                share_hash_chain,
                                                block_hash_tree,
                                                share_data,
                                                enc_privkey)
        msf.writev([(0, final_share)], None)


    def test_mutable(self):
        self.basedir = "system/SystemTest/test_mutable"
        DATA = "initial contents go here." # 25 bytes % 3 != 0
        DATA_uploadable = MutableData(DATA)
        NEWDATA = "new contents yay"
        NEWDATA_uploadable = MutableData(NEWDATA)
        NEWERDATA = "this is getting old"
        NEWERDATA_uploadable = MutableData(NEWERDATA)

        d = self.set_up_nodes()

        def _create_mutable(res):
            c = self.clients[0]
            log.msg("starting create_mutable_file")
            d1 = c.create_mutable_file(DATA_uploadable)
            def _done(res):
                log.msg("DONE: %s" % (res,))
                self._mutable_node_1 = res
            d1.addCallback(_done)
            return d1
        d.addCallback(_create_mutable)

        def _test_debug(res):
            # find a share. It is important to run this while there is only
            # one slot in the grid.
            shares = self._find_all_shares(self.basedir)
            (client_num, storage_index, filename, shnum) = shares[0]
            log.msg("test_system.SystemTest.test_mutable._test_debug using %s"
                    % filename)
            log.msg(" for clients[%d]" % client_num)

            out, err = StringIO(), StringIO()
            rc = runner.runner(["debug", "dump-share", "--offsets",
                                filename],
                               stdout=out, stderr=err)
---|
1080 | output = out.getvalue() |
---|
1081 | self.failUnlessEqual(rc, 0) |
---|
1082 | try: |
---|
1083 | self.failUnless("Mutable slot found:\n" in output) |
---|
1084 | self.failUnless("share_type: SDMF\n" in output) |
---|
1085 | peerid = idlib.nodeid_b2a(self.clients[client_num].nodeid) |
---|
1086 | self.failUnless(" WE for nodeid: %s\n" % peerid in output) |
---|
1087 | self.failUnless(" num_extra_leases: 0\n" in output) |
---|
1088 | self.failUnless(" secrets are for nodeid: %s\n" % peerid |
---|
1089 | in output) |
---|
1090 | self.failUnless(" SDMF contents:\n" in output) |
---|
1091 | self.failUnless(" seqnum: 1\n" in output) |
---|
1092 | self.failUnless(" required_shares: 3\n" in output) |
---|
1093 | self.failUnless(" total_shares: 10\n" in output) |
---|
1094 | self.failUnless(" segsize: 27\n" in output, (output, filename)) |
---|
1095 | self.failUnless(" datalen: 25\n" in output) |
---|
1096 | # the exact share_hash_chain nodes depends upon the sharenum, |
---|
1097 | # and is more of a hassle to compute than I want to deal with |
---|
1098 | # now |
---|
1099 | self.failUnless(" share_hash_chain: " in output) |
---|
1100 | self.failUnless(" block_hash_tree: 1 nodes\n" in output) |
---|
1101 | expected = (" verify-cap: URI:SSK-Verifier:%s:" % |
---|
1102 | base32.b2a(storage_index)) |
---|
1103 | self.failUnless(expected in output) |
---|
1104 | except unittest.FailTest: |
---|
1105 | print |
---|
1106 | print "dump-share output was:" |
---|
1107 | print output |
---|
1108 | raise |
---|
1109 | d.addCallback(_test_debug) |
---|
1110 | |
---|
1111 | # test retrieval |
---|
1112 | |
---|
1113 | # first, let's see if we can use the existing node to retrieve the |
---|
1114 | # contents. This allows it to use the cached pubkey and maybe the |
---|
1115 | # latest-known sharemap. |
---|
1116 | |
---|
1117 | d.addCallback(lambda res: self._mutable_node_1.download_best_version()) |
---|
1118 | def _check_download_1(res): |
---|
1119 | self.failUnlessEqual(res, DATA) |
---|
1120 | # now we see if we can retrieve the data from a new node, |
---|
1121 | # constructed using the URI of the original one. We do this test |
---|
1122 | # on the same client that uploaded the data. |
---|
1123 | uri = self._mutable_node_1.get_uri() |
---|
1124 | log.msg("starting retrieve1") |
---|
1125 | newnode = self.clients[0].create_node_from_uri(uri) |
---|
1126 | newnode_2 = self.clients[0].create_node_from_uri(uri) |
---|
1127 | self.failUnlessIdentical(newnode, newnode_2) |
---|
1128 | return newnode.download_best_version() |
---|
1129 | d.addCallback(_check_download_1) |
---|
1130 | |
---|
1131 | def _check_download_2(res): |
---|
1132 | self.failUnlessEqual(res, DATA) |
---|
1133 | # same thing, but with a different client |
---|
1134 | uri = self._mutable_node_1.get_uri() |
---|
1135 | newnode = self.clients[1].create_node_from_uri(uri) |
---|
1136 | log.msg("starting retrieve2") |
---|
1137 | d1 = newnode.download_best_version() |
---|
1138 | d1.addCallback(lambda res: (res, newnode)) |
---|
1139 | return d1 |
---|
1140 | d.addCallback(_check_download_2) |
---|
1141 | |
---|
1142 | def _check_download_3((res, newnode)): |
---|
1143 | self.failUnlessEqual(res, DATA) |
---|
1144 | # replace the data |
---|
1145 | log.msg("starting replace1") |
---|
1146 | d1 = newnode.overwrite(NEWDATA_uploadable) |
---|
1147 | d1.addCallback(lambda res: newnode.download_best_version()) |
---|
1148 | return d1 |
---|
1149 | d.addCallback(_check_download_3) |
---|
1150 | |
---|
1151 | def _check_download_4(res): |
---|
1152 | self.failUnlessEqual(res, NEWDATA) |
---|
1153 | # now create an even newer node and replace the data on it. This |
---|
1154 | # new node has never been used for download before. |
---|
1155 | uri = self._mutable_node_1.get_uri() |
---|
1156 | newnode1 = self.clients[2].create_node_from_uri(uri) |
---|
1157 | newnode2 = self.clients[3].create_node_from_uri(uri) |
---|
1158 | self._newnode3 = self.clients[3].create_node_from_uri(uri) |
---|
1159 | log.msg("starting replace2") |
---|
1160 | d1 = newnode1.overwrite(NEWERDATA_uploadable) |
---|
1161 | d1.addCallback(lambda res: newnode2.download_best_version()) |
---|
1162 | return d1 |
---|
1163 | d.addCallback(_check_download_4) |
---|
1164 | |
---|
1165 | def _check_download_5(res): |
---|
1166 | log.msg("finished replace2") |
---|
1167 | self.failUnlessEqual(res, NEWERDATA) |
---|
1168 | d.addCallback(_check_download_5) |
---|
1169 | |
---|
1170 | def _corrupt_shares(res): |
---|
1171 | # run around and flip bits in all but k of the shares, to test |
---|
1172 | # the hash checks |
---|
1173 | shares = self._find_all_shares(self.basedir) |
---|
1174 | ## sort by share number |
---|
1175 | #shares.sort( lambda a,b: cmp(a[3], b[3]) ) |
---|
1176 | where = dict([ (shnum, filename) |
---|
1177 | for (client_num, storage_index, filename, shnum) |
---|
1178 | in shares ]) |
---|
1179 | assert len(where) == 10 # this test is designed for 3-of-10 |
---|
1180 | for shnum, filename in where.items(): |
---|
1181 | # shares 7,8,9 are left alone. read will check |
---|
1182 | # (share_hash_chain, block_hash_tree, share_data). New |
---|
1183 | # seqnum+R pairs will trigger a check of (seqnum, R, IV, |
---|
1184 | # segsize, signature). |
---|
1185 | if shnum == 0: |
---|
1186 | # read: this will trigger "pubkey doesn't match |
---|
1187 | # fingerprint". |
---|
1188 | self._corrupt_mutable_share(filename, "pubkey") |
---|
1189 | self._corrupt_mutable_share(filename, "encprivkey") |
---|
1190 | elif shnum == 1: |
---|
1191 | # triggers "signature is invalid" |
---|
1192 | self._corrupt_mutable_share(filename, "seqnum") |
---|
1193 | elif shnum == 2: |
---|
1194 | # triggers "signature is invalid" |
---|
1195 | self._corrupt_mutable_share(filename, "R") |
---|
1196 | elif shnum == 3: |
---|
1197 | # triggers "signature is invalid" |
---|
1198 | self._corrupt_mutable_share(filename, "segsize") |
---|
1199 | elif shnum == 4: |
---|
1200 | self._corrupt_mutable_share(filename, "share_hash_chain") |
---|
1201 | elif shnum == 5: |
---|
1202 | self._corrupt_mutable_share(filename, "block_hash_tree") |
---|
1203 | elif shnum == 6: |
---|
1204 | self._corrupt_mutable_share(filename, "share_data") |
---|
                # other things to corrupt: IV, signature
                # 7,8,9 are left alone

            # note that initial_query_count=5 means that we'll hit the
            # first 5 servers in effectively random order (based upon
            # response time), so we won't necessarily ever get a "pubkey
            # doesn't match fingerprint" error (if we hit shnum>=1 before
            # shnum=0, we pull the pubkey from there). To get repeatable
            # specific failures, we need to set initial_query_count=1,
            # but of course that will change the sequencing behavior of
            # the retrieval process. TODO: find a reasonable way to make
            # this a parameter, probably when we expand this test to test
            # for one failure mode at a time.

            # when we retrieve this, we should get three signature
            # failures (where we've mangled seqnum, R, and segsize). The
            # pubkey mangling
        d.addCallback(_corrupt_shares)

        d.addCallback(lambda res: self._newnode3.download_best_version())
        d.addCallback(_check_download_5)

        def _check_empty_file(res):
            # make sure we can create empty files, this usually screws up the
            # segsize math
            d1 = self.clients[2].create_mutable_file(MutableData(""))
            d1.addCallback(lambda newnode: newnode.download_best_version())
            d1.addCallback(lambda res: self.failUnlessEqual("", res))
            return d1
        d.addCallback(_check_empty_file)

        d.addCallback(lambda res: self.clients[0].create_dirnode())
        def _created_dirnode(dnode):
            log.msg("_created_dirnode(%s)" % (dnode,))
            d1 = dnode.list()
            d1.addCallback(lambda children: self.failUnlessEqual(children, {}))
            d1.addCallback(lambda res: dnode.has_child(u"edgar"))
            d1.addCallback(lambda answer: self.failUnlessEqual(answer, False))
            d1.addCallback(lambda res: dnode.set_node(u"see recursive", dnode))
            d1.addCallback(lambda res: dnode.has_child(u"see recursive"))
            d1.addCallback(lambda answer: self.failUnlessEqual(answer, True))
            d1.addCallback(lambda res: dnode.build_manifest().when_done())
            d1.addCallback(lambda res:
                           self.failUnlessEqual(len(res["manifest"]), 1))
            return d1
        d.addCallback(_created_dirnode)

        return d

    def flip_bit(self, good):
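        # XOR the low bit of the last byte, e.g. flip_bit("abc") == "abb".
        # A single flipped bit is enough to invalidate any hash, signature,
        # or fingerprint check that covers the mangled field.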
        return good[:-1] + chr(ord(good[-1]) ^ 0x01)

    def mangle_uri(self, gooduri):
        # change the key, which changes the storage index, which means we'll
        # be asking about the wrong file, so nobody will have any shares
        u = uri.from_string(gooduri)
        u2 = uri.CHKFileURI(key=self.flip_bit(u.key),
                            uri_extension_hash=u.uri_extension_hash,
                            needed_shares=u.needed_shares,
                            total_shares=u.total_shares,
                            size=u.size)
        return u2.to_string()

    # TODO: add a test which mangles the uri_extension_hash instead, and
    # should fail due to not being able to get a valid uri_extension block.
    # Also a test which sneakily mangles the uri_extension block to change
    # some of the validation data, so it will fail in the post-download phase
    # when the file's crypttext integrity check fails. Do the same thing for
    # the key, which should cause the download to fail the post-download
    # plaintext_hash check.

    def test_filesystem(self):
        self.basedir = "system/SystemTest/test_filesystem"
        self.data = LARGE_DATA
        d = self.set_up_nodes(use_stats_gatherer=True)
        def _new_happy_semantics(ign):
            for c in self.clients:
                c.encoding_params['happy'] = 1
        d.addCallback(_new_happy_semantics)
        d.addCallback(self._test_introweb)
        d.addCallback(self.log, "starting publish")
        d.addCallback(self._do_publish1)
        d.addCallback(self._test_runner)
        d.addCallback(self._do_publish2)
        # at this point, we have the following filesystem (where "R" denotes
        # self._root_directory_uri):
        # R
        # R/subdir1
        # R/subdir1/mydata567
        # R/subdir1/subdir2/
        # R/subdir1/subdir2/mydata992

        d.addCallback(lambda res: self.bounce_client(0))
        d.addCallback(self.log, "bounced client0")

        d.addCallback(self._check_publish1)
        d.addCallback(self.log, "did _check_publish1")
        d.addCallback(self._check_publish2)
        d.addCallback(self.log, "did _check_publish2")
        d.addCallback(self._do_publish_private)
        d.addCallback(self.log, "did _do_publish_private")
        # now we also have (where "P" denotes a new dir):
        # P/personal/sekrit data
        # P/s2-rw -> /subdir1/subdir2/
        # P/s2-ro -> /subdir1/subdir2/ (read-only)
        d.addCallback(self._check_publish_private)
        d.addCallback(self.log, "did _check_publish_private")
        d.addCallback(self._test_web)
        d.addCallback(self._test_control)
        d.addCallback(self._test_cli)
        # P now has four top-level children:
        # P/personal/sekrit data
        # P/s2-ro/
        # P/s2-rw/
        # P/test_put/ (empty)
        d.addCallback(self._test_checker)
        return d

    def _test_introweb(self, res):
        d = getPage(self.introweb_url, method="GET", followRedirect=True)
        def _check(res):
            try:
                self.failUnless("%s: %s" % (allmydata.__appname__, allmydata.__version__) in res)
                verstr = str(allmydata.__version__)

                # The Python "rational version numbering" convention
                # disallows "-r$REV" but allows ".post$REV"
                # instead. Eventually we'll probably move to
                # that. When we do, this test won't go red:
                ix = verstr.rfind('-r')
                if ix != -1:
                    altverstr = verstr[:ix] + '.post' + verstr[ix+2:]
                else:
                    ix = verstr.rfind('.post')
                    if ix != -1:
                        altverstr = verstr[:ix] + '-r' + verstr[ix+5:]
                    else:
                        altverstr = verstr

                appverstr = "%s: %s" % (allmydata.__appname__, verstr)
                newappverstr = "%s: %s" % (allmydata.__appname__, altverstr)

                self.failUnless((appverstr in res) or (newappverstr in res), (appverstr, newappverstr, res))
                self.failUnless("Announcement Summary: storage: 5" in res)
                self.failUnless("Subscription Summary: storage: 5" in res)
                self.failUnless("tahoe.css" in res)
            except unittest.FailTest:
                print
                print "GET %s output was:" % self.introweb_url
                print res
                raise
        d.addCallback(_check)
        # make sure it serves the CSS too
        d.addCallback(lambda res:
                      getPage(self.introweb_url+"tahoe.css", method="GET"))
        d.addCallback(lambda res:
                      getPage(self.introweb_url + "?t=json",
                              method="GET", followRedirect=True))
        def _check_json(res):
            data = simplejson.loads(res)
            try:
                self.failUnlessEqual(data["subscription_summary"],
                                     {"storage": 5})
                self.failUnlessEqual(data["announcement_summary"],
                                     {"storage": 5})
            except unittest.FailTest:
                print
                print "GET %s?t=json output was:" % self.introweb_url
                print res
                raise
        d.addCallback(_check_json)
        return d

    def _do_publish1(self, res):
        ut = upload.Data(self.data, convergence=None)
        c0 = self.clients[0]
        d = c0.create_dirnode()
        def _made_root(new_dirnode):
            self._root_directory_uri = new_dirnode.get_uri()
            return c0.create_node_from_uri(self._root_directory_uri)
        d.addCallback(_made_root)
        d.addCallback(lambda root: root.create_subdirectory(u"subdir1"))
        def _made_subdir1(subdir1_node):
            self._subdir1_node = subdir1_node
            d1 = subdir1_node.add_file(u"mydata567", ut)
            d1.addCallback(self.log, "publish finished")
            def _stash_uri(filenode):
                self.uri = filenode.get_uri()
                assert isinstance(self.uri, str), (self.uri, filenode)
            d1.addCallback(_stash_uri)
            return d1
        d.addCallback(_made_subdir1)
        return d

    def _do_publish2(self, res):
        ut = upload.Data(self.data, convergence=None)
        d = self._subdir1_node.create_subdirectory(u"subdir2")
        d.addCallback(lambda subdir2: subdir2.add_file(u"mydata992", ut))
        return d

    def log(self, res, *args, **kwargs):
        # print "MSG: %s RES: %s" % (msg, args)
        log.msg(*args, **kwargs)
        return res

    def _do_publish_private(self, res):
        self.smalldata = "sssh, very secret stuff"
        ut = upload.Data(self.smalldata, convergence=None)
        d = self.clients[0].create_dirnode()
        d.addCallback(self.log, "GOT private directory")
        def _got_new_dir(privnode):
            rootnode = self.clients[0].create_node_from_uri(self._root_directory_uri)
            d1 = privnode.create_subdirectory(u"personal")
            d1.addCallback(self.log, "made P/personal")
            d1.addCallback(lambda node: node.add_file(u"sekrit data", ut))
            d1.addCallback(self.log, "made P/personal/sekrit data")
            d1.addCallback(lambda res: rootnode.get_child_at_path([u"subdir1", u"subdir2"]))
            def _got_s2(s2node):
                d2 = privnode.set_uri(u"s2-rw", s2node.get_uri(),
                                      s2node.get_readonly_uri())
                d2.addCallback(lambda node:
                               privnode.set_uri(u"s2-ro",
                                                s2node.get_readonly_uri(),
                                                s2node.get_readonly_uri()))
                return d2
            d1.addCallback(_got_s2)
            d1.addCallback(lambda res: privnode)
            return d1
        d.addCallback(_got_new_dir)
        return d

    def _check_publish1(self, res):
        # this one uses the iterative API
        c1 = self.clients[1]
        d = defer.succeed(c1.create_node_from_uri(self._root_directory_uri))
        d.addCallback(self.log, "check_publish1 got /")
        d.addCallback(lambda root: root.get(u"subdir1"))
        d.addCallback(lambda subdir1: subdir1.get(u"mydata567"))
        d.addCallback(lambda filenode: download_to_data(filenode))
        d.addCallback(self.log, "get finished")
        def _get_done(data):
            self.failUnlessEqual(data, self.data)
        d.addCallback(_get_done)
        return d

    def _check_publish2(self, res):
        # this one uses the path-based API
        rootnode = self.clients[1].create_node_from_uri(self._root_directory_uri)
        d = rootnode.get_child_at_path(u"subdir1")
        d.addCallback(lambda dirnode:
                      self.failUnless(IDirectoryNode.providedBy(dirnode)))
        d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567"))
        d.addCallback(lambda filenode: download_to_data(filenode))
        d.addCallback(lambda data: self.failUnlessEqual(data, self.data))

        d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567"))
        def _got_filenode(filenode):
            fnode = self.clients[1].create_node_from_uri(filenode.get_uri())
            assert fnode == filenode
        d.addCallback(_got_filenode)
        return d

    def _check_publish_private(self, resnode):
        # this one uses the path-based API
        self._private_node = resnode

        d = self._private_node.get_child_at_path(u"personal")
        def _got_personal(personal):
            self._personal_node = personal
            return personal
        d.addCallback(_got_personal)

        d.addCallback(lambda dirnode:
                      self.failUnless(IDirectoryNode.providedBy(dirnode), dirnode))
        def get_path(path):
            return self._private_node.get_child_at_path(path)

        d.addCallback(lambda res: get_path(u"personal/sekrit data"))
        d.addCallback(lambda filenode: download_to_data(filenode))
        d.addCallback(lambda data: self.failUnlessEqual(data, self.smalldata))
        d.addCallback(lambda res: get_path(u"s2-rw"))
        d.addCallback(lambda dirnode: self.failUnless(dirnode.is_mutable()))
        d.addCallback(lambda res: get_path(u"s2-ro"))
        def _got_s2ro(dirnode):
            self.failUnless(dirnode.is_mutable(), dirnode)
            self.failUnless(dirnode.is_readonly(), dirnode)
            d1 = defer.succeed(None)
            d1.addCallback(lambda res: dirnode.list())
            d1.addCallback(self.log, "dirnode.list")

            d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mkdir(nope)", None, dirnode.create_subdirectory, u"nope"))

            d1.addCallback(self.log, "doing add_file(ro)")
            ut = upload.Data("I will disappear, unrecorded and unobserved. The tragedy of my demise is made more poignant by its silence, but this beauty is not for you to ever know.", convergence="99i-p1x4-xd4-18yc-ywt-87uu-msu-zo -- completely and totally unguessable string (unless you read this)")
            d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "add_file(nope)", None, dirnode.add_file, u"hope", ut))

            d1.addCallback(self.log, "doing get(ro)")
            d1.addCallback(lambda res: dirnode.get(u"mydata992"))
            d1.addCallback(lambda filenode:
                           self.failUnless(IFileNode.providedBy(filenode)))

            d1.addCallback(self.log, "doing delete(ro)")
            d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "delete(nope)", None, dirnode.delete, u"mydata992"))

            d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "set_uri(nope)", None, dirnode.set_uri, u"hopeless", self.uri, self.uri))

            d1.addCallback(lambda res: self.shouldFail2(NoSuchChildError, "get(missing)", "missing", dirnode.get, u"missing"))

            personal = self._personal_node
            d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mv from readonly", None, dirnode.move_child_to, u"mydata992", personal, u"nope"))

            d1.addCallback(self.log, "doing move_child_to(ro)2")
            d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mv to readonly", None, personal.move_child_to, u"sekrit data", dirnode, u"nope"))

            d1.addCallback(self.log, "finished with _got_s2ro")
            return d1
        d.addCallback(_got_s2ro)
        def _got_home(dummy):
            home = self._private_node
            personal = self._personal_node
            d1 = defer.succeed(None)
            d1.addCallback(self.log, "mv 'P/personal/sekrit data' to P/sekrit")
            d1.addCallback(lambda res:
                           personal.move_child_to(u"sekrit data",home,u"sekrit"))

            d1.addCallback(self.log, "mv P/sekrit 'P/sekrit data'")
            d1.addCallback(lambda res:
                           home.move_child_to(u"sekrit", home, u"sekrit data"))

            d1.addCallback(self.log, "mv 'P/sekrit data' P/personal/")
            d1.addCallback(lambda res:
                           home.move_child_to(u"sekrit data", personal))

            d1.addCallback(lambda res: home.build_manifest().when_done())
            d1.addCallback(self.log, "manifest")
            # five items:
            # P/
            # P/personal/
            # P/personal/sekrit data
            # P/s2-rw (same as P/s2-ro)
            # P/s2-rw/mydata992 (same as P/s2-ro/mydata992)
            d1.addCallback(lambda res:
                           self.failUnlessEqual(len(res["manifest"]), 5))
            d1.addCallback(lambda res: home.start_deep_stats().when_done())
            def _check_stats(stats):
                expected = {"count-immutable-files": 1,
                            "count-mutable-files": 0,
                            "count-literal-files": 1,
                            "count-files": 2,
                            "count-directories": 3,
                            "size-immutable-files": 112,
                            "size-literal-files": 23,
                            #"size-directories": 616, # varies
                            #"largest-directory": 616,
                            "largest-directory-children": 3,
                            "largest-immutable-file": 112,
                            }
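                # (112 == len(LARGE_DATA), the one CHK upload; 23 ==
                # len(self.smalldata), which is small enough to be stored
                # as a LIT file rather than as shares)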
                for k,v in expected.iteritems():
                    self.failUnlessEqual(stats[k], v,
                                         "stats[%s] was %s, not %s" %
                                         (k, stats[k], v))
                self.failUnless(stats["size-directories"] > 1300,
                                stats["size-directories"])
                self.failUnless(stats["largest-directory"] > 800,
                                stats["largest-directory"])
                self.failUnlessEqual(stats["size-files-histogram"],
                                     [ (11, 31, 1), (101, 316, 1) ])
            d1.addCallback(_check_stats)
            return d1
        d.addCallback(_got_home)
        return d

    def shouldFail(self, res, expected_failure, which, substring=None):
        if isinstance(res, Failure):
            res.trap(expected_failure)
            if substring:
                self.failUnless(substring in str(res),
                                "substring '%s' not in '%s'"
                                % (substring, str(res)))
        else:
            self.fail("%s was supposed to raise %s, not get '%s'" %
                      (which, expected_failure, res))

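    # shouldFail2 differs from shouldFail in that it wraps the call itself
    # (via defer.maybeDeferred), so synchronous exceptions are captured as
    # Failures too, instead of being attached to an existing Deferred chain.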
    def shouldFail2(self, expected_failure, which, substring, callable, *args, **kwargs):
        assert substring is None or isinstance(substring, str)
        d = defer.maybeDeferred(callable, *args, **kwargs)
        def done(res):
            if isinstance(res, Failure):
                res.trap(expected_failure)
                if substring:
                    self.failUnless(substring in str(res),
                                    "substring '%s' not in '%s'"
                                    % (substring, str(res)))
            else:
                self.fail("%s was supposed to raise %s, not get '%s'" %
                          (which, expected_failure, res))
        d.addBoth(done)
        return d

    def PUT(self, urlpath, data):
        url = self.webish_url + urlpath
        return getPage(url, method="PUT", postdata=data)

    def GET(self, urlpath, followRedirect=False):
        url = self.webish_url + urlpath
        return getPage(url, method="GET", followRedirect=followRedirect)

    def POST(self, urlpath, followRedirect=False, use_helper=False, **fields):
        sepbase = "boogabooga"
        sep = "--" + sepbase
        form = []
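        # hand-roll a multipart/form-data body: each field is framed by the
        # boundary line, a Content-Disposition header, and a blank line, and
        # the final boundary is terminated with "--"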
        form.append(sep)
        form.append('Content-Disposition: form-data; name="_charset"')
        form.append('')
        form.append('UTF-8')
        form.append(sep)
        for name, value in fields.iteritems():
            if isinstance(value, tuple):
                filename, value = value
                form.append('Content-Disposition: form-data; name="%s"; '
                            'filename="%s"' % (name, filename.encode("utf-8")))
            else:
                form.append('Content-Disposition: form-data; name="%s"' % name)
            form.append('')
            form.append(str(value))
            form.append(sep)
        form[-1] += "--"
        body = ""
        headers = {}
        if fields:
            body = "\r\n".join(form) + "\r\n"
            headers["content-type"] = "multipart/form-data; boundary=%s" % sepbase
        return self.POST2(urlpath, body, headers, followRedirect, use_helper)

    def POST2(self, urlpath, body="", headers={}, followRedirect=False,
              use_helper=False):
        if use_helper:
            url = self.helper_webish_url + urlpath
        else:
            url = self.webish_url + urlpath
        return getPage(url, method="POST", postdata=body, headers=headers,
                       followRedirect=followRedirect)

    def _test_web(self, res):
        base = self.webish_url
        public = "uri/" + self._root_directory_uri
        d = getPage(base)
        def _got_welcome(page):
            html = page.replace('\n', ' ')
            connected_re = r'Connected to <span>%d</span>\s*of <span>%d</span> known storage servers' % (self.numclients, self.numclients)
            self.failUnless(re.search(connected_re, html),
                            "I didn't see the right '%s' message in:\n%s" % (connected_re, page))
            # nodeids/tubids don't have any regexp-special characters
            nodeid_re = r'<th>Node ID:</th>\s*<td title="TubID: %s">%s</td>' % (
                self.clients[0].get_long_tubid(), self.clients[0].get_long_nodeid())
            self.failUnless(re.search(nodeid_re, html),
                            "I didn't see the right '%s' message in:\n%s" % (nodeid_re, page))
            self.failUnless("Helper: 0 active uploads" in page)
        d.addCallback(_got_welcome)
        d.addCallback(self.log, "done with _got_welcome")

        # get the welcome page from the node that uses the helper too
        d.addCallback(lambda res: getPage(self.helper_webish_url))
        def _got_welcome_helper(page):
            html = page.replace('\n', ' ')
            self.failUnless(re.search('<img (src="img/connected-yes.png" |alt="Connected" ){2}/>', html), page)
            self.failUnlessIn("Not running helper", page)
        d.addCallback(_got_welcome_helper)

        d.addCallback(lambda res: getPage(base + public))
        d.addCallback(lambda res: getPage(base + public + "/subdir1"))
        def _got_subdir1(page):
            # there ought to be an href for our file
            self.failUnlessIn('<td align="right">%d</td>' % len(self.data), page)
            self.failUnless(">mydata567</a>" in page)
        d.addCallback(_got_subdir1)
        d.addCallback(self.log, "done with _got_subdir1")
        d.addCallback(lambda res:
                      getPage(base + public + "/subdir1/mydata567"))
        def _got_data(page):
            self.failUnlessEqual(page, self.data)
        d.addCallback(_got_data)

        # download from a URI embedded in a URL
        d.addCallback(self.log, "_get_from_uri")
        def _get_from_uri(res):
            return getPage(base + "uri/%s?filename=%s"
                           % (self.uri, "mydata567"))
        d.addCallback(_get_from_uri)
        def _got_from_uri(page):
            self.failUnlessEqual(page, self.data)
        d.addCallback(_got_from_uri)

        # download from a URI embedded in a URL, second form
        d.addCallback(self.log, "_get_from_uri2")
        def _get_from_uri2(res):
            return getPage(base + "uri?uri=%s" % (self.uri,))
        d.addCallback(_get_from_uri2)
        d.addCallback(_got_from_uri)

        # download from a bogus URI, make sure we get a reasonable error
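        # (the mangled key changes the storage index, so no server holds
        # shares for it; the webapi signals the resulting no-shares failure
        # with the 410 status asserted below)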
        d.addCallback(self.log, "_get_from_bogus_uri", level=log.UNUSUAL)
        def _get_from_bogus_uri(res):
            d1 = getPage(base + "uri/%s?filename=%s"
                         % (self.mangle_uri(self.uri), "mydata567"))
            d1.addBoth(self.shouldFail, Error, "downloading bogus URI",
                       "410")
            return d1
        d.addCallback(_get_from_bogus_uri)
        d.addCallback(self.log, "_got_from_bogus_uri", level=log.UNUSUAL)

        # upload a file with PUT
        d.addCallback(self.log, "about to try PUT")
        d.addCallback(lambda res: self.PUT(public + "/subdir3/new.txt",
                                           "new.txt contents"))
        d.addCallback(lambda res: self.GET(public + "/subdir3/new.txt"))
        d.addCallback(self.failUnlessEqual, "new.txt contents")
        # and again with something large enough to use multiple segments,
        # and hopefully trigger pauseProducing too
        def _new_happy_semantics(ign):
            for c in self.clients:
                # these get reset somewhere? Whatever.
                c.encoding_params['happy'] = 1
        d.addCallback(_new_happy_semantics)
        d.addCallback(lambda res: self.PUT(public + "/subdir3/big.txt",
                                           "big" * 500000)) # 1.5MB
        d.addCallback(lambda res: self.GET(public + "/subdir3/big.txt"))
        d.addCallback(lambda res: self.failUnlessEqual(len(res), 1500000))

        # can we replace files in place?
        d.addCallback(lambda res: self.PUT(public + "/subdir3/new.txt",
                                           "NEWER contents"))
        d.addCallback(lambda res: self.GET(public + "/subdir3/new.txt"))
        d.addCallback(self.failUnlessEqual, "NEWER contents")

        # test unlinked POST
        d.addCallback(lambda res: self.POST("uri", t="upload",
                                            file=("new.txt", "data" * 10000)))
        # and again using the helper, which exercises different upload-status
        # display code
        d.addCallback(lambda res: self.POST("uri", use_helper=True, t="upload",
                                            file=("foo.txt", "data2" * 10000)))

        # check that the status page exists
        d.addCallback(lambda res: self.GET("status", followRedirect=True))
        def _got_status(res):
            # find an interesting upload and download to look at. LIT files
            # are not interesting.
            h = self.clients[0].get_history()
            for ds in h.list_all_download_statuses():
                if ds.get_size() > 200:
                    self._down_status = ds.get_counter()
            for us in h.list_all_upload_statuses():
                if us.get_size() > 200:
                    self._up_status = us.get_counter()
            rs = list(h.list_all_retrieve_statuses())[0]
            self._retrieve_status = rs.get_counter()
            ps = list(h.list_all_publish_statuses())[0]
            self._publish_status = ps.get_counter()
            us = list(h.list_all_mapupdate_statuses())[0]
            self._update_status = us.get_counter()

            # and that there are some upload- and download- status pages
            return self.GET("status/up-%d" % self._up_status)
        d.addCallback(_got_status)
        def _got_up(res):
            return self.GET("status/down-%d" % self._down_status)
        d.addCallback(_got_up)
        def _got_down(res):
            return self.GET("status/mapupdate-%d" % self._update_status)
        d.addCallback(_got_down)
        def _got_update(res):
            return self.GET("status/publish-%d" % self._publish_status)
        d.addCallback(_got_update)
        def _got_publish(res):
            self.failUnlessIn("Publish Results", res)
            return self.GET("status/retrieve-%d" % self._retrieve_status)
        d.addCallback(_got_publish)
        def _got_retrieve(res):
            self.failUnlessIn("Retrieve Results", res)
        d.addCallback(_got_retrieve)

        # check that the helper status page exists
        d.addCallback(lambda res:
                      self.GET("helper_status", followRedirect=True))
        def _got_helper_status(res):
            self.failUnless("Bytes Fetched:" in res)
            # touch a couple of files in the helper's working directory to
            # exercise more code paths
            workdir = os.path.join(self.getdir("client0"), "helper")
            incfile = os.path.join(workdir, "CHK_incoming", "spurious")
            f = open(incfile, "wb")
            f.write("small file")
            f.close()
            then = time.time() - 86400*3
            now = time.time()
            os.utime(incfile, (now, then))
            encfile = os.path.join(workdir, "CHK_encoding", "spurious")
            f = open(encfile, "wb")
            f.write("less small file")
            f.close()
            os.utime(encfile, (now, then))
        d.addCallback(_got_helper_status)
        # and that the json form exists
        d.addCallback(lambda res:
                      self.GET("helper_status?t=json", followRedirect=True))
        def _got_helper_status_json(res):
            data = simplejson.loads(res)
            self.failUnlessEqual(data["chk_upload_helper.upload_need_upload"],
                                 1)
            self.failUnlessEqual(data["chk_upload_helper.incoming_count"], 1)
            self.failUnlessEqual(data["chk_upload_helper.incoming_size"], 10)
            self.failUnlessEqual(data["chk_upload_helper.incoming_size_old"],
                                 10)
            self.failUnlessEqual(data["chk_upload_helper.encoding_count"], 1)
            self.failUnlessEqual(data["chk_upload_helper.encoding_size"], 15)
            self.failUnlessEqual(data["chk_upload_helper.encoding_size_old"],
                                 15)
        d.addCallback(_got_helper_status_json)

        # and check that client[3] (which uses a helper but does not run one
        # itself) doesn't explode when you ask for its status
        d.addCallback(lambda res: getPage(self.helper_webish_url + "status/"))
        def _got_non_helper_status(res):
            self.failUnlessIn("Recent and Active Operations", res)
        d.addCallback(_got_non_helper_status)

        # or for helper status with t=json
        d.addCallback(lambda res:
                      getPage(self.helper_webish_url + "helper_status?t=json"))
        def _got_non_helper_status_json(res):
            data = simplejson.loads(res)
            self.failUnlessEqual(data, {})
        d.addCallback(_got_non_helper_status_json)

        # see if the statistics page exists
        d.addCallback(lambda res: self.GET("statistics"))
        def _got_stats(res):
            self.failUnlessIn("Operational Statistics", res)
            self.failUnlessIn(" 'downloader.files_downloaded': 5,", res)
        d.addCallback(_got_stats)
        d.addCallback(lambda res: self.GET("statistics?t=json"))
        def _got_stats_json(res):
            data = simplejson.loads(res)
            self.failUnlessEqual(data["counters"]["uploader.files_uploaded"], 5)
            self.failUnlessEqual(data["stats"]["chk_upload_helper.upload_need_upload"], 1)
        d.addCallback(_got_stats_json)

        # TODO: mangle the second segment of a file, to test errors that
        # occur after we've already sent some good data, which uses a
        # different error path.

        # TODO: download a URI with a form
        # TODO: create a directory by using a form
        # TODO: upload by using a form on the directory page
        # url = base + "somedir/subdir1/freeform_post!!upload"
        # TODO: delete a file by using a button on the directory page

        return d

    def _test_runner(self, res):
        # exercise some of the diagnostic tools in runner.py

        # find a share
        for (dirpath, dirnames, filenames) in os.walk(unicode(self.basedir)):
            if "storage" not in dirpath:
                continue
            if not filenames:
                continue
            pieces = dirpath.split(os.sep)
            if (len(pieces) >= 4
                and pieces[-4] == "storage"
                and pieces[-3] == "shares"):
                # we're sitting in .../storage/shares/$START/$SINDEX , and there
                # are sharefiles here
                filename = os.path.join(dirpath, filenames[0])
                # peek at the magic to see if it is a chk share
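                # (version-1 immutable share files begin with a 4-byte
                # big-endian container version of 1, hence the
                # '\x00\x00\x00\x01' magic checked below)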
                magic = open(filename, "rb").read(4)
                if magic == '\x00\x00\x00\x01':
                    break
        else:
            self.fail("unable to find any uri_extension files in %r"
                      % self.basedir)
        log.msg("test_system.SystemTest._test_runner using %r" % filename)

        out,err = StringIO(), StringIO()
        rc = runner.runner(["debug", "dump-share", "--offsets",
                            unicode_to_argv(filename)],
                           stdout=out, stderr=err)
        output = out.getvalue()
        self.failUnlessEqual(rc, 0)

        # we only upload a single file, so we can assert some things about
        # its size and shares.
        self.failUnlessIn("share filename: %s" % quote_output(abspath_expanduser_unicode(filename)), output)
        self.failUnlessIn("size: %d\n" % len(self.data), output)
        self.failUnlessIn("num_segments: 1\n", output)
        # segment_size is always a multiple of needed_shares
        self.failUnlessIn("segment_size: %d\n" % mathutil.next_multiple(len(self.data), 3), output)
        self.failUnlessIn("total_shares: 10\n", output)
        # keys which are supposed to be present
        for key in ("size", "num_segments", "segment_size",
                    "needed_shares", "total_shares",
                    "codec_name", "codec_params", "tail_codec_params",
                    #"plaintext_hash", "plaintext_root_hash",
                    "crypttext_hash", "crypttext_root_hash",
                    "share_root_hash", "UEB_hash"):
            self.failUnlessIn("%s: " % key, output)
        self.failUnlessIn(" verify-cap: URI:CHK-Verifier:", output)

        # now use its storage index to find the other shares using the
        # 'find-shares' tool
        sharedir, shnum = os.path.split(filename)
        storagedir, storage_index_s = os.path.split(sharedir)
        storage_index_s = str(storage_index_s)
        out,err = StringIO(), StringIO()
        nodedirs = [self.getdir("client%d" % i) for i in range(self.numclients)]
        cmd = ["debug", "find-shares", storage_index_s] + nodedirs
        rc = runner.runner(cmd, stdout=out, stderr=err)
        self.failUnlessEqual(rc, 0)
        out.seek(0)
        sharefiles = [sfn.strip() for sfn in out.readlines()]
        self.failUnlessEqual(len(sharefiles), 10)

        # also exercise the 'catalog-shares' tool
        out,err = StringIO(), StringIO()
        nodedirs = [self.getdir("client%d" % i) for i in range(self.numclients)]
        cmd = ["debug", "catalog-shares"] + nodedirs
        rc = runner.runner(cmd, stdout=out, stderr=err)
        self.failUnlessEqual(rc, 0)
        out.seek(0)
        descriptions = [sfn.strip() for sfn in out.readlines()]
        self.failUnlessEqual(len(descriptions), 30)
        matching = [line
                    for line in descriptions
                    if line.startswith("CHK %s " % storage_index_s)]
        self.failUnlessEqual(len(matching), 10)

    def _test_control(self, res):
        # exercise the remote-control-the-client foolscap interfaces in
        # allmydata.control (mostly used for performance tests)
        c0 = self.clients[0]
        control_furl_file = os.path.join(c0.basedir, "private", "control.furl")
        control_furl = open(control_furl_file, "r").read().strip()
        # it doesn't really matter which Tub we use to connect to the client,
        # so let's just use our IntroducerNode's
        d = self.introducer.tub.getReference(control_furl)
        d.addCallback(self._test_control2, control_furl_file)
        return d

    def _test_control2(self, rref, filename):
        d = defer.succeed(None)
        d.addCallback(lambda res: rref.callRemote("speed_test", 1, 200, False))
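        # the memory-usage probe presumably reads from /proc (a Linux-ism),
        # which would explain why it is only exercised on Linux below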
        if sys.platform in ("linux2", "linux3"):
            d.addCallback(lambda res: rref.callRemote("get_memory_usage"))
        d.addCallback(lambda res: rref.callRemote("measure_peer_response_time"))
        return d

    def _test_cli(self, res):
        # run various CLI commands (in a thread, since they use blocking
        # network calls)

        private_uri = self._private_node.get_uri()
        client0_basedir = self.getdir("client0")

        nodeargs = [
            "--node-directory", client0_basedir,
            ]

        d = defer.succeed(None)

        # for compatibility with earlier versions, private/root_dir.cap is
        # supposed to be treated as an alias named "tahoe:". Start by making
        # sure that works, before we add other aliases.

        root_file = os.path.join(client0_basedir, "private", "root_dir.cap")
        f = open(root_file, "w")
        f.write(private_uri)
        f.close()

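        # helper: prepend the --node-directory arguments and run one CLI
        # verb, passing the command's (out, err) pair on to the next
        # callback in the chain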
        def run(ignored, verb, *args, **kwargs):
            stdin = kwargs.get("stdin", "")
            newargs = nodeargs + [verb] + list(args)
            return self._run_cli(newargs, stdin=stdin)

        def _check_ls((out,err), expected_children, unexpected_children=[]):
            self.failUnlessEqual(err, "")
            for s in expected_children:
                self.failUnless(s in out, (s,out))
            for s in unexpected_children:
                self.failIf(s in out, (s,out))

        def _check_ls_root((out,err)):
            self.failUnless("personal" in out)
            self.failUnless("s2-ro" in out)
            self.failUnless("s2-rw" in out)
            self.failUnlessEqual(err, "")

        # this should reference private_uri
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["personal", "s2-ro", "s2-rw"])

        d.addCallback(run, "list-aliases")
        def _check_aliases_1((out,err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(out.strip(" \n"), "tahoe: %s" % private_uri)
        d.addCallback(_check_aliases_1)

        # now that that's out of the way, remove root_dir.cap and work with
        # new files
        d.addCallback(lambda res: os.unlink(root_file))
        d.addCallback(run, "list-aliases")
        def _check_aliases_2((out,err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(out, "")
        d.addCallback(_check_aliases_2)

        d.addCallback(run, "mkdir")
        def _got_dir( (out,err) ):
            self.failUnless(uri.from_string_dirnode(out.strip()))
            return out.strip()
        d.addCallback(_got_dir)
        d.addCallback(lambda newcap: run(None, "add-alias", "tahoe", newcap))

        d.addCallback(run, "list-aliases")
        def _check_aliases_3((out,err)):
            self.failUnlessEqual(err, "")
            self.failUnless("tahoe: " in out)
        d.addCallback(_check_aliases_3)

        def _check_empty_dir((out,err)):
            self.failUnlessEqual(out, "")
            self.failUnlessEqual(err, "")
        d.addCallback(run, "ls")
        d.addCallback(_check_empty_dir)

        def _check_missing_dir((out,err)):
            # TODO: check that rc==2
            self.failUnlessEqual(out, "")
            self.failUnlessEqual(err, "No such file or directory\n")
        d.addCallback(run, "ls", "bogus")
        d.addCallback(_check_missing_dir)

        files = []
        datas = []
        for i in range(10):
            fn = os.path.join(self.basedir, "file%d" % i)
            files.append(fn)
            data = "data to be uploaded: file%d\n" % i
            datas.append(data)
            open(fn,"wb").write(data)

        def _check_stdout_against((out,err), filenum=None, data=None):
            self.failUnlessEqual(err, "")
            if filenum is not None:
                self.failUnlessEqual(out, datas[filenum])
            if data is not None:
                self.failUnlessEqual(out, data)

        # test both forms of put: from a local file, and from stdin
        # tahoe put bar FOO
        d.addCallback(run, "put", files[0], "tahoe-file0")
        def _put_out((out,err)):
            self.failUnless("URI:LIT:" in out, out)
            self.failUnless("201 Created" in err, err)
            uri0 = out.strip()
            return run(None, "get", uri0)
        d.addCallback(_put_out)
        d.addCallback(lambda (out,err): self.failUnlessEqual(out, datas[0]))

        d.addCallback(run, "put", files[1], "subdir/tahoe-file1")
        # tahoe put bar tahoe:FOO
        d.addCallback(run, "put", files[2], "tahoe:file2")
        d.addCallback(run, "put", "--format=SDMF", files[3], "tahoe:file3")
        def _check_put_mutable((out,err)):
            self._mutable_file3_uri = out.strip()
        d.addCallback(_check_put_mutable)
        d.addCallback(run, "get", "tahoe:file3")
        d.addCallback(_check_stdout_against, 3)

        # tahoe put FOO
        STDIN_DATA = "This is the file to upload from stdin."
        d.addCallback(run, "put", "-", "tahoe-file-stdin", stdin=STDIN_DATA)
        # tahoe put tahoe:FOO
        d.addCallback(run, "put", "-", "tahoe:from-stdin",
                      stdin="Other file from stdin.")

        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["tahoe-file0", "file2", "file3", "subdir",
                                  "tahoe-file-stdin", "from-stdin"])
        d.addCallback(run, "ls", "subdir")
        d.addCallback(_check_ls, ["tahoe-file1"])

        # tahoe mkdir FOO
        d.addCallback(run, "mkdir", "subdir2")
        d.addCallback(run, "ls")
        # TODO: extract the URI, set an alias with it
        d.addCallback(_check_ls, ["subdir2"])

        # tahoe get: (to stdin and to a file)
        d.addCallback(run, "get", "tahoe-file0")
        d.addCallback(_check_stdout_against, 0)
        d.addCallback(run, "get", "tahoe:subdir/tahoe-file1")
        d.addCallback(_check_stdout_against, 1)
        outfile0 = os.path.join(self.basedir, "outfile0")
        d.addCallback(run, "get", "file2", outfile0)
        def _check_outfile0((out,err)):
            data = open(outfile0,"rb").read()
            self.failUnlessEqual(data, "data to be uploaded: file2\n")
        d.addCallback(_check_outfile0)
        outfile1 = os.path.join(self.basedir, "outfile1")
        d.addCallback(run, "get", "tahoe:subdir/tahoe-file1", outfile1)
        def _check_outfile1((out,err)):
            data = open(outfile1,"rb").read()
            self.failUnlessEqual(data, "data to be uploaded: file1\n")
        d.addCallback(_check_outfile1)

        d.addCallback(run, "rm", "tahoe-file0")
        d.addCallback(run, "rm", "tahoe:file2")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, [], ["tahoe-file0", "file2"])

        d.addCallback(run, "ls", "-l")
        def _check_ls_l((out,err)):
            lines = out.split("\n")
            for l in lines:
                if "tahoe-file-stdin" in l:
                    self.failUnless(l.startswith("-r-- "), l)
                    self.failUnless(" %d " % len(STDIN_DATA) in l)
                if "file3" in l:
                    self.failUnless(l.startswith("-rw- "), l) # mutable
        d.addCallback(_check_ls_l)

        d.addCallback(run, "ls", "--uri")
        def _check_ls_uri((out,err)):
            lines = out.split("\n")
            for l in lines:
                if "file3" in l:
                    self.failUnless(self._mutable_file3_uri in l)
        d.addCallback(_check_ls_uri)

        d.addCallback(run, "ls", "--readonly-uri")
        def _check_ls_rouri((out,err)):
            lines = out.split("\n")
            for l in lines:
                if "file3" in l:
                    rw_uri = self._mutable_file3_uri
                    u = uri.from_string_mutable_filenode(rw_uri)
                    ro_uri = u.get_readonly().to_string()
                    self.failUnless(ro_uri in l)
        d.addCallback(_check_ls_rouri)

        d.addCallback(run, "mv", "tahoe-file-stdin", "tahoe-moved")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["tahoe-moved"], ["tahoe-file-stdin"])

        d.addCallback(run, "ln", "tahoe-moved", "newlink")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["tahoe-moved", "newlink"])

        d.addCallback(run, "cp", "tahoe:file3", "tahoe:file3-copy")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["file3", "file3-copy"])
        d.addCallback(run, "get", "tahoe:file3-copy")
        d.addCallback(_check_stdout_against, 3)

        # copy from disk into tahoe
        d.addCallback(run, "cp", files[4], "tahoe:file4")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["file3", "file3-copy", "file4"])
        d.addCallback(run, "get", "tahoe:file4")
        d.addCallback(_check_stdout_against, 4)

        # copy from tahoe into disk
        target_filename = os.path.join(self.basedir, "file-out")
        d.addCallback(run, "cp", "tahoe:file4", target_filename)
        def _check_cp_out((out,err)):
            self.failUnless(os.path.exists(target_filename))
            got = open(target_filename,"rb").read()
            self.failUnlessEqual(got, datas[4])
        d.addCallback(_check_cp_out)

        # copy from disk to disk (silly case)
        target2_filename = os.path.join(self.basedir, "file-out-copy")
        d.addCallback(run, "cp", target_filename, target2_filename)
        def _check_cp_out2((out,err)):
            self.failUnless(os.path.exists(target2_filename))
            got = open(target2_filename,"rb").read()
            self.failUnlessEqual(got, datas[4])
        d.addCallback(_check_cp_out2)

        # copy from tahoe into disk, overwriting an existing file
        d.addCallback(run, "cp", "tahoe:file3", target_filename)
        def _check_cp_out3((out,err)):
            self.failUnless(os.path.exists(target_filename))
            got = open(target_filename,"rb").read()
            self.failUnlessEqual(got, datas[3])
        d.addCallback(_check_cp_out3)

        # copy from disk into tahoe, overwriting an existing immutable file
        d.addCallback(run, "cp", files[5], "tahoe:file4")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["file3", "file3-copy", "file4"])
        d.addCallback(run, "get", "tahoe:file4")
        d.addCallback(_check_stdout_against, 5)

        # copy from disk into tahoe, overwriting an existing mutable file
        d.addCallback(run, "cp", files[5], "tahoe:file3")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["file3", "file3-copy", "file4"])
        d.addCallback(run, "get", "tahoe:file3")
        d.addCallback(_check_stdout_against, 5)

        # recursive copy: setup
        dn = os.path.join(self.basedir, "dir1")
        os.makedirs(dn)
        open(os.path.join(dn, "rfile1"), "wb").write("rfile1")
        open(os.path.join(dn, "rfile2"), "wb").write("rfile2")
        open(os.path.join(dn, "rfile3"), "wb").write("rfile3")
        sdn2 = os.path.join(dn, "subdir2")
        os.makedirs(sdn2)
        open(os.path.join(sdn2, "rfile4"), "wb").write("rfile4")
        open(os.path.join(sdn2, "rfile5"), "wb").write("rfile5")

        # from disk into tahoe
        d.addCallback(run, "cp", "-r", dn, "tahoe:")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["dir1"])
        d.addCallback(run, "ls", "dir1")
        d.addCallback(_check_ls, ["rfile1", "rfile2", "rfile3", "subdir2"],
                      ["rfile4", "rfile5"])
        d.addCallback(run, "ls", "tahoe:dir1/subdir2")
        d.addCallback(_check_ls, ["rfile4", "rfile5"],
                      ["rfile1", "rfile2", "rfile3"])
        d.addCallback(run, "get", "dir1/subdir2/rfile4")
        d.addCallback(_check_stdout_against, data="rfile4")

        # and back out again
        dn_copy = os.path.join(self.basedir, "dir1-copy")
        d.addCallback(run, "cp", "--verbose", "-r", "tahoe:dir1", dn_copy)
        def _check_cp_r_out((out,err)):
            def _cmp(name):
                old = open(os.path.join(dn, name), "rb").read()
                newfn = os.path.join(dn_copy, "dir1", name)
                self.failUnless(os.path.exists(newfn))
                new = open(newfn, "rb").read()
                self.failUnlessEqual(old, new)
            _cmp("rfile1")
            _cmp("rfile2")
            _cmp("rfile3")
            _cmp(os.path.join("subdir2", "rfile4"))
            _cmp(os.path.join("subdir2", "rfile5"))
        d.addCallback(_check_cp_r_out)

        # and copy it a second time, which ought to overwrite the same files
        d.addCallback(run, "cp", "-r", "tahoe:dir1", dn_copy)

        # and again, only writing filecaps
        dn_copy2 = os.path.join(self.basedir, "dir1-copy-capsonly")
        d.addCallback(run, "cp", "-r", "--caps-only", "tahoe:dir1", dn_copy2)
        def _check_capsonly((out,err)):
            # these should all be LITs
2267 | x = open(os.path.join(dn_copy2, "dir1", "subdir2", "rfile4")).read() |
---|
2268 | y = uri.from_string_filenode(x) |
---|
2269 | self.failUnlessEqual(y.data, "rfile4") |
---|
2270 | d.addCallback(_check_capsonly) |
---|
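        # note: with --caps-only, 'cp' writes each file's cap string into the
        # local file instead of its contents. These test files are small
        # enough to get LIT caps, which embed the data in the URI itself, so
        # parsing the cap recovers the original bytes directly.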

        # and tahoe-to-tahoe
        d.addCallback(run, "cp", "-r", "tahoe:dir1", "tahoe:dir1-copy")
        d.addCallback(run, "ls")
        d.addCallback(_check_ls, ["dir1", "dir1-copy"])
        d.addCallback(run, "ls", "dir1-copy/dir1")
        d.addCallback(_check_ls, ["rfile1", "rfile2", "rfile3", "subdir2"],
                      ["rfile4", "rfile5"])
        d.addCallback(run, "ls", "tahoe:dir1-copy/dir1/subdir2")
        d.addCallback(_check_ls, ["rfile4", "rfile5"],
                      ["rfile1", "rfile2", "rfile3"])
        d.addCallback(run, "get", "dir1-copy/dir1/subdir2/rfile4")
        d.addCallback(_check_stdout_against, data="rfile4")

        # and copy it a second time, which ought to overwrite the same files
        d.addCallback(run, "cp", "-r", "tahoe:dir1", "tahoe:dir1-copy")

        # tahoe_ls doesn't currently handle the error correctly: it tries to
        # JSON-parse a traceback.
        ## def _ls_missing(res):
        ##     argv = nodeargs + ["ls", "bogus"]
        ##     return self._run_cli(argv)
        ## d.addCallback(_ls_missing)
        ## def _check_ls_missing((out,err)):
        ##     print "OUT", out
        ##     print "ERR", err
        ##     self.failUnlessEqual(err, "")
        ## d.addCallback(_check_ls_missing)

        return d

    def test_filesystem_with_cli_in_subprocess(self):
        # We do this in a separate test so that test_filesystem doesn't skip
        # if we can't run bin/tahoe.

        self.basedir = "system/SystemTest/test_filesystem_with_cli_in_subprocess"
        d = self.set_up_nodes()
        def _new_happy_semantics(ign):
            for c in self.clients:
                c.encoding_params['happy'] = 1
        d.addCallback(_new_happy_semantics)

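        # run_bintahoe comes from RunBinTahoeMixin; it runs bin/tahoe in a
        # child process and, as _check_succeeded below assumes, fires its
        # Deferred with an (out, err, rc_or_sig) tuple.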
        def _run_in_subprocess(ignored, verb, *args, **kwargs):
            stdin = kwargs.get("stdin")
            env = kwargs.get("env")
            newargs = ["--node-directory", self.getdir("client0"), verb] + list(args)
            return self.run_bintahoe(newargs, stdin=stdin, env=env)

        def _check_succeeded(res, check_stderr=True):
            out, err, rc_or_sig = res
            self.failUnlessEqual(rc_or_sig, 0, str(res))
            if check_stderr:
                self.failUnlessEqual(err, "")

        d.addCallback(_run_in_subprocess, "create-alias", "newalias")
        d.addCallback(_check_succeeded)

        STDIN_DATA = "This is the file to upload from stdin."
        d.addCallback(_run_in_subprocess, "put", "-", "newalias:tahoe-file", stdin=STDIN_DATA)
        d.addCallback(_check_succeeded, check_stderr=False)

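        # The CLI talks to the local gateway over HTTP, so it must not honor
        # http_proxy settings: if the proxy variables (pointing at an
        # unreachable address) were respected, this 'mv' would fail.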
        def _mv_with_http_proxy(ign):
            # use a copy so we don't pollute the test process's own environment
            env = os.environ.copy()
            env['http_proxy'] = env['HTTP_PROXY'] = "http://127.0.0.0:12345" # invalid address
            return _run_in_subprocess(None, "mv", "newalias:tahoe-file", "newalias:tahoe-moved", env=env)
        d.addCallback(_mv_with_http_proxy)
        d.addCallback(_check_succeeded)

        d.addCallback(_run_in_subprocess, "ls", "newalias:")
        def _check_ls(res):
            out, err, rc_or_sig = res
            self.failUnlessEqual(rc_or_sig, 0, str(res))
            self.failUnlessEqual(err, "", str(res))
            self.failUnlessIn("tahoe-moved", out)
            self.failIfIn("tahoe-file", out)
        d.addCallback(_check_ls)
        return d

    def _run_cli(self, argv, stdin=""):
        #print "CLI:", argv
        stdout, stderr = StringIO(), StringIO()
        d = threads.deferToThread(runner.runner, argv, run_by_human=False,
                                  stdin=StringIO(stdin),
                                  stdout=stdout, stderr=stderr)
        def _done(res):
            return stdout.getvalue(), stderr.getvalue()
        d.addCallback(_done)
        return d
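    # usage sketch (illustrative only, not exercised here): _run_cli drives
    # the CLI in-process and returns a Deferred that fires with a
    # (stdout, stderr) pair of strings, e.g.:
    #   d = self._run_cli(["--node-directory", self.getdir("client0"), "ls"])
    #   d.addCallback(lambda (out,err): self.failUnlessIn("file3", out))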

    def _test_checker(self, res):
        ut = upload.Data("too big to be literal" * 200, convergence=None)
        d = self._personal_node.add_file(u"big file", ut)

        d.addCallback(lambda res: self._personal_node.check(Monitor()))
        def _check_dirnode_results(r):
            self.failUnless(r.is_healthy())
        d.addCallback(_check_dirnode_results)
        d.addCallback(lambda res: self._personal_node.check(Monitor(), verify=True))
        d.addCallback(_check_dirnode_results)

        d.addCallback(lambda res: self._personal_node.get(u"big file"))
        def _got_chk_filenode(n):
            self.failUnless(isinstance(n, ImmutableFileNode))
            d = n.check(Monitor())
            def _check_filenode_results(r):
                self.failUnless(r.is_healthy())
            d.addCallback(_check_filenode_results)
            d.addCallback(lambda res: n.check(Monitor(), verify=True))
            d.addCallback(_check_filenode_results)
            return d
        d.addCallback(_got_chk_filenode)

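        # LIT files live entirely in their URI, so there are no shares on the
        # grid to examine: check() is expected to return None for them.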
        d.addCallback(lambda res: self._personal_node.get(u"sekrit data"))
        def _got_lit_filenode(n):
            self.failUnless(isinstance(n, LiteralFileNode))
            d = n.check(Monitor())
            def _check_lit_filenode_results(r):
                self.failUnlessEqual(r, None)
            d.addCallback(_check_lit_filenode_results)
            d.addCallback(lambda res: n.check(Monitor(), verify=True))
            d.addCallback(_check_lit_filenode_results)
            return d
        d.addCallback(_got_lit_filenode)
        return d


class Connections(SystemTestMixin, unittest.TestCase):
    def test_rref(self):
        self.basedir = "system/Connections/rref"
        d = self.set_up_nodes(2)
        def _start(ign):
            self.c0 = self.clients[0]
            nonclients = [s for s in self.c0.storage_broker.get_connected_servers()
                          if s.get_serverid() != self.c0.get_long_nodeid()]
            self.failUnlessEqual(len(nonclients), 1)

            self.s1 = nonclients[0]  # s1 is the server, not c0
            self.s1_rref = self.s1.get_rref()
            self.failIfEqual(self.s1_rref, None)
            self.failUnless(self.s1.is_connected())
        d.addCallback(_start)

        # now shut down the server
        d.addCallback(lambda ign: self.clients[1].disownServiceParent())
        # and wait for the client to notice
        def _poll():
            return len(self.c0.storage_broker.get_connected_servers()) < 2
        d.addCallback(lambda ign: self.poll(_poll))

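        # even after the connection is lost, the broker should keep the
        # server entry and hand back the very same (now-dead) RemoteReference
        # rather than forgetting it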
        def _down(ign):
            self.failIf(self.s1.is_connected())
            rref = self.s1.get_rref()
            self.failUnless(rref)
            self.failUnlessIdentical(rref, self.s1_rref)
        d.addCallback(_down)
        return d