diff -Naur orig/zfec-1.4.22/zfec/cmdline_zfec.py streaming/zfec-1.4.22/zfec/cmdline_zfec.py
old
|
new
|
|
57 | 57 | if args.requiredshares == args.totalshares: |
58 | 58 | print "warning: silly parameters: requiredshares == totalshares, which means that all shares will be required in order to reconstruct the file. You could use \"split\" for the same effect. But proceeding to do it anyway..." |
59 | 59 | |
60 | | args.inputfile.seek(0, 2) |
61 | | fsize = args.inputfile.tell() |
62 | | args.inputfile.seek(0, 0) |
| 60 | if args.inputfile.name == '<stdin>': |
| 61 | fsize = None |
| 62 | else: |
| 63 | args.inputfile.seek(0, 2) |
| 64 | fsize = args.inputfile.tell() |
| 65 | args.inputfile.seek(0, 0) |
63 | 66 | return filefec.encode_to_files(args.inputfile, fsize, args.output_dir, args.prefix, args.requiredshares, args.totalshares, args.suffix, args.force, args.verbose) |
64 | 67 | |
65 | 68 | # zfec -- fast forward error correction library with Python interface |
diff -Naur orig/zfec-1.4.22/zfec/cmdline_zunfec.py streaming/zfec-1.4.22/zfec/cmdline_zunfec.py
old
|
new
|
|
30 | 30 | print "At least two sharefiles are required." |
31 | 31 | return 1 |
32 | 32 | |
33 | | if args.force: |
| 33 | if args.outputfile == '-': |
| 34 | outf = sys.stdout |
| 35 | elif args.force: |
34 | 36 | outf = open(args.outputfile, 'wb') |
35 | 37 | else: |
36 | 38 | try: |
… |
… |
|
49 | 51 | for fn in args.sharefiles: |
50 | 52 | sharefs.append(open(fn, 'rb')) |
51 | 53 | try: |
52 | | ret = filefec.decode_from_files(outf, sharefs, args.verbose) |
| 54 | ret = filefec.decode_from_files(outf, sharefs, args.verbose, sys.stdout==outf) |
53 | 55 | except filefec.InsufficientShareFilesError, e: |
54 | 56 | print str(e) |
55 | 57 | return 3 |
diff -Naur orig/zfec-1.4.22/zfec/filefec.py streaming/zfec-1.4.22/zfec/filefec.py
old
|
new
|
|
2 | 2 | from pyutil import fileutil |
3 | 3 | from pyutil.mathutil import pad_size, log_ceil |
4 | 4 | |
5 | | import array, os, struct |
| 5 | import array, os, struct, sys |
6 | 6 | |
7 | 7 | CHUNKSIZE = 4096 |
8 | 8 | |
… |
… |
|
164 | 164 | Encode inf, writing the shares to specially named, newly created files. |
165 | 165 | |
166 | 166 | @param fsize: calling read() on inf must yield fsize bytes of data and |
167 | | then raise an EOFError |
| 167 | then raise an EOFError. If None, the file is being read as a stream |
| 168 | and the size is not known in advance; the header must be updated |
| 169 | after the reading is complete. |
168 | 170 | @param dirname: the name of the directory into which the sharefiles will |
169 | 171 | be written |
170 | 172 | """ |
171 | 173 | mlen = len(str(m)) |
172 | 174 | format = FORMAT_FORMAT % (mlen, mlen,) |
173 | 175 | |
174 | | padbytes = pad_size(fsize, k) |
| 176 | if fsize is None: |
| 177 | # Streaming case, file size is unknown. |
| 178 | # We'll recompute the pad at the end and write it |
| 179 | padbytes = pad_size(0, k) |
| 180 | else: |
| 181 | padbytes = pad_size(fsize, k) |
175 | 182 | |
176 | 183 | fns = [] |
177 | 184 | fs = [] |
… |
… |
|
197 | 204 | oldsumlen = sumlen[0] |
198 | 205 | sumlen[0] += length |
199 | 206 | if verbose: |
200 | | if int((float(oldsumlen) / fsize) * 10) != int((float(sumlen[0]) / fsize) * 10): |
201 | | print str(int((float(sumlen[0]) / fsize) * 10) * 10) + "% ...", |
| 207 | if fsize is None: |
| 208 | # 30MB takes a short while to write on a normal drive. |
| 209 | interval = MILLION_BYTES * 30 |
| 210 | if int((float(oldsumlen) / interval)) != int((float(sumlen[0]) / interval)): |
| 211 | print str(int((float(sumlen[0]) / MILLION_BYTES))) + "MB ...", |
| 212 | else: |
| 213 | if int((float(oldsumlen) / fsize) * 10) != int((float(sumlen[0]) / fsize) * 10): |
| 214 | print str(int((float(sumlen[0]) / fsize) * 10) * 10) + "% ...", |
| 215 | sys.stdout.flush() |
202 | 216 | |
203 | | if sumlen[0] > fsize: |
| 217 | if fsize is not None and sumlen[0] > fsize: |
204 | 218 | raise IOError("Wrong file size -- possibly the size of the file changed during encoding. Original size: %d, observed size at least: %s" % (fsize, sumlen[0],)) |
205 | | for i in range(len(blocks)): |
206 | | data = blocks[i] |
207 | | fs[i].write(data) |
208 | | length -= len(data) |
| 219 | if length != 0: |
| 220 | for i in range(len(blocks)): |
| 221 | data = blocks[i] |
| 222 | fs[i].write(data) |
| 223 | length -= len(data) |
| 224 | # EOF signal: if fsize was unknown, rewrite the headers with the final pad |
| 225 | elif fsize is None: |
| 226 | padbytes = pad_size(oldsumlen, k) |
| 227 | for shnum in range(len(fs)): |
| 228 | hdr = _build_header(m, k, padbytes, shnum) |
| 229 | fs[shnum].seek(0) |
| 230 | fs[shnum].write(hdr) |
209 | 231 | |
210 | 232 | encode_file_stringy_easyfec(inf, cb, k, m, chunksize=4096) |
211 | 233 | except EnvironmentError, le: |
… |
… |
|
232 | 254 | # Thanks. |
233 | 255 | MILLION_BYTES=10**6 |
234 | 256 | |
235 | | def decode_from_files(outf, infiles, verbose=False): |
| 257 | def decode_from_files(outf, infiles, verbose=False, is_stream=False): |
236 | 258 | """ |
237 | 259 | Decode from the first k files in infiles, writing the results to outf. |
238 | 260 | """ |
… |
… |
|
277 | 299 | outf.write(resultdata) |
278 | 300 | byteswritten += len(resultdata) |
279 | 301 | if verbose: |
280 | | if ((byteswritten - len(resultdata)) / (10*MILLION_BYTES)) != (byteswritten / (10*MILLION_BYTES)): |
281 | | print str(byteswritten / MILLION_BYTES) + " MB ...", |
| 302 | if ((byteswritten - len(resultdata)) / (30*MILLION_BYTES)) != (byteswritten / (30*MILLION_BYTES)): |
| 303 | message = str(byteswritten / MILLION_BYTES) + "MB ..." |
| 304 | outpipe = is_stream and sys.stderr or sys.stdout |
| 305 | print >> outpipe, message, |
| 306 | outpipe.flush() |
282 | 307 | else: |
283 | 308 | # Then this was a short read, so we've reached the end of the sharefiles. |
284 | 309 | resultdata = dec.decode(chunks, shnums, padlen) |
285 | 310 | outf.write(resultdata) |
286 | | return # Done. |
| 311 | break # Done |
287 | 312 | if verbose: |
288 | | print |
289 | | print "Done!" |
| 313 | outpipe = is_stream and sys.stderr or sys.stdout |
| 314 | print >> outpipe |
| 315 | print >> outpipe, "Done!" |
290 | 316 | |
291 | 317 | def encode_file(inf, cb, k, m, chunksize=4096): |
292 | 318 | """ |
… |
… |
|
493 | 519 | res = enc.encode(indata) |
494 | 520 | cb(res, len(indata)) |
495 | 521 | indata = inf.read(readsize) |
| 522 | # Final callback to update pad length in header for streaming case. |
| 523 | cb([''] * m, 0) |
496 | 524 | |
497 | 525 | # zfec -- fast forward error correction library with Python interface |
498 | 526 | # |
diff -Naur orig/zfec-1.4.22/zfec/test/test_zfec.py streaming/zfec-1.4.22/zfec/test/test_zfec.py
old
|
new
|
|
246 | 246 | assert (rm, rk, rpad, rsh,) == (m, k, pad, sh,), h |
247 | 247 | |
248 | 248 | def _help_test_filefec(self, teststr, k, m, numshs=None): |
| 249 | self._do_help_test_filefec(teststr, k, m, numshs=numshs, withsize=False) |
| 250 | self._do_help_test_filefec(teststr, k, m, numshs=numshs, withsize=True) |
| 251 | |
| 252 | def _do_help_test_filefec(self, teststr, k, m, numshs=None, withsize=True): |
249 | 253 | if numshs == None: |
250 | 254 | numshs = m |
251 | 255 | |
… |
… |
|
253 | 257 | PREFIX = "test" |
254 | 258 | SUFFIX = ".fec" |
255 | 259 | |
256 | | fsize = len(teststr) |
| 260 | if withsize: |
| 261 | fsize = len(teststr) |
| 262 | else: |
| 263 | fsize = None |
257 | 264 | |
258 | 265 | tempdir = fileutil.NamedTemporaryDirectory(cleanup=True) |
259 | 266 | try: |