HTTP Downloader solution by Анатоли Бързев

Results

  • 8 points from tests
  • 0 bonus points
  • 8 points total
  • 14 passing test(s)
  • 3 failing test(s)

Code

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"sort"
	"sync"
)

// chunk describes one byte range of the file, the URL it is fetched from,
// and the data downloaded for it so far.
type chunk struct {
	data      []byte
	from      int64
	to        int64
	pos       int64
	url       string
	err       error
	ctxClosed bool
}

// comm carries the channels shared between the scheduler and the workers.
type comm struct {
	jobs chan chunk
	res  chan chunk
	done chan struct{}
}

// chunkSort orders chunks by their starting offset.
type chunkSort []chunk

func (a chunkSort) Len() int           { return len(a) }
func (a chunkSort) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a chunkSort) Less(i, j int) bool { return a[i].from < a[j].from }

// file assembles the downloaded chunks and exposes them as an io.Reader.
type file struct {
	chunks     []chunk
	size       int64
	currSize   int64
	pos        int64
	urls       []string
	comm       *comm
	err        error
	maxWorkers int
	lock       *sync.Mutex
}

func (f *file) Read(p []byte) (n int, err error) {
	f.lock.Lock()
	defer f.lock.Unlock()
	if f.err != nil {
		return f.readChunks(p), f.err
	}
	if f.pos == f.size && f.size > 0 {
		return 0, io.EOF
	}
	return f.readChunks(p), nil
}

// readChunks copies as many contiguous bytes as possible into p, starting
// at the current read position, walking the chunks in offset order.
func (f *file) readChunks(p []byte) (n int) {
	prevTo := int64(0)
	bytesToRead := len(p)
	bytes := 0
	if bytesToRead == 0 {
		return 0
	}
	for _, c := range f.chunks {
		// Stop at the first gap in the downloaded ranges.
		if prevTo != c.from {
			break
		}
		prevTo = c.pos
		if f.pos > c.pos {
			continue
		}
		relPos := int64(len(c.data)) - (c.pos - f.pos)
		for _, d := range c.data[relPos:] {
			p[bytes] = d
			bytes++
			f.pos++
			if bytes == bytesToRead {
				return bytes
			}
		}
	}
	return bytes
}

func (f *file) addChunk(chunk chunk) (err error) {
	if len(chunk.data) == 0 {
		return nil
	}
	f.lock.Lock()
	defer f.lock.Unlock()
	f.chunks = append(f.chunks, chunk)
	f.currSize += int64(len(chunk.data))
	sort.Sort(chunkSort(f.chunks))
	return nil
}

func (f *file) setErr(err error) {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.err = err
}

func (f *file) setSize(size int64) {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.size = size
}

func (f *file) isReady() bool {
	return f.currSize == f.size && f.size > 0
}

// DownloadFile starts downloading the file behind urls concurrently and
// returns an io.Reader over the assembled bytes.
func DownloadFile(ctx context.Context, urls []string) io.Reader {
	fmt.Println("---- NEW DOWNLOADER ----")
	maxWorkers := len(urls)
	urlCount := maxWorkers
	if ctx == nil {
		ctx = context.Background()
	}
	if tmpMaxWorkers, ok := ctx.Value("max-connections").(int); ok {
		maxWorkers = tmpMaxWorkers
	}
	comm := &comm{
		jobs: make(chan chunk, urlCount),
		res:  make(chan chunk, urlCount),
		done: make(chan struct{}, 1),
	}
	// Create the download workers.
	for i := 0; i < maxWorkers; i++ {
		go downloadWorker(ctx, comm)
	}
	f := &file{
		chunks:     make([]chunk, 0),
		lock:       &sync.Mutex{},
		comm:       comm,
		urls:       urls,
		maxWorkers: maxWorkers,
	}
	go startDownloading(ctx, f)
	return f
}

// downloadWorker consumes jobs, issues ranged GET requests and reports the
// resulting chunks (or errors) back on comm.res.
func downloadWorker(ctx context.Context, comm *comm) {
	var client http.Client
Breaking:
	for {
		select {
		case chunk := <-comm.jobs:
			to := chunk.to
			from := chunk.from
			chunk.pos = chunk.from
			for {
				rheader := fmt.Sprintf("bytes=%d-%d", from, to)
				req, _ := http.NewRequest("GET", chunk.url, nil)
				req.Header.Add("Range", rheader)
				resp, rerr := client.Do(req)
				if rerr != nil {
					chunk.err = rerr
					comm.res <- chunk
					continue Breaking
				}
				if resp.StatusCode < 200 || resp.StatusCode > 299 {
					resp.Body.Close()
					chunk.err = fmt.Errorf("HTTP status %v", resp.StatusCode)
					comm.res <- chunk
					continue Breaking
				}
				buf := make([]byte, 1000)
				var ioerr error
				var read int
				for {
					read, ioerr = resp.Body.Read(buf)
					if read > 0 {
						chunk.data = append(chunk.data, buf[:read]...)
						chunk.pos = chunk.pos + int64(len(buf[:read]))
					}
					if ioerr != nil {
						break
					}
					// Abort mid-download if the context is cancelled.
					select {
					case <-ctx.Done():
						resp.Body.Close()
						chunk.err = ctx.Err()
						chunk.ctxClosed = true
						comm.res <- chunk
						return
					default:
					}
				}
				resp.Body.Close()
				// Done once the whole requested range has arrived.
				if (ioerr == nil || ioerr == io.EOF) && (chunk.pos-1) == chunk.to {
					break
				}
				// Otherwise retry from the first missing byte.
				from = chunk.pos
			}
			comm.res <- chunk
		case <-comm.done:
			return
		case <-ctx.Done():
			chunk := chunk{
				data: make([]byte, 0),
			}
			chunk.err = ctx.Err()
			chunk.ctxClosed = true
			comm.res <- chunk
			return
		}
	}
}

// startDownloading determines the content length, splits it into jobs and
// collects results while the file is not ready.
func startDownloading(ctx context.Context, f *file) {
	defer close(f.comm.done)
	contentLen, err := readContentLen(f.urls)
	if err != nil {
		fmt.Println("Failed to read content len from: ", f.urls)
		f.setErr(err)
		return
	}
	f.setSize(contentLen)
	currUrls := f.urls
	createDownloadJobs(currUrls, f.size, 0, f.comm.jobs)
	ctxErrs := make([]error, 0)
	for {
		select {
		case chunk := <-f.comm.res:
			if chunk.err != nil {
				fmt.Printf(
					"Failed to download chunk %v-%v(%v): %v\n",
					chunk.from,
					chunk.to,
					chunk.pos,
					chunk.err,
				)
				f.addChunk(chunk)
				if chunk.ctxClosed {
					ctxErrs = append(ctxErrs, chunk.err)
				} else {
					// Drop the failing URL and redistribute the rest
					// of its range over the remaining URLs.
					currUrls = discardURL(currUrls, chunk.url)
					if len(currUrls) == 0 {
						f.setErr(fmt.Errorf("no valid urls"))
						return
					}
					currLen := chunk.to - chunk.pos
					currStart := chunk.pos
					createDownloadJobs(currUrls, currLen, currStart, f.comm.jobs)
				}
			} else {
				f.addChunk(chunk)
			}
			if f.isReady() {
				return
			}
		case <-ctx.Done():
			if len(ctxErrs) == f.maxWorkers {
				f.setErr(ctxErrs[len(ctxErrs)-1])
				return
			}
		}
	}
}

// readContentLen issues HEAD requests until one of the URLs reports a
// usable Content-Length.
func readContentLen(urls []string) (size int64, err error) {
	for _, url := range urls {
		resp, tmperr := http.Head(url)
		if tmperr != nil {
			err = fmt.Errorf("[ERROR] HEAD %v: %v", url, tmperr)
			continue
		}
		if resp.StatusCode < 200 || resp.StatusCode > 299 {
			err = fmt.Errorf("[ERROR] HEAD `%v`[%v]", url, resp.StatusCode)
			continue
		}
		if resp.ContentLength == -1 || resp.ContentLength == 0 {
			err = io.EOF
			continue
		}
		size = resp.ContentLength
		err = nil
		break
	}
	return size, err
}

// createDownloadJobs splits currLen bytes starting at start into one range
// per URL and queues a job for each.
func createDownloadJobs(urls []string, currLen, start int64, jobsChan chan<- chunk) {
	ranges := calcRanges(int64(len(urls)), currLen, start)
	for idx, url := range urls {
		r := ranges[idx]
		fmt.Printf("New job for range %v-%v/%v : %v\n", r[0], r[1], currLen, url)
		job := chunk{
			data: make([]byte, 0),
			from: r[0],
			to:   r[1],
			url:  url,
		}
		jobsChan <- job
	}
}

// calcRanges divides contentLen bytes starting at start into urlCount
// near-equal inclusive [from, to] pairs, spreading the remainder over the
// first ranges.
func calcRanges(urlCount int64, contentLen, start int64) (ranges [][]int64) {
	var i int64
	lastByte := contentLen % urlCount
	step := contentLen / urlCount
	end := start + step
	ranges = make([][]int64, urlCount)
	if start == 0 {
		end--
	}
	for i = 0; i < urlCount; i++ {
		ranges[i] = make([]int64, 2)
		if lastByte > 0 {
			end++
			lastByte--
		}
		ranges[i][0] = start
		ranges[i][1] = end
		start = end + 1
		end = end + step
	}
	return ranges
}

// discardURL removes url from urls in place.
func discardURL(urls []string, url string) []string {
	for idx, tmpURL := range urls {
		if url == tmpURL {
			fmt.Printf("Discarding url %s ...\n", url)
			return append(urls[:idx], urls[idx+1:]...)
		}
	}
	return nil
}

func main() {
	fmt.Println("ahellow")
}
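
For reference, below is a minimal, hypothetical usage sketch of the DownloadFile API above. It assumes it compiles in the same package as the solution (with "bytes" added to the import block), the two mirror URLs are placeholders, and it drains the returned io.Reader through a bytes.Buffer the same way the test suite does (see the stack traces in the log below).

// exampleDownload is a hypothetical caller of DownloadFile.
func exampleDownload() error {
	// Placeholder mirrors of one and the same file; the servers must
	// honour HTTP Range requests.
	urls := []string{
		"http://127.0.0.1:8080/pesho",
		"http://127.0.0.1:8081/pesho",
	}
	// The solution reads its worker limit from this context key.
	ctx := context.WithValue(context.Background(), "max-connections", 2)
	var buf bytes.Buffer
	// ReadFrom keeps calling Read until an error; a clean io.EOF is
	// reported as success.
	if _, err := buf.ReadFrom(DownloadFile(ctx, urls)); err != nil {
		return err
	}
	fmt.Printf("downloaded %d bytes\n", buf.Len())
	return nil
}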

Execution log

---- NEW DOWNLOADER ----
Failed to read content len from:  [http://127.0.0.1:48319/pesho]
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.010s
---- NEW DOWNLOADER ----
New job for range 0-36/37 : http://127.0.0.1:48315/pesho
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.007s
---- NEW DOWNLOADER ----
New job for range 0-36/37 : http://127.0.0.1:43531/pesho
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.007s
---- NEW DOWNLOADER ----
New job for range 0-36/37 : http://127.0.0.1:36255/pesho
panic: test timed out after 1s

goroutine 10 [running]:
panic(0x66afe0, 0xc420118210)
	/usr/local/go/src/runtime/panic.go:500 +0x1a1
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:918 +0x10b
created by time.goFunc
	/usr/local/go/src/time/sleep.go:154 +0x44

goroutine 1 [chan receive]:
testing.(*T).Run(0xc42007c0c0, 0x6d9c89, 0x32, 0x6f1db0, 0xc42004bd01)
	/usr/local/go/src/testing/testing.go:647 +0x316
testing.RunTests.func1(0xc42007c0c0)
	/usr/local/go/src/testing/testing.go:793 +0x6d
testing.tRunner(0xc42007c0c0, 0xc42004be20)
	/usr/local/go/src/testing/testing.go:610 +0x81
testing.RunTests(0x6f1fa8, 0x805180, 0x11, 0x11, 0x7fb3ca096000)
	/usr/local/go/src/testing/testing.go:799 +0x2f5
testing.(*M).Run(0xc42004bee8, 0x688ea0)
	/usr/local/go/src/testing/testing.go:743 +0x85
main.main()
	_/tmp/d20170109-30451-uky4cw/_test/_testmain.go:86 +0xc6

goroutine 17 [syscall, locked to thread]:
runtime.goexit()
	/usr/local/go/src/runtime/asm_amd64.s:2086 +0x1

goroutine 6 [runnable]:
_/tmp/d20170109-30451-uky4cw.(*file).Read(0xc420014380, 0xc420016400, 0x200, 0x200, 0x0, 0x0, 0x0)
	/tmp/d20170109-30451-uky4cw/solution.go:46
bytes.(*Buffer).ReadFrom(0xc42003bef8, 0x7e8860, 0xc420014380, 0x1, 0x1, 0x7e8860)
	/usr/local/go/src/bytes/buffer.go:176 +0x155
_/tmp/d20170109-30451-uky4cw.TestSingleURLCancelContextAfterHalfBytesWereServed(0xc42007c180)
	/tmp/d20170109-30451-uky4cw/solution_test.go:193 +0x2e7
testing.tRunner(0xc42007c180, 0x6f1db0)
	/usr/local/go/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
	/usr/local/go/src/testing/testing.go:646 +0x2ec

goroutine 7 [IO wait]:
net.runtime_pollWait(0x7fb3ca03b178, 0x72, 0x0)
	/usr/local/go/src/runtime/netpoll.go:160 +0x59
net.(*pollDesc).wait(0xc420014370, 0x72, 0xc4200285e0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:73 +0x38
net.(*pollDesc).waitRead(0xc420014370, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:78 +0x34
net.(*netFD).accept(0xc420014310, 0x0, 0x7e94a0, 0xc4200d6240)
	/usr/local/go/src/net/fd_unix.go:419 +0x238
net.(*TCPListener).accept(0xc42002a048, 0x43413e, 0xc420028690, 0x52dabd)
	/usr/local/go/src/net/tcpsock_posix.go:132 +0x2e
net.(*TCPListener).Accept(0xc42002a048, 0x6f2188, 0xc4200e0080, 0x7ed460, 0xc4200dc060)
	/usr/local/go/src/net/tcpsock.go:222 +0x49
net/http.(*Server).Serve(0xc42001a300, 0x7ecba0, 0xc42002a048, 0x0, 0x0)
	/usr/local/go/src/net/http/server.go:2273 +0x1ce
net/http/httptest.(*Server).goServe.func1(0xc42005e4e0)
	/usr/local/go/src/net/http/httptest/server.go:235 +0x6d
created by net/http/httptest.(*Server).goServe
	/usr/local/go/src/net/http/httptest/server.go:236 +0x5c

goroutine 8 [select]:
net/http.(*persistConn).roundTrip(0xc4200f0000, 0xc4200bf8c0, 0x0, 0x0, 0x0)
	/usr/local/go/src/net/http/transport.go:1840 +0x93b
net/http.(*Transport).RoundTrip(0xc4200c2000, 0xc4200c3ef0, 0xc4200c2000, 0x0, 0xc400000000)
	/usr/local/go/src/net/http/transport.go:380 +0x4ee
net/http.send(0xc4200c3ef0, 0x7e8ea0, 0xc4200c2000, 0x0, 0x0, 0x0, 0x8, 0xc420049900, 0xc42002a268)
	/usr/local/go/src/net/http/client.go:256 +0x15f
net/http.(*Client).send(0xc420049bd0, 0xc4200c3ef0, 0x0, 0x0, 0x0, 0xc42002a268, 0x0, 0x1)
	/usr/local/go/src/net/http/client.go:146 +0x102
net/http.(*Client).doFollowingRedirects(0xc420049bd0, 0xc4200c3ef0, 0x6f2320, 0x3, 0x51f601, 0xc420111080)
	/usr/local/go/src/net/http/client.go:528 +0x5e5
net/http.(*Client).Do(0xc420049bd0, 0xc4200c3ef0, 0x5, 0xc420118150, 0xb)
	/usr/local/go/src/net/http/client.go:184 +0x1ea
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed3a0, 0xc420018980, 0xc4200bea00)
	/tmp/d20170109-30451-uky4cw/solution.go:180 +0x364
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 9 [runnable]:
_/tmp/d20170109-30451-uky4cw.startDownloading(0x7ed3a0, 0xc420018980, 0xc420014380)
	/tmp/d20170109-30451-uky4cw/solution.go:265 +0x83f
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:160 +0x455

goroutine 22 [IO wait]:
net.runtime_pollWait(0x7fb3ca03aff8, 0x72, 0x6)
	/usr/local/go/src/runtime/netpoll.go:160 +0x59
net.(*pollDesc).wait(0xc4200f21b0, 0x72, 0xc4200397b0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:73 +0x38
net.(*pollDesc).waitRead(0xc4200f21b0, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:78 +0x34
net.(*netFD).Read(0xc4200f2150, 0xc4200f8000, 0x1000, 0x1000, 0x0, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_unix.go:243 +0x1a1
net.(*conn).Read(0xc4200e4020, 0xc4200f8000, 0x1000, 0x1000, 0x0, 0x0, 0x0)
	/usr/local/go/src/net/net.go:173 +0x70
net/http.(*connReader).Read(0xc4200d6260, 0xc4200f8000, 0x1000, 0x1000, 0xc420039918, 0x6d2ef7, 0x19)
	/usr/local/go/src/net/http/server.go:586 +0x144
bufio.(*Reader).fill(0xc4200e86c0)
	/usr/local/go/src/bufio/bufio.go:97 +0x10c
bufio.(*Reader).ReadSlice(0xc4200e86c0, 0xa, 0x0, 0x1e, 0xc4200399d8, 0x33, 0x0)
	/usr/local/go/src/bufio/bufio.go:330 +0xb5
bufio.(*Reader).ReadLine(0xc4200e86c0, 0xc42011c000, 0xf0, 0xf0, 0x6c2ea0, 0x4a1b01, 0x1000000807db8)
	/usr/local/go/src/bufio/bufio.go:359 +0x37
net/textproto.(*Reader).readLineSlice(0xc4200dc390, 0xc420039aa8, 0xc420039aa8, 0x410688, 0xf0, 0x6c2ea0)
	/usr/local/go/src/net/textproto/reader.go:55 +0x5e
net/textproto.(*Reader).ReadLine(0xc4200dc390, 0xc42011c000, 0xc420039b20, 0x401863, 0xc420039c78)
	/usr/local/go/src/net/textproto/reader.go:36 +0x2f
net/http.readRequest(0xc4200e86c0, 0xc420039c00, 0xc42011c000, 0x0, 0x0)
	/usr/local/go/src/net/http/request.go:793 +0xa5
net/http.(*conn).readRequest(0xc4200e0080, 0x7ed3a0, 0xc4200d2340, 0x0, 0x0, 0x0)
	/usr/local/go/src/net/http/server.go:765 +0x10d
net/http.(*conn).serve(0xc4200e0080, 0x7ed3a0, 0xc4200d2340)
	/usr/local/go/src/net/http/server.go:1532 +0x3d3
created by net/http.(*Server).Serve
	/usr/local/go/src/net/http/server.go:2293 +0x44d

goroutine 20 [IO wait]:
net.runtime_pollWait(0x7fb3ca03b0b8, 0x72, 0x5)
	/usr/local/go/src/runtime/netpoll.go:160 +0x59
net.(*pollDesc).wait(0xc4200f20d0, 0x72, 0xc4200379d0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:73 +0x38
net.(*pollDesc).waitRead(0xc4200f20d0, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:78 +0x34
net.(*netFD).Read(0xc4200f2070, 0xc4200f4000, 0x1000, 0x1000, 0x0, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_unix.go:243 +0x1a1
net.(*conn).Read(0xc4200e4018, 0xc4200f4000, 0x1000, 0x1000, 0x0, 0x0, 0x0)
	/usr/local/go/src/net/net.go:173 +0x70
net/http.(*persistConn).Read(0xc4200f0000, 0xc4200f4000, 0x1000, 0x1000, 0x30, 0xc420037b58, 0x43b1ec)
	/usr/local/go/src/net/http/transport.go:1261 +0x154
bufio.(*Reader).fill(0xc4200e84e0)
	/usr/local/go/src/bufio/bufio.go:97 +0x10c
bufio.(*Reader).Peek(0xc4200e84e0, 0x1, 0x0, 0x1, 0x0, 0xc42005fda0, 0x0)
	/usr/local/go/src/bufio/bufio.go:129 +0x62
net/http.(*persistConn).readLoop(0xc4200f0000)
	/usr/local/go/src/net/http/transport.go:1418 +0x1a1
created by net/http.(*Transport).dialConn
	/usr/local/go/src/net/http/transport.go:1062 +0x4e9

goroutine 21 [select]:
net/http.(*persistConn).writeLoop(0xc4200f0000)
	/usr/local/go/src/net/http/transport.go:1646 +0x3bd
created by net/http.(*Transport).dialConn
	/usr/local/go/src/net/http/transport.go:1063 +0x50e
exit status 2
FAIL	_/tmp/d20170109-30451-uky4cw	1.014s
---- NEW DOWNLOADER ----
Failed to read content len from:  [http://some.non.existing.domain.at.nowhere/pesho]
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.010s
---- NEW DOWNLOADER ----
New job for range 0-36/37 : http://127.0.0.1:38692/pesho
Failed to download chunk 0-36(10): HTTP status 500
Discarding url http://127.0.0.1:38692/pesho ...
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.007s
---- NEW DOWNLOADER ----
New job for range 0-18/37 : http://127.0.0.1:48148/pesho
New job for range 19-36/37 : http://127.0.0.1:48148/pesho2
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.008s
---- NEW DOWNLOADER ----
New job for range 0-18/37 : http://127.0.0.1:53780/pesho
New job for range 19-36/37 : http://127.0.0.1:53780/pesho2
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.007s
---- NEW DOWNLOADER ----
New job for range 0-18/37 : http://127.0.0.1:60236/pesho
New job for range 19-36/37 : http://some.non.existing.domain.at.nowhere/pesho
Failed to download chunk 19-36(19): Get http://some.non.existing.domain.at.nowhere/pesho: dial tcp: lookup some.non.existing.domain.at.nowhere on 192.168.1.1:53: no such host
Discarding url http://some.non.existing.domain.at.nowhere/pesho ...
New job for range 19-36/17 : http://127.0.0.1:60236/pesho
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.016s
---- NEW DOWNLOADER ----
New job for range 0-12/37 : http://some.non.existing.domain.at.nowhere/pesho
New job for range 13-24/37 : http://127.0.0.1:37827/pesho
New job for range 25-36/37 : 
Failed to download chunk 25-36(25): Get : unsupported protocol scheme ""
Discarding url  ...
New job for range 25-31/11 : http://some.non.existing.domain.at.nowhere/pesho
New job for range 32-36/11 : http://127.0.0.1:37827/pesho
Failed to download chunk 0-12(0): Get http://some.non.existing.domain.at.nowhere/pesho: dial tcp: lookup some.non.existing.domain.at.nowhere on 192.168.1.1:53: no such host
Discarding url http://some.non.existing.domain.at.nowhere/pesho ...
New job for range 0-11/12 : http://127.0.0.1:37827/pesho
Failed to download chunk 25-31(25): Get http://some.non.existing.domain.at.nowhere/pesho: dial tcp: lookup some.non.existing.domain.at.nowhere on 192.168.1.1:53: no such host
--- FAIL: TestTwoUrlsWithTheOtherOneBroken (0.01s)
	solution_test.go:481: Expected to read 37 bytes from simple download but got 0
	solution_test.go:485: Expected to get no error, but got 'no valid urls'
	solution_test.go:53: Expected result was '5468697320495320746865206d6f73742065706963206f6620616c6c20726573706f6e7365' but got ''
FAIL
exit status 1
FAIL	_/tmp/d20170109-30451-uky4cw	0.015s
---- NEW DOWNLOADER ----
New job for range 0-18/37 : http://127.0.0.1:36575/pesho
New job for range 19-36/37 : http://127.0.0.1:36575/pesho2
Failed to download chunk 0-18(5): HTTP status 500
Discarding url http://127.0.0.1:36575/pesho ...
New job for range 5-18/13 : http://127.0.0.1:36575/pesho2
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.011s
---- NEW DOWNLOADER ----
New job for range 0-12/37 : http://127.0.0.1:55737/pesho
New job for range 13-24/37 : http://127.0.0.1:55737/pesho2
New job for range 25-36/37 : http://127.0.0.1:55737/pesho3
Failed to download chunk 0-12(2): HTTP status 500
Discarding url http://127.0.0.1:55737/pesho ...
New job for range 2-7/10 : http://127.0.0.1:55737/pesho2
New job for range 8-12/10 : http://127.0.0.1:55737/pesho3
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.012s
---- NEW DOWNLOADER ----
New job for range 0-36/37 : http://127.0.0.1:45404/pesho
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.030s
---- NEW DOWNLOADER ----
New job for range 0-18/37 : http://127.0.0.1:53274/pesho
New job for range 19-36/37 : http://127.0.0.1:53274/pesho2
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.027s
---- NEW DOWNLOADER ----
New job for range 0-0/500 : http://127.0.0.1:52830/0
New job for range 1-1/500 : http://127.0.0.1:52830/1
New job for range 2-2/500 : http://127.0.0.1:52830/2
New job for range 3-3/500 : http://127.0.0.1:52830/3
New job for range 4-4/500 : http://127.0.0.1:52830/4
New job for range 5-5/500 : http://127.0.0.1:52830/5
New job for range 6-6/500 : http://127.0.0.1:52830/6
New job for range 7-7/500 : http://127.0.0.1:52830/7
New job for range 8-8/500 : http://127.0.0.1:52830/8
New job for range 9-9/500 : http://127.0.0.1:52830/9
New job for range 10-10/500 : http://127.0.0.1:52830/10
New job for range 11-11/500 : http://127.0.0.1:52830/11
New job for range 12-12/500 : http://127.0.0.1:52830/12
New job for range 13-13/500 : http://127.0.0.1:52830/13
New job for range 14-14/500 : http://127.0.0.1:52830/14
New job for range 15-15/500 : http://127.0.0.1:52830/15
New job for range 16-16/500 : http://127.0.0.1:52830/16
New job for range 17-17/500 : http://127.0.0.1:52830/17
New job for range 18-18/500 : http://127.0.0.1:52830/18
New job for range 19-19/500 : http://127.0.0.1:52830/19
New job for range 20-20/500 : http://127.0.0.1:52830/20
New job for range 21-21/500 : http://127.0.0.1:52830/21
New job for range 22-22/500 : http://127.0.0.1:52830/22
New job for range 23-23/500 : http://127.0.0.1:52830/23
New job for range 24-24/500 : http://127.0.0.1:52830/24
New job for range 25-25/500 : http://127.0.0.1:52830/25
New job for range 26-26/500 : http://127.0.0.1:52830/26
New job for range 27-27/500 : http://127.0.0.1:52830/27
New job for range 28-28/500 : http://127.0.0.1:52830/28
New job for range 29-29/500 : http://127.0.0.1:52830/29
New job for range 30-30/500 : http://127.0.0.1:52830/30
New job for range 31-31/500 : http://127.0.0.1:52830/31
New job for range 32-32/500 : http://127.0.0.1:52830/32
New job for range 33-33/500 : http://127.0.0.1:52830/33
New job for range 34-34/500 : http://127.0.0.1:52830/34
New job for range 35-35/500 : http://127.0.0.1:52830/35
New job for range 36-36/500 : http://127.0.0.1:52830/36
New job for range 37-37/500 : http://127.0.0.1:52830/37
New job for range 38-38/500 : http://127.0.0.1:52830/38
New job for range 39-39/500 : http://127.0.0.1:52830/39
New job for range 40-40/500 : http://127.0.0.1:52830/40
New job for range 41-41/500 : http://127.0.0.1:52830/41
New job for range 42-42/500 : http://127.0.0.1:52830/42
New job for range 43-43/500 : http://127.0.0.1:52830/43
New job for range 44-44/500 : http://127.0.0.1:52830/44
New job for range 45-45/500 : http://127.0.0.1:52830/45
New job for range 46-46/500 : http://127.0.0.1:52830/46
New job for range 47-47/500 : http://127.0.0.1:52830/47
New job for range 48-48/500 : http://127.0.0.1:52830/48
New job for range 49-49/500 : http://127.0.0.1:52830/49
New job for range 50-50/500 : http://127.0.0.1:52830/50
New job for range 51-51/500 : http://127.0.0.1:52830/51
New job for range 52-52/500 : http://127.0.0.1:52830/52
New job for range 53-53/500 : http://127.0.0.1:52830/53
New job for range 54-54/500 : http://127.0.0.1:52830/54
New job for range 55-55/500 : http://127.0.0.1:52830/55
New job for range 56-56/500 : http://127.0.0.1:52830/56
New job for range 57-57/500 : http://127.0.0.1:52830/57
New job for range 58-58/500 : http://127.0.0.1:52830/58
New job for range 59-59/500 : http://127.0.0.1:52830/59
New job for range 60-60/500 : http://127.0.0.1:52830/60
New job for range 61-61/500 : http://127.0.0.1:52830/61
New job for range 62-62/500 : http://127.0.0.1:52830/62
New job for range 63-63/500 : http://127.0.0.1:52830/63
New job for range 64-64/500 : http://127.0.0.1:52830/64
New job for range 65-65/500 : http://127.0.0.1:52830/65
New job for range 66-66/500 : http://127.0.0.1:52830/66
New job for range 67-67/500 : http://127.0.0.1:52830/67
New job for range 68-68/500 : http://127.0.0.1:52830/68
New job for range 69-69/500 : http://127.0.0.1:52830/69
New job for range 70-70/500 : http://127.0.0.1:52830/70
New job for range 71-71/500 : http://127.0.0.1:52830/71
New job for range 72-72/500 : http://127.0.0.1:52830/72
New job for range 73-73/500 : http://127.0.0.1:52830/73
New job for range 74-74/500 : http://127.0.0.1:52830/74
New job for range 75-75/500 : http://127.0.0.1:52830/75
New job for range 76-76/500 : http://127.0.0.1:52830/76
New job for range 77-77/500 : http://127.0.0.1:52830/77
New job for range 78-78/500 : http://127.0.0.1:52830/78
New job for range 79-79/500 : http://127.0.0.1:52830/79
New job for range 80-80/500 : http://127.0.0.1:52830/80
New job for range 81-81/500 : http://127.0.0.1:52830/81
New job for range 82-82/500 : http://127.0.0.1:52830/82
New job for range 83-83/500 : http://127.0.0.1:52830/83
New job for range 84-84/500 : http://127.0.0.1:52830/84
New job for range 85-85/500 : http://127.0.0.1:52830/85
New job for range 86-86/500 : http://127.0.0.1:52830/86
New job for range 87-87/500 : http://127.0.0.1:52830/87
New job for range 88-88/500 : http://127.0.0.1:52830/88
New job for range 89-89/500 : http://127.0.0.1:52830/89
New job for range 90-90/500 : http://127.0.0.1:52830/90
New job for range 91-91/500 : http://127.0.0.1:52830/91
New job for range 92-92/500 : http://127.0.0.1:52830/92
New job for range 93-93/500 : http://127.0.0.1:52830/93
New job for range 94-94/500 : http://127.0.0.1:52830/94
New job for range 95-95/500 : http://127.0.0.1:52830/95
New job for range 96-96/500 : http://127.0.0.1:52830/96
New job for range 97-97/500 : http://127.0.0.1:52830/97
New job for range 98-98/500 : http://127.0.0.1:52830/98
New job for range 99-99/500 : http://127.0.0.1:52830/99
New job for range 100-100/500 : http://127.0.0.1:52830/100
New job for range 101-101/500 : http://127.0.0.1:52830/101
New job for range 102-102/500 : http://127.0.0.1:52830/102
New job for range 103-103/500 : http://127.0.0.1:52830/103
New job for range 104-104/500 : http://127.0.0.1:52830/104
New job for range 105-105/500 : http://127.0.0.1:52830/105
New job for range 106-106/500 : http://127.0.0.1:52830/106
New job for range 107-107/500 : http://127.0.0.1:52830/107
New job for range 108-108/500 : http://127.0.0.1:52830/108
New job for range 109-109/500 : http://127.0.0.1:52830/109
New job for range 110-110/500 : http://127.0.0.1:52830/110
New job for range 111-111/500 : http://127.0.0.1:52830/111
New job for range 112-112/500 : http://127.0.0.1:52830/112
New job for range 113-113/500 : http://127.0.0.1:52830/113
New job for range 114-114/500 : http://127.0.0.1:52830/114
New job for range 115-115/500 : http://127.0.0.1:52830/115
New job for range 116-116/500 : http://127.0.0.1:52830/116
New job for range 117-117/500 : http://127.0.0.1:52830/117
New job for range 118-118/500 : http://127.0.0.1:52830/118
New job for range 119-119/500 : http://127.0.0.1:52830/119
New job for range 120-120/500 : http://127.0.0.1:52830/120
New job for range 121-121/500 : http://127.0.0.1:52830/121
New job for range 122-122/500 : http://127.0.0.1:52830/122
New job for range 123-123/500 : http://127.0.0.1:52830/123
New job for range 124-124/500 : http://127.0.0.1:52830/124
New job for range 125-125/500 : http://127.0.0.1:52830/125
New job for range 126-126/500 : http://127.0.0.1:52830/126
New job for range 127-127/500 : http://127.0.0.1:52830/127
New job for range 128-128/500 : http://127.0.0.1:52830/128
New job for range 129-129/500 : http://127.0.0.1:52830/129
New job for range 130-130/500 : http://127.0.0.1:52830/130
New job for range 131-131/500 : http://127.0.0.1:52830/131
New job for range 132-132/500 : http://127.0.0.1:52830/132
New job for range 133-133/500 : http://127.0.0.1:52830/133
New job for range 134-134/500 : http://127.0.0.1:52830/134
New job for range 135-135/500 : http://127.0.0.1:52830/135
New job for range 136-136/500 : http://127.0.0.1:52830/136
New job for range 137-137/500 : http://127.0.0.1:52830/137
New job for range 138-138/500 : http://127.0.0.1:52830/138
New job for range 139-139/500 : http://127.0.0.1:52830/139
New job for range 140-140/500 : http://127.0.0.1:52830/140
New job for range 141-141/500 : http://127.0.0.1:52830/141
New job for range 142-142/500 : http://127.0.0.1:52830/142
New job for range 143-143/500 : http://127.0.0.1:52830/143
New job for range 144-144/500 : http://127.0.0.1:52830/144
New job for range 145-145/500 : http://127.0.0.1:52830/145
New job for range 146-146/500 : http://127.0.0.1:52830/146
New job for range 147-147/500 : http://127.0.0.1:52830/147
New job for range 148-148/500 : http://127.0.0.1:52830/148
New job for range 149-149/500 : http://127.0.0.1:52830/149
New job for range 150-150/500 : http://127.0.0.1:52830/150
New job for range 151-151/500 : http://127.0.0.1:52830/151
New job for range 152-152/500 : http://127.0.0.1:52830/152
New job for range 153-153/500 : http://127.0.0.1:52830/153
New job for range 154-154/500 : http://127.0.0.1:52830/154
New job for range 155-155/500 : http://127.0.0.1:52830/155
New job for range 156-156/500 : http://127.0.0.1:52830/156
New job for range 157-157/500 : http://127.0.0.1:52830/157
New job for range 158-158/500 : http://127.0.0.1:52830/158
New job for range 159-159/500 : http://127.0.0.1:52830/159
New job for range 160-160/500 : http://127.0.0.1:52830/160
New job for range 161-161/500 : http://127.0.0.1:52830/161
New job for range 162-162/500 : http://127.0.0.1:52830/162
New job for range 163-163/500 : http://127.0.0.1:52830/163
New job for range 164-164/500 : http://127.0.0.1:52830/164
New job for range 165-165/500 : http://127.0.0.1:52830/165
New job for range 166-166/500 : http://127.0.0.1:52830/166
New job for range 167-167/500 : http://127.0.0.1:52830/167
New job for range 168-168/500 : http://127.0.0.1:52830/168
New job for range 169-169/500 : http://127.0.0.1:52830/169
New job for range 170-170/500 : http://127.0.0.1:52830/170
New job for range 171-171/500 : http://127.0.0.1:52830/171
New job for range 172-172/500 : http://127.0.0.1:52830/172
New job for range 173-173/500 : http://127.0.0.1:52830/173
New job for range 174-174/500 : http://127.0.0.1:52830/174
New job for range 175-175/500 : http://127.0.0.1:52830/175
New job for range 176-176/500 : http://127.0.0.1:52830/176
New job for range 177-177/500 : http://127.0.0.1:52830/177
New job for range 178-178/500 : http://127.0.0.1:52830/178
New job for range 179-179/500 : http://127.0.0.1:52830/179
New job for range 180-180/500 : http://127.0.0.1:52830/180
New job for range 181-181/500 : http://127.0.0.1:52830/181
New job for range 182-182/500 : http://127.0.0.1:52830/182
New job for range 183-183/500 : http://127.0.0.1:52830/183
New job for range 184-184/500 : http://127.0.0.1:52830/184
New job for range 185-185/500 : http://127.0.0.1:52830/185
New job for range 186-186/500 : http://127.0.0.1:52830/186
New job for range 187-187/500 : http://127.0.0.1:52830/187
New job for range 188-188/500 : http://127.0.0.1:52830/188
New job for range 189-189/500 : http://127.0.0.1:52830/189
New job for range 190-190/500 : http://127.0.0.1:52830/190
New job for range 191-191/500 : http://127.0.0.1:52830/191
New job for range 192-192/500 : http://127.0.0.1:52830/192
New job for range 193-193/500 : http://127.0.0.1:52830/193
New job for range 194-194/500 : http://127.0.0.1:52830/194
New job for range 195-195/500 : http://127.0.0.1:52830/195
New job for range 196-196/500 : http://127.0.0.1:52830/196
New job for range 197-197/500 : http://127.0.0.1:52830/197
New job for range 198-198/500 : http://127.0.0.1:52830/198
New job for range 199-199/500 : http://127.0.0.1:52830/199
New job for range 200-200/500 : http://127.0.0.1:52830/200
New job for range 201-201/500 : http://127.0.0.1:52830/201
New job for range 202-202/500 : http://127.0.0.1:52830/202
New job for range 203-203/500 : http://127.0.0.1:52830/203
New job for range 204-204/500 : http://127.0.0.1:52830/204
New job for range 205-205/500 : http://127.0.0.1:52830/205
New job for range 206-206/500 : http://127.0.0.1:52830/206
New job for range 207-207/500 : http://127.0.0.1:52830/207
New job for range 208-208/500 : http://127.0.0.1:52830/208
New job for range 209-209/500 : http://127.0.0.1:52830/209
New job for range 210-210/500 : http://127.0.0.1:52830/210
New job for range 211-211/500 : http://127.0.0.1:52830/211
New job for range 212-212/500 : http://127.0.0.1:52830/212
New job for range 213-213/500 : http://127.0.0.1:52830/213
New job for range 214-214/500 : http://127.0.0.1:52830/214
New job for range 215-215/500 : http://127.0.0.1:52830/215
New job for range 216-216/500 : http://127.0.0.1:52830/216
New job for range 217-217/500 : http://127.0.0.1:52830/217
New job for range 218-218/500 : http://127.0.0.1:52830/218
New job for range 219-219/500 : http://127.0.0.1:52830/219
New job for range 220-220/500 : http://127.0.0.1:52830/220
New job for range 221-221/500 : http://127.0.0.1:52830/221
New job for range 222-222/500 : http://127.0.0.1:52830/222
New job for range 223-223/500 : http://127.0.0.1:52830/223
New job for range 224-224/500 : http://127.0.0.1:52830/224
New job for range 225-225/500 : http://127.0.0.1:52830/225
New job for range 226-226/500 : http://127.0.0.1:52830/226
New job for range 227-227/500 : http://127.0.0.1:52830/227
New job for range 228-228/500 : http://127.0.0.1:52830/228
New job for range 229-229/500 : http://127.0.0.1:52830/229
New job for range 230-230/500 : http://127.0.0.1:52830/230
New job for range 231-231/500 : http://127.0.0.1:52830/231
New job for range 232-232/500 : http://127.0.0.1:52830/232
New job for range 233-233/500 : http://127.0.0.1:52830/233
New job for range 234-234/500 : http://127.0.0.1:52830/234
New job for range 235-235/500 : http://127.0.0.1:52830/235
New job for range 236-236/500 : http://127.0.0.1:52830/236
New job for range 237-237/500 : http://127.0.0.1:52830/237
New job for range 238-238/500 : http://127.0.0.1:52830/238
New job for range 239-239/500 : http://127.0.0.1:52830/239
New job for range 240-240/500 : http://127.0.0.1:52830/240
New job for range 241-241/500 : http://127.0.0.1:52830/241
New job for range 242-242/500 : http://127.0.0.1:52830/242
New job for range 243-243/500 : http://127.0.0.1:52830/243
New job for range 244-244/500 : http://127.0.0.1:52830/244
New job for range 245-245/500 : http://127.0.0.1:52830/245
New job for range 246-246/500 : http://127.0.0.1:52830/246
New job for range 247-247/500 : http://127.0.0.1:52830/247
New job for range 248-248/500 : http://127.0.0.1:52830/248
New job for range 249-249/500 : http://127.0.0.1:52830/249
New job for range 250-250/500 : http://127.0.0.1:52830/250
New job for range 251-251/500 : http://127.0.0.1:52830/251
New job for range 252-252/500 : http://127.0.0.1:52830/252
New job for range 253-253/500 : http://127.0.0.1:52830/253
New job for range 254-254/500 : http://127.0.0.1:52830/254
New job for range 255-255/500 : http://127.0.0.1:52830/255
New job for range 256-256/500 : http://127.0.0.1:52830/256
New job for range 257-257/500 : http://127.0.0.1:52830/257
New job for range 258-258/500 : http://127.0.0.1:52830/258
New job for range 259-259/500 : http://127.0.0.1:52830/259
New job for range 260-260/500 : http://127.0.0.1:52830/260
New job for range 261-261/500 : http://127.0.0.1:52830/261
New job for range 262-262/500 : http://127.0.0.1:52830/262
New job for range 263-263/500 : http://127.0.0.1:52830/263
New job for range 264-264/500 : http://127.0.0.1:52830/264
New job for range 265-265/500 : http://127.0.0.1:52830/265
New job for range 266-266/500 : http://127.0.0.1:52830/266
New job for range 267-267/500 : http://127.0.0.1:52830/267
New job for range 268-268/500 : http://127.0.0.1:52830/268
New job for range 269-269/500 : http://127.0.0.1:52830/269
New job for range 270-270/500 : http://127.0.0.1:52830/270
New job for range 271-271/500 : http://127.0.0.1:52830/271
New job for range 272-272/500 : http://127.0.0.1:52830/272
New job for range 273-273/500 : http://127.0.0.1:52830/273
New job for range 274-274/500 : http://127.0.0.1:52830/274
New job for range 275-275/500 : http://127.0.0.1:52830/275
New job for range 276-276/500 : http://127.0.0.1:52830/276
New job for range 277-277/500 : http://127.0.0.1:52830/277
New job for range 278-278/500 : http://127.0.0.1:52830/278
New job for range 279-279/500 : http://127.0.0.1:52830/279
New job for range 280-280/500 : http://127.0.0.1:52830/280
New job for range 281-281/500 : http://127.0.0.1:52830/281
New job for range 282-282/500 : http://127.0.0.1:52830/282
New job for range 283-283/500 : http://127.0.0.1:52830/283
New job for range 284-284/500 : http://127.0.0.1:52830/284
New job for range 285-285/500 : http://127.0.0.1:52830/285
New job for range 286-286/500 : http://127.0.0.1:52830/286
New job for range 287-287/500 : http://127.0.0.1:52830/287
New job for range 288-288/500 : http://127.0.0.1:52830/288
New job for range 289-289/500 : http://127.0.0.1:52830/289
New job for range 290-290/500 : http://127.0.0.1:52830/290
New job for range 291-291/500 : http://127.0.0.1:52830/291
New job for range 292-292/500 : http://127.0.0.1:52830/292
New job for range 293-293/500 : http://127.0.0.1:52830/293
New job for range 294-294/500 : http://127.0.0.1:52830/294
New job for range 295-295/500 : http://127.0.0.1:52830/295
New job for range 296-296/500 : http://127.0.0.1:52830/296
New job for range 297-297/500 : http://127.0.0.1:52830/297
New job for range 298-298/500 : http://127.0.0.1:52830/298
New job for range 299-299/500 : http://127.0.0.1:52830/299
New job for range 300-300/500 : http://127.0.0.1:52830/300
New job for range 301-301/500 : http://127.0.0.1:52830/301
New job for range 302-302/500 : http://127.0.0.1:52830/302
New job for range 303-303/500 : http://127.0.0.1:52830/303
New job for range 304-304/500 : http://127.0.0.1:52830/304
New job for range 305-305/500 : http://127.0.0.1:52830/305
New job for range 306-306/500 : http://127.0.0.1:52830/306
New job for range 307-307/500 : http://127.0.0.1:52830/307
New job for range 308-308/500 : http://127.0.0.1:52830/308
New job for range 309-309/500 : http://127.0.0.1:52830/309
New job for range 310-310/500 : http://127.0.0.1:52830/310
New job for range 311-311/500 : http://127.0.0.1:52830/311
New job for range 312-312/500 : http://127.0.0.1:52830/312
New job for range 313-313/500 : http://127.0.0.1:52830/313
New job for range 314-314/500 : http://127.0.0.1:52830/314
New job for range 315-315/500 : http://127.0.0.1:52830/315
New job for range 316-316/500 : http://127.0.0.1:52830/316
New job for range 317-317/500 : http://127.0.0.1:52830/317
New job for range 318-318/500 : http://127.0.0.1:52830/318
New job for range 319-319/500 : http://127.0.0.1:52830/319
New job for range 320-320/500 : http://127.0.0.1:52830/320
New job for range 321-321/500 : http://127.0.0.1:52830/321
New job for range 322-322/500 : http://127.0.0.1:52830/322
New job for range 323-323/500 : http://127.0.0.1:52830/323
New job for range 324-324/500 : http://127.0.0.1:52830/324
New job for range 325-325/500 : http://127.0.0.1:52830/325
New job for range 326-326/500 : http://127.0.0.1:52830/326
New job for range 327-327/500 : http://127.0.0.1:52830/327
New job for range 328-328/500 : http://127.0.0.1:52830/328
New job for range 329-329/500 : http://127.0.0.1:52830/329
New job for range 330-330/500 : http://127.0.0.1:52830/330
New job for range 331-331/500 : http://127.0.0.1:52830/331
New job for range 332-332/500 : http://127.0.0.1:52830/332
New job for range 333-333/500 : http://127.0.0.1:52830/333
New job for range 334-334/500 : http://127.0.0.1:52830/334
New job for range 335-335/500 : http://127.0.0.1:52830/335
New job for range 336-336/500 : http://127.0.0.1:52830/336
New job for range 337-337/500 : http://127.0.0.1:52830/337
New job for range 338-338/500 : http://127.0.0.1:52830/338
New job for range 339-339/500 : http://127.0.0.1:52830/339
New job for range 340-340/500 : http://127.0.0.1:52830/340
New job for range 341-341/500 : http://127.0.0.1:52830/341
New job for range 342-342/500 : http://127.0.0.1:52830/342
New job for range 343-343/500 : http://127.0.0.1:52830/343
New job for range 344-344/500 : http://127.0.0.1:52830/344
New job for range 345-345/500 : http://127.0.0.1:52830/345
New job for range 346-346/500 : http://127.0.0.1:52830/346
New job for range 347-347/500 : http://127.0.0.1:52830/347
New job for range 348-348/500 : http://127.0.0.1:52830/348
New job for range 349-349/500 : http://127.0.0.1:52830/349
New job for range 350-350/500 : http://127.0.0.1:52830/350
New job for range 351-351/500 : http://127.0.0.1:52830/351
New job for range 352-352/500 : http://127.0.0.1:52830/352
New job for range 353-353/500 : http://127.0.0.1:52830/353
New job for range 354-354/500 : http://127.0.0.1:52830/354
New job for range 355-355/500 : http://127.0.0.1:52830/355
New job for range 356-356/500 : http://127.0.0.1:52830/356
New job for range 357-357/500 : http://127.0.0.1:52830/357
New job for range 358-358/500 : http://127.0.0.1:52830/358
New job for range 359-359/500 : http://127.0.0.1:52830/359
New job for range 360-360/500 : http://127.0.0.1:52830/360
New job for range 361-361/500 : http://127.0.0.1:52830/361
New job for range 362-362/500 : http://127.0.0.1:52830/362
New job for range 363-363/500 : http://127.0.0.1:52830/363
New job for range 364-364/500 : http://127.0.0.1:52830/364
New job for range 365-365/500 : http://127.0.0.1:52830/365
New job for range 366-366/500 : http://127.0.0.1:52830/366
New job for range 367-367/500 : http://127.0.0.1:52830/367
New job for range 368-368/500 : http://127.0.0.1:52830/368
New job for range 369-369/500 : http://127.0.0.1:52830/369
New job for range 370-370/500 : http://127.0.0.1:52830/370
New job for range 371-371/500 : http://127.0.0.1:52830/371
New job for range 372-372/500 : http://127.0.0.1:52830/372
New job for range 373-373/500 : http://127.0.0.1:52830/373
New job for range 374-374/500 : http://127.0.0.1:52830/374
New job for range 375-375/500 : http://127.0.0.1:52830/375
New job for range 376-376/500 : http://127.0.0.1:52830/376
New job for range 377-377/500 : http://127.0.0.1:52830/377
New job for range 378-378/500 : http://127.0.0.1:52830/378
New job for range 379-379/500 : http://127.0.0.1:52830/379
New job for range 380-380/500 : http://127.0.0.1:52830/380
New job for range 381-381/500 : http://127.0.0.1:52830/381
New job for range 382-382/500 : http://127.0.0.1:52830/382
New job for range 383-383/500 : http://127.0.0.1:52830/383
New job for range 384-384/500 : http://127.0.0.1:52830/384
New job for range 385-385/500 : http://127.0.0.1:52830/385
New job for range 386-386/500 : http://127.0.0.1:52830/386
New job for range 387-387/500 : http://127.0.0.1:52830/387
New job for range 388-388/500 : http://127.0.0.1:52830/388
New job for range 389-389/500 : http://127.0.0.1:52830/389
New job for range 390-390/500 : http://127.0.0.1:52830/390
New job for range 391-391/500 : http://127.0.0.1:52830/391
New job for range 392-392/500 : http://127.0.0.1:52830/392
New job for range 393-393/500 : http://127.0.0.1:52830/393
New job for range 394-394/500 : http://127.0.0.1:52830/394
New job for range 395-395/500 : http://127.0.0.1:52830/395
New job for range 396-396/500 : http://127.0.0.1:52830/396
New job for range 397-397/500 : http://127.0.0.1:52830/397
New job for range 398-398/500 : http://127.0.0.1:52830/398
New job for range 399-399/500 : http://127.0.0.1:52830/399
New job for range 400-400/500 : http://127.0.0.1:52830/400
New job for range 401-401/500 : http://127.0.0.1:52830/401
New job for range 402-402/500 : http://127.0.0.1:52830/402
New job for range 403-403/500 : http://127.0.0.1:52830/403
New job for range 404-404/500 : http://127.0.0.1:52830/404
New job for range 405-405/500 : http://127.0.0.1:52830/405
New job for range 406-406/500 : http://127.0.0.1:52830/406
New job for range 407-407/500 : http://127.0.0.1:52830/407
New job for range 408-408/500 : http://127.0.0.1:52830/408
New job for range 409-409/500 : http://127.0.0.1:52830/409
New job for range 410-410/500 : http://127.0.0.1:52830/410
New job for range 411-411/500 : http://127.0.0.1:52830/411
New job for range 412-412/500 : http://127.0.0.1:52830/412
New job for range 413-413/500 : http://127.0.0.1:52830/413
New job for range 414-414/500 : http://127.0.0.1:52830/414
New job for range 415-415/500 : http://127.0.0.1:52830/415
New job for range 416-416/500 : http://127.0.0.1:52830/416
New job for range 417-417/500 : http://127.0.0.1:52830/417
New job for range 418-418/500 : http://127.0.0.1:52830/418
New job for range 419-419/500 : http://127.0.0.1:52830/419
New job for range 420-420/500 : http://127.0.0.1:52830/420
New job for range 421-421/500 : http://127.0.0.1:52830/421
New job for range 422-422/500 : http://127.0.0.1:52830/422
New job for range 423-423/500 : http://127.0.0.1:52830/423
New job for range 424-424/500 : http://127.0.0.1:52830/424
New job for range 425-425/500 : http://127.0.0.1:52830/425
New job for range 426-426/500 : http://127.0.0.1:52830/426
New job for range 427-427/500 : http://127.0.0.1:52830/427
New job for range 428-428/500 : http://127.0.0.1:52830/428
New job for range 429-429/500 : http://127.0.0.1:52830/429
New job for range 430-430/500 : http://127.0.0.1:52830/430
New job for range 431-431/500 : http://127.0.0.1:52830/431
New job for range 432-432/500 : http://127.0.0.1:52830/432
New job for range 433-433/500 : http://127.0.0.1:52830/433
New job for range 434-434/500 : http://127.0.0.1:52830/434
New job for range 435-435/500 : http://127.0.0.1:52830/435
New job for range 436-436/500 : http://127.0.0.1:52830/436
New job for range 437-437/500 : http://127.0.0.1:52830/437
New job for range 438-438/500 : http://127.0.0.1:52830/438
New job for range 439-439/500 : http://127.0.0.1:52830/439
New job for range 440-440/500 : http://127.0.0.1:52830/440
New job for range 441-441/500 : http://127.0.0.1:52830/441
New job for range 442-442/500 : http://127.0.0.1:52830/442
New job for range 443-443/500 : http://127.0.0.1:52830/443
New job for range 444-444/500 : http://127.0.0.1:52830/444
New job for range 445-445/500 : http://127.0.0.1:52830/445
New job for range 446-446/500 : http://127.0.0.1:52830/446
New job for range 447-447/500 : http://127.0.0.1:52830/447
New job for range 448-448/500 : http://127.0.0.1:52830/448
New job for range 449-449/500 : http://127.0.0.1:52830/449
New job for range 450-450/500 : http://127.0.0.1:52830/450
New job for range 451-451/500 : http://127.0.0.1:52830/451
New job for range 452-452/500 : http://127.0.0.1:52830/452
New job for range 453-453/500 : http://127.0.0.1:52830/453
New job for range 454-454/500 : http://127.0.0.1:52830/454
New job for range 455-455/500 : http://127.0.0.1:52830/455
New job for range 456-456/500 : http://127.0.0.1:52830/456
New job for range 457-457/500 : http://127.0.0.1:52830/457
New job for range 458-458/500 : http://127.0.0.1:52830/458
New job for range 459-459/500 : http://127.0.0.1:52830/459
New job for range 460-460/500 : http://127.0.0.1:52830/460
New job for range 461-461/500 : http://127.0.0.1:52830/461
New job for range 462-462/500 : http://127.0.0.1:52830/462
New job for range 463-463/500 : http://127.0.0.1:52830/463
New job for range 464-464/500 : http://127.0.0.1:52830/464
New job for range 465-465/500 : http://127.0.0.1:52830/465
New job for range 466-466/500 : http://127.0.0.1:52830/466
New job for range 467-467/500 : http://127.0.0.1:52830/467
New job for range 468-468/500 : http://127.0.0.1:52830/468
New job for range 469-469/500 : http://127.0.0.1:52830/469
New job for range 470-470/500 : http://127.0.0.1:52830/470
New job for range 471-471/500 : http://127.0.0.1:52830/471
New job for range 472-472/500 : http://127.0.0.1:52830/472
New job for range 473-473/500 : http://127.0.0.1:52830/473
New job for range 474-474/500 : http://127.0.0.1:52830/474
New job for range 475-475/500 : http://127.0.0.1:52830/475
New job for range 476-476/500 : http://127.0.0.1:52830/476
New job for range 477-477/500 : http://127.0.0.1:52830/477
New job for range 478-478/500 : http://127.0.0.1:52830/478
New job for range 479-479/500 : http://127.0.0.1:52830/479
New job for range 480-480/500 : http://127.0.0.1:52830/480
New job for range 481-481/500 : http://127.0.0.1:52830/481
New job for range 482-482/500 : http://127.0.0.1:52830/482
New job for range 483-483/500 : http://127.0.0.1:52830/483
New job for range 484-484/500 : http://127.0.0.1:52830/484
New job for range 485-485/500 : http://127.0.0.1:52830/485
New job for range 486-486/500 : http://127.0.0.1:52830/486
New job for range 487-487/500 : http://127.0.0.1:52830/487
New job for range 488-488/500 : http://127.0.0.1:52830/488
New job for range 489-489/500 : http://127.0.0.1:52830/489
New job for range 490-490/500 : http://127.0.0.1:52830/490
New job for range 491-491/500 : http://127.0.0.1:52830/491
New job for range 492-492/500 : http://127.0.0.1:52830/492
New job for range 493-493/500 : http://127.0.0.1:52830/493
New job for range 494-494/500 : http://127.0.0.1:52830/494
New job for range 495-495/500 : http://127.0.0.1:52830/495
New job for range 496-496/500 : http://127.0.0.1:52830/496
New job for range 497-497/500 : http://127.0.0.1:52830/497
New job for range 498-498/500 : http://127.0.0.1:52830/498
New job for range 499-499/500 : http://127.0.0.1:52830/499
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.228s
---- NEW DOWNLOADER ----
New job for range 0-0/100 : http://127.0.0.1:36597/0
New job for range 1-1/100 : http://127.0.0.1:36597/1
New job for range 2-2/100 : http://127.0.0.1:36597/2
New job for range 3-3/100 : http://127.0.0.1:36597/3
New job for range 4-4/100 : http://127.0.0.1:36597/4
New job for range 5-5/100 : http://127.0.0.1:36597/5
New job for range 6-6/100 : http://127.0.0.1:36597/6
New job for range 7-7/100 : http://127.0.0.1:36597/7
New job for range 8-8/100 : http://127.0.0.1:36597/8
New job for range 9-9/100 : http://127.0.0.1:36597/9
New job for range 10-10/100 : http://127.0.0.1:36597/10
New job for range 11-11/100 : http://127.0.0.1:36597/11
New job for range 12-12/100 : http://127.0.0.1:36597/12
New job for range 13-13/100 : http://127.0.0.1:36597/13
New job for range 14-14/100 : http://127.0.0.1:36597/14
New job for range 15-15/100 : http://127.0.0.1:36597/15
New job for range 16-16/100 : http://127.0.0.1:36597/16
New job for range 17-17/100 : http://127.0.0.1:36597/17
New job for range 18-18/100 : http://127.0.0.1:36597/18
New job for range 19-19/100 : http://127.0.0.1:36597/19
New job for range 20-20/100 : http://127.0.0.1:36597/20
New job for range 21-21/100 : http://127.0.0.1:36597/21
New job for range 22-22/100 : http://127.0.0.1:36597/22
New job for range 23-23/100 : http://127.0.0.1:36597/23
New job for range 24-24/100 : http://127.0.0.1:36597/24
New job for range 25-25/100 : http://127.0.0.1:36597/25
New job for range 26-26/100 : http://127.0.0.1:36597/26
New job for range 27-27/100 : http://127.0.0.1:36597/27
New job for range 28-28/100 : http://127.0.0.1:36597/28
New job for range 29-29/100 : http://127.0.0.1:36597/29
New job for range 30-30/100 : http://127.0.0.1:36597/30
New job for range 31-31/100 : http://127.0.0.1:36597/31
New job for range 32-32/100 : http://127.0.0.1:36597/32
New job for range 33-33/100 : http://127.0.0.1:36597/33
New job for range 34-34/100 : http://127.0.0.1:36597/34
New job for range 35-35/100 : http://127.0.0.1:36597/35
New job for range 36-36/100 : http://127.0.0.1:36597/36
New job for range 37-37/100 : http://127.0.0.1:36597/37
New job for range 38-38/100 : http://127.0.0.1:36597/38
New job for range 39-39/100 : http://127.0.0.1:36597/39
New job for range 40-40/100 : http://127.0.0.1:36597/40
New job for range 41-41/100 : http://127.0.0.1:36597/41
New job for range 42-42/100 : http://127.0.0.1:36597/42
New job for range 43-43/100 : http://127.0.0.1:36597/43
New job for range 44-44/100 : http://127.0.0.1:36597/44
New job for range 45-45/100 : http://127.0.0.1:36597/45
New job for range 46-46/100 : http://127.0.0.1:36597/46
New job for range 47-47/100 : http://127.0.0.1:36597/47
New job for range 48-48/100 : http://127.0.0.1:36597/48
New job for range 49-49/100 : http://127.0.0.1:36597/49
New job for range 50-50/100 : http://127.0.0.1:36597/50
New job for range 51-51/100 : http://127.0.0.1:36597/51
New job for range 52-52/100 : http://127.0.0.1:36597/52
New job for range 53-53/100 : http://127.0.0.1:36597/53
New job for range 54-54/100 : http://127.0.0.1:36597/54
New job for range 55-55/100 : http://127.0.0.1:36597/55
New job for range 56-56/100 : http://127.0.0.1:36597/56
New job for range 57-57/100 : http://127.0.0.1:36597/57
New job for range 58-58/100 : http://127.0.0.1:36597/58
New job for range 59-59/100 : http://127.0.0.1:36597/59
New job for range 60-60/100 : http://127.0.0.1:36597/60
New job for range 61-61/100 : http://127.0.0.1:36597/61
New job for range 62-62/100 : http://127.0.0.1:36597/62
New job for range 63-63/100 : http://127.0.0.1:36597/63
New job for range 64-64/100 : http://127.0.0.1:36597/64
New job for range 65-65/100 : http://127.0.0.1:36597/65
New job for range 66-66/100 : http://127.0.0.1:36597/66
New job for range 67-67/100 : http://127.0.0.1:36597/67
New job for range 68-68/100 : http://127.0.0.1:36597/68
New job for range 69-69/100 : http://127.0.0.1:36597/69
New job for range 70-70/100 : http://127.0.0.1:36597/70
New job for range 71-71/100 : http://127.0.0.1:36597/71
New job for range 72-72/100 : http://127.0.0.1:36597/72
New job for range 73-73/100 : http://127.0.0.1:36597/73
New job for range 74-74/100 : http://127.0.0.1:36597/74
New job for range 75-75/100 : http://127.0.0.1:36597/75
New job for range 76-76/100 : http://127.0.0.1:36597/76
New job for range 77-77/100 : http://127.0.0.1:36597/77
New job for range 78-78/100 : http://127.0.0.1:36597/78
New job for range 79-79/100 : http://127.0.0.1:36597/79
New job for range 80-80/100 : http://127.0.0.1:36597/80
New job for range 81-81/100 : http://127.0.0.1:36597/81
New job for range 82-82/100 : http://127.0.0.1:36597/82
New job for range 83-83/100 : http://127.0.0.1:36597/83
New job for range 84-84/100 : http://127.0.0.1:36597/84
New job for range 85-85/100 : http://127.0.0.1:36597/85
New job for range 86-86/100 : http://127.0.0.1:36597/86
New job for range 87-87/100 : http://127.0.0.1:36597/87
New job for range 88-88/100 : http://127.0.0.1:36597/88
New job for range 89-89/100 : http://127.0.0.1:36597/89
New job for range 90-90/100 : http://127.0.0.1:36597/90
New job for range 91-91/100 : http://127.0.0.1:36597/91
New job for range 92-92/100 : http://127.0.0.1:36597/92
New job for range 93-93/100 : http://127.0.0.1:36597/93
New job for range 94-94/100 : http://127.0.0.1:36597/94
New job for range 95-95/100 : http://127.0.0.1:36597/95
New job for range 96-96/100 : http://127.0.0.1:36597/96
New job for range 97-97/100 : http://127.0.0.1:36597/97
New job for range 98-98/100 : http://127.0.0.1:36597/98
New job for range 99-99/100 : http://127.0.0.1:36597/99
PASS
ok  	_/tmp/d20170109-30451-uky4cw	0.094s
---- NEW DOWNLOADER ----
New job for range 0-0/50 : http://127.0.0.1:34085/0
New job for range 1-1/50 : http://127.0.0.1:34085/1
New job for range 2-2/50 : http://127.0.0.1:34085/2
New job for range 3-3/50 : http://127.0.0.1:34085/3
New job for range 4-4/50 : http://127.0.0.1:34085/4
New job for range 5-5/50 : http://127.0.0.1:34085/5
New job for range 6-6/50 : http://127.0.0.1:34085/6
New job for range 7-7/50 : http://127.0.0.1:34085/7
New job for range 8-8/50 : http://127.0.0.1:34085/8
New job for range 9-9/50 : http://127.0.0.1:34085/9
New job for range 10-10/50 : http://127.0.0.1:34085/10
New job for range 11-11/50 : http://127.0.0.1:34085/11
New job for range 12-12/50 : http://127.0.0.1:34085/12
New job for range 13-13/50 : http://127.0.0.1:34085/13
New job for range 14-14/50 : http://127.0.0.1:34085/14
New job for range 15-15/50 : http://127.0.0.1:34085/15
New job for range 16-16/50 : http://127.0.0.1:34085/16
New job for range 17-17/50 : http://127.0.0.1:34085/17
New job for range 18-18/50 : http://127.0.0.1:34085/18
New job for range 19-19/50 : http://127.0.0.1:34085/19
New job for range 20-20/50 : http://127.0.0.1:34085/20
New job for range 21-21/50 : http://127.0.0.1:34085/21
New job for range 22-22/50 : http://127.0.0.1:34085/22
New job for range 23-23/50 : http://127.0.0.1:34085/23
New job for range 24-24/50 : http://127.0.0.1:34085/24
New job for range 25-25/50 : http://127.0.0.1:34085/25
New job for range 26-26/50 : http://127.0.0.1:34085/26
New job for range 27-27/50 : http://127.0.0.1:34085/27
New job for range 28-28/50 : http://127.0.0.1:34085/28
New job for range 29-29/50 : http://127.0.0.1:34085/29
New job for range 30-30/50 : http://127.0.0.1:34085/30
New job for range 31-31/50 : http://127.0.0.1:34085/31
New job for range 32-32/50 : http://127.0.0.1:34085/32
New job for range 33-33/50 : http://127.0.0.1:34085/33
New job for range 34-34/50 : http://127.0.0.1:34085/34
New job for range 35-35/50 : http://127.0.0.1:34085/35
New job for range 36-36/50 : http://127.0.0.1:34085/36
New job for range 37-37/50 : http://127.0.0.1:34085/37
New job for range 38-38/50 : http://127.0.0.1:34085/38
New job for range 39-39/50 : http://127.0.0.1:34085/39
New job for range 40-40/50 : http://127.0.0.1:34085/40
New job for range 41-41/50 : http://127.0.0.1:34085/41
New job for range 42-42/50 : http://127.0.0.1:34085/42
New job for range 43-43/50 : http://127.0.0.1:34085/43
New job for range 44-44/50 : http://127.0.0.1:34085/44
New job for range 45-45/50 : http://127.0.0.1:34085/45
New job for range 46-46/50 : http://127.0.0.1:34085/46
New job for range 47-47/50 : http://127.0.0.1:34085/47
New job for range 48-48/50 : http://127.0.0.1:34085/48
New job for range 49-49/50 : http://127.0.0.1:34085/49
Failed to download chunk 11-11(11): HTTP status 500
Discarding url http://127.0.0.1:34085/11 ...
New job for range 11-11/0 : http://127.0.0.1:34085/0
New job for range 12-11/0 : http://127.0.0.1:34085/1
New job for range 12-11/0 : http://127.0.0.1:34085/2
New job for range 12-11/0 : http://127.0.0.1:34085/3
New job for range 12-11/0 : http://127.0.0.1:34085/4
New job for range 12-11/0 : http://127.0.0.1:34085/5
New job for range 12-11/0 : http://127.0.0.1:34085/6
New job for range 12-11/0 : http://127.0.0.1:34085/7
New job for range 12-11/0 : http://127.0.0.1:34085/8
New job for range 12-11/0 : http://127.0.0.1:34085/9
New job for range 12-11/0 : http://127.0.0.1:34085/10
New job for range 12-11/0 : http://127.0.0.1:34085/12
New job for range 12-11/0 : http://127.0.0.1:34085/13
New job for range 12-11/0 : http://127.0.0.1:34085/14
New job for range 12-11/0 : http://127.0.0.1:34085/15
New job for range 12-11/0 : http://127.0.0.1:34085/16
New job for range 12-11/0 : http://127.0.0.1:34085/17
New job for range 12-11/0 : http://127.0.0.1:34085/18
New job for range 12-11/0 : http://127.0.0.1:34085/19
New job for range 12-11/0 : http://127.0.0.1:34085/20
New job for range 12-11/0 : http://127.0.0.1:34085/21
New job for range 12-11/0 : http://127.0.0.1:34085/22
New job for range 12-11/0 : http://127.0.0.1:34085/23
New job for range 12-11/0 : http://127.0.0.1:34085/24
New job for range 12-11/0 : http://127.0.0.1:34085/25
New job for range 12-11/0 : http://127.0.0.1:34085/26
New job for range 12-11/0 : http://127.0.0.1:34085/27
New job for range 12-11/0 : http://127.0.0.1:34085/28
New job for range 12-11/0 : http://127.0.0.1:34085/29
New job for range 12-11/0 : http://127.0.0.1:34085/30
New job for range 12-11/0 : http://127.0.0.1:34085/31
New job for range 12-11/0 : http://127.0.0.1:34085/32
New job for range 12-11/0 : http://127.0.0.1:34085/33
New job for range 12-11/0 : http://127.0.0.1:34085/34
New job for range 12-11/0 : http://127.0.0.1:34085/35
New job for range 12-11/0 : http://127.0.0.1:34085/36
New job for range 12-11/0 : http://127.0.0.1:34085/37
New job for range 12-11/0 : http://127.0.0.1:34085/38
New job for range 12-11/0 : http://127.0.0.1:34085/39
New job for range 12-11/0 : http://127.0.0.1:34085/40
New job for range 12-11/0 : http://127.0.0.1:34085/41
New job for range 12-11/0 : http://127.0.0.1:34085/42
New job for range 12-11/0 : http://127.0.0.1:34085/43
New job for range 12-11/0 : http://127.0.0.1:34085/44
New job for range 12-11/0 : http://127.0.0.1:34085/45
New job for range 12-11/0 : http://127.0.0.1:34085/46
New job for range 12-11/0 : http://127.0.0.1:34085/47
New job for range 12-11/0 : http://127.0.0.1:34085/48
New job for range 12-11/0 : http://127.0.0.1:34085/49
Failed to download chunk 9-9(9): HTTP status 500
Discarding url http://127.0.0.1:34085/9 ...
New job for range 9-9/0 : http://127.0.0.1:34085/0
New job for range 10-9/0 : http://127.0.0.1:34085/1
New job for range 10-9/0 : http://127.0.0.1:34085/2
New job for range 10-9/0 : http://127.0.0.1:34085/3
New job for range 10-9/0 : http://127.0.0.1:34085/4
New job for range 10-9/0 : http://127.0.0.1:34085/5
New job for range 10-9/0 : http://127.0.0.1:34085/6
New job for range 10-9/0 : http://127.0.0.1:34085/7
New job for range 10-9/0 : http://127.0.0.1:34085/8
New job for range 10-9/0 : http://127.0.0.1:34085/10
New job for range 10-9/0 : http://127.0.0.1:34085/12
New job for range 10-9/0 : http://127.0.0.1:34085/13
New job for range 10-9/0 : http://127.0.0.1:34085/14
New job for range 10-9/0 : http://127.0.0.1:34085/15
New job for range 10-9/0 : http://127.0.0.1:34085/16
New job for range 10-9/0 : http://127.0.0.1:34085/17
New job for range 10-9/0 : http://127.0.0.1:34085/18
New job for range 10-9/0 : http://127.0.0.1:34085/19
New job for range 10-9/0 : http://127.0.0.1:34085/20
New job for range 10-9/0 : http://127.0.0.1:34085/21
New job for range 10-9/0 : http://127.0.0.1:34085/22
New job for range 10-9/0 : http://127.0.0.1:34085/23
New job for range 10-9/0 : http://127.0.0.1:34085/24
New job for range 10-9/0 : http://127.0.0.1:34085/25
New job for range 10-9/0 : http://127.0.0.1:34085/26
New job for range 10-9/0 : http://127.0.0.1:34085/27
New job for range 10-9/0 : http://127.0.0.1:34085/28
panic: test timed out after 1s

goroutine 30 [running]:
panic(0x66afe0, 0xc420263690)
	/usr/local/go/src/runtime/panic.go:500 +0x1a1
testing.startAlarm.func1()
	/usr/local/go/src/testing/testing.go:918 +0x10b
created by time.goFunc
	/usr/local/go/src/time/sleep.go:154 +0x44

goroutine 1 [chan receive]:
testing.(*T).Run(0xc42007c0c0, 0x6da0ad, 0x34, 0x6f1dc8, 0xc42004bd01)
	/usr/local/go/src/testing/testing.go:647 +0x316
testing.RunTests.func1(0xc42007c0c0)
	/usr/local/go/src/testing/testing.go:793 +0x6d
testing.tRunner(0xc42007c0c0, 0xc42004be20)
	/usr/local/go/src/testing/testing.go:610 +0x81
testing.RunTests(0x6f1fa8, 0x805180, 0x11, 0x11, 0x7f3377e6a000)
	/usr/local/go/src/testing/testing.go:799 +0x2f5
testing.(*M).Run(0xc42004bee8, 0x688ea0)
	/usr/local/go/src/testing/testing.go:743 +0x85
main.main()
	_/tmp/d20170109-30451-uky4cw/_test/_testmain.go:86 +0xc6

goroutine 17 [syscall, locked to thread]:
runtime.goexit()
	/usr/local/go/src/runtime/asm_amd64.s:2086 +0x1

goroutine 6 [runnable]:
_/tmp/d20170109-30451-uky4cw.(*file).readChunks(0xc420014380, 0xc420016400, 0x1, 0x200, 0x0)
	/tmp/d20170109-30451-uky4cw/solution.go:61
_/tmp/d20170109-30451-uky4cw.(*file).Read(0xc420014380, 0xc420016400, 0x1, 0x200, 0x0, 0x0, 0x0)
	/tmp/d20170109-30451-uky4cw/solution.go:58 +0xd0
testing/iotest.(*oneByteReader).Read(0xc420013460, 0xc420016400, 0x200, 0x200, 0x0, 0x0, 0x0)
	/usr/local/go/src/testing/iotest/reader.go:25 +0x64
bytes.(*Buffer).ReadFrom(0xc42003bef8, 0x7e96e0, 0xc420013460, 0x32, 0x32, 0x7e8860)
	/usr/local/go/src/bytes/buffer.go:176 +0x155
_/tmp/d20170109-30451-uky4cw.TestSlowLingchiWithBothMaxConnectionsAndALotOfErrors(0xc42007c180)
	/tmp/d20170109-30451-uky4cw/solution_test.go:998 +0x5cb
testing.tRunner(0xc42007c180, 0x6f1dc8)
	/usr/local/go/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
	/usr/local/go/src/testing/testing.go:646 +0x2ec

goroutine 7 [IO wait]:
net.runtime_pollWait(0x7f3377e0f178, 0x72, 0x0)
	/usr/local/go/src/runtime/netpoll.go:160 +0x59
net.(*pollDesc).wait(0xc420014370, 0x72, 0xc420036dd0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:73 +0x38
net.(*pollDesc).waitRead(0xc420014370, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:78 +0x34
net.(*netFD).accept(0xc420014310, 0x0, 0x7e94a0, 0xc4200e9b80)
	/usr/local/go/src/net/fd_unix.go:419 +0x238
net.(*TCPListener).accept(0xc42002a048, 0x43413e, 0xc420036e80, 0x52dabd)
	/usr/local/go/src/net/tcpsock_posix.go:132 +0x2e
net.(*TCPListener).Accept(0xc42002a048, 0x6f2188, 0xc42010b500, 0x7ed460, 0xc4200ee060)
	/usr/local/go/src/net/tcpsock.go:222 +0x49
net/http.(*Server).Serve(0xc42001a300, 0x7ecba0, 0xc42002a048, 0x0, 0x0)
	/usr/local/go/src/net/http/server.go:2273 +0x1ce
net/http/httptest.(*Server).goServe.func1(0xc42005e4e0)
	/usr/local/go/src/net/http/httptest/server.go:235 +0x6d
created by net/http/httptest.(*Server).goServe
	/usr/local/go/src/net/http/httptest/server.go:236 +0x5c

goroutine 8 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:229 +0x724
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 9 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:191 +0x8b1
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 10 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:229 +0x724
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 11 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:191 +0x8b1
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 12 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:229 +0x724
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 13 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:191 +0x8b1
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 14 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:229 +0x724
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 15 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:191 +0x8b1
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 16 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:229 +0x724
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 18 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:191 +0x8b1
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 19 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:191 +0x8b1
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 20 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:229 +0x724
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 21 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:191 +0x8b1
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 22 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:191 +0x8b1
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 23 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:229 +0x724
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 24 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:191 +0x8b1
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 25 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:229 +0x724
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 26 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:191 +0x8b1
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 27 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:229 +0x724
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 28 [chan send]:
_/tmp/d20170109-30451-uky4cw.downloadWorker(0x7ed460, 0xc42000d0b0, 0xc4200bf020)
	/tmp/d20170109-30451-uky4cw/solution.go:229 +0x724
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:149 +0x2d1

goroutine 29 [chan send]:
_/tmp/d20170109-30451-uky4cw.createDownloadJobs(0xc4200ca380, 0x30, 0x32, 0x0, 0x9, 0xc42005e540)
	/tmp/d20170109-30451-uky4cw/solution.go:348 +0x378
_/tmp/d20170109-30451-uky4cw.startDownloading(0x7ed460, 0xc42000d0b0, 0xc420014380)
	/tmp/d20170109-30451-uky4cw/solution.go:290 +0x655
created by _/tmp/d20170109-30451-uky4cw.DownloadFile
	/tmp/d20170109-30451-uky4cw/solution.go:160 +0x455

goroutine 85 [IO wait]:
net.runtime_pollWait(0x7f3377e0e4b8, 0x72, 0x15)
	/usr/local/go/src/runtime/netpoll.go:160 +0x59
net.(*pollDesc).wait(0xc42011cf40, 0x72, 0xc4200fb9d0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:73 +0x38
net.(*pollDesc).waitRead(0xc42011cf40, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:78 +0x34
net.(*netFD).Read(0xc42011cee0, 0xc4201a0000, 0x1000, 0x1000, 0x0, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_unix.go:243 +0x1a1
net.(*conn).Read(0xc42010e230, 0xc4201a0000, 0x1000, 0x1000, 0x0, 0x0, 0x0)
	/usr/local/go/src/net/net.go:173 +0x70
net/http.(*persistConn).Read(0xc42011b100, 0xc4201a0000, 0x1000, 0x1000, 0x30, 0xc4200fbb58, 0x43b1ec)
	/usr/local/go/src/net/http/transport.go:1261 +0x154
bufio.(*Reader).fill(0xc420198540)
	/usr/local/go/src/bufio/bufio.go:97 +0x10c
bufio.(*Reader).Peek(0xc420198540, 0x1, 0x0, 0x1, 0x0, 0xc42005fd40, 0x0)
	/usr/local/go/src/bufio/bufio.go:129 +0x62
net/http.(*persistConn).readLoop(0xc42011b100)
	/usr/local/go/src/net/http/transport.go:1418 +0x1a1
created by net/http.(*Transport).dialConn
	/usr/local/go/src/net/http/transport.go:1062 +0x4e9

goroutine 86 [select]:
net/http.(*persistConn).writeLoop(0xc42011b100)
	/usr/local/go/src/net/http/transport.go:1646 +0x3bd
created by net/http.(*Transport).dialConn
	/usr/local/go/src/net/http/transport.go:1063 +0x50e

goroutine 83 [IO wait]:
net.runtime_pollWait(0x7f3377e0e3f8, 0x72, 0x16)
	/usr/local/go/src/runtime/netpoll.go:160 +0x59
net.(*pollDesc).wait(0xc42011d020, 0x72, 0xc4201019d0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:73 +0x38
net.(*pollDesc).waitRead(0xc42011d020, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:78 +0x34
net.(*netFD).Read(0xc42011cfc0, 0xc42019e000, 0x1000, 0x1000, 0x0, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_unix.go:243 +0x1a1
net.(*conn).Read(0xc42010e228, 0xc42019e000, 0x1000, 0x1000, 0x0, 0x0, 0x0)
	/usr/local/go/src/net/net.go:173 +0x70
net/http.(*persistConn).Read(0xc42011b200, 0xc42019e000, 0x1000, 0x1000, 0x30, 0xc420101b58, 0x43b1ec)
	/usr/local/go/src/net/http/transport.go:1261 +0x154
bufio.(*Reader).fill(0xc4201983c0)
	/usr/local/go/src/bufio/bufio.go:97 +0x10c
bufio.(*Reader).Peek(0xc4201983c0, 0x1, 0x0, 0x1, 0x0, 0xc42005fc20, 0x0)
	/usr/local/go/src/bufio/bufio.go:129 +0x62
net/http.(*persistConn).readLoop(0xc42011b200)
	/usr/local/go/src/net/http/transport.go:1418 +0x1a1
created by net/http.(*Transport).dialConn
	/usr/local/go/src/net/http/transport.go:1062 +0x4e9

goroutine 84 [select]:
net/http.(*persistConn).writeLoop(0xc42011b200)
	/usr/local/go/src/net/http/transport.go:1646 +0x3bd
created by net/http.(*Transport).dialConn
	/usr/local/go/src/net/http/transport.go:1063 +0x50e

goroutine 129 [IO wait]:
net.runtime_pollWait(0x7f3377dce460, 0x72, 0x28)
	/usr/local/go/src/runtime/netpoll.go:160 +0x59
net.(*pollDesc).wait(0xc4201dc140, 0x72, 0xc4202057b0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:73 +0x38
net.(*pollDesc).waitRead(0xc4201dc140, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:78 +0x34
net.(*netFD).Read(0xc4201dc0e0, 0xc42021e000, 0x1000, 0x1000, 0x0, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_unix.go:243 +0x1a1
net.(*conn).Read(0xc42010e318, 0xc42021e000, 0x1000, 0x1000, 0x0, 0x0, 0x0)
	/usr/local/go/src/net/net.go:173 +0x70
net/http.(*connReader).Read(0xc4200e9fa0, 0xc42021e000, 0x1000, 0x1000, 0xc420205918, 0x6d2ef7, 0x19)
	/usr/local/go/src/net/http/server.go:586 +0x144
bufio.(*Reader).fill(0xc4201d93e0)
	/usr/local/go/src/bufio/bufio.go:97 +0x10c
bufio.(*Reader).ReadSlice(0xc4201d93e0, 0xa, 0x0, 0x1e, 0x6, 0x0, 0x0)
	/usr/local/go/src/bufio/bufio.go:330 +0xb5
bufio.(*Reader).ReadLine(0xc4201d93e0, 0xc42026a870, 0xf0, 0xf0, 0x6c2ea0, 0xc4200ea6e0, 0x6b9620)
	/usr/local/go/src/bufio/bufio.go:359 +0x37
net/textproto.(*Reader).readLineSlice(0xc42000d350, 0xc420205aa8, 0xc420205aa8, 0x410688, 0xf0, 0x6c2ea0)
	/usr/local/go/src/net/textproto/reader.go:55 +0x5e
net/textproto.(*Reader).ReadLine(0xc42000d350, 0xc42026a870, 0xc420205b20, 0x401863, 0xc420205c78)
	/usr/local/go/src/net/textproto/reader.go:36 +0x2f
net/http.readRequest(0xc4201d93e0, 0xc420205c00, 0xc42026a870, 0x0, 0x0)
	/usr/local/go/src/net/http/request.go:793 +0xa5
net/http.(*conn).readRequest(0xc42010b300, 0x7ed3a0, 0xc42021a1c0, 0x0, 0x0, 0x0)
	/usr/local/go/src/net/http/server.go:765 +0x10d
net/http.(*conn).serve(0xc42010b300, 0x7ed3a0, 0xc42021a1c0)
	/usr/local/go/src/net/http/server.go:1532 +0x3d3
created by net/http.(*Server).Serve
	/usr/local/go/src/net/http/server.go:2293 +0x44d

goroutine 130 [IO wait]:
net.runtime_pollWait(0x7f3377dce3a0, 0x72, 0x29)
	/usr/local/go/src/runtime/netpoll.go:160 +0x59
net.(*pollDesc).wait(0xc4201dc1b0, 0x72, 0xc4202067b0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:73 +0x38
net.(*pollDesc).waitRead(0xc4201dc1b0, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_poll_runtime.go:78 +0x34
net.(*netFD).Read(0xc4201dc150, 0xc420222000, 0x1000, 0x1000, 0x0, 0x7ea8a0, 0xc4200121b0)
	/usr/local/go/src/net/fd_unix.go:243 +0x1a1
net.(*conn).Read(0xc42010e320, 0xc420222000, 0x1000, 0x1000, 0x0, 0x0, 0x0)
	/usr/local/go/src/net/net.go:173 +0x70
net/http.(*connReader).Read(0xc4200e9fe0, 0xc420222000, 0x1000, 0x1000, 0xc420206918, 0x6d2ef7, 0x19)
	/usr/local/go/src/net/http/server.go:586 +0x144
bufio.(*Reader).fill(0xc4201d9500)
	/usr/local/go/src/bufio/bufio.go:97 +0x10c
bufio.(*Reader).ReadSlice(0xc4201d9500, 0xa, 0x0, 0x1e, 0xc4200ea7b8, 0x33, 0x0)
	/usr/local/go/src/bufio/bufio.go:330 +0xb5
bufio.(*Reader).ReadLine(0xc4201d9500, 0xc42026a960, 0xf0, 0xf0, 0x6c2ea0, 0xc4200ea6e0, 0x6b9620)
	/usr/local/go/src/bufio/bufio.go:359 +0x37
net/textproto.(*Reader).readLineSlice(0xc42000d320, 0xc420206aa8, 0xc420206aa8, 0x410688, 0xf0, 0x6c2ea0)
	/usr/local/go/src/net/textproto/reader.go:55 +0x5e
net/textproto.(*Reader).ReadLine(0xc42000d320, 0xc42026a960, 0xc420206b20, 0x401863, 0xc420206c78)
	/usr/local/go/src/net/textproto/reader.go:36 +0x2f
net/http.readRequest(0xc4201d9500, 0xc420206c00, 0xc42026a960, 0x0, 0x0)
	/usr/local/go/src/net/http/request.go:793 +0xa5
net/http.(*conn).readRequest(0xc42010b380, 0x7ed3a0, 0xc42021a380, 0x0, 0x0, 0x0)
	/usr/local/go/src/net/http/server.go:765 +0x10d
net/http.(*conn).serve(0xc42010b380, 0x7ed3a0, 0xc42021a380)
	/usr/local/go/src/net/http/server.go:1532 +0x3d3
created by net/http.(*Server).Serve
	/usr/local/go/src/net/http/server.go:2293 +0x44d
exit status 2
FAIL	_/tmp/d20170109-30451-uky4cw	1.011s
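The trace above tells the story: goroutines 8 through 28 are all parked in "chan send" inside downloadWorker (solution.go:191 and :229), while goroutine 29 is parked in createDownloadJobs (solution.go:348) trying to push more jobs. Both jobs and res are buffered to the original URL count, but every failed chunk re-enqueues one job per remaining URL, so after a few 500s the buffers fill while the only goroutine that drains res is itself stuck producing jobs, and the 1s test alarm fires. A minimal sketch of one way out, making every send context-aware so a worker gives up instead of parking forever (the chunk type below is a stand-in for the solution's, and sendResult is a hypothetical helper, not part of the submission):

package main

import "context"

type chunk struct{ from, to int64 } // stand-in for the solution's chunk

// sendResult performs the kind of `comm.res <- chunk` send the goroutines
// above are parked on, but aborts once the context is cancelled.
func sendResult(ctx context.Context, res chan<- chunk, c chunk) bool {
	select {
	case res <- c:
		return true
	case <-ctx.Done():
		return false // give up instead of blocking in "chan send" forever
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	res := make(chan chunk) // unbuffered: a bare send would block forever
	cancel()
	sendResult(ctx, res, chunk{from: 0, to: 9}) // returns false, no deadlock
}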

History (8 versions and 2 comments)

Анатоли updated the solution on 02.01.2017 19:42 (more than a year ago)

+package main
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+ "sync"
+)
+
+type chunk struct {
+ data []byte
+ from int64
+ to int64
+ pos int64
+ url string
+ err error
+}
+
+type comm struct {
+ jobs chan chunk
+ res chan chunk
+ done chan struct{}
+}
+
+type chunkSort []chunk
+
+func (a chunkSort) Len() int { return len(a) }
+func (a chunkSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a chunkSort) Less(i, j int) bool { return a[i].from < a[j].from }
+
+type file struct {
+ chunks []chunk
+ size int64
+ currSize int64
+ pos int64
+ urls []string
+ comm *comm
+ err error
+ lock *sync.Mutex
+}
+
+func (f *file) Read(p []byte) (n int, err error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.err != nil {
+ return f.readChunks(p), f.err
+ }
+
+ if f.pos == f.size && f.size > 0 {
+ return 0, io.EOF
+ }
+
+ return f.readChunks(p), nil
+}
+
+func (f *file) readChunks(p []byte) (n int) {
+ //fmt.Println("read chunks", len(p), f.pos, f.size)
+ prevTo := int64(0)
+ bytesToRead := len(p)
+ pos := int64(0)
+ bytes := 0
+
+ if bytesToRead == 0 {
+ return 0
+ }
+
+Breaking:
+ for _, c := range f.chunks {
+ if prevTo != c.from {
+ break
+ }
+
+ for _, d := range c.data {
+ if pos < f.pos {
+ pos++
+ continue
+ }
+ p[bytes] = d
+
+ bytes++
+ f.pos++
+ pos = f.pos + 1
+
+ if bytes == bytesToRead {
+ break Breaking
+ }
+ }
+
+ prevTo = c.pos
+ }
+
+ return bytes
+}
+
+func (f *file) addChunk(chunk chunk) (err error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.chunks = append(f.chunks, chunk)
+ f.currSize += int64(len(chunk.data))
+ sort.Sort(chunkSort(f.chunks))
+ return nil
+}
+
+func (f *file) setErr(err error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.err = err
+}
+
+func (f *file) setSize(size int64) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ f.size = size
+}
+
+func (f *file) isReady() bool {
+ return f.currSize == f.size && f.size > 0
+}
+
+//DownloadFile No comment
+func DownloadFile(ctx context.Context, urls []string) io.Reader {
+ fmt.Println("---- NEW DOWNLOADER ----")
+ maxWorkers := len(urls)
+ urlCount := maxWorkers
+
+ if ctx == nil {
+ ctx = context.Background()
+ }
+
+ if tmpMaxWorkers, ok := ctx.Value("max-connections").(int); ok {
+ maxWorkers = tmpMaxWorkers
+ }
+
+ comm := &comm{
+ jobs: make(chan chunk, urlCount),
+ res: make(chan chunk, urlCount),
+ done: make(chan struct{}, 1),
+ }
+
+ // create downloaders
+ for i := 0; i < maxWorkers; i++ {
+ go downloadWorker(ctx, comm)
+ }
+
+ f := &file{
+ chunks: make([]chunk, 0),
+ lock: &sync.Mutex{},
+ comm: comm,
+ urls: urls,
+ }
+
+ go startDownloading(ctx, f)
+ return f
+}
+
+func downloadWorker(ctx context.Context, comm *comm) {
+ var client http.Client
+Breaking:
+ for {
+ select {
+ case chunk := <-comm.jobs:
+
+ to := chunk.to
+ from := chunk.from
+ chunk.pos = chunk.from
+
+ for {
+ rheader := fmt.Sprintf("bytes=%d-%d", from, to)
+
+ req, _ := http.NewRequest("GET", chunk.url, nil)
+ req.Header.Add("Range", rheader)
+ resp, rerr := client.Do(req)
+
+ if rerr != nil {
+ chunk.err = rerr
+ comm.res <- chunk
+ continue Breaking
+ }
+
+ if resp.StatusCode < 200 || resp.StatusCode > 299 {
+ resp.Body.Close()
+ chunk.err = fmt.Errorf("HTTP status %v", resp.StatusCode)
+ comm.res <- chunk
+ continue Breaking
+ }
+
+ buf := make([]byte, 1000)
+ var ioerr error
+ var read int
+ for {
+ read, ioerr = resp.Body.Read(buf)
+
+ if read > 0 {
+ chunk.data = append(chunk.data, buf[:read]...)
+ chunk.pos = chunk.pos + int64(len(buf[:read]))
+ }
+
+ if ioerr != nil {
+ break
+ }
+
+ select {
+ case <-ctx.Done():
+ resp.Body.Close()
+ comm.res <- chunk
+ break Breaking
+ default:
+ }
+ }
+
+ resp.Body.Close()
+ if (ioerr == nil || ioerr == io.EOF) && (chunk.pos-1) == chunk.to {
+ break
+ }
+
+ from = chunk.pos
+ }
+
+ comm.res <- chunk
+
+ case <-comm.done:
+ return
+ case <-ctx.Done():
+ return
+ }
+ }
+ //fmt.Println("Exit worker...")
+}
+
+func startDownloading(ctx context.Context, f *file) {
+ // while the file is not ready
+ defer close(f.comm.done)
+ contentLen, err := readContentLen(f.urls)
+
+ if err != nil {
+ fmt.Println("Failed to read content len from: ", f.urls)
+ f.setErr(err)
+ return
+ }
+
+ f.setSize(contentLen)
+
+ currUrls := f.urls
+ createDownloadJobs(currUrls, f.size, 0, f.comm.jobs)
+
+ for {
+ select {
+ case chunk := <-f.comm.res:
+ if chunk.err != nil {
+ fmt.Printf(
+ "Failed to download chunk %v-%v(%v): %v\n",
+ chunk.from,
+ chunk.to,
+ chunk.pos,
+ chunk.err,
+ )
+
+ currUrls = discardURL(currUrls, chunk.url)
+ currLen := chunk.to - chunk.pos
+ currStart := chunk.pos
+
+ if len(chunk.data) > 0 {
+ f.addChunk(chunk)
+ }
+
+ if len(currUrls) == 0 {
+ f.setErr(fmt.Errorf("no valid urls"))
+ return
+ }
+
+ createDownloadJobs(currUrls, currLen, currStart, f.comm.jobs)
+ } else {
+ f.addChunk(chunk)
+ }
+ case <-ctx.Done():
+ return
+ }
+
+ if f.isReady() {
+ return
+ }
+ }
+}
+
+func readContentLen(urls []string) (size int64, err error) {
+ for _, url := range urls {
+ resp, tmperr := http.Head(url)
+ if tmperr != nil {
+ err = fmt.Errorf("[ERROR] HEAD %v: %v", url, tmperr)
+ continue
+ }
+
+ if resp.StatusCode < 200 || resp.StatusCode > 299 {
+ err = fmt.Errorf("[ERROR] HEAD `%v`[%v]", url, resp.StatusCode)
+ continue
+ }
+
+ if resp.ContentLength == -1 {
+ err = io.EOF
+ continue
+ }
+
+ size = resp.ContentLength
+ err = nil
+ break
+ }
+
+ return size, err
+}
+
+func createDownloadJobs(urls []string, currLen, start int64, jobsChan chan<- chunk) {
+ ranges := calcRanges(int64(len(urls)), currLen, start)
+
+ for idx, url := range urls {
+ r := ranges[idx]
+ fmt.Printf("New job for range %v-%v/%v : %v\n", r[0], r[1], currLen, url)
+
+ job := chunk{
+ data: make([]byte, 0),
+ from: r[0],
+ to: r[1],
+ url: url,
+ }
+
+ jobsChan <- job
+ }
+}
+
+func calcRanges(urlCount int64, contentLen, start int64) (ranges [][]int64) {
+
+ var i int64
+ lastByte := contentLen % urlCount
+ step := contentLen / urlCount
+ end := start + step
+ ranges = make([][]int64, urlCount)
+
+ if start == 0 {
+ end--
+ }
+
+ for i = 0; i < urlCount; i++ {
+ ranges[i] = make([]int64, 2)
+ if lastByte > 0 {
+ end++
+ lastByte--
+ }
+
+ ranges[i][0] = start
+ ranges[i][1] = end
+
+ start = end + 1
+ end = end + step
+ }
+
+ return ranges
+}
+
+func discardURL(urls []string, url string) []string {
+ for idx, tmpURL := range urls {
+ if url == tmpURL {
+ fmt.Printf("Discarding url %s ...\n", url)
+ return append(urls[:idx], urls[idx+1:]...)
+ }
+ }
+
+ return nil
+}
+
+func main() {
+ fmt.Println("ahellow")
+}
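
The "12-11/0" jobs in the test output above fall straight out of calcRanges. After a failed chunk, startDownloading re-splits with currLen := chunk.to - chunk.pos, which is 0 when the request died before reading a byte (and is short by one byte in general, since the ranges are inclusive); with contentLen 0 every range after the first comes out inverted. A standalone sketch reproducing it, with calcRanges copied verbatim from the version above:

package main

import "fmt"

func calcRanges(urlCount int64, contentLen, start int64) (ranges [][]int64) {
	var i int64
	lastByte := contentLen % urlCount
	step := contentLen / urlCount
	end := start + step
	ranges = make([][]int64, urlCount)
	if start == 0 {
		end--
	}
	for i = 0; i < urlCount; i++ {
		ranges[i] = make([]int64, 2)
		if lastByte > 0 {
			end++
			lastByte--
		}
		ranges[i][0] = start
		ranges[i][1] = end
		start = end + 1
		end = end + step
	}
	return ranges
}

func main() {
	// The healthy case: 10 bytes over 3 URLs.
	fmt.Println(calcRanges(3, 10, 0)) // [[0 3] [4 6] [7 9]]
	// The failure case from the log: chunk 11-11 failed at pos 11, so
	// currLen = to - pos = 0 and the re-split degenerates.
	fmt.Println(calcRanges(3, 0, 11)) // [[11 11] [12 11] [12 11]]
}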

Анатоли updated the solution on 03.01.2017 00:28 (more than a year ago)

package main
import (
"context"
"fmt"
"io"
"net/http"
"sort"
"sync"
)
type chunk struct {
data []byte
from int64
to int64
pos int64
url string
err error
}
type comm struct {
jobs chan chunk
res chan chunk
done chan struct{}
}
type chunkSort []chunk
func (a chunkSort) Len() int { return len(a) }
func (a chunkSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a chunkSort) Less(i, j int) bool { return a[i].from < a[j].from }
type file struct {
chunks []chunk
size int64
currSize int64
pos int64
urls []string
comm *comm
err error
lock *sync.Mutex
}
func (f *file) Read(p []byte) (n int, err error) {
f.lock.Lock()
defer f.lock.Unlock()
if f.err != nil {
return f.readChunks(p), f.err
}
if f.pos == f.size && f.size > 0 {
return 0, io.EOF
}
return f.readChunks(p), nil
}
func (f *file) readChunks(p []byte) (n int) {
//fmt.Println("read chunks", len(p), f.pos, f.size)
prevTo := int64(0)
bytesToRead := len(p)
- pos := int64(0)
bytes := 0
if bytesToRead == 0 {
return 0
}
-Breaking:
for _, c := range f.chunks {
if prevTo != c.from {
break
}
- for _, d := range c.data {
- if pos < f.pos {
- pos++
- continue
- }
- p[bytes] = d
+ prevTo = c.pos
+ if f.pos > c.pos {
+ continue
+ }
+ relPos := int64(len(c.data)) - (c.pos - f.pos)
+ //fmt.Println(f.pos, c.pos, relPos, len(c.data))
+
+ for _, d := range c.data[relPos:] {
+ p[bytes] = d
bytes++
f.pos++
- pos = f.pos + 1
if bytes == bytesToRead {
- break Breaking
+ return bytes
}
}
-
- prevTo = c.pos
}
return bytes
}
func (f *file) addChunk(chunk chunk) (err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.chunks = append(f.chunks, chunk)
f.currSize += int64(len(chunk.data))
sort.Sort(chunkSort(f.chunks))
return nil
}
func (f *file) setErr(err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.err = err
}
func (f *file) setSize(size int64) {
f.lock.Lock()
defer f.lock.Unlock()
f.size = size
}
func (f *file) isReady() bool {
return f.currSize == f.size && f.size > 0
}
//DownloadFile No comment
func DownloadFile(ctx context.Context, urls []string) io.Reader {
fmt.Println("---- NEW DOWNLOADER ----")
maxWorkers := len(urls)
urlCount := maxWorkers
if ctx == nil {
ctx = context.Background()
}
if tmpMaxWorkers, ok := ctx.Value("max-connections").(int); ok {
maxWorkers = tmpMaxWorkers
}
comm := &comm{
jobs: make(chan chunk, urlCount),
res: make(chan chunk, urlCount),
done: make(chan struct{}, 1),
}
// create downloaders
for i := 0; i < maxWorkers; i++ {
go downloadWorker(ctx, comm)
}
f := &file{
chunks: make([]chunk, 0),
lock: &sync.Mutex{},
comm: comm,
urls: urls,
}
go startDownloading(ctx, f)
return f
}
func downloadWorker(ctx context.Context, comm *comm) {
var client http.Client
Breaking:
for {
select {
case chunk := <-comm.jobs:
to := chunk.to
from := chunk.from
chunk.pos = chunk.from
for {
rheader := fmt.Sprintf("bytes=%d-%d", from, to)
req, _ := http.NewRequest("GET", chunk.url, nil)
req.Header.Add("Range", rheader)
resp, rerr := client.Do(req)
if rerr != nil {
chunk.err = rerr
comm.res <- chunk
continue Breaking
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
resp.Body.Close()
chunk.err = fmt.Errorf("HTTP status %v", resp.StatusCode)
comm.res <- chunk
continue Breaking
}
buf := make([]byte, 1000)
var ioerr error
var read int
for {
read, ioerr = resp.Body.Read(buf)
if read > 0 {
chunk.data = append(chunk.data, buf[:read]...)
chunk.pos = chunk.pos + int64(len(buf[:read]))
}
if ioerr != nil {
break
}
select {
case <-ctx.Done():
resp.Body.Close()
comm.res <- chunk
break Breaking
default:
}
}
resp.Body.Close()
if (ioerr == nil || ioerr == io.EOF) && (chunk.pos-1) == chunk.to {
break
}
from = chunk.pos
}
comm.res <- chunk
case <-comm.done:
return
case <-ctx.Done():
return
}
}
//fmt.Println("Exit worker...")
}
func startDownloading(ctx context.Context, f *file) {
// while the file is not ready
defer close(f.comm.done)
contentLen, err := readContentLen(f.urls)
if err != nil {
fmt.Println("Failed to read content len from: ", f.urls)
f.setErr(err)
return
}
f.setSize(contentLen)
currUrls := f.urls
createDownloadJobs(currUrls, f.size, 0, f.comm.jobs)
for {
select {
case chunk := <-f.comm.res:
if chunk.err != nil {
fmt.Printf(
"Failed to download chunk %v-%v(%v): %v\n",
chunk.from,
chunk.to,
chunk.pos,
chunk.err,
)
currUrls = discardURL(currUrls, chunk.url)
currLen := chunk.to - chunk.pos
currStart := chunk.pos
if len(chunk.data) > 0 {
f.addChunk(chunk)
}
if len(currUrls) == 0 {
f.setErr(fmt.Errorf("no valid urls"))
return
}
createDownloadJobs(currUrls, currLen, currStart, f.comm.jobs)
} else {
f.addChunk(chunk)
}
case <-ctx.Done():
return
}
if f.isReady() {
return
}
}
}
func readContentLen(urls []string) (size int64, err error) {
for _, url := range urls {
resp, tmperr := http.Head(url)
if tmperr != nil {
err = fmt.Errorf("[ERROR] HEAD %v: %v", url, tmperr)
continue
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
err = fmt.Errorf("[ERROR] HEAD `%v`[%v]", url, resp.StatusCode)
continue
}
if resp.ContentLength == -1 {
err = io.EOF
continue
}
size = resp.ContentLength
err = nil
break
}
return size, err
}
func createDownloadJobs(urls []string, currLen, start int64, jobsChan chan<- chunk) {
ranges := calcRanges(int64(len(urls)), currLen, start)
for idx, url := range urls {
r := ranges[idx]
fmt.Printf("New job for range %v-%v/%v : %v\n", r[0], r[1], currLen, url)
job := chunk{
data: make([]byte, 0),
from: r[0],
to: r[1],
url: url,
}
jobsChan <- job
}
}
func calcRanges(urlCount int64, contentLen, start int64) (ranges [][]int64) {
var i int64
lastByte := contentLen % urlCount
step := contentLen / urlCount
end := start + step
ranges = make([][]int64, urlCount)
if start == 0 {
end--
}
for i = 0; i < urlCount; i++ {
ranges[i] = make([]int64, 2)
if lastByte > 0 {
end++
lastByte--
}
ranges[i][0] = start
ranges[i][1] = end
start = end + 1
end = end + step
}
return ranges
}
func discardURL(urls []string, url string) []string {
for idx, tmpURL := range urls {
if url == tmpURL {
fmt.Printf("Discarding url %s ...\n", url)
return append(urls[:idx], urls[idx+1:]...)
}
}
return nil
}
func main() {
fmt.Println("ahellow")
}
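
The readChunks rewrite in this version replaces the byte-by-byte skip with a single index computation. Assuming c.pos == c.from + len(c.data) (one past the last byte the chunk holds), relPos maps the reader's absolute offset f.pos onto an index into the chunk's data. A worked instance of that arithmetic:

package main

import "fmt"

func main() {
	// A chunk holding absolute bytes 4, 5 and 6 of the file.
	cFrom, data := int64(4), []byte{'a', 'b', 'c'}
	cPos := cFrom + int64(len(data)) // 7: one past the last byte held
	fPos := int64(5)                 // the reader has consumed bytes 0..4

	relPos := int64(len(data)) - (cPos - fPos) // 3 - (7-5) = 1
	fmt.Println(string(data[relPos:]))         // "bc": absolute bytes 5 and 6
}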

Анатоли updated the solution on 03.01.2017 14:41 (more than a year ago)

package main
import (
"context"
"fmt"
"io"
"net/http"
"sort"
"sync"
+ "time"
)
type chunk struct {
data []byte
from int64
to int64
pos int64
url string
err error
}
type comm struct {
jobs chan chunk
res chan chunk
done chan struct{}
}
type chunkSort []chunk
func (a chunkSort) Len() int { return len(a) }
func (a chunkSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a chunkSort) Less(i, j int) bool { return a[i].from < a[j].from }
type file struct {
chunks []chunk
size int64
currSize int64
pos int64
urls []string
comm *comm
err error
lock *sync.Mutex
}
func (f *file) Read(p []byte) (n int, err error) {
f.lock.Lock()
defer f.lock.Unlock()
if f.err != nil {
return f.readChunks(p), f.err
}
if f.pos == f.size && f.size > 0 {
return 0, io.EOF
}
return f.readChunks(p), nil
}
func (f *file) readChunks(p []byte) (n int) {
//fmt.Println("read chunks", len(p), f.pos, f.size)
prevTo := int64(0)
bytesToRead := len(p)
bytes := 0
if bytesToRead == 0 {
return 0
}
for _, c := range f.chunks {
if prevTo != c.from {
break
}
prevTo = c.pos
if f.pos > c.pos {
continue
}
relPos := int64(len(c.data)) - (c.pos - f.pos)
//fmt.Println(f.pos, c.pos, relPos, len(c.data))
for _, d := range c.data[relPos:] {
p[bytes] = d
bytes++
f.pos++
if bytes == bytesToRead {
return bytes
}
}
}
return bytes
}
func (f *file) addChunk(chunk chunk) (err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.chunks = append(f.chunks, chunk)
f.currSize += int64(len(chunk.data))
sort.Sort(chunkSort(f.chunks))
return nil
}
func (f *file) setErr(err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.err = err
}
func (f *file) setSize(size int64) {
f.lock.Lock()
defer f.lock.Unlock()
f.size = size
}
func (f *file) isReady() bool {
return f.currSize == f.size && f.size > 0
}
//DownloadFile No comment
func DownloadFile(ctx context.Context, urls []string) io.Reader {
fmt.Println("---- NEW DOWNLOADER ----")
maxWorkers := len(urls)
urlCount := maxWorkers
if ctx == nil {
ctx = context.Background()
}
if tmpMaxWorkers, ok := ctx.Value("max-connections").(int); ok {
maxWorkers = tmpMaxWorkers
}
comm := &comm{
jobs: make(chan chunk, urlCount),
res: make(chan chunk, urlCount),
done: make(chan struct{}, 1),
}
// create downloaders
for i := 0; i < maxWorkers; i++ {
go downloadWorker(ctx, comm)
}
f := &file{
chunks: make([]chunk, 0),
lock: &sync.Mutex{},
comm: comm,
urls: urls,
}
go startDownloading(ctx, f)
return f
}
func downloadWorker(ctx context.Context, comm *comm) {
var client http.Client
Breaking:
for {
select {
case chunk := <-comm.jobs:
to := chunk.to
from := chunk.from
chunk.pos = chunk.from
for {
+
+ if from > 500000 && from < 600000 {
+ time.Sleep(5 * time.Second)
+ }
rheader := fmt.Sprintf("bytes=%d-%d", from, to)
req, _ := http.NewRequest("GET", chunk.url, nil)
req.Header.Add("Range", rheader)
resp, rerr := client.Do(req)
if rerr != nil {
chunk.err = rerr
comm.res <- chunk
continue Breaking
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
resp.Body.Close()
chunk.err = fmt.Errorf("HTTP status %v", resp.StatusCode)
comm.res <- chunk
continue Breaking
}
buf := make([]byte, 1000)
var ioerr error
var read int
for {
read, ioerr = resp.Body.Read(buf)
if read > 0 {
chunk.data = append(chunk.data, buf[:read]...)
chunk.pos = chunk.pos + int64(len(buf[:read]))
}
if ioerr != nil {
break
}
select {
case <-ctx.Done():
resp.Body.Close()
comm.res <- chunk
break Breaking
default:
}
}
resp.Body.Close()
if (ioerr == nil || ioerr == io.EOF) && (chunk.pos-1) == chunk.to {
break
}
from = chunk.pos
}
comm.res <- chunk
case <-comm.done:
return
case <-ctx.Done():
return
}
}
//fmt.Println("Exit worker...")
}
func startDownloading(ctx context.Context, f *file) {
// while the file is not ready
defer close(f.comm.done)
contentLen, err := readContentLen(f.urls)
if err != nil {
fmt.Println("Failed to read content len from: ", f.urls)
f.setErr(err)
return
}
f.setSize(contentLen)
currUrls := f.urls
createDownloadJobs(currUrls, f.size, 0, f.comm.jobs)
for {
select {
case chunk := <-f.comm.res:
if chunk.err != nil {
fmt.Printf(
"Failed to download chunk %v-%v(%v): %v\n",
chunk.from,
chunk.to,
chunk.pos,
chunk.err,
)
currUrls = discardURL(currUrls, chunk.url)
currLen := chunk.to - chunk.pos
currStart := chunk.pos
if len(chunk.data) > 0 {
f.addChunk(chunk)
}
if len(currUrls) == 0 {
f.setErr(fmt.Errorf("no valid urls"))
return
}
createDownloadJobs(currUrls, currLen, currStart, f.comm.jobs)
} else {
f.addChunk(chunk)
}
case <-ctx.Done():
return
}
if f.isReady() {
return
}
}
}
func readContentLen(urls []string) (size int64, err error) {
for _, url := range urls {
resp, tmperr := http.Head(url)
if tmperr != nil {
err = fmt.Errorf("[ERROR] HEAD %v: %v", url, tmperr)
continue
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
err = fmt.Errorf("[ERROR] HEAD `%v`[%v]", url, resp.StatusCode)
continue
}
- if resp.ContentLength == -1 {
+ if resp.ContentLength == -1 || resp.ContentLength == 0 {
err = io.EOF
continue
}
size = resp.ContentLength
err = nil
break
}
return size, err
}
func createDownloadJobs(urls []string, currLen, start int64, jobsChan chan<- chunk) {
ranges := calcRanges(int64(len(urls)), currLen, start)
for idx, url := range urls {
r := ranges[idx]
fmt.Printf("New job for range %v-%v/%v : %v\n", r[0], r[1], currLen, url)
job := chunk{
data: make([]byte, 0),
from: r[0],
to: r[1],
url: url,
}
jobsChan <- job
}
}
func calcRanges(urlCount int64, contentLen, start int64) (ranges [][]int64) {
var i int64
lastByte := contentLen % urlCount
step := contentLen / urlCount
end := start + step
ranges = make([][]int64, urlCount)
if start == 0 {
end--
}
for i = 0; i < urlCount; i++ {
ranges[i] = make([]int64, 2)
if lastByte > 0 {
end++
lastByte--
}
ranges[i][0] = start
ranges[i][1] = end
start = end + 1
end = end + step
}
return ranges
}
func discardURL(urls []string, url string) []string {
for idx, tmpURL := range urls {
if url == tmpURL {
fmt.Printf("Discarding url %s ...\n", url)
return append(urls[:idx], urls[idx+1:]...)
}
}
return nil
}
func main() {
fmt.Println("ahellow")
}
+
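
Two things changed here: readContentLen now also rejects a ContentLength of 0 (net/http already reports an unknown length as -1), and a time.Sleep block appears in the worker that looks like a throttling experiment — it is removed again in the 15:47 version. Worth spelling out, too, why the worker's completion check is (chunk.pos-1) == chunk.to: HTTP byte ranges are inclusive on both ends, so a complete read leaves pos at to+1. A tiny sanity check:

package main

import "fmt"

func main() {
	from, to := int64(0), int64(3)
	// "Range: bytes=0-3" is inclusive: it requests 4 bytes, not 3.
	fmt.Printf("bytes=%d-%d requests %d bytes\n", from, to, to-from+1)

	pos := from + (to - from + 1) // pos after reading the full range
	fmt.Println(pos-1 == to)      // true: the worker's completion check
}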

Анатоли updated the solution on 03.01.2017 14:43 (more than a year ago)

package main
import (
"context"
"fmt"
"io"
"net/http"
"sort"
"sync"
"time"
)
type chunk struct {
data []byte
from int64
to int64
pos int64
url string
err error
}
type comm struct {
jobs chan chunk
res chan chunk
done chan struct{}
}
type chunkSort []chunk
func (a chunkSort) Len() int { return len(a) }
func (a chunkSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a chunkSort) Less(i, j int) bool { return a[i].from < a[j].from }
type file struct {
chunks []chunk
size int64
currSize int64
pos int64
urls []string
comm *comm
err error
lock *sync.Mutex
}
func (f *file) Read(p []byte) (n int, err error) {
f.lock.Lock()
defer f.lock.Unlock()
if f.err != nil {
return f.readChunks(p), f.err
}
if f.pos == f.size && f.size > 0 {
return 0, io.EOF
}
return f.readChunks(p), nil
}
func (f *file) readChunks(p []byte) (n int) {
//fmt.Println("read chunks", len(p), f.pos, f.size)
prevTo := int64(0)
bytesToRead := len(p)
bytes := 0
if bytesToRead == 0 {
return 0
}
for _, c := range f.chunks {
if prevTo != c.from {
break
}
prevTo = c.pos
if f.pos > c.pos {
continue
}
relPos := int64(len(c.data)) - (c.pos - f.pos)
//fmt.Println(f.pos, c.pos, relPos, len(c.data))
for _, d := range c.data[relPos:] {
p[bytes] = d
bytes++
f.pos++
if bytes == bytesToRead {
return bytes
}
}
}
return bytes
}
func (f *file) addChunk(chunk chunk) (err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.chunks = append(f.chunks, chunk)
f.currSize += int64(len(chunk.data))
sort.Sort(chunkSort(f.chunks))
return nil
}
func (f *file) setErr(err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.err = err
}
func (f *file) setSize(size int64) {
f.lock.Lock()
defer f.lock.Unlock()
f.size = size
}
func (f *file) isReady() bool {
return f.currSize == f.size && f.size > 0
}
//DownloadFile No comment
func DownloadFile(ctx context.Context, urls []string) io.Reader {
fmt.Println("---- NEW DOWNLOADER ----")
maxWorkers := len(urls)
urlCount := maxWorkers
if ctx == nil {
ctx = context.Background()
}
if tmpMaxWorkers, ok := ctx.Value("max-connections").(int); ok {
maxWorkers = tmpMaxWorkers
}
comm := &comm{
jobs: make(chan chunk, urlCount),
res: make(chan chunk, urlCount),
done: make(chan struct{}, 1),
}
// create downloaders
for i := 0; i < maxWorkers; i++ {
go downloadWorker(ctx, comm)
}
f := &file{
chunks: make([]chunk, 0),
lock: &sync.Mutex{},
comm: comm,
urls: urls,
}
go startDownloading(ctx, f)
return f
}
func downloadWorker(ctx context.Context, comm *comm) {
var client http.Client
Breaking:
for {
select {
case chunk := <-comm.jobs:
to := chunk.to
from := chunk.from
chunk.pos = chunk.from
for {
if from > 500000 && from < 600000 {
time.Sleep(5 * time.Second)
}
rheader := fmt.Sprintf("bytes=%d-%d", from, to)
req, _ := http.NewRequest("GET", chunk.url, nil)
req.Header.Add("Range", rheader)
resp, rerr := client.Do(req)
if rerr != nil {
chunk.err = rerr
comm.res <- chunk
continue Breaking
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
resp.Body.Close()
chunk.err = fmt.Errorf("HTTP status %v", resp.StatusCode)
comm.res <- chunk
continue Breaking
}
buf := make([]byte, 1000)
var ioerr error
var read int
for {
read, ioerr = resp.Body.Read(buf)
if read > 0 {
chunk.data = append(chunk.data, buf[:read]...)
chunk.pos = chunk.pos + int64(len(buf[:read]))
}
if ioerr != nil {
break
}
select {
case <-ctx.Done():
resp.Body.Close()
comm.res <- chunk
break Breaking
default:
}
}
resp.Body.Close()
if (ioerr == nil || ioerr == io.EOF) && (chunk.pos-1) == chunk.to {
break
}
from = chunk.pos
}
comm.res <- chunk
case <-comm.done:
return
case <-ctx.Done():
return
}
}
//fmt.Println("Exit worker...")
}
func startDownloading(ctx context.Context, f *file) {
// while the file is not ready
defer close(f.comm.done)
contentLen, err := readContentLen(f.urls)
if err != nil {
fmt.Println("Failed to read content len from: ", f.urls)
f.setErr(err)
return
}
f.setSize(contentLen)
currUrls := f.urls
createDownloadJobs(currUrls, f.size, 0, f.comm.jobs)
for {
select {
case chunk := <-f.comm.res:
if chunk.err != nil {
fmt.Printf(
"Failed to download chunk %v-%v(%v): %v\n",
chunk.from,
chunk.to,
chunk.pos,
chunk.err,
)
currUrls = discardURL(currUrls, chunk.url)
currLen := chunk.to - chunk.pos
currStart := chunk.pos
if len(chunk.data) > 0 {
f.addChunk(chunk)
}
if len(currUrls) == 0 {
f.setErr(fmt.Errorf("no valid urls"))
return
}
createDownloadJobs(currUrls, currLen, currStart, f.comm.jobs)
} else {
f.addChunk(chunk)
}
case <-ctx.Done():
return
}
if f.isReady() {
return
}
}
}
func readContentLen(urls []string) (size int64, err error) {
for _, url := range urls {
resp, tmperr := http.Head(url)
if tmperr != nil {
err = fmt.Errorf("[ERROR] HEAD %v: %v", url, tmperr)
continue
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
err = fmt.Errorf("[ERROR] HEAD `%v`[%v]", url, resp.StatusCode)
continue
}
if resp.ContentLength == -1 || resp.ContentLength == 0 {
err = io.EOF
continue
}
size = resp.ContentLength
err = nil
break
}
return size, err
}
func createDownloadJobs(urls []string, currLen, start int64, jobsChan chan<- chunk) {
ranges := calcRanges(int64(len(urls)), currLen, start)
for idx, url := range urls {
r := ranges[idx]
fmt.Printf("New job for range %v-%v/%v : %v\n", r[0], r[1], currLen, url)
job := chunk{
data: make([]byte, 0),
from: r[0],
to: r[1],
url: url,
}
jobsChan <- job
}
}
func calcRanges(urlCount int64, contentLen, start int64) (ranges [][]int64) {
var i int64
lastByte := contentLen % urlCount
step := contentLen / urlCount
end := start + step
ranges = make([][]int64, urlCount)
if start == 0 {
end--
}
for i = 0; i < urlCount; i++ {
ranges[i] = make([]int64, 2)
if lastByte > 0 {
end++
lastByte--
}
ranges[i][0] = start
ranges[i][1] = end
start = end + 1
end = end + step
}
return ranges
}
func discardURL(urls []string, url string) []string {
for idx, tmpURL := range urls {
if url == tmpURL {
fmt.Printf("Discarding url %s ...\n", url)
return append(urls[:idx], urls[idx+1:]...)
}
}
return nil
}
func main() {
fmt.Println("ahellow")
}
-
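
This version only drops a stray trailing line, so it is a good place for a usage sketch: the two knobs DownloadFile exposes are the URL list and the "max-connections" context value it type-asserts to an int. A hypothetical caller (the solution's own main would have to be removed first; the URLs are the test server's from the log, and a plain string key is used because that is what the solution looks up):

package main

import (
	"bytes"
	"context"
	"fmt"
)

func main() {
	// Cap the download at 2 concurrent workers.
	ctx := context.WithValue(context.Background(), "max-connections", 2)
	r := DownloadFile(ctx, []string{
		"http://127.0.0.1:34085/0",
		"http://127.0.0.1:34085/1",
	})

	var buf bytes.Buffer
	if _, err := buf.ReadFrom(r); err != nil {
		fmt.Println("download failed:", err)
	}
	fmt.Println(buf.Len(), "bytes downloaded")
}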

Анатоли updated the solution on 03.01.2017 15:47 (more than a year ago)

package main
import (
"context"
"fmt"
"io"
"net/http"
"sort"
"sync"
- "time"
)
type chunk struct {
- data []byte
- from int64
- to int64
- pos int64
- url string
- err error
+ data []byte
+ from int64
+ to int64
+ pos int64
+ url string
+ err error
+ ctxClosed bool
}
type comm struct {
jobs chan chunk
res chan chunk
done chan struct{}
}
type chunkSort []chunk
func (a chunkSort) Len() int { return len(a) }
func (a chunkSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a chunkSort) Less(i, j int) bool { return a[i].from < a[j].from }
type file struct {
- chunks []chunk
- size int64
- currSize int64
- pos int64
- urls []string
- comm *comm
- err error
- lock *sync.Mutex
+ chunks []chunk
+ size int64
+ currSize int64
+ pos int64
+ urls []string
+ comm *comm
+ err error
+ maxWorkers int
+ lock *sync.Mutex
}
func (f *file) Read(p []byte) (n int, err error) {
f.lock.Lock()
defer f.lock.Unlock()
if f.err != nil {
return f.readChunks(p), f.err
}
if f.pos == f.size && f.size > 0 {
return 0, io.EOF
}
return f.readChunks(p), nil
}
func (f *file) readChunks(p []byte) (n int) {
//fmt.Println("read chunks", len(p), f.pos, f.size)
prevTo := int64(0)
bytesToRead := len(p)
bytes := 0
if bytesToRead == 0 {
return 0
}
for _, c := range f.chunks {
if prevTo != c.from {
break
}
prevTo = c.pos
if f.pos > c.pos {
continue
}
relPos := int64(len(c.data)) - (c.pos - f.pos)
//fmt.Println(f.pos, c.pos, relPos, len(c.data))
for _, d := range c.data[relPos:] {
p[bytes] = d
bytes++
f.pos++
if bytes == bytesToRead {
return bytes
}
}
}
return bytes
}
func (f *file) addChunk(chunk chunk) (err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.chunks = append(f.chunks, chunk)
f.currSize += int64(len(chunk.data))
sort.Sort(chunkSort(f.chunks))
return nil
}
func (f *file) setErr(err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.err = err
}
func (f *file) setSize(size int64) {
f.lock.Lock()
defer f.lock.Unlock()
f.size = size
}
func (f *file) isReady() bool {
return f.currSize == f.size && f.size > 0
}
//DownloadFile No comment
func DownloadFile(ctx context.Context, urls []string) io.Reader {
fmt.Println("---- NEW DOWNLOADER ----")
maxWorkers := len(urls)
urlCount := maxWorkers
if ctx == nil {
ctx = context.Background()
}
if tmpMaxWorkers, ok := ctx.Value("max-connections").(int); ok {
maxWorkers = tmpMaxWorkers
}
comm := &comm{
jobs: make(chan chunk, urlCount),
res: make(chan chunk, urlCount),
done: make(chan struct{}, 1),
}
// create downloaders
for i := 0; i < maxWorkers; i++ {
go downloadWorker(ctx, comm)
}
f := &file{
- chunks: make([]chunk, 0),
- lock: &sync.Mutex{},
- comm: comm,
- urls: urls,
+ chunks: make([]chunk, 0),
+ lock: &sync.Mutex{},
+ comm: comm,
+ urls: urls,
+ maxWorkers: maxWorkers,
}
go startDownloading(ctx, f)
return f
}
func downloadWorker(ctx context.Context, comm *comm) {
var client http.Client
Breaking:
for {
select {
case chunk := <-comm.jobs:
to := chunk.to
from := chunk.from
chunk.pos = chunk.from
for {
-
- if from > 500000 && from < 600000 {
- time.Sleep(5 * time.Second)
- }
rheader := fmt.Sprintf("bytes=%d-%d", from, to)
-
req, _ := http.NewRequest("GET", chunk.url, nil)
req.Header.Add("Range", rheader)
resp, rerr := client.Do(req)
if rerr != nil {
chunk.err = rerr
comm.res <- chunk
continue Breaking
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
resp.Body.Close()
chunk.err = fmt.Errorf("HTTP status %v", resp.StatusCode)
comm.res <- chunk
continue Breaking
}
buf := make([]byte, 1000)
var ioerr error
var read int
for {
read, ioerr = resp.Body.Read(buf)
if read > 0 {
chunk.data = append(chunk.data, buf[:read]...)
chunk.pos = chunk.pos + int64(len(buf[:read]))
}
if ioerr != nil {
break
}
select {
case <-ctx.Done():
resp.Body.Close()
+ chunk.err = fmt.Errorf("ctx closed")
+ chunk.ctxClosed = true
comm.res <- chunk
- break Breaking
+ return
default:
}
}
resp.Body.Close()
if (ioerr == nil || ioerr == io.EOF) && (chunk.pos-1) == chunk.to {
break
}
from = chunk.pos
}
comm.res <- chunk
case <-comm.done:
return
case <-ctx.Done():
+ chunk := chunk{
+ data: make([]byte, 0),
+ }
+ chunk.err = fmt.Errorf("ctx closed")
+ chunk.ctxClosed = true
+ comm.res <- chunk
return
}
}
//fmt.Println("Exit worker...")
}
func startDownloading(ctx context.Context, f *file) {
// while the file is not ready
defer close(f.comm.done)
contentLen, err := readContentLen(f.urls)
if err != nil {
fmt.Println("Failed to read content len from: ", f.urls)
f.setErr(err)
return
}
f.setSize(contentLen)
currUrls := f.urls
createDownloadJobs(currUrls, f.size, 0, f.comm.jobs)
+ ctxErrs := make([]error, 0)
+
for {
select {
case chunk := <-f.comm.res:
if chunk.err != nil {
+
fmt.Printf(
"Failed to download chunk %v-%v(%v): %v\n",
chunk.from,
chunk.to,
chunk.pos,
chunk.err,
)
- currUrls = discardURL(currUrls, chunk.url)
- currLen := chunk.to - chunk.pos
- currStart := chunk.pos
-
if len(chunk.data) > 0 {
f.addChunk(chunk)
}
+ if chunk.ctxClosed {
+ ctxErrs = append(ctxErrs, chunk.err)
+ break
+ }
+
+ currUrls = discardURL(currUrls, chunk.url)
+
if len(currUrls) == 0 {
f.setErr(fmt.Errorf("no valid urls"))
return
}
+ currLen := chunk.to - chunk.pos
+ currStart := chunk.pos
createDownloadJobs(currUrls, currLen, currStart, f.comm.jobs)
} else {
f.addChunk(chunk)
}
- case <-ctx.Done():
- return
- }
- if f.isReady() {
+ if f.isReady() {
+ return
+ }
+ case <-ctx.Done():
+ if len(ctxErrs) == f.maxWorkers {
+ f.setErr(ctxErrs[len(ctxErrs)-1])
+ }
return
}
}
}
func readContentLen(urls []string) (size int64, err error) {
for _, url := range urls {
resp, tmperr := http.Head(url)
if tmperr != nil {
err = fmt.Errorf("[ERROR] HEAD %v: %v", url, tmperr)
continue
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
err = fmt.Errorf("[ERROR] HEAD `%v`[%v]", url, resp.StatusCode)
continue
}
if resp.ContentLength == -1 || resp.ContentLength == 0 {
err = io.EOF
continue
}
size = resp.ContentLength
err = nil
break
}
return size, err
}
func createDownloadJobs(urls []string, currLen, start int64, jobsChan chan<- chunk) {
ranges := calcRanges(int64(len(urls)), currLen, start)
for idx, url := range urls {
r := ranges[idx]
fmt.Printf("New job for range %v-%v/%v : %v\n", r[0], r[1], currLen, url)
job := chunk{
data: make([]byte, 0),
from: r[0],
to: r[1],
url: url,
}
jobsChan <- job
}
}
func calcRanges(urlCount int64, contentLen, start int64) (ranges [][]int64) {
var i int64
lastByte := contentLen % urlCount
step := contentLen / urlCount
end := start + step
ranges = make([][]int64, urlCount)
if start == 0 {
end--
}
for i = 0; i < urlCount; i++ {
ranges[i] = make([]int64, 2)
if lastByte > 0 {
end++
lastByte--
}
ranges[i][0] = start
ranges[i][1] = end
start = end + 1
end = end + step
}
return ranges
}
func discardURL(urls []string, url string) []string {
for idx, tmpURL := range urls {
if url == tmpURL {
fmt.Printf("Discarding url %s ...\n", url)
return append(urls[:idx], urls[idx+1:]...)
}
}
return nil
}
func main() {
fmt.Println("ahellow")
}
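
This revision makes cancellation observable: a worker that sees ctx.Done() now pushes a ctxClosed result instead of silently exiting, and startDownloading records the last such error — but only if all maxWorkers of them managed to report before its own ctx.Done() branch fires, a race the 15:57 version then reworks. A hypothetical cancellation flow under those assumptions (same caveat as the earlier sketch about the solution's main):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	urls := []string{"http://127.0.0.1:34085/0"} // test-server URL from the log
	ctx, cancel := context.WithCancel(context.Background())
	r := DownloadFile(ctx, urls)

	go func() {
		time.Sleep(100 * time.Millisecond)
		cancel() // tear the download down mid-flight
	}()

	buf := make([]byte, 512)
	n, err := r.Read(buf) // bytes received so far; the recorded error follows
	fmt.Println(n, err)
}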

Анатоли updated the solution on 03.01.2017 15:57 (more than a year ago)

package main
import (
"context"
"fmt"
"io"
"net/http"
"sort"
"sync"
)
type chunk struct {
data []byte
from int64
to int64
pos int64
url string
err error
ctxClosed bool
}
type comm struct {
jobs chan chunk
res chan chunk
done chan struct{}
}
type chunkSort []chunk
func (a chunkSort) Len() int { return len(a) }
func (a chunkSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a chunkSort) Less(i, j int) bool { return a[i].from < a[j].from }
type file struct {
chunks []chunk
size int64
currSize int64
pos int64
urls []string
comm *comm
err error
maxWorkers int
lock *sync.Mutex
}
func (f *file) Read(p []byte) (n int, err error) {
f.lock.Lock()
defer f.lock.Unlock()
if f.err != nil {
return f.readChunks(p), f.err
}
if f.pos == f.size && f.size > 0 {
return 0, io.EOF
}
return f.readChunks(p), nil
}
func (f *file) readChunks(p []byte) (n int) {
//fmt.Println("read chunks", len(p), f.pos, f.size)
prevTo := int64(0)
bytesToRead := len(p)
bytes := 0
if bytesToRead == 0 {
return 0
}
for _, c := range f.chunks {
if prevTo != c.from {
break
}
prevTo = c.pos
if f.pos > c.pos {
continue
}
relPos := int64(len(c.data)) - (c.pos - f.pos)
//fmt.Println(f.pos, c.pos, relPos, len(c.data))
for _, d := range c.data[relPos:] {
p[bytes] = d
bytes++
f.pos++
if bytes == bytesToRead {
return bytes
}
}
}
return bytes
}
func (f *file) addChunk(chunk chunk) (err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.chunks = append(f.chunks, chunk)
f.currSize += int64(len(chunk.data))
sort.Sort(chunkSort(f.chunks))
return nil
}
func (f *file) setErr(err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.err = err
}
func (f *file) setSize(size int64) {
f.lock.Lock()
defer f.lock.Unlock()
f.size = size
}
func (f *file) isReady() bool {
return f.currSize == f.size && f.size > 0
}
//DownloadFile No comment
func DownloadFile(ctx context.Context, urls []string) io.Reader {
fmt.Println("---- NEW DOWNLOADER ----")
maxWorkers := len(urls)
urlCount := maxWorkers
if ctx == nil {
ctx = context.Background()
}
if tmpMaxWorkers, ok := ctx.Value("max-connections").(int); ok {
maxWorkers = tmpMaxWorkers
}
comm := &comm{
jobs: make(chan chunk, urlCount),
res: make(chan chunk, urlCount),
done: make(chan struct{}, 1),
}
// create downloaders
for i := 0; i < maxWorkers; i++ {
go downloadWorker(ctx, comm)
}
f := &file{
chunks: make([]chunk, 0),
lock: &sync.Mutex{},
comm: comm,
urls: urls,
maxWorkers: maxWorkers,
}
go startDownloading(ctx, f)
return f
}
func downloadWorker(ctx context.Context, comm *comm) {
var client http.Client
Breaking:
for {
select {
case chunk := <-comm.jobs:
to := chunk.to
from := chunk.from
chunk.pos = chunk.from
for {
+
rheader := fmt.Sprintf("bytes=%d-%d", from, to)
req, _ := http.NewRequest("GET", chunk.url, nil)
req.Header.Add("Range", rheader)
resp, rerr := client.Do(req)
if rerr != nil {
chunk.err = rerr
comm.res <- chunk
continue Breaking
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
resp.Body.Close()
chunk.err = fmt.Errorf("HTTP status %v", resp.StatusCode)
comm.res <- chunk
continue Breaking
}
buf := make([]byte, 1000)
var ioerr error
var read int
for {
read, ioerr = resp.Body.Read(buf)
if read > 0 {
chunk.data = append(chunk.data, buf[:read]...)
chunk.pos = chunk.pos + int64(len(buf[:read]))
}
if ioerr != nil {
break
}
select {
case <-ctx.Done():
resp.Body.Close()
chunk.err = fmt.Errorf("ctx closed")
chunk.ctxClosed = true
comm.res <- chunk
return
default:
}
}
resp.Body.Close()
if (ioerr == nil || ioerr == io.EOF) && (chunk.pos-1) == chunk.to {
break
}
from = chunk.pos
}
comm.res <- chunk
case <-comm.done:
return
case <-ctx.Done():
chunk := chunk{
data: make([]byte, 0),
}
chunk.err = fmt.Errorf("ctx closed")
chunk.ctxClosed = true
comm.res <- chunk
return
}
}
//fmt.Println("Exit worker...")
}
func startDownloading(ctx context.Context, f *file) {
// while the file is not ready
defer close(f.comm.done)
contentLen, err := readContentLen(f.urls)
if err != nil {
fmt.Println("Failed to read content len from: ", f.urls)
f.setErr(err)
return
}
f.setSize(contentLen)
currUrls := f.urls
createDownloadJobs(currUrls, f.size, 0, f.comm.jobs)
ctxErrs := make([]error, 0)
for {
select {
case chunk := <-f.comm.res:
if chunk.err != nil {
fmt.Printf(
"Failed to download chunk %v-%v(%v): %v\n",
chunk.from,
chunk.to,
chunk.pos,
chunk.err,
)
if len(chunk.data) > 0 {
f.addChunk(chunk)
}
if chunk.ctxClosed {
ctxErrs = append(ctxErrs, chunk.err)
break
}
currUrls = discardURL(currUrls, chunk.url)
if len(currUrls) == 0 {
f.setErr(fmt.Errorf("no valid urls"))
return
}
currLen := chunk.to - chunk.pos
currStart := chunk.pos
createDownloadJobs(currUrls, currLen, currStart, f.comm.jobs)
} else {
f.addChunk(chunk)
}
if f.isReady() {
return
}
case <-ctx.Done():
if len(ctxErrs) == f.maxWorkers {
f.setErr(ctxErrs[len(ctxErrs)-1])
+ return
}
- return
+
}
}
}
func readContentLen(urls []string) (size int64, err error) {
for _, url := range urls {
resp, tmperr := http.Head(url)
if tmperr != nil {
err = fmt.Errorf("[ERROR] HEAD %v: %v", url, tmperr)
continue
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
err = fmt.Errorf("[ERROR] HEAD `%v`[%v]", url, resp.StatusCode)
continue
}
if resp.ContentLength == -1 || resp.ContentLength == 0 {
err = io.EOF
continue
}
size = resp.ContentLength
err = nil
break
}
return size, err
}
func createDownloadJobs(urls []string, currLen, start int64, jobsChan chan<- chunk) {
ranges := calcRanges(int64(len(urls)), currLen, start)
for idx, url := range urls {
r := ranges[idx]
fmt.Printf("New job for range %v-%v/%v : %v\n", r[0], r[1], currLen, url)
job := chunk{
data: make([]byte, 0),
from: r[0],
to: r[1],
url: url,
}
jobsChan <- job
}
}
func calcRanges(urlCount int64, contentLen, start int64) (ranges [][]int64) {
var i int64
lastByte := contentLen % urlCount
step := contentLen / urlCount
end := start + step
ranges = make([][]int64, urlCount)
if start == 0 {
end--
}
for i = 0; i < urlCount; i++ {
ranges[i] = make([]int64, 2)
if lastByte > 0 {
end++
lastByte--
}
ranges[i][0] = start
ranges[i][1] = end
start = end + 1
end = end + step
}
return ranges
}
func discardURL(urls []string, url string) []string {
for idx, tmpURL := range urls {
if url == tmpURL {
fmt.Printf("Discarding url %s ...\n", url)
return append(urls[:idx], urls[idx+1:]...)
}
}
return nil
}
func main() {
fmt.Println("ahellow")
}
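
Swapping fmt.Errorf("ctx closed") for ctx.Err() is a genuine improvement: callers get the context package's sentinel errors instead of an opaque string and can test for them directly:

package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(ctx.Err() == context.Canceled) // true

	ctx2, cancel2 := context.WithTimeout(context.Background(), 0)
	defer cancel2()
	<-ctx2.Done()
	fmt.Println(ctx2.Err() == context.DeadlineExceeded) // true
}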

Анатоли updated the solution on 03.01.2017 16:11 (more than a year ago)

package main
import (
"context"
"fmt"
"io"
"net/http"
"sort"
"sync"
)
type chunk struct {
data []byte
from int64
to int64
pos int64
url string
err error
ctxClosed bool
}
type comm struct {
jobs chan chunk
res chan chunk
done chan struct{}
}
type chunkSort []chunk
func (a chunkSort) Len() int { return len(a) }
func (a chunkSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a chunkSort) Less(i, j int) bool { return a[i].from < a[j].from }
type file struct {
chunks []chunk
size int64
currSize int64
pos int64
urls []string
comm *comm
err error
maxWorkers int
lock *sync.Mutex
}
func (f *file) Read(p []byte) (n int, err error) {
f.lock.Lock()
defer f.lock.Unlock()
if f.err != nil {
return f.readChunks(p), f.err
}
if f.pos == f.size && f.size > 0 {
return 0, io.EOF
}
return f.readChunks(p), nil
}
func (f *file) readChunks(p []byte) (n int) {
//fmt.Println("read chunks", len(p), f.pos, f.size)
prevTo := int64(0)
bytesToRead := len(p)
bytes := 0
if bytesToRead == 0 {
return 0
}
for _, c := range f.chunks {
if prevTo != c.from {
break
}
prevTo = c.pos
if f.pos > c.pos {
continue
}
relPos := int64(len(c.data)) - (c.pos - f.pos)
//fmt.Println(f.pos, c.pos, relPos, len(c.data))
for _, d := range c.data[relPos:] {
p[bytes] = d
bytes++
f.pos++
if bytes == bytesToRead {
return bytes
}
}
}
return bytes
}
func (f *file) addChunk(chunk chunk) (err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.chunks = append(f.chunks, chunk)
f.currSize += int64(len(chunk.data))
sort.Sort(chunkSort(f.chunks))
return nil
}
func (f *file) setErr(err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.err = err
}
func (f *file) setSize(size int64) {
f.lock.Lock()
defer f.lock.Unlock()
f.size = size
}
func (f *file) isReady() bool {
return f.currSize == f.size && f.size > 0
}
// DownloadFile downloads the file served at urls concurrently and returns
// an io.Reader that yields the assembled bytes in order.
func DownloadFile(ctx context.Context, urls []string) io.Reader {
fmt.Println("---- NEW DOWNLOADER ----")
maxWorkers := len(urls)
urlCount := maxWorkers
if ctx == nil {
ctx = context.Background()
}
if tmpMaxWorkers, ok := ctx.Value("max-connections").(int); ok {
maxWorkers = tmpMaxWorkers
}
comm := &comm{
jobs: make(chan chunk, urlCount),
res: make(chan chunk, urlCount),
done: make(chan struct{}, 1),
}
// create downloaders
for i := 0; i < maxWorkers; i++ {
go downloadWorker(ctx, comm)
}
f := &file{
chunks: make([]chunk, 0),
lock: &sync.Mutex{},
comm: comm,
urls: urls,
maxWorkers: maxWorkers,
}
go startDownloading(ctx, f)
return f
}
func downloadWorker(ctx context.Context, comm *comm) {
var client http.Client
Breaking:
for {
select {
case chunk := <-comm.jobs:
to := chunk.to
from := chunk.from
chunk.pos = chunk.from
for {
rheader := fmt.Sprintf("bytes=%d-%d", from, to)
req, _ := http.NewRequest("GET", chunk.url, nil)
req.Header.Add("Range", rheader)
resp, rerr := client.Do(req)
if rerr != nil {
chunk.err = rerr
comm.res <- chunk
continue Breaking
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
resp.Body.Close()
chunk.err = fmt.Errorf("HTTP status %v", resp.StatusCode)
comm.res <- chunk
continue Breaking
}
buf := make([]byte, 1000)
var ioerr error
var read int
for {
read, ioerr = resp.Body.Read(buf)
if read > 0 {
chunk.data = append(chunk.data, buf[:read]...)
chunk.pos = chunk.pos + int64(len(buf[:read]))
}
if ioerr != nil {
break
}
select {
case <-ctx.Done():
resp.Body.Close()
- chunk.err = fmt.Errorf("ctx closed")
+ chunk.err = ctx.Err()
chunk.ctxClosed = true
comm.res <- chunk
return
default:
}
}
resp.Body.Close()
if (ioerr == nil || ioerr == io.EOF) && (chunk.pos-1) == chunk.to {
break
}
from = chunk.pos
}
comm.res <- chunk
case <-comm.done:
return
case <-ctx.Done():
chunk := chunk{
data: make([]byte, 0),
}
- chunk.err = fmt.Errorf("ctx closed")
+ chunk.err = ctx.Err()
chunk.ctxClosed = true
comm.res <- chunk
return
}
}
//fmt.Println("Exit worker...")
}
func startDownloading(ctx context.Context, f *file) {
// drive the workers and collect results until the file is complete
// while the file is not ready
defer close(f.comm.done)
contentLen, err := readContentLen(f.urls)
if err != nil {
fmt.Println("Failed to read content len from: ", f.urls)
f.setErr(err)
return
}
f.setSize(contentLen)
currUrls := f.urls
createDownloadJobs(currUrls, f.size, 0, f.comm.jobs)
ctxErrs := make([]error, 0)
for {
select {
case chunk := <-f.comm.res:
if chunk.err != nil {
fmt.Printf(
"Failed to download chunk %v-%v(%v): %v\n",
chunk.from,
chunk.to,
chunk.pos,
chunk.err,
)
if len(chunk.data) > 0 {
f.addChunk(chunk)
}
if chunk.ctxClosed {
ctxErrs = append(ctxErrs, chunk.err)
break
}
currUrls = discardURL(currUrls, chunk.url)
if len(currUrls) == 0 {
f.setErr(fmt.Errorf("no valid urls"))
return
}
currLen := chunk.to - chunk.pos
currStart := chunk.pos
createDownloadJobs(currUrls, currLen, currStart, f.comm.jobs)
} else {
f.addChunk(chunk)
}
if f.isReady() {
return
}
case <-ctx.Done():
if len(ctxErrs) == f.maxWorkers {
f.setErr(ctxErrs[len(ctxErrs)-1])
return
}
-
}
}
}
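// Note: once ctx is canceled, <-ctx.Done() stays permanently ready, so the
// select above spins until all maxWorkers ctxClosed results have arrived on
// res; a common remedy is to stop selecting on ctx.Done() after the first
// cancellation is observed.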
func readContentLen(urls []string) (size int64, err error) {
for _, url := range urls {
resp, tmperr := http.Head(url)
if tmperr != nil {
err = fmt.Errorf("[ERROR] HEAD %v: %v", url, tmperr)
continue
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
err = fmt.Errorf("[ERROR] HEAD `%v`[%v]", url, resp.StatusCode)
continue
}
if resp.ContentLength == -1 || resp.ContentLength == 0 {
err = io.EOF
continue
}
size = resp.ContentLength
err = nil
break
}
return size, err
}
func createDownloadJobs(urls []string, currLen, start int64, jobsChan chan<- chunk) {
ranges := calcRanges(int64(len(urls)), currLen, start)
for idx, url := range urls {
r := ranges[idx]
fmt.Printf("New job for range %v-%v/%v : %v\n", r[0], r[1], currLen, url)
job := chunk{
data: make([]byte, 0),
from: r[0],
to: r[1],
url: url,
}
jobsChan <- job
}
}
func calcRanges(urlCount int64, contentLen, start int64) (ranges [][]int64) {
var i int64
lastByte := contentLen % urlCount
step := contentLen / urlCount
end := start + step
ranges = make([][]int64, urlCount)
if start == 0 {
end--
}
for i = 0; i < urlCount; i++ {
ranges[i] = make([]int64, 2)
if lastByte > 0 {
end++
lastByte--
}
ranges[i][0] = start
ranges[i][1] = end
start = end + 1
end = end + step
}
return ranges
}
func discardURL(urls []string, url string) []string {
for idx, tmpURL := range urls {
if url == tmpURL {
fmt.Printf("Discarding url %s ...\n", url)
return append(urls[:idx], urls[idx+1:]...)
}
}
return nil
}
func main() {
fmt.Println("ahellow")
}
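
This revision replaces the hand-rolled "ctx closed" error with ctx.Err(), which also records why the context ended. A small self-contained illustration of the two values it can take:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Cancellation: ctx.Err() yields context.Canceled.
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(ctx.Err()) // context canceled

	// Timeout: ctx.Err() yields context.DeadlineExceeded.
	ctx2, cancel2 := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel2()
	<-ctx2.Done()
	fmt.Println(ctx2.Err()) // context deadline exceeded
}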

Анатоли updated the solution on 03.01.2017 16:20 (more than a year ago)

package main
import (
"context"
"fmt"
"io"
"net/http"
"sort"
"sync"
)
type chunk struct {
data []byte
from int64
to int64
pos int64
url string
err error
ctxClosed bool
}
type comm struct {
jobs chan chunk
res chan chunk
done chan struct{}
}
type chunkSort []chunk
func (a chunkSort) Len() int { return len(a) }
func (a chunkSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a chunkSort) Less(i, j int) bool { return a[i].from < a[j].from }
type file struct {
chunks []chunk
size int64
currSize int64
pos int64
urls []string
comm *comm
err error
maxWorkers int
lock *sync.Mutex
}
func (f *file) Read(p []byte) (n int, err error) {
f.lock.Lock()
defer f.lock.Unlock()
if f.err != nil {
return f.readChunks(p), f.err
}
if f.pos == f.size && f.size > 0 {
return 0, io.EOF
}
return f.readChunks(p), nil
}
func (f *file) readChunks(p []byte) (n int) {
//fmt.Println("read chunks", len(p), f.pos, f.size)
prevTo := int64(0)
bytesToRead := len(p)
bytes := 0
if bytesToRead == 0 {
return 0
}
for _, c := range f.chunks {
if prevTo != c.from {
break
}
prevTo = c.pos
if f.pos > c.pos {
continue
}
relPos := int64(len(c.data)) - (c.pos - f.pos)
//fmt.Println(f.pos, c.pos, relPos, len(c.data))
for _, d := range c.data[relPos:] {
p[bytes] = d
bytes++
f.pos++
if bytes == bytesToRead {
return bytes
}
}
}
return bytes
}
func (f *file) addChunk(chunk chunk) (err error) {
+ if len(chunk.data) == 0 {
+ return nil
+ }
+
f.lock.Lock()
defer f.lock.Unlock()
f.chunks = append(f.chunks, chunk)
f.currSize += int64(len(chunk.data))
sort.Sort(chunkSort(f.chunks))
return nil
}
func (f *file) setErr(err error) {
f.lock.Lock()
defer f.lock.Unlock()
f.err = err
}
func (f *file) setSize(size int64) {
f.lock.Lock()
defer f.lock.Unlock()
f.size = size
}
func (f *file) isReady() bool {
return f.currSize == f.size && f.size > 0
}
// DownloadFile downloads the file served at urls concurrently and returns
// an io.Reader that yields the assembled bytes in order.
func DownloadFile(ctx context.Context, urls []string) io.Reader {
fmt.Println("---- NEW DOWNLOADER ----")
maxWorkers := len(urls)
urlCount := maxWorkers
if ctx == nil {
ctx = context.Background()
}
if tmpMaxWorkers, ok := ctx.Value("max-connections").(int); ok {
maxWorkers = tmpMaxWorkers
}
comm := &comm{
jobs: make(chan chunk, urlCount),
res: make(chan chunk, urlCount),
done: make(chan struct{}, 1),
}
// create downloaders
for i := 0; i < maxWorkers; i++ {
go downloadWorker(ctx, comm)
}
f := &file{
chunks: make([]chunk, 0),
lock: &sync.Mutex{},
comm: comm,
urls: urls,
maxWorkers: maxWorkers,
}
go startDownloading(ctx, f)
return f
}
func downloadWorker(ctx context.Context, comm *comm) {
var client http.Client
Breaking:
for {
select {
case chunk := <-comm.jobs:
to := chunk.to
from := chunk.from
chunk.pos = chunk.from
for {
rheader := fmt.Sprintf("bytes=%d-%d", from, to)
req, _ := http.NewRequest("GET", chunk.url, nil)
req.Header.Add("Range", rheader)
resp, rerr := client.Do(req)
if rerr != nil {
chunk.err = rerr
comm.res <- chunk
continue Breaking
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
resp.Body.Close()
chunk.err = fmt.Errorf("HTTP status %v", resp.StatusCode)
comm.res <- chunk
continue Breaking
}
buf := make([]byte, 1000)
var ioerr error
var read int
for {
read, ioerr = resp.Body.Read(buf)
if read > 0 {
chunk.data = append(chunk.data, buf[:read]...)
chunk.pos = chunk.pos + int64(len(buf[:read]))
}
if ioerr != nil {
break
}
select {
case <-ctx.Done():
resp.Body.Close()
chunk.err = ctx.Err()
chunk.ctxClosed = true
comm.res <- chunk
return
default:
}
}
resp.Body.Close()
if (ioerr == nil || ioerr == io.EOF) && (chunk.pos-1) == chunk.to {
break
}
from = chunk.pos
}
comm.res <- chunk
case <-comm.done:
return
case <-ctx.Done():
chunk := chunk{
data: make([]byte, 0),
}
chunk.err = ctx.Err()
chunk.ctxClosed = true
comm.res <- chunk
return
}
}
//fmt.Println("Exit worker...")
}
func startDownloading(ctx context.Context, f *file) {
// drive the workers and collect results until the file is complete
// while the file is not ready
defer close(f.comm.done)
contentLen, err := readContentLen(f.urls)
if err != nil {
fmt.Println("Failed to read content len from: ", f.urls)
f.setErr(err)
return
}
f.setSize(contentLen)
currUrls := f.urls
createDownloadJobs(currUrls, f.size, 0, f.comm.jobs)
ctxErrs := make([]error, 0)
for {
select {
case chunk := <-f.comm.res:
if chunk.err != nil {
fmt.Printf(
"Failed to download chunk %v-%v(%v): %v\n",
chunk.from,
chunk.to,
chunk.pos,
chunk.err,
)
- if len(chunk.data) > 0 {
- f.addChunk(chunk)
- }
+ f.addChunk(chunk)
if chunk.ctxClosed {
ctxErrs = append(ctxErrs, chunk.err)
- break
- }
+ } else {
+ currUrls = discardURL(currUrls, chunk.url)
+ if len(currUrls) == 0 {
+ f.setErr(fmt.Errorf("no valid urls"))
+ return
+ }
- currUrls = discardURL(currUrls, chunk.url)
-
- if len(currUrls) == 0 {
- f.setErr(fmt.Errorf("no valid urls"))
- return
+ currLen := chunk.to - chunk.pos
+ currStart := chunk.pos
+ createDownloadJobs(currUrls, currLen, currStart, f.comm.jobs)
}
-
- currLen := chunk.to - chunk.pos
- currStart := chunk.pos
- createDownloadJobs(currUrls, currLen, currStart, f.comm.jobs)
} else {
f.addChunk(chunk)
}
if f.isReady() {
return
}
case <-ctx.Done():
if len(ctxErrs) == f.maxWorkers {
f.setErr(ctxErrs[len(ctxErrs)-1])
return
}
}
}
}
func readContentLen(urls []string) (size int64, err error) {
for _, url := range urls {
resp, tmperr := http.Head(url)
if tmperr != nil {
err = fmt.Errorf("[ERROR] HEAD %v: %v", url, tmperr)
continue
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
err = fmt.Errorf("[ERROR] HEAD `%v`[%v]", url, resp.StatusCode)
continue
}
if resp.ContentLength == -1 || resp.ContentLength == 0 {
err = io.EOF
continue
}
size = resp.ContentLength
err = nil
break
}
return size, err
}
func createDownloadJobs(urls []string, currLen, start int64, jobsChan chan<- chunk) {
ranges := calcRanges(int64(len(urls)), currLen, start)
for idx, url := range urls {
r := ranges[idx]
fmt.Printf("New job for range %v-%v/%v : %v\n", r[0], r[1], currLen, url)
job := chunk{
data: make([]byte, 0),
from: r[0],
to: r[1],
url: url,
}
jobsChan <- job
}
}
func calcRanges(urlCount int64, contentLen, start int64) (ranges [][]int64) {
var i int64
lastByte := contentLen % urlCount
step := contentLen / urlCount
end := start + step
ranges = make([][]int64, urlCount)
if start == 0 {
end--
}
for i = 0; i < urlCount; i++ {
ranges[i] = make([]int64, 2)
if lastByte > 0 {
end++
lastByte--
}
ranges[i][0] = start
ranges[i][1] = end
start = end + 1
end = end + step
}
return ranges
}
func discardURL(urls []string, url string) []string {
for idx, tmpURL := range urls {
if url == tmpURL {
fmt.Printf("Discarding url %s ...\n", url)
return append(urls[:idx], urls[idx+1:]...)
}
}
return nil
}
func main() {
fmt.Println("ahellow")
}
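
For completeness, a hypothetical driver for the final version, shown as a stand-alone program that assumes DownloadFile from the solution is in scope; the "max-connections" key mirrors the one the solution reads from the context, and both URLs are placeholders:

package main

import (
	"context"
	"fmt"
	"io/ioutil"
)

func main() {
	// Cap the downloader at two concurrent connections via the same
	// context key DownloadFile inspects (a plain string key, matching
	// the solution's own usage).
	ctx := context.WithValue(context.Background(), "max-connections", 2)

	r := DownloadFile(ctx, []string{
		"http://example.com/file",        // placeholder mirror
		"http://mirror.example.com/file", // placeholder mirror
	})

	data, err := ioutil.ReadAll(r)
	fmt.Println(len(data), err)
}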