
Commit 4a8d703

Fix two issues affecting webseeds
1) Ensure web requests ask for data in order by fixing the piecepicker to correctly use 'CanRequestMorePieces'. 2) Fix a bug in the handling of padding files. A request for data from a webseed needs to take padding files into account in all cases: a request can begin or end inside padding, and it can also begin or end in regular files while optionally crossing one stretch of padding. The existing logic for inserting synthetic requests to the padding file was therefore broken in a number of cases.
1 parent 3992687 commit 4a8d703
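The padding cases described in the commit message can be sketched in isolation. The FakeFile record and SplitRequest helper below are hypothetical illustrations only (they are not MonoTorrent types); they just show how a byte range that begins or ends inside padding, or crosses one stretch of padding, decomposes into per-file web requests plus synthetic reads of the padding file.

using System;
using System.Collections.Generic;

// Hypothetical stand-ins for illustration only; not MonoTorrent types.
record FakeFile (string Uri, long Length, long Padding);

static class PaddingSplitExample
{
    // Split a request covering [offset, offset + count) of the concatenated
    // (file + padding) stream into (uri, fileOffset, length) chunks.
    // "padding" chunks are reads the client must synthesise rather than download.
    public static IEnumerable<(string Uri, long Offset, long Length)> SplitRequest (
        IReadOnlyList<FakeFile> files, long offset, long count)
    {
        foreach (var file in files) {
            if (count == 0)
                yield break;

            long span = file.Length + file.Padding;
            if (offset >= span) {
                offset -= span;           // the request starts in a later file
                continue;
            }

            // Bytes taken from the real file (zero if the request starts inside padding).
            long fromFile = Math.Max (0, Math.Min (file.Length - offset, count));
            if (fromFile > 0)
                yield return (file.Uri, offset, fromFile);

            // Bytes taken from the padding after this file (synthetic zeroes).
            long fromPadding = Math.Min (count - fromFile, span - Math.Max (offset, file.Length));
            if (fromPadding > 0)
                yield return ("padding", 0, fromPadding);

            count -= fromFile + fromPadding;
            offset = 0;                   // subsequent data starts at byte 0 of the next file
        }
    }

    static void Main ()
    {
        var files = new[] {
            new FakeFile ("a.bin", Length: 1000, Padding: 24),
            new FakeFile ("b.bin", Length: 500, Padding: 0),
        };
        // A request that starts inside a.bin, crosses its padding and ends inside b.bin.
        foreach (var chunk in SplitRequest (files, offset: 990, count: 100))
            Console.WriteLine (chunk);    // (a.bin, 990, 10), (padding, 0, 24), (b.bin, 0, 66)
    }
}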

9 files changed (+217, -108 lines)

src/MonoTorrent.Client/MonoTorrent.Client/PeerId.PieceRequester.cs

Lines changed: 1 addition & 0 deletions
@@ -61,6 +61,7 @@ bool IRequester.CanRequestMorePieces {

         int IRequester.PreferredRequestAmount (int pieceLength)
         {
+            return 1;
             if (Connection is HttpPeerConnection) {
                 // How many whole pieces fit into 2MB
                 var count = (2 * 1024 * 1024) / pieceLength;
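The added return 1; short-circuits the method, so every requester now prefers a single piece per pick; the branch below it sizes webseed requests by how many whole pieces fit into 2 MiB. A minimal sketch of that arithmetic, assuming a 256 KiB piece length (the constant and formula come from the context lines above; the sample value is an assumption):

using System;

// Sketch only: the 2 MiB heuristic shown in the context lines, evaluated for an assumed piece length.
int pieceLength = 256 * 1024;                      // assumed example: 256 KiB pieces
int count = (2 * 1024 * 1024) / pieceLength;       // whole pieces that fit into 2 MiB
Console.WriteLine (count);                         // prints 8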

src/MonoTorrent.Client/MonoTorrent.Connections/HttpPeerConnection.cs

Lines changed: 29 additions & 11 deletions
@@ -357,23 +357,41 @@ void CreateWebRequests (BlockInfo start, BlockInfo end)
                     break;

                 // We want data from a later file
-                if (startOffset >= file.Length) {
-                    startOffset -= file.Length;
-                    endOffset -= file.Length;
+                if (startOffset >= file.Length + file.Padding) {
+                    startOffset -= file.Length + file.Padding;
+                    endOffset -= file.Length + file.Padding;
                 }
                 // We want data from the end of the current file and from the next few files
-                else if (endOffset >= file.Length) {
-                    WebRequests.Enqueue ((u, startOffset, file.Length - startOffset));
-                    startOffset = 0;
-                    endOffset -= file.Length;
+                else if (endOffset >= (file.Length + file.Padding)) {
+                    // Maybe we need data from the actaul file?
+                    if (file.Length - startOffset < 0) {
+                        WebRequests.Enqueue ((u, startOffset, file.Length - startOffset));
+                        startOffset += file.Length - startOffset;
+                    }
+                    // and/or maybe we need data from the padding
                     if (file.Padding > 0) {
-                        WebRequests.Enqueue ((PaddingFileUri, 0, file.Padding));
-                        endOffset -= file.Padding;
+                        var paddingToRead = Math.Min (endOffset, file.Padding);
+                        WebRequests.Enqueue ((PaddingFileUri, 0, paddingToRead));
+                        endOffset -= paddingToRead;
                     }
+                    // either way, the next chunk of data comes from byte 0 in the next file
+                    startOffset = 0;
+                    endOffset -= file.Length - file.Padding - startOffset;
                 }
-                // All the data we want is from within this file
+                // All the data we want is from within this file or it's padding
                 else {
-                    WebRequests.Enqueue ((u, startOffset, endOffset - startOffset));
+                    var fileToRead = Math.Min (endOffset, file.Length) - startOffset;
+                    WebRequests.Enqueue ((u, startOffset, fileToRead));
+                    startOffset += fileToRead;
+
+                    if (endOffset - startOffset > 0) {
+                        var paddingToRead = Math.Min (endOffset - startOffset, file.Padding);
+                        if (paddingToRead < 0)
+                            throw new ArgumentNullException ();
+                        if (paddingToRead > 0)
+                            WebRequests.Enqueue ((PaddingFileUri, 0, paddingToRead));
+
+                    }
                     endOffset = 0;
                 }
             }

src/MonoTorrent.PiecePicking/MonoTorrent.PiecePicking/StandardPieceRequester.cs

Lines changed: 3 additions & 5 deletions
@@ -96,16 +96,14 @@ public void AddRequests (ReadOnlySpan<(IRequester Peer, ReadOnlyBitField Availab

         public void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySpan<ReadOnlyBitField> allPeers)
         {
-            int maxRequests = peer.MaxPendingRequests;
-
             if (!peer.CanRequestMorePieces || Picker == null || TorrentData == null || Enqueuer == null)
                 return;

             // This is safe to invoke. 'ContinueExistingRequest' strongly guarantees that a peer will only
             // continue a piece they have initiated. If they're choking then the only piece they can continue
             // will be a fast piece (if one exists!)
             if (!peer.IsChoking || peer.SupportsFastPeer) {
-                while (peer.AmRequestingPiecesCount < maxRequests) {
+                while (peer.CanRequestMorePieces) {
                     if (Picker.ContinueExistingRequest (peer, 0, available.Length - 1, out PieceSegment segment))
                         Enqueuer.EnqueueRequest (peer, segment);
                     else

@@ -123,7 +121,7 @@ public void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySp
             if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0)) {
                 ReadOnlyBitField? filtered = null;

-                while (peer.AmRequestingPiecesCount < maxRequests) {
+                while (peer.CanRequestMorePieces) {
                     filtered ??= ApplyIgnorables (available);

                     int requests = Picker.PickPiece (peer, filtered, allPeers, 0, TorrentData.PieceCount - 1, requestBuffer);

@@ -137,7 +135,7 @@ public void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySp
             if (!peer.IsChoking && peer.AmRequestingPiecesCount == 0) {
                 ReadOnlyBitField? filtered = null;
                 PieceSegment segment;
-                while (peer.AmRequestingPiecesCount < maxRequests) {
+                while (peer.CanRequestMorePieces) {
                     filtered ??= ApplyIgnorables (available);

                     if (Picker.ContinueAnyExistingRequest (peer, filtered, 0, TorrentData.PieceCount - 1, 1, out segment)) {
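All three hunks above swap the hand-written peer.AmRequestingPiecesCount < maxRequests comparison for the requester's own CanRequestMorePieces property, which is also what guards the early return at the top of the method. A minimal sketch of the resulting loop shape, assuming CanRequestMorePieces roughly encapsulates the old comparison against MaxPendingRequests (the trimmed-down FakeRequester below is illustrative, not MonoTorrent's IRequester):

using System;

// Hypothetical, trimmed-down requester used only to illustrate the loop shape;
// MonoTorrent's real IRequester has more members than this.
class FakeRequester
{
    public int AmRequestingPiecesCount { get; set; }
    public int MaxPendingRequests { get; set; } = 4;

    // Assumed to encapsulate the comparison the old code wrote out by hand.
    public bool CanRequestMorePieces => AmRequestingPiecesCount < MaxPendingRequests;
}

static class RequestLoopSketch
{
    // The loop shape after this commit: keep asking the picker while the requester
    // itself reports spare capacity, instead of caching 'maxRequests' up front.
    public static void AddRequests (FakeRequester peer, Func<bool> tryPickAndEnqueue)
    {
        while (peer.CanRequestMorePieces) {
            if (!tryPickAndEnqueue ())
                break;                        // picker had nothing further to offer
            peer.AmRequestingPiecesCount++;
        }
    }

    static void Main ()
    {
        var peer = new FakeRequester ();
        AddRequests (peer, () => true);                   // always "picks" something
        Console.WriteLine (peer.AmRequestingPiecesCount); // prints 4
    }
}

Because the property is re-read on every iteration, a requester whose capacity changes mid-loop stops requesting immediately.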

src/MonoTorrent.PiecePicking/MonoTorrent.PiecePicking/StreamingPieceRequester.cs

Lines changed: 3 additions & 7 deletions
@@ -160,17 +160,13 @@ void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySpan<Read
                 return;

             int preferredRequestAmount = peer.PreferredRequestAmount (TorrentData.PieceLength);
-            var maxRequests = Math.Min (preferredMaxRequests, peer.MaxPendingRequests);
-
-            if (peer.AmRequestingPiecesCount >= maxRequests)
-                return;

             // FIXME: Add a test to ensure we do not unintentionally request blocks off peers which are choking us.
             // This used to say if (!peer.IsChoing || peer.SupportsFastPeer), and with the recent changes we might
             // not actually guarantee that 'ContinueExistingRequest' or 'ContinueAnyExistingRequest' properly takes
             // into account that a peer which is choking us can *only* resume a 'fast piece' in the 'AmAllowedfastPiece' list.
             if (!peer.IsChoking) {
-                while (peer.AmRequestingPiecesCount < maxRequests) {
+                while (peer.CanRequestMorePieces) {
                     if (LowPriorityPicker!.ContinueAnyExistingRequest (peer, available, startPieceIndex, endPieceIndex, maxDuplicates, out PieceSegment request))
                         Enqueuer.EnqueueRequest(peer, request);
                     else

@@ -180,7 +176,7 @@ void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySpan<Read

             // If the peer supports fast peer and they are choking us, they'll still send pieces in the allowed fast set.
             if (peer.SupportsFastPeer && peer.IsChoking) {
-                while (peer.AmRequestingPiecesCount < maxRequests) {
+                while (peer.CanRequestMorePieces) {
                     if (LowPriorityPicker!.ContinueExistingRequest (peer, startPieceIndex, endPieceIndex, out PieceSegment segment))
                         Enqueuer.EnqueueRequest (peer, segment);
                     else

@@ -193,7 +189,7 @@ void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySpan<Read
             // FIXME add a test for this.
             if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0)) {
                 BitField filtered = null!;
-                while (peer.AmRequestingPiecesCount < maxRequests) {
+                while (peer.CanRequestMorePieces) {
                     filtered ??= GenerateAlreadyHaves ().Not ().And (available);
                     Span<PieceSegment> buffer = stackalloc PieceSegment[preferredRequestAmount];
                     int requested = PriorityPick (peer, filtered, allPeers, startPieceIndex, endPieceIndex, buffer);
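One context line above, Span<PieceSegment> buffer = stackalloc PieceSegment[preferredRequestAmount];, shows where PreferredRequestAmount feeds into picking: the stack buffer handed to the picker holds at most that many segments per pass. A tiny illustration of that sizing, using plain ints in place of MonoTorrent's PieceSegment struct and assuming the value of 1 now returned by the first diff in this commit:

using System;

static class BufferSizingSketch
{
    static void Main ()
    {
        // PeerId.PieceRequester.PreferredRequestAmount now returns 1 (see the first diff),
        // so the picker's per-pass stack buffer holds a single segment.
        int preferredRequestAmount = 1;

        // Stand-in for 'stackalloc PieceSegment[preferredRequestAmount]' in the context line above;
        // plain ints are used here instead of MonoTorrent's PieceSegment struct.
        Span<int> buffer = stackalloc int[preferredRequestAmount];

        Console.WriteLine ($"Picker batch size this pass: {buffer.Length}");
    }
}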

src/Tests/Tests.MonoTorrent.IntegrationTests/IntegrationTests_FakePieceWriter.cs

Lines changed: 36 additions & 36 deletions
@@ -348,48 +348,48 @@ private void OnHttpContext (IAsyncResult ar)
             try {
                 ctx = _httpSeeder.EndGetContext (ar);
                 _httpSeeder.BeginGetContext (OnHttpContext, ar.AsyncState);
-            } catch {
-                // Do nothing!
-                return;
-            }

-            var localPath = ctx.Request.Url.LocalPath;
-            string relativeSeedingPath = $"/{_webSeedPrefix}/{_torrentName}/";
-            if (_failHttpRequest) {
-                _failHttpRequest = false;
-                ctx.Response.StatusCode = 500;
-                ctx.Response.Close ();
-            } else if (!localPath.Contains (relativeSeedingPath)) {
-                ctx.Response.StatusCode = 404;
-                ctx.Response.Close ();
-            } else {
-                var fileName = localPath.Replace (relativeSeedingPath, string.Empty);
-                var files = _seederDir.GetFiles ();
-                var file = files.FirstOrDefault (x => x.Name == fileName);
-                if (file == null) {
-                    ctx.Response.StatusCode = 406;
+                var localPath = ctx.Request.Url.LocalPath;
+                string relativeSeedingPath = $"/{_webSeedPrefix}/{_torrentName}/";
+                if (_failHttpRequest) {
+                    _failHttpRequest = false;
+                    ctx.Response.StatusCode = 500;
+                    ctx.Response.Close ();
+                } else if (!localPath.Contains (relativeSeedingPath)) {
+                    ctx.Response.StatusCode = 404;
                     ctx.Response.Close ();
                 } else {
-                    using FileStream fs = new FileStream (file.FullName, FileMode.Open, FileAccess.Read, FileShare.ReadWrite | FileShare.Delete);
-                    long start = 0;
-                    long end = fs.Length - 1;
-                    var rangeHeader = ctx.Request.Headers["Range"];
-                    if (rangeHeader != null) {
-                        var startAndEnd = rangeHeader.Replace ("bytes=", "").Split ('-');
-                        start = long.Parse (startAndEnd[0]);
-                        end = long.Parse (startAndEnd[1]);
-                    }
-                    var buffer = new byte[end - start + 1];
-                    fs.Seek (start, SeekOrigin.Begin);
-                    if (fs.Read (buffer, 0, buffer.Length) == buffer.Length) {
-                        ctx.Response.OutputStream.Write (buffer, 0, buffer.Length);
-                        ctx.Response.OutputStream.Close ();
-                    } else {
-                        ctx.Response.StatusCode = 405;
+                    var fileName = localPath.Replace (relativeSeedingPath, string.Empty);
+                    var files = _seederDir.GetFiles ();
+                    var file = files.FirstOrDefault (x => x.Name == fileName);
+                    if (file == null) {
+                        ctx.Response.StatusCode = 406;
                         ctx.Response.Close ();
+                    } else {
+                        using FileStream fs = new FileStream (file.FullName, FileMode.Open, FileAccess.Read, FileShare.ReadWrite | FileShare.Delete);
+                        long start = 0;
+                        long end = fs.Length - 1;
+                        var rangeHeader = ctx.Request.Headers["Range"];
+                        if (rangeHeader != null) {
+                            var startAndEnd = rangeHeader.Replace ("bytes=", "").Split ('-');
+                            start = long.Parse (startAndEnd[0]);
+                            end = long.Parse (startAndEnd[1]);
+                        }
+                        var buffer = new byte[end - start + 1];
+                        fs.Seek (start, SeekOrigin.Begin);
+                        if (fs.Read (buffer, 0, buffer.Length) == buffer.Length) {
+                            ctx.Response.OutputStream.Write (buffer, 0, buffer.Length);
+                            ctx.Response.OutputStream.Close ();
+                        } else {
+                            ctx.Response.StatusCode = 405;
+                            ctx.Response.Close ();
+                        }
                     }
                 }
-            }

+                }
+            } catch {
+                // Do nothing!
+                return;
             }
         }

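The fake webseed above serves partial content by parsing the HTTP Range header as bytes=start-end with both bounds inclusive; the hunk itself mostly re-indents the handler so that the whole body, not just EndGetContext, now sits inside the try/catch. A standalone sketch of the range arithmetic, using the same parsing approach as the test helper and a hypothetical header value:

using System;

static class RangeHeaderSketch
{
    static void Main ()
    {
        // Hypothetical value of ctx.Request.Headers["Range"] for 100 bytes starting at offset 100.
        string rangeHeader = "bytes=100-199";

        // Same approach as the test helper: strip "bytes=", split on '-', treat both bounds as inclusive.
        var startAndEnd = rangeHeader.Replace ("bytes=", "").Split ('-');
        long start = long.Parse (startAndEnd[0]);
        long end = long.Parse (startAndEnd[1]);

        var buffer = new byte[end - start + 1];   // inclusive range, hence the +1
        Console.WriteLine (buffer.Length);        // prints 100
    }
}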