Commit 5f06455

Fix two issues affecting webseeds

1) Ensure web requests ask for data in order by fixing the piece picker to correctly use 'CanRequestMorePieces'.
2) Fix a bug in the handling of padding files. A request for data from a webseed needs to take padding files into account in all cases: a request can begin or end inside padding, and it can begin or end in regular files while optionally crossing one stretch of padding. The existing logic, which inserted synthetic requests for the padding file, was broken in a number of these cases.
1 parent 3992687 commit 5f06455
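
As a concrete illustration of the padding fix (hypothetical sizes, not taken from any real torrent): with file A of 1000 bytes followed by 24 bytes of padding and then file B, a webseed request for 1040 bytes starting at offset 990 must become three parts: 10 bytes from the end of A, 24 synthetic bytes covering the padding, and 1006 bytes from the start of B. The rewritten CreateWebRequests below performs exactly this decomposition, including ranges that begin or end inside the padding itself.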

File tree

8 files changed: +216 -123 lines changed

src/MonoTorrent.Client/MonoTorrent.Connections/HttpPeerConnection.cs

Lines changed: 30 additions & 24 deletions
@@ -337,45 +337,51 @@ static List<BlockInfo> DecodeMessages (ReadOnlySpan<byte> buffer)

         void CreateWebRequests (BlockInfo start, BlockInfo end)
         {
-            // Properly handle the case where we have multiple files
-            // This is only implemented for single file torrents
             Uri uri = Uri;

             if (Uri.OriginalString.EndsWith ("/"))
                 uri = new Uri (uri, $"{TorrentData.Name}/");

             // startOffset and endOffset are *inclusive*. I need to subtract '1' from the end index so that i
-            // stop at the correct byte when requesting the byte ranges from the server
+            // stop at the correct byte when requesting the byte ranges from the server.
+            //
+            // These values are also always relative to the *current* file as we iterate through the list of files.
             long startOffset = TorrentData.TorrentInfo!.PieceIndexToByteOffset (start.PieceIndex) + start.StartOffset;
-            long endOffset = TorrentData.TorrentInfo!.PieceIndexToByteOffset (end.PieceIndex) + end.StartOffset + end.RequestLength;
+            long count = TorrentData.TorrentInfo!.PieceIndexToByteOffset (end.PieceIndex) + end.StartOffset + end.RequestLength - startOffset;

             foreach (var file in TorrentData.Files) {
+                // Bail out after we've read all the data.
+                if (count == 0)
+                    break;
+
+                var lengthWithPadding = file.Length + file.Padding;
+                // If the first byte of data is from the next file, move to the next file immediately
+                // and adjust start offset to be relative to that file.
+                if (startOffset >= lengthWithPadding) {
+                    startOffset -= lengthWithPadding;
+                    continue;
+                }
+
                 Uri u = uri;
                 if (TorrentData.Files.Count > 1)
                     u = new Uri (u, file.Path);
-                if (endOffset == 0)
-                    break;

-                // We want data from a later file
-                if (startOffset >= file.Length) {
-                    startOffset -= file.Length;
-                    endOffset -= file.Length;
+                // Should data be read from this file?
+                if (startOffset < file.Length) {
+                    var toReadFromFile = Math.Min (count, file.Length - startOffset);
+                    WebRequests.Enqueue ((u, startOffset, toReadFromFile));
+                    count -= toReadFromFile;
                 }
-                // We want data from the end of the current file and from the next few files
-                else if (endOffset >= file.Length) {
-                    WebRequests.Enqueue ((u, startOffset, file.Length - startOffset));
-                    startOffset = 0;
-                    endOffset -= file.Length;
-                    if (file.Padding > 0) {
-                        WebRequests.Enqueue ((PaddingFileUri, 0, file.Padding));
-                        endOffset -= file.Padding;
-                    }
-                }
-                // All the data we want is from within this file
-                else {
-                    WebRequests.Enqueue ((u, startOffset, endOffset - startOffset));
-                    endOffset = 0;
+
+                // Should data be read from this file's padding?
+                if (file.Padding > 0 && count > 0) {
+                    var toReadFromPadding = Math.Min (count, file.Padding);
+                    WebRequests.Enqueue ((PaddingFileUri, 0, toReadFromPadding));
+                    count -= toReadFromPadding;
                 }
+
+                // As of the next read, we'll be reading data from the start of the file.
+                startOffset = 0;
             }
         }

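To see the new mapping in isolation: the loop walks the file list once and, for each file the range touches, emits at most one request into the file's real data and one into its padding, skipping files that lie entirely before the range. The following standalone sketch mirrors that decomposition; it is illustrative only, using plain (Length, Padding) tuples instead of MonoTorrent's file abstraction and yielding (file, offset, count) triples instead of enqueuing into WebRequests, with offset -1 marking a padding read.

using System;
using System.Collections.Generic;

static class WebseedRangeSplitter
{
    // Splits the byte range [startOffset, startOffset + count) across a list of files,
    // where each file may be followed by 'Padding' bytes that are not stored on the webseed.
    // Yields (fileIndex, offsetInFile, bytesToRead) for real data and uses offset -1 to mark
    // a stretch of synthetic padding bytes, mirroring the PaddingFileUri requests above.
    public static IEnumerable<(int File, long Offset, long Count)> Split (
        IReadOnlyList<(long Length, long Padding)> files, long startOffset, long count)
    {
        for (int i = 0; i < files.Count && count > 0; i++) {
            var (length, padding) = files[i];
            long lengthWithPadding = length + padding;

            // The range starts in a later file: skip this one and make the offset
            // relative to the next file, exactly as the patched loop does.
            if (startOffset >= lengthWithPadding) {
                startOffset -= lengthWithPadding;
                continue;
            }

            // Read whatever part of the range falls inside the real file data.
            if (startOffset < length) {
                long fromFile = Math.Min (count, length - startOffset);
                yield return (i, startOffset, fromFile);
                count -= fromFile;
            }

            // Then read whatever falls inside this file's padding.
            if (padding > 0 && count > 0) {
                long fromPadding = Math.Min (count, padding);
                yield return (i, -1, fromPadding);
                count -= fromPadding;
            }

            // Every later file is read from its first byte.
            startOffset = 0;
        }
    }
}

Splitting a 1040-byte range starting at offset 990 over files of (1000, 24) and (5000, 0) yields (0, 990, 10), (0, -1, 24) and (1, 0, 1006), matching the worked example above.
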
src/MonoTorrent.PiecePicking/MonoTorrent.PiecePicking/StandardPieceRequester.cs

Lines changed: 3 additions & 5 deletions
@@ -96,16 +96,14 @@ public void AddRequests (ReadOnlySpan<(IRequester Peer, ReadOnlyBitField Availab

         public void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySpan<ReadOnlyBitField> allPeers)
         {
-            int maxRequests = peer.MaxPendingRequests;
-
             if (!peer.CanRequestMorePieces || Picker == null || TorrentData == null || Enqueuer == null)
                 return;

             // This is safe to invoke. 'ContinueExistingRequest' strongly guarantees that a peer will only
             // continue a piece they have initiated. If they're choking then the only piece they can continue
             // will be a fast piece (if one exists!)
             if (!peer.IsChoking || peer.SupportsFastPeer) {
-                while (peer.AmRequestingPiecesCount < maxRequests) {
+                while (peer.CanRequestMorePieces) {
                     if (Picker.ContinueExistingRequest (peer, 0, available.Length - 1, out PieceSegment segment))
                         Enqueuer.EnqueueRequest (peer, segment);
                     else
@@ -123,7 +121,7 @@ public void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySp
             if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0)) {
                 ReadOnlyBitField? filtered = null;

-                while (peer.AmRequestingPiecesCount < maxRequests) {
+                while (peer.CanRequestMorePieces) {
                     filtered ??= ApplyIgnorables (available);

                     int requests = Picker.PickPiece (peer, filtered, allPeers, 0, TorrentData.PieceCount - 1, requestBuffer);
@@ -137,7 +135,7 @@ public void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySp
             if (!peer.IsChoking && peer.AmRequestingPiecesCount == 0) {
                 ReadOnlyBitField? filtered = null;
                 PieceSegment segment;
-                while (peer.AmRequestingPiecesCount < maxRequests) {
+                while (peer.CanRequestMorePieces) {
                     filtered ??= ApplyIgnorables (available);

                     if (Picker.ContinueAnyExistingRequest (peer, filtered, 0, TorrentData.PieceCount - 1, 1, out segment)) {
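
The piece picker change looks mechanical, but it is what lets a webseed keep its requests in order: instead of snapshotting peer.MaxPendingRequests once and looping on AmRequestingPiecesCount, each iteration now asks the peer whether another piece may be requested, so the peer's own policy is re-evaluated after every enqueued request. A hypothetical, simplified illustration of the difference (these types are not MonoTorrent's IRequester):

// Illustrative model only: 'CanRequestMorePieces' lets each peer type encode its own
// limit instead of the picker comparing against a one-time snapshot of MaxPendingRequests.
interface ISimpleRequester
{
    int AmRequestingPiecesCount { get; }
    int MaxPendingRequests { get; }
    bool CanRequestMorePieces { get; }
}

// A regular peer allows requests up to its pending-request limit.
sealed class RegularPeer : ISimpleRequester
{
    public int AmRequestingPiecesCount { get; set; }
    public int MaxPendingRequests => 32;
    public bool CanRequestMorePieces => AmRequestingPiecesCount < MaxPendingRequests;
}

// A webseed-style peer might refuse further pieces while one is still in flight,
// keeping the resulting HTTP byte ranges contiguous and in order.
sealed class SequentialWebSeed : ISimpleRequester
{
    public int AmRequestingPiecesCount { get; set; }
    public int MaxPendingRequests => 4;
    public bool CanRequestMorePieces => AmRequestingPiecesCount == 0;
}

With the old loop both peer types would have been driven purely by the numeric limit; with the new loop the second type stops the picker after a single piece even though its MaxPendingRequests is higher.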

src/MonoTorrent.PiecePicking/MonoTorrent.PiecePicking/StreamingPieceRequester.cs

Lines changed: 5 additions & 9 deletions
@@ -160,17 +160,13 @@ void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySpan<Read
                 return;

             int preferredRequestAmount = peer.PreferredRequestAmount (TorrentData.PieceLength);
-            var maxRequests = Math.Min (preferredMaxRequests, peer.MaxPendingRequests);
-
-            if (peer.AmRequestingPiecesCount >= maxRequests)
-                return;
-
+            int maxTotalRequests = Math.Min (preferredMaxRequests, peer.MaxPendingRequests);
             // FIXME: Add a test to ensure we do not unintentionally request blocks off peers which are choking us.
             // This used to say if (!peer.IsChoing || peer.SupportsFastPeer), and with the recent changes we might
             // not actually guarantee that 'ContinueExistingRequest' or 'ContinueAnyExistingRequest' properly takes
             // into account that a peer which is choking us can *only* resume a 'fast piece' in the 'AmAllowedfastPiece' list.
             if (!peer.IsChoking) {
-                while (peer.AmRequestingPiecesCount < maxRequests) {
+                while (peer.CanRequestMorePieces && peer.AmRequestingPiecesCount < maxTotalRequests) {
                     if (LowPriorityPicker!.ContinueAnyExistingRequest (peer, available, startPieceIndex, endPieceIndex, maxDuplicates, out PieceSegment request))
                         Enqueuer.EnqueueRequest(peer, request);
                     else
@@ -180,7 +176,7 @@ void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySpan<Read

             // If the peer supports fast peer and they are choking us, they'll still send pieces in the allowed fast set.
             if (peer.SupportsFastPeer && peer.IsChoking) {
-                while (peer.AmRequestingPiecesCount < maxRequests) {
+                while (peer.CanRequestMorePieces && peer.AmRequestingPiecesCount < maxTotalRequests) {
                     if (LowPriorityPicker!.ContinueExistingRequest (peer, startPieceIndex, endPieceIndex, out PieceSegment segment))
                         Enqueuer.EnqueueRequest (peer, segment);
                     else
@@ -193,9 +189,9 @@ void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySpan<Read
             // FIXME add a test for this.
             if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0)) {
                 BitField filtered = null!;
-                while (peer.AmRequestingPiecesCount < maxRequests) {
+                while (peer.CanRequestMorePieces && peer.AmRequestingPiecesCount < maxTotalRequests) {
                     filtered ??= GenerateAlreadyHaves ().Not ().And (available);
-                    Span<PieceSegment> buffer = stackalloc PieceSegment[preferredRequestAmount];
+                    Span<PieceSegment> buffer = stackalloc PieceSegment[maxTotalRequests - peer.AmRequestingPiecesCount];
                     int requested = PriorityPick (peer, filtered, allPeers, startPieceIndex, endPieceIndex, buffer);
                     if (requested > 0) {
                         Enqueuer.EnqueueRequests (peer, buffer.Slice (0, requested));
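
The streaming requester keeps its own cap (preferredMaxRequests) in addition to the peer's policy, and the stackalloc buffer passed to PriorityPick is now sized to the remaining budget rather than to preferredRequestAmount, so a single pass can never pick more segments than the cap allows. A small standalone sketch of that budget arithmetic (hypothetical helper, not MonoTorrent API):

using System;

static class RequestBudget
{
    // Remaining number of piece requests allowed in this pass: capped by both the
    // requester's preferred maximum and the peer's advertised maximum, minus the
    // requests already in flight (clamped at zero).
    public static int Remaining (int preferredMaxRequests, int maxPendingRequests, int amRequestingPiecesCount)
    {
        int maxTotalRequests = Math.Min (preferredMaxRequests, maxPendingRequests);
        return Math.Max (0, maxTotalRequests - amRequestingPiecesCount);
    }
}

For example, Remaining (4, 256, 3) is 1, so the buffer holds a single PieceSegment for that iteration and the loop exits once it is filled.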

src/Tests/Tests.MonoTorrent.IntegrationTests/IntegrationTests_FakePieceWriter.cs

Lines changed: 36 additions & 36 deletions
@@ -348,48 +348,48 @@ private void OnHttpContext (IAsyncResult ar)
             try {
                 ctx = _httpSeeder.EndGetContext (ar);
                 _httpSeeder.BeginGetContext (OnHttpContext, ar.AsyncState);
-            } catch {
-                // Do nothing!
-                return;
-            }

-            var localPath = ctx.Request.Url.LocalPath;
-            string relativeSeedingPath = $"/{_webSeedPrefix}/{_torrentName}/";
-            if (_failHttpRequest) {
-                _failHttpRequest = false;
-                ctx.Response.StatusCode = 500;
-                ctx.Response.Close ();
-            } else if (!localPath.Contains (relativeSeedingPath)) {
-                ctx.Response.StatusCode = 404;
-                ctx.Response.Close ();
-            } else {
-                var fileName = localPath.Replace (relativeSeedingPath, string.Empty);
-                var files = _seederDir.GetFiles ();
-                var file = files.FirstOrDefault (x => x.Name == fileName);
-                if (file == null) {
-                    ctx.Response.StatusCode = 406;
+                var localPath = ctx.Request.Url.LocalPath;
+                string relativeSeedingPath = $"/{_webSeedPrefix}/{_torrentName}/";
+                if (_failHttpRequest) {
+                    _failHttpRequest = false;
+                    ctx.Response.StatusCode = 500;
+                    ctx.Response.Close ();
+                } else if (!localPath.Contains (relativeSeedingPath)) {
+                    ctx.Response.StatusCode = 404;
                     ctx.Response.Close ();
                 } else {
-                    using FileStream fs = new FileStream (file.FullName, FileMode.Open, FileAccess.Read, FileShare.ReadWrite | FileShare.Delete);
-                    long start = 0;
-                    long end = fs.Length - 1;
-                    var rangeHeader = ctx.Request.Headers["Range"];
-                    if (rangeHeader != null) {
-                        var startAndEnd = rangeHeader.Replace ("bytes=", "").Split ('-');
-                        start = long.Parse (startAndEnd[0]);
-                        end = long.Parse (startAndEnd[1]);
-                    }
-                    var buffer = new byte[end - start + 1];
-                    fs.Seek (start, SeekOrigin.Begin);
-                    if (fs.Read (buffer, 0, buffer.Length) == buffer.Length) {
-                        ctx.Response.OutputStream.Write (buffer, 0, buffer.Length);
-                        ctx.Response.OutputStream.Close ();
-                    } else {
-                        ctx.Response.StatusCode = 405;
+                    var fileName = localPath.Replace (relativeSeedingPath, string.Empty);
+                    var files = _seederDir.GetFiles ();
+                    var file = files.FirstOrDefault (x => x.Name == fileName);
+                    if (file == null) {
+                        ctx.Response.StatusCode = 406;
                         ctx.Response.Close ();
+                    } else {
+                        using FileStream fs = new FileStream (file.FullName, FileMode.Open, FileAccess.Read, FileShare.ReadWrite | FileShare.Delete);
+                        long start = 0;
+                        long end = fs.Length - 1;
+                        var rangeHeader = ctx.Request.Headers["Range"];
+                        if (rangeHeader != null) {
+                            var startAndEnd = rangeHeader.Replace ("bytes=", "").Split ('-');
+                            start = long.Parse (startAndEnd[0]);
+                            end = long.Parse (startAndEnd[1]);
+                        }
+                        var buffer = new byte[end - start + 1];
+                        fs.Seek (start, SeekOrigin.Begin);
+                        if (fs.Read (buffer, 0, buffer.Length) == buffer.Length) {
+                            ctx.Response.OutputStream.Write (buffer, 0, buffer.Length);
+                            ctx.Response.OutputStream.Close ();
+                        } else {
+                            ctx.Response.StatusCode = 405;
+                            ctx.Response.Close ();
+                        }
                     }
                 }
-            }

+                }
+            } catch {
+                // Do nothing!
+                return;
             }
         }

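The fake seeder above only handles the simple bytes=start-end form of the Range header, which is why its parsing is so terse. A standalone sketch of that same parsing, with the limitation spelled out (hypothetical helper, not part of the test suite):

using System;

static class RangeHeader
{
    // Minimal sketch matching what the test seeder accepts: a single "bytes=start-end"
    // range with both bounds present and inclusive. Real servers must also handle the
    // "bytes=start-" and "bytes=-suffix" forms; this helper deliberately does not.
    public static (long Start, long End) Parse (string headerValue, long resourceLength)
    {
        if (string.IsNullOrEmpty (headerValue))
            return (0, resourceLength - 1); // No Range header: serve the whole resource.

        var parts = headerValue.Replace ("bytes=", "").Split ('-');
        return (long.Parse (parts[0]), long.Parse (parts[1]));
    }
}

Parse ("bytes=990-2029", 4096) returns (990, 2029); the seeder then replies with end - start + 1 = 1040 bytes, exactly as the inner if/else in the test does.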