@@ -337,45 +337,51 @@ static List<BlockInfo> DecodeMessages (ReadOnlySpan<byte> buffer)

 void CreateWebRequests (BlockInfo start, BlockInfo end)
 {
-    // Properly handle the case where we have multiple files
-    // This is only implemented for single file torrents
     Uri uri = Uri;
 
     if (Uri.OriginalString.EndsWith ("/"))
         uri = new Uri (uri, $"{TorrentData.Name}/");
 
     // startOffset and endOffset are *inclusive*. I need to subtract '1' from the end index so that I
-    // stop at the correct byte when requesting the byte ranges from the server
+    // stop at the correct byte when requesting the byte ranges from the server.
+    //
+    // These values are also always relative to the *current* file as we iterate through the list of files.
     long startOffset = TorrentData.TorrentInfo!.PieceIndexToByteOffset (start.PieceIndex) + start.StartOffset;
-    long endOffset = TorrentData.TorrentInfo!.PieceIndexToByteOffset (end.PieceIndex) + end.StartOffset + end.RequestLength;
+    long count = TorrentData.TorrentInfo!.PieceIndexToByteOffset (end.PieceIndex) + end.StartOffset + end.RequestLength - startOffset;
 
     foreach (var file in TorrentData.Files) {
+        // Bail out after we've read all the data.
+        if (count == 0)
+            break;
+
+        var lengthWithPadding = file.Length + file.Padding;
+        // If the first byte of data is from the next file, move to the next file immediately
+        // and adjust start offset to be relative to that file.
+        if (startOffset >= lengthWithPadding) {
+            startOffset -= lengthWithPadding;
+            continue;
+        }
+
         Uri u = uri;
         if (TorrentData.Files.Count > 1)
             u = new Uri (u, file.Path);
-        if (endOffset == 0)
-            break;
 
-        // We want data from a later file
-        if (startOffset >= file.Length) {
-            startOffset -= file.Length;
-            endOffset -= file.Length;
+        // Should data be read from this file?
+        if (startOffset < file.Length) {
+            var toReadFromFile = Math.Min (count, file.Length - startOffset);
+            WebRequests.Enqueue ((u, startOffset, toReadFromFile));
+            count -= toReadFromFile;
         }
-        // We want data from the end of the current file and from the next few files
-        else if (endOffset >= file.Length) {
-            WebRequests.Enqueue ((u, startOffset, file.Length - startOffset));
-            startOffset = 0;
-            endOffset -= file.Length;
-            if (file.Padding > 0) {
-                WebRequests.Enqueue ((PaddingFileUri, 0, file.Padding));
-                endOffset -= file.Padding;
-            }
-        }
-        // All the data we want is from within this file
-        else {
-            WebRequests.Enqueue ((u, startOffset, endOffset - startOffset));
-            endOffset = 0;
+
+        // Should data be read from this file's padding?
+        if (file.Padding > 0 && count > 0) {
+            var toReadFromPadding = Math.Min (count, file.Padding);
+            WebRequests.Enqueue ((PaddingFileUri, 0, toReadFromPadding));
+            count -= toReadFromPadding;
+        }
+
+        // As of the next read, we'll be reading data from the start of the file.
+        startOffset = 0;
     }
 }
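
The rewritten loop tracks a single `count` of bytes still needed instead of juggling `endOffset`, walks the file list once, and emits one web request per overlapping file, plus synthetic requests against `PaddingFileUri` for BEP 47 padding bytes. A self-contained sketch of the same carving logic, using a hypothetical `TorrentFile` record in place of MonoTorrent's real file abstraction:

using System;
using System.Collections.Generic;

// Hypothetical stand-in: MonoTorrent exposes Length/Padding via its own file type.
record TorrentFile (string Path, long Length, long Padding);

static class RangeCarver
{
    // Split a byte range (startOffset into the whole torrent, count bytes long)
    // into per-file reads, mirroring the loop in CreateWebRequests above.
    public static IEnumerable<(string Path, long Offset, long Count)> Carve (
        IReadOnlyList<TorrentFile> files, long startOffset, long count)
    {
        foreach (var file in files) {
            // All requested bytes have been accounted for.
            if (count == 0)
                break;

            // Skip files (including their padding) that end before the range starts.
            long lengthWithPadding = file.Length + file.Padding;
            if (startOffset >= lengthWithPadding) {
                startOffset -= lengthWithPadding;
                continue;
            }

            // Overlap with the file's real bytes.
            if (startOffset < file.Length) {
                long toRead = Math.Min (count, file.Length - startOffset);
                yield return (file.Path, startOffset, toRead);
                count -= toRead;
            }

            // Overlap with the file's padding: zero bytes served locally,
            // never requested from the web seed.
            if (file.Padding > 0 && count > 0) {
                long fromPadding = Math.Min (count, file.Padding);
                yield return ("<padding>", 0, fromPadding);
                count -= fromPadding;
            }

            // Every subsequent file is read from its first byte.
            startOffset = 0;
        }
    }
}

For example, with files A (1,000 bytes plus 24 bytes of padding) and B (2,000 bytes), Carve (files, 900, 200) yields (A, 900, 100), then 24 padding bytes, then (B, 0, 76).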

@@ -96,16 +96,14 @@ public void AddRequests (ReadOnlySpan<(IRequester Peer, ReadOnlyBitField Availab

 public void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySpan<ReadOnlyBitField> allPeers)
 {
-    int maxRequests = peer.MaxPendingRequests;
-
     if (!peer.CanRequestMorePieces || Picker == null || TorrentData == null || Enqueuer == null)
         return;
 
     // This is safe to invoke. 'ContinueExistingRequest' strongly guarantees that a peer will only
     // continue a piece they have initiated. If they're choking then the only piece they can continue
     // will be a fast piece (if one exists!)
     if (!peer.IsChoking || peer.SupportsFastPeer) {
-        while (peer.AmRequestingPiecesCount < maxRequests) {
+        while (peer.CanRequestMorePieces) {
             if (Picker.ContinueExistingRequest (peer, 0, available.Length - 1, out PieceSegment segment))
                 Enqueuer.EnqueueRequest (peer, segment);
             else
@@ -123,7 +121,7 @@ public void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySp
     if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0)) {
         ReadOnlyBitField? filtered = null;
 
-        while (peer.AmRequestingPiecesCount < maxRequests) {
+        while (peer.CanRequestMorePieces) {
             filtered ??= ApplyIgnorables (available);
 
             int requests = Picker.PickPiece (peer, filtered, allPeers, 0, TorrentData.PieceCount - 1, requestBuffer);
@@ -137,7 +135,7 @@ public void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySp
     if (!peer.IsChoking && peer.AmRequestingPiecesCount == 0) {
         ReadOnlyBitField? filtered = null;
         PieceSegment segment;
-        while (peer.AmRequestingPiecesCount < maxRequests) {
+        while (peer.CanRequestMorePieces) {
             filtered ??= ApplyIgnorables (available);
 
             if (Picker.ContinueAnyExistingRequest (peer, filtered, 0, TorrentData.PieceCount - 1, 1, out segment)) {
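
All three request loops in this file now ask `peer.CanRequestMorePieces` on every iteration instead of comparing against a `maxRequests` value captured once at the top of the method, so the cap always reflects the peer's live state. The property's body is not part of this diff; presumably it encapsulates roughly this check:

// Hypothetical sketch only; the real implementation is not shown in this diff.
public bool CanRequestMorePieces => AmRequestingPiecesCount < MaxPendingRequests;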
@@ -160,17 +160,13 @@ void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySpan<Read
         return;
 
     int preferredRequestAmount = peer.PreferredRequestAmount (TorrentData.PieceLength);
-    var maxRequests = Math.Min (preferredMaxRequests, peer.MaxPendingRequests);
-
-    if (peer.AmRequestingPiecesCount >= maxRequests)
-        return;
-
+    int maxTotalRequests = Math.Min (preferredMaxRequests, peer.MaxPendingRequests);
     // FIXME: Add a test to ensure we do not unintentionally request blocks off peers which are choking us.
     // This used to say if (!peer.IsChoking || peer.SupportsFastPeer), and with the recent changes we might
     // not actually guarantee that 'ContinueExistingRequest' or 'ContinueAnyExistingRequest' properly takes
     // into account that a peer which is choking us can *only* resume a 'fast piece' in the 'IsAllowedFastPieces' list.
     if (!peer.IsChoking) {
-        while (peer.AmRequestingPiecesCount < maxRequests) {
+        while (peer.CanRequestMorePieces && peer.AmRequestingPiecesCount < maxTotalRequests) {
             if (LowPriorityPicker!.ContinueAnyExistingRequest (peer, available, startPieceIndex, endPieceIndex, maxDuplicates, out PieceSegment request))
                 Enqueuer.EnqueueRequest (peer, request);
             else
@@ -180,7 +176,7 @@ void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySpan<Read

     // If the peer supports fast peer and they are choking us, they'll still send pieces in the allowed fast set.
     if (peer.SupportsFastPeer && peer.IsChoking) {
-        while (peer.AmRequestingPiecesCount < maxRequests) {
+        while (peer.CanRequestMorePieces && peer.AmRequestingPiecesCount < maxTotalRequests) {
             if (LowPriorityPicker!.ContinueExistingRequest (peer, startPieceIndex, endPieceIndex, out PieceSegment segment))
                 Enqueuer.EnqueueRequest (peer, segment);
             else
@@ -193,9 +189,9 @@ void AddRequests (IRequester peer, ReadOnlyBitField available, ReadOnlySpan<Read
     // FIXME add a test for this.
     if (!peer.IsChoking || (peer.SupportsFastPeer && peer.IsAllowedFastPieces.Count > 0)) {
         BitField filtered = null!;
-        while (peer.AmRequestingPiecesCount < maxRequests) {
+        while (peer.CanRequestMorePieces && peer.AmRequestingPiecesCount < maxTotalRequests) {
             filtered ??= GenerateAlreadyHaves ().Not ().And (available);
-            Span<PieceSegment> buffer = stackalloc PieceSegment[preferredRequestAmount];
+            Span<PieceSegment> buffer = stackalloc PieceSegment[maxTotalRequests - peer.AmRequestingPiecesCount];
             int requested = PriorityPick (peer, filtered, allPeers, startPieceIndex, endPieceIndex, buffer);
             if (requested > 0) {
                 Enqueuer.EnqueueRequests (peer, buffer.Slice (0, requested));
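
Sizing the `stackalloc` buffer to `maxTotalRequests - peer.AmRequestingPiecesCount` instead of `preferredRequestAmount` means a single `PriorityPick` call can never return more segments than the peer is still allowed to queue. A worked example of the arithmetic, with assumed numbers:

using System;

// Assumed numbers, for illustration only.
int preferredMaxRequests = 16;   // cap passed in by the caller
int maxPendingRequests = 12;     // the peer's advertised limit
int amRequestingPiecesCount = 9; // requests already in flight

int maxTotalRequests = Math.Min (preferredMaxRequests, maxPendingRequests); // 12
int bufferLength = maxTotalRequests - amRequestingPiecesCount;              // 3

// The old fixed-size buffer could hold more segments than the remaining
// budget; sizing it to the remaining budget makes overshooting impossible.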
@@ -348,48 +348,48 @@ private void OnHttpContext (IAsyncResult ar)
     try {
         ctx = _httpSeeder.EndGetContext (ar);
         _httpSeeder.BeginGetContext (OnHttpContext, ar.AsyncState);
-    } catch {
-        // Do nothing!
-        return;
-    }
 
-    var localPath = ctx.Request.Url.LocalPath;
-    string relativeSeedingPath = $"/{_webSeedPrefix}/{_torrentName}/";
-    if (_failHttpRequest) {
-        _failHttpRequest = false;
-        ctx.Response.StatusCode = 500;
-        ctx.Response.Close ();
-    } else if (!localPath.Contains (relativeSeedingPath)) {
-        ctx.Response.StatusCode = 404;
-        ctx.Response.Close ();
-    } else {
-        var fileName = localPath.Replace (relativeSeedingPath, string.Empty);
-        var files = _seederDir.GetFiles ();
-        var file = files.FirstOrDefault (x => x.Name == fileName);
-        if (file == null) {
-            ctx.Response.StatusCode = 406;
-            ctx.Response.Close ();
-        } else {
-            using FileStream fs = new FileStream (file.FullName, FileMode.Open, FileAccess.Read, FileShare.ReadWrite | FileShare.Delete);
-            long start = 0;
-            long end = fs.Length - 1;
-            var rangeHeader = ctx.Request.Headers["Range"];
-            if (rangeHeader != null) {
-                var startAndEnd = rangeHeader.Replace ("bytes=", "").Split ('-');
-                start = long.Parse (startAndEnd[0]);
-                end = long.Parse (startAndEnd[1]);
-            }
-            var buffer = new byte[end - start + 1];
-            fs.Seek (start, SeekOrigin.Begin);
-            if (fs.Read (buffer, 0, buffer.Length) == buffer.Length) {
-                ctx.Response.OutputStream.Write (buffer, 0, buffer.Length);
-                ctx.Response.OutputStream.Close ();
-            } else {
-                ctx.Response.StatusCode = 405;
-                ctx.Response.Close ();
-            }
-        }
-    }
+        var localPath = ctx.Request.Url.LocalPath;
+        string relativeSeedingPath = $"/{_webSeedPrefix}/{_torrentName}/";
+        if (_failHttpRequest) {
+            _failHttpRequest = false;
+            ctx.Response.StatusCode = 500;
+            ctx.Response.Close ();
+        } else if (!localPath.Contains (relativeSeedingPath)) {
+            ctx.Response.StatusCode = 404;
+            ctx.Response.Close ();
+        } else {
+            var fileName = localPath.Replace (relativeSeedingPath, string.Empty);
+            var files = _seederDir.GetFiles ();
+            var file = files.FirstOrDefault (x => x.Name == fileName);
+            if (file == null) {
+                ctx.Response.StatusCode = 406;
+                ctx.Response.Close ();
+            } else {
+                using FileStream fs = new FileStream (file.FullName, FileMode.Open, FileAccess.Read, FileShare.ReadWrite | FileShare.Delete);
+                long start = 0;
+                long end = fs.Length - 1;
+                var rangeHeader = ctx.Request.Headers["Range"];
+                if (rangeHeader != null) {
+                    var startAndEnd = rangeHeader.Replace ("bytes=", "").Split ('-');
+                    start = long.Parse (startAndEnd[0]);
+                    end = long.Parse (startAndEnd[1]);
+                }
+                var buffer = new byte[end - start + 1];
+                fs.Seek (start, SeekOrigin.Begin);
+                if (fs.Read (buffer, 0, buffer.Length) == buffer.Length) {
+                    ctx.Response.OutputStream.Write (buffer, 0, buffer.Length);
+                    ctx.Response.OutputStream.Close ();
+                } else {
+                    ctx.Response.StatusCode = 405;
+                    ctx.Response.Close ();
+                }
+            }
+        }
 
+    } catch {
+        // Do nothing!
+        return;
+    }
 }
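
The test seeder's Range handling parses only the fully specified, inclusive `bytes=start-end` form; an open-ended header such as `bytes=500-` would make `long.Parse` throw on the empty second half, presumably acceptable because the client under test always sends both bounds. The inclusive-bounds arithmetic:

// Example of the inclusive-range arithmetic used above.
var rangeHeader = "bytes=1048576-2097151";
var startAndEnd = rangeHeader.Replace ("bytes=", "").Split ('-');
long start = long.Parse (startAndEnd[0]); // 1048576
long end = long.Parse (startAndEnd[1]);   // 2097151
long count = end - start + 1;             // 1048576 bytes, i.e. one 1 MiB piece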
