Fix bug where DD would exit prematurely

pull/19/head
Netshroud 9 years ago
parent ab9ca2a7da
commit b17e343d0e
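
The bug: the old download loop handed an async lambda to PLINQ's ForAll. ForAll takes an Action<T>, so the lambda compiles to async void and ForAll returns as soon as every lambda has been started, not when the awaited downloads inside them have finished; DepotDownloader could therefore fall through to cleanup and exit while chunks were still being fetched. The fix below wraps each file's work in Task.Run, stores the resulting Task objects in an array, and awaits Task.WhenAll(tasks) so execution only continues once every file has been processed. A minimal standalone sketch of the difference (the class and item names here are illustrative, not part of the DepotDownloader code):

// Illustrative only: contrasts the old fire-and-forget pattern with the new awaited one.
using System;
using System.Linq;
using System.Threading.Tasks;

class PrematureExitSketch
{
    static async Task Main()
    {
        var items = Enumerable.Range(0, 5).ToArray();

        // Old pattern: ForAll takes an Action<T>, so the async lambda becomes async void.
        // ForAll returns once the lambdas have started; nothing awaits the work inside them.
        items.AsParallel().ForAll(async i =>
        {
            await Task.Delay(500);
            Console.WriteLine("ForAll item {0} done", i);   // may never run before the process exits
        });
        Console.WriteLine("ForAll returned, work may still be in flight");

        // New pattern: keep one Task per item and await them all before continuing.
        var tasks = items.Select(i => Task.Run(async () =>
        {
            await Task.Delay(500);
            Console.WriteLine("WhenAll item {0} done", i);
        })).ToArray();

        await Task.WhenAll(tasks);                          // completes only once every item is done
        Console.WriteLine("all items complete");
    }
}

A side effect of the same change: exceptions thrown inside the old async-void lambdas never propagated back to the ForAll caller, whereas exceptions from the collected tasks are rethrown when Task.WhenAll is awaited and can be handled there.
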

@@ -3,12 +3,10 @@ using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Net.Sockets;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading;
using SteamKit2;
using System.Collections.Concurrent;
using System.Threading.Tasks;
namespace DepotDownloader
@@ -442,7 +440,7 @@ namespace DepotDownloader
foreach (var depot in depotIDs)
{
DepotDownloadInfo info = GetDepotInfo(depot, appId, branch);
var info = GetDepotInfo(depot, appId, branch);
if (info != null)
{
infos.Add(info);
@@ -678,210 +676,220 @@ namespace DepotDownloader
});
var semaphore = new SemaphoreSlim(Config.MaxDownloads);
filesAfterExclusions.Where(f => !f.Flags.HasFlag(EDepotFileFlag.Directory))
.AsParallel().WithCancellation(cts.Token).WithDegreeOfParallelism(Config.MaxDownloads)
.ForAll(async file =>
var files = filesAfterExclusions.Where(f => !f.Flags.HasFlag(EDepotFileFlag.Directory)).ToArray();
var tasks = new Task[files.Length];
for (var i = 0; i < files.Length; i++)
{
try
var file = files[i];
var task = Task.Run(async () =>
{
await semaphore.WaitAsync().ConfigureAwait(false);
cts.Token.ThrowIfCancellationRequested();
string fileFinalPath = Path.Combine(depot.installDir, file.FileName);
string fileStagingPath = Path.Combine(stagingDir, file.FileName);
// This may still exist if the previous run exited before cleanup
if (File.Exists(fileStagingPath))
try
{
File.Delete(fileStagingPath);
}
await semaphore.WaitAsync().ConfigureAwait(false);
FileStream fs = null;
List<ProtoManifest.ChunkData> neededChunks;
FileInfo fi = new FileInfo(fileFinalPath);
if (!fi.Exists)
{
// create new file. need all chunks
fs = File.Create(fileFinalPath);
fs.SetLength((long)file.TotalSize);
neededChunks = new List<ProtoManifest.ChunkData>(file.Chunks);
}
else
{
// open existing
ProtoManifest.FileData oldManifestFile = null;
if (oldProtoManifest != null)
string fileFinalPath = Path.Combine(depot.installDir, file.FileName);
string fileStagingPath = Path.Combine(stagingDir, file.FileName);
// This may still exist if the previous run exited before cleanup
if (File.Exists(fileStagingPath))
{
oldManifestFile = oldProtoManifest.Files.SingleOrDefault(f => f.FileName == file.FileName);
File.Delete(fileStagingPath);
}
if (oldManifestFile != null)
FileStream fs = null;
List<ProtoManifest.ChunkData> neededChunks;
FileInfo fi = new FileInfo(fileFinalPath);
if (!fi.Exists)
{
neededChunks = new List<ProtoManifest.ChunkData>();
if (Config.VerifyAll || !oldManifestFile.FileHash.SequenceEqual(file.FileHash))
// create new file. need all chunks
fs = File.Create(fileFinalPath);
fs.SetLength((long)file.TotalSize);
neededChunks = new List<ProtoManifest.ChunkData>(file.Chunks);
}
else
{
// open existing
ProtoManifest.FileData oldManifestFile = null;
if (oldProtoManifest != null)
{
// we have a version of this file, but it doesn't fully match what we want
oldManifestFile = oldProtoManifest.Files.SingleOrDefault(f => f.FileName == file.FileName);
}
var matchingChunks = new List<ChunkMatch>();
if (oldManifestFile != null)
{
neededChunks = new List<ProtoManifest.ChunkData>();
foreach (var chunk in file.Chunks)
if (Config.VerifyAll || !oldManifestFile.FileHash.SequenceEqual(file.FileHash))
{
var oldChunk = oldManifestFile.Chunks.FirstOrDefault(c => c.ChunkID.SequenceEqual(chunk.ChunkID));
if (oldChunk != null)
{
matchingChunks.Add(new ChunkMatch(oldChunk, chunk));
}
else
{
neededChunks.Add(chunk);
}
}
File.Move(fileFinalPath, fileStagingPath);
// we have a version of this file, but it doesn't fully match what we want
fs = File.Open(fileFinalPath, FileMode.Create);
fs.SetLength((long)file.TotalSize);
var matchingChunks = new List<ChunkMatch>();
using (var fsOld = File.Open(fileStagingPath, FileMode.Open))
{
foreach (var match in matchingChunks)
foreach (var chunk in file.Chunks)
{
fsOld.Seek((long)match.OldChunk.Offset, SeekOrigin.Begin);
byte[] tmp = new byte[match.OldChunk.UncompressedLength];
fsOld.Read(tmp, 0, tmp.Length);
byte[] adler = Util.AdlerHash(tmp);
if (!adler.SequenceEqual(match.OldChunk.Checksum))
var oldChunk = oldManifestFile.Chunks.FirstOrDefault(c => c.ChunkID.SequenceEqual(chunk.ChunkID));
if (oldChunk != null)
{
neededChunks.Add(match.NewChunk);
matchingChunks.Add(new ChunkMatch(oldChunk, chunk));
}
else
{
fs.Seek((long)match.NewChunk.Offset, SeekOrigin.Begin);
fs.Write(tmp, 0, tmp.Length);
neededChunks.Add(chunk);
}
}
}
File.Delete(fileStagingPath);
}
}
else
{
// No old manifest or file not in old manifest. We must validate.
File.Move(fileFinalPath, fileStagingPath);
fs = File.Open(fileFinalPath, FileMode.Open);
if ((ulong)fi.Length != file.TotalSize)
{
fs.SetLength((long)file.TotalSize);
}
fs = File.Open(fileFinalPath, FileMode.Create);
fs.SetLength((long)file.TotalSize);
neededChunks = Util.ValidateSteam3FileChecksums(fs, file.Chunks.OrderBy(x => x.Offset).ToArray());
}
if (neededChunks.Count() == 0)
{
size_downloaded += file.TotalSize;
Console.WriteLine("{0,6:#00.00}% {1}", ((float)size_downloaded / (float)complete_download_size) * 100.0f, fileFinalPath);
if (fs != null)
fs.Dispose();
return;
}
else
{
size_downloaded += (file.TotalSize - (ulong)neededChunks.Select(x => (long)x.UncompressedLength).Sum());
}
}
using (var fsOld = File.Open(fileStagingPath, FileMode.Open))
{
foreach (var match in matchingChunks)
{
fsOld.Seek((long)match.OldChunk.Offset, SeekOrigin.Begin);
byte[] tmp = new byte[match.OldChunk.UncompressedLength];
fsOld.Read(tmp, 0, tmp.Length);
byte[] adler = Util.AdlerHash(tmp);
if (!adler.SequenceEqual(match.OldChunk.Checksum))
{
neededChunks.Add(match.NewChunk);
}
else
{
fs.Seek((long)match.NewChunk.Offset, SeekOrigin.Begin);
fs.Write(tmp, 0, tmp.Length);
}
}
}
foreach (var chunk in neededChunks)
{
if (cts.IsCancellationRequested) break;
File.Delete(fileStagingPath);
}
}
else
{
// No old manifest or file not in old manifest. We must validate.
string chunkID = Util.EncodeHexString(chunk.ChunkID);
CDNClient.DepotChunk chunkData = null;
fs = File.Open(fileFinalPath, FileMode.Open);
if ((ulong)fi.Length != file.TotalSize)
{
fs.SetLength((long)file.TotalSize);
}
while (!cts.IsCancellationRequested)
{
CDNClient client;
try
neededChunks = Util.ValidateSteam3FileChecksums(fs, file.Chunks.OrderBy(x => x.Offset).ToArray());
}
if (neededChunks.Count() == 0)
{
client = await cdnPool.GetConnectionForDepotAsync(appId, depot.id, depot.depotKey, cts.Token).ConfigureAwait(false);
size_downloaded += file.TotalSize;
Console.WriteLine("{0,6:#00.00}% {1}", ((float)size_downloaded / (float)complete_download_size) * 100.0f, fileFinalPath);
if (fs != null)
fs.Dispose();
return;
}
catch (OperationCanceledException)
else
{
break;
size_downloaded += (file.TotalSize - (ulong)neededChunks.Select(x => (long)x.UncompressedLength).Sum());
}
}
foreach (var chunk in neededChunks)
{
if (cts.IsCancellationRequested) break;
DepotManifest.ChunkData data = new DepotManifest.ChunkData();
data.ChunkID = chunk.ChunkID;
data.Checksum = chunk.Checksum;
data.Offset = chunk.Offset;
data.CompressedLength = chunk.CompressedLength;
data.UncompressedLength = chunk.UncompressedLength;
string chunkID = Util.EncodeHexString(chunk.ChunkID);
CDNClient.DepotChunk chunkData = null;
try
{
chunkData = await client.DownloadDepotChunkAsync(depot.id, data).ConfigureAwait(false);
cdnPool.ReturnConnection(client);
break;
}
catch (WebException e)
while (!cts.IsCancellationRequested)
{
cdnPool.ReturnBrokenConnection(client);
CDNClient client;
try
{
client = await cdnPool.GetConnectionForDepotAsync(appId, depot.id, depot.depotKey, cts.Token).ConfigureAwait(false);
}
catch (OperationCanceledException)
{
break;
}
DepotManifest.ChunkData data = new DepotManifest.ChunkData();
data.ChunkID = chunk.ChunkID;
data.Checksum = chunk.Checksum;
data.Offset = chunk.Offset;
data.CompressedLength = chunk.CompressedLength;
data.UncompressedLength = chunk.UncompressedLength;
if (e.Status == WebExceptionStatus.ProtocolError)
try
{
var response = e.Response as HttpWebResponse;
if (response.StatusCode == HttpStatusCode.Unauthorized || response.StatusCode == HttpStatusCode.Forbidden)
chunkData = await client.DownloadDepotChunkAsync(depot.id, data).ConfigureAwait(false);
cdnPool.ReturnConnection(client);
break;
}
catch (WebException e)
{
cdnPool.ReturnBrokenConnection(client);
if (e.Status == WebExceptionStatus.ProtocolError)
{
Console.WriteLine("Encountered 401 for chunk {0}. Aborting.", chunkID);
cts.Cancel();
break;
var response = e.Response as HttpWebResponse;
if (response.StatusCode == HttpStatusCode.Unauthorized || response.StatusCode == HttpStatusCode.Forbidden)
{
Console.WriteLine("Encountered 401 for chunk {0}. Aborting.", chunkID);
cts.Cancel();
break;
}
else
{
Console.WriteLine("Encountered error downloading chunk {0}: {1}", chunkID, response.StatusCode);
}
}
else
{
Console.WriteLine("Encountered error downloading chunk {0}: {1}", chunkID, response.StatusCode);
Console.WriteLine("Encountered error downloading chunk {0}: {1}", chunkID, e.Status);
}
}
else
catch (Exception e)
{
Console.WriteLine("Encountered error downloading chunk {0}: {1}", chunkID, e.Status);
cdnPool.ReturnBrokenConnection(client);
Console.WriteLine("Encountered unexpected error downloading chunk {0}: {1}", chunkID, e.Message);
}
}
catch (Exception e)
if (chunkData == null)
{
cdnPool.ReturnBrokenConnection(client);
Console.WriteLine("Encountered unexpected error downloading chunk {0}: {1}", chunkID, e.Message);
Console.WriteLine("Failed to find any server with chunk {0} for depot {1}. Aborting.", chunkID, depot.id);
return;
}
}
if (chunkData == null)
{
Console.WriteLine("Failed to find any server with chunk {0} for depot {1}. Aborting.", chunkID, depot.id);
return;
}
TotalBytesCompressed += chunk.CompressedLength;
DepotBytesCompressed += chunk.CompressedLength;
TotalBytesUncompressed += chunk.UncompressedLength;
DepotBytesUncompressed += chunk.UncompressedLength;
fs.Seek((long)chunk.Offset, SeekOrigin.Begin);
fs.Write(chunkData.Data, 0, chunkData.Data.Length);
TotalBytesCompressed += chunk.CompressedLength;
DepotBytesCompressed += chunk.CompressedLength;
TotalBytesUncompressed += chunk.UncompressedLength;
DepotBytesUncompressed += chunk.UncompressedLength;
size_downloaded += chunk.UncompressedLength;
}
fs.Seek((long)chunk.Offset, SeekOrigin.Begin);
fs.Write(chunkData.Data, 0, chunkData.Data.Length);
fs.Dispose();
size_downloaded += chunk.UncompressedLength;
Console.WriteLine("{0,6:#00.00}% {1}", ((float)size_downloaded / (float)complete_download_size) * 100.0f, fileFinalPath);
}
finally
{
semaphore.Release();
}
});
fs.Dispose();
tasks[i] = task;
}
Console.WriteLine("{0,6:#00.00}% {1}", ((float)size_downloaded / (float)complete_download_size) * 100.0f, fileFinalPath);
}
finally
{
semaphore.Release();
}
});
await Task.WhenAll(tasks).ConfigureAwait(false);
ConfigStore.TheConfig.LastManifests[depot.id] = depot.manifestId;
ConfigStore.Save();
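
Concurrency remains capped in the rewritten loop: each Task.Run body waits on the shared SemaphoreSlim(Config.MaxDownloads) before touching the file and releases it in a finally block, so at most Config.MaxDownloads downloads run at once even though a Task is created per file. A sketch of that bounded-concurrency shape (maxDownloads and the pretend download stand in for Config.MaxDownloads and the real chunk logic):

// Illustrative only: the bounded-concurrency shape of the rewritten loop.
using System;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;

class BoundedDownloadSketch
{
    static async Task Main()
    {
        const int maxDownloads = 4;                       // stands in for Config.MaxDownloads
        var files = Enumerable.Range(0, 20).ToArray();    // stands in for the depot's file list
        var semaphore = new SemaphoreSlim(maxDownloads);

        var tasks = files.Select(file => Task.Run(async () =>
        {
            await semaphore.WaitAsync().ConfigureAwait(false);  // at most maxDownloads tasks get past this point
            try
            {
                await Task.Delay(100).ConfigureAwait(false);    // pretend chunk download + write
                Console.WriteLine("downloaded file {0}", file);
            }
            finally
            {
                semaphore.Release();                            // release even if the download throws
            }
        })).ToArray();

        await Task.WhenAll(tasks).ConfigureAwait(false);
    }
}
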
