use chunks for cleanup

rootdarkarchon
2023-11-17 00:27:51 +01:00
parent 41e73d6fba
commit 8c8c3509d3
4 changed files with 66 additions and 44 deletions

View File

@@ -16,7 +16,6 @@ using Microsoft.AspNetCore.SignalR;
using Microsoft.EntityFrameworkCore;
using System.Collections.Concurrent;
using System.Security.Cryptography;
-using System.Security.Policy;
using System.Text.Json;
using System.Text.RegularExpressions;
@@ -230,6 +229,7 @@ public class ServerFilesController : ControllerBase
finally
{
fileLock.Release();
+fileLock.Dispose();
}
}
@@ -305,6 +305,7 @@ public class ServerFilesController : ControllerBase
finally
{
fileLock.Release();
+fileLock.Dispose();
}
}
@@ -388,6 +389,7 @@ public class ServerFilesController : ControllerBase
finally
{
fileLock.Release();
+fileLock.Dispose();
}
}
}
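
The three hunks above pair each fileLock.Release() with a fileLock.Dispose() in the same finally block; the IDisposableAnalyzers package added in the next file flags SemaphoreSlim instances that are never disposed. A minimal sketch of the pattern, with hypothetical names (FileLockExample, RunLockedAsync) that are not from this commit:

using System;
using System.Threading;
using System.Threading.Tasks;

public static class FileLockExample
{
    public static async Task RunLockedAsync(Func<Task> work)
    {
        // one semaphore per operation in this sketch, so disposing right
        // after Release() is safe: no other caller can still be waiting on it
        var fileLock = new SemaphoreSlim(1, 1);
        await fileLock.WaitAsync().ConfigureAwait(false);
        try
        {
            await work().ConfigureAwait(false);
        }
        finally
        {
            fileLock.Release();
            fileLock.Dispose();
        }
    }
}

Release() followed immediately by Dispose() is only safe when no other caller can still be waiting on the same semaphore.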

View File

@@ -18,6 +18,10 @@
</ItemGroup>
<ItemGroup>
+<PackageReference Include="IDisposableAnalyzers" Version="4.0.7">
+<PrivateAssets>all</PrivateAssets>
+<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
+</PackageReference>
<PackageReference Include="lz4net" Version="1.0.15.93" />
<PackageReference Include="Meziantou.Analyzer" Version="2.0.93">
<PrivateAssets>all</PrivateAssets>

View File

@@ -164,54 +164,66 @@ public class FileCleanupService : IHostedService
var prevTimeForcedDeletion = DateTime.Now.Subtract(TimeSpan.FromHours(forcedDeletionAfterHours));
DirectoryInfo dir = new(_cacheDir);
var allFilesInDir = dir.GetFiles("*", SearchOption.AllDirectories);
-var allFiles = await dbContext.Files.ToListAsync().ConfigureAwait(false);
-int fileCounter = 0;
-foreach (var fileCache in allFiles.Where(f => f.Uploaded))
+int filesToTake = 10000;
+var filesChunk = await dbContext.Files.Take(filesToTake).ToListAsync().ConfigureAwait(false);
+int iterations = 1;
+var allFiles = new List<FileCache>();
+while (filesChunk.Any())
{
-    var file = FilePathUtil.GetFileInfoForHash(_cacheDir, fileCache.Hash);
-    bool fileDeleted = false;
-    if (file == null && _isMainServer)
-    {
-        _logger.LogInformation("File does not exist anymore: {fileName}", fileCache.Hash);
-        dbContext.Files.Remove(fileCache);
-        fileDeleted = true;
-    }
-    else if (file != null && file.LastAccessTime < prevTime)
-    {
-        _metrics.DecGauge(MetricsAPI.GaugeFilesTotalSize, file.Length);
-        _metrics.DecGauge(MetricsAPI.GaugeFilesTotal);
-        _logger.LogInformation("File outdated: {fileName}, {fileSize}MiB", file.Name, ByteSize.FromBytes(file.Length).MebiBytes);
-        file.Delete();
-        if (_isMainServer)
-        {
-            fileDeleted = true;
-            dbContext.Files.Remove(fileCache);
-        }
-    }
-    else if (file != null && forcedDeletionAfterHours > 0 && file.LastWriteTime < prevTimeForcedDeletion)
-    {
-        _metrics.DecGauge(MetricsAPI.GaugeFilesTotalSize, file.Length);
-        _metrics.DecGauge(MetricsAPI.GaugeFilesTotal);
-        _logger.LogInformation("File forcefully deleted: {fileName}, {fileSize}MiB", file.Name, ByteSize.FromBytes(file.Length).MebiBytes);
-        file.Delete();
-        if (_isMainServer)
-        {
-            fileDeleted = true;
-            dbContext.Files.Remove(fileCache);
-        }
-    }
-    if (_isMainServer && !fileDeleted && file != null && fileCache.Size == 0)
-    {
-        _logger.LogInformation("Setting File Size of " + fileCache.Hash + " to " + file.Length);
-        fileCache.Size = file.Length;
-        // commit every 1000 files to db
-        if (fileCounter % 1000 == 0) await dbContext.SaveChangesAsync().ConfigureAwait(false);
-    }
-    fileCounter++;
-    ct.ThrowIfCancellationRequested();
+    int fileCounter = 0;
+    foreach (var fileCache in filesChunk.Where(f => f.Uploaded))
+    {
+        bool fileDeleted = false;
+        var file = FilePathUtil.GetFileInfoForHash(_cacheDir, fileCache.Hash);
+        if (file == null && _isMainServer)
+        {
+            _logger.LogInformation("File does not exist anymore: {fileName}", fileCache.Hash);
+            dbContext.Files.Remove(fileCache);
+            fileDeleted = true;
+        }
+        else if (file != null && file.LastAccessTime < prevTime)
+        {
+            _metrics.DecGauge(MetricsAPI.GaugeFilesTotalSize, file.Length);
+            _metrics.DecGauge(MetricsAPI.GaugeFilesTotal);
+            _logger.LogInformation("File outdated: {fileName}, {fileSize}MiB", file.Name, ByteSize.FromBytes(file.Length).MebiBytes);
+            file.Delete();
+            if (_isMainServer)
+            {
+                fileDeleted = true;
+                dbContext.Files.Remove(fileCache);
+            }
+        }
+        else if (file != null && forcedDeletionAfterHours > 0 && file.LastWriteTime < prevTimeForcedDeletion)
+        {
+            _metrics.DecGauge(MetricsAPI.GaugeFilesTotalSize, file.Length);
+            _metrics.DecGauge(MetricsAPI.GaugeFilesTotal);
+            _logger.LogInformation("File forcefully deleted: {fileName}, {fileSize}MiB", file.Name, ByteSize.FromBytes(file.Length).MebiBytes);
+            file.Delete();
+            if (_isMainServer)
+            {
+                fileDeleted = true;
+                dbContext.Files.Remove(fileCache);
+            }
+        }
+        if (_isMainServer && !fileDeleted && file != null && fileCache.Size == 0)
+        {
+            _logger.LogInformation("Setting File Size of " + fileCache.Hash + " to " + file.Length);
+            fileCache.Size = file.Length;
+            // commit every 1000 files to db
+            if (fileCounter % 1000 == 0) await dbContext.SaveChangesAsync().ConfigureAwait(false);
+        }
+        fileCounter++;
+        ct.ThrowIfCancellationRequested();
+    }
+    allFiles.AddRange(filesChunk);
+    filesChunk = await dbContext.Files.Skip(filesToTake * iterations).Take(filesToTake).ToListAsync(cancellationToken: ct).ConfigureAwait(false);
+    iterations++;
}
// clean up files that are on disk but not in DB for some reason
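
The rewritten loop above processes the Files table in chunks of 10000 rows via Skip/Take instead of materializing every row at once. A minimal sketch of that chunked-read pattern, assuming hypothetical AppDbContext and FileCache types in place of the real ones:

using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.EntityFrameworkCore;

public static class ChunkedCleanupExample
{
    public static async Task ProcessInChunksAsync(AppDbContext dbContext, CancellationToken ct)
    {
        const int chunkSize = 10000;
        int iterations = 0;
        List<FileCache> chunk;
        do
        {
            // ordering by a stable key keeps Skip/Take pages deterministic;
            // the commit itself pages without an explicit OrderBy
            chunk = await dbContext.Files
                .OrderBy(f => f.Hash)
                .Skip(chunkSize * iterations)
                .Take(chunkSize)
                .ToListAsync(ct)
                .ConfigureAwait(false);

            foreach (var fileCache in chunk)
            {
                ct.ThrowIfCancellationRequested();
                // per-file cleanup work goes here
            }

            iterations++;
        } while (chunk.Count == chunkSize);
    }
}

Keeping each page bounded caps memory use no matter how large the table grows.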

View File

@@ -40,12 +40,16 @@ public sealed class BlockFileDataStream : Stream
protected override void Dispose(bool disposing)
{
-    base.Dispose(disposing);
-    foreach (var substream in _substreams)
-    {
-        // probably unnecessary but better safe than sorry
-        substream.Dispose();
-    }
+    if (disposing)
+    {
+        foreach (var substream in _substreams)
+        {
+            // probably unnecessary but better safe than sorry
+            substream.Dispose();
+        }
+    }
+    base.Dispose(disposing);
}
public override void Flush()
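
The hunk above moves the substream cleanup behind an if (disposing) check and calls base.Dispose(disposing) last, following the standard Dispose(bool) pattern: managed members are only disposed when the call comes from Dispose(), never from a finalizer. A minimal sketch with a hypothetical stand-in type:

using System;
using System.Collections.Generic;
using System.IO;

public sealed class SubstreamOwner : IDisposable
{
    private readonly List<Stream> _substreams = new();
    private bool _disposed;

    public void Dispose()
    {
        Dispose(disposing: true);
        GC.SuppressFinalize(this);
    }

    private void Dispose(bool disposing)
    {
        if (_disposed) return;
        if (disposing)
        {
            // managed resources: only touch them on an explicit Dispose()
            foreach (var substream in _substreams)
            {
                substream.Dispose();
            }
        }
        _disposed = true;
    }
}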