Mirror of https://github.com/chylex/Nextcloud-Desktop.git (synced 2025-05-11 20:34:09 +02:00)
Fix warnings about signedness
Sizes are always qint64, not unsigned. TransferIds are always uint.
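To illustrate the class of warning being silenced, here is a minimal standalone sketch (hypothetical code, not taken from this commit or repository; the file name and values are made up). Mixing a signed qint64 size with an unsigned quint64 counter forces the signed operand to be converted to unsigned, so the compiler reports a signed/unsigned mismatch (e.g. -Wsign-compare), and a logically negative byte count silently wraps to a huge positive number. Keeping sizes signed end to end avoids both problems; transfer ids never take part in size arithmetic, so a plain uint is enough for them.

// signedness_sketch.cpp -- hypothetical example, not part of this commit.
// Compile with warnings enabled, e.g.:
//   g++ -Wall -Wextra $(pkg-config --cflags Qt5Core) signedness_sketch.cpp
#include <QtGlobal> // qint64 / quint64
#include <cstdio>

int main()
{
    qint64 itemSize = 100;     // byte count; differences can legitimately be negative
    quint64 resumeStart = 250; // unsigned "bytes already transferred" counter (pre-commit style)

    // Signed/unsigned comparison: itemSize is implicitly converted to quint64,
    // which is the kind of signedness warning this commit is about.
    if (itemSize < resumeStart)
        std::printf("comparison compiles, but only with a warning\n");

    // Unsigned subtraction silently wraps around instead of going negative.
    quint64 wrapped = quint64(itemSize) - resumeStart;
    std::printf("wrapped remainder: %llu\n", static_cast<unsigned long long>(wrapped));

    // With qint64 on both sides, the arithmetic is warning-free and a stale
    // resume offset can actually be detected as a negative remainder.
    qint64 remaining = itemSize - qint64(resumeStart);
    if (remaining < 0)
        std::printf("remaining = %lld, resume data is stale\n", static_cast<long long>(remaining));
    return 0;
}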
parent: 60cb5d3b34
commit: 4c04351360
Changed directories: src/common, src/gui, src/libsync, test
@@ -1270,7 +1270,7 @@ bool SyncJournalDb::updateFileRecordChecksum(const QString &filename,
 }
 
 bool SyncJournalDb::updateLocalMetadata(const QString &filename,
-    qint64 modtime, quint64 size, quint64 inode)
+    qint64 modtime, qint64 size, quint64 inode)
 
 {
     QMutexLocker locker(&_mutex);
@@ -71,7 +71,7 @@ public:
         const QByteArray &contentChecksum,
         const QByteArray &contentChecksumType);
     bool updateLocalMetadata(const QString &filename,
-        qint64 modtime, quint64 size, quint64 inode);
+        qint64 modtime, qint64 size, quint64 inode);
     bool exists();
     void walCheckpoint();
 
@@ -95,8 +95,8 @@ public:
     struct UploadInfo
     {
         int _chunk = 0;
-        quint64 _transferid = 0;
-        quint64 _size = 0; //currently unused
+        uint _transferid = 0;
+        qint64 _size = 0;
         qint64 _modtime = 0;
         int _errorCount = 0;
         bool _valid = false;
@@ -145,7 +145,7 @@ public:
      *
      * Returning false and setting error indicates an error.
      */
-    virtual bool updateMetadata(const QString &filePath, time_t modtime, quint64 size, const QByteArray &fileId, QString *error) = 0;
+    virtual bool updateMetadata(const QString &filePath, time_t modtime, qint64 size, const QByteArray &fileId, QString *error) = 0;
 
     /// Create a new dehydrated placeholder. Called from PropagateDownload.
     virtual void createPlaceholder(const SyncFileItem &item) = 0;
@@ -259,7 +259,7 @@ public:
     bool socketApiPinStateActionsShown() const override { return false; }
     bool isHydrating() const override { return false; }
 
-    bool updateMetadata(const QString &, time_t, quint64, const QByteArray &, QString *) override { return true; }
+    bool updateMetadata(const QString &, time_t, qint64, const QByteArray &, QString *) override { return true; }
     void createPlaceholder(const SyncFileItem &) override {}
     void dehydratePlaceholder(const SyncFileItem &) override {}
     void convertToPlaceholder(const QString &, const SyncFileItem &, const QString &) override {}
@@ -123,8 +123,8 @@ void CloudProviderWrapper::slotUpdateProgress(const QString &folder, const Progr
     if (!progress._currentDiscoveredRemoteFolder.isEmpty()) {
         msg = tr("Checking for changes in '%1'").arg(progress._currentDiscoveredRemoteFolder);
     } else if (progress.totalSize() == 0) {
-        quint64 currentFile = progress.currentFile();
-        quint64 totalFileCount = qMax(progress.totalFiles(), currentFile);
+        qint64 currentFile = progress.currentFile();
+        qint64 totalFileCount = qMax(progress.totalFiles(), currentFile);
         if (progress.trustEta()) {
             msg = tr("Syncing %1 of %2 (%3 left)")
                       .arg(currentFile)
@@ -939,7 +939,7 @@ void FolderStatusModel::slotSetProgress(const ProgressInfo &progress)
     // item if no items are in progress.
     SyncFileItem curItem = progress._lastCompletedItem;
     qint64 curItemProgress = -1; // -1 means finished
-    quint64 biggerItemSize = -1;
+    qint64 biggerItemSize = 0;
     quint64 estimatedUpBw = 0;
     quint64 estimatedDownBw = 0;
     QString allFilenames;
@@ -1018,13 +1018,11 @@ void FolderStatusModel::slotSetProgress(const ProgressInfo &progress)
     pi->_progressString = fileProgressString;
 
     // overall progress
-    quint64 completedSize = progress.completedSize();
-    quint64 completedFile = progress.completedFiles();
-    quint64 currentFile = progress.currentFile();
-    if (currentFile == ULLONG_MAX)
-        currentFile = 0;
-    quint64 totalSize = qMax(completedSize, progress.totalSize());
-    quint64 totalFileCount = qMax(currentFile, progress.totalFiles());
+    qint64 completedSize = progress.completedSize();
+    qint64 completedFile = progress.completedFiles();
+    qint64 currentFile = progress.currentFile();
+    qint64 totalSize = qMax(completedSize, progress.totalSize());
+    qint64 totalFileCount = qMax(currentFile, progress.totalFiles());
     QString overallSyncString;
     if (totalSize > 0) {
         QString s1 = Utility::octetsToString(completedSize);
@@ -426,8 +426,8 @@ void ownCloudGui::slotUpdateProgress(const QString &folder, const ProgressInfo &
     }
 
     if (progress.totalSize() == 0) {
-        quint64 currentFile = progress.currentFile();
-        quint64 totalFileCount = qMax(progress.totalFiles(), currentFile);
+        qint64 currentFile = progress.currentFile();
+        qint64 totalFileCount = qMax(progress.totalFiles(), currentFile);
         QString msg;
         if (progress.trustEta()) {
             msg = tr("Syncing %1 of %2 (%3 left)")
@@ -218,19 +218,19 @@ int ConfigFile::timeout() const
     return settings.value(QLatin1String(timeoutC), 300).toInt(); // default to 5 min
 }
 
-quint64 ConfigFile::chunkSize() const
+qint64 ConfigFile::chunkSize() const
 {
     QSettings settings(configFile(), QSettings::IniFormat);
     return settings.value(QLatin1String(chunkSizeC), 10 * 1000 * 1000).toLongLong(); // default to 10 MB
 }
 
-quint64 ConfigFile::maxChunkSize() const
+qint64 ConfigFile::maxChunkSize() const
 {
     QSettings settings(configFile(), QSettings::IniFormat);
     return settings.value(QLatin1String(maxChunkSizeC), 100 * 1000 * 1000).toLongLong(); // default to 100 MB
 }
 
-quint64 ConfigFile::minChunkSize() const
+qint64 ConfigFile::minChunkSize() const
 {
     QSettings settings(configFile(), QSettings::IniFormat);
     return settings.value(QLatin1String(minChunkSizeC), 1000 * 1000).toLongLong(); // default to 1 MB
@@ -861,15 +861,15 @@ void ConfigFile::setDownloadLimit(int kbytes)
     setValue(downloadLimitC, kbytes);
 }
 
-QPair<bool, quint64> ConfigFile::newBigFolderSizeLimit() const
+QPair<bool, qint64> ConfigFile::newBigFolderSizeLimit() const
 {
     auto defaultValue = Theme::instance()->newBigFolderSizeLimit();
     qint64 value = getValue(newBigFolderSizeLimitC, QString(), defaultValue).toLongLong();
     bool use = value >= 0 && getValue(useNewBigFolderSizeLimitC, QString(), true).toBool();
-    return qMakePair(use, quint64(qMax<qint64>(0, value)));
+    return qMakePair(use, qMax<qint64>(0, value));
 }
 
-void ConfigFile::setNewBigFolderSizeLimit(bool isChecked, quint64 mbytes)
+void ConfigFile::setNewBigFolderSizeLimit(bool isChecked, qint64 mbytes)
 {
     setValue(newBigFolderSizeLimitC, mbytes);
     setValue(useNewBigFolderSizeLimitC, isChecked);
@@ -139,8 +139,8 @@ public:
     void setUploadLimit(int kbytes);
     void setDownloadLimit(int kbytes);
     /** [checked, size in MB] **/
-    QPair<bool, quint64> newBigFolderSizeLimit() const;
-    void setNewBigFolderSizeLimit(bool isChecked, quint64 mbytes);
+    QPair<bool, qint64> newBigFolderSizeLimit() const;
+    void setNewBigFolderSizeLimit(bool isChecked, qint64 mbytes);
     bool confirmExternalStorage() const;
     void setConfirmExternalStorage(bool);
 
@@ -157,9 +157,9 @@ public:
     void setShowInExplorerNavigationPane(bool show);
 
     int timeout() const;
-    quint64 chunkSize() const;
-    quint64 maxChunkSize() const;
-    quint64 minChunkSize() const;
+    qint64 chunkSize() const;
+    qint64 maxChunkSize() const;
+    qint64 minChunkSize() const;
     std::chrono::milliseconds targetChunkUploadDuration() const;
 
     void saveGeometry(QWidget *w);
@@ -358,9 +358,9 @@ PropagateItemJob *OwncloudPropagator::createJob(const SyncFileItemPtr &item)
     return nullptr;
 }
 
-quint64 OwncloudPropagator::smallFileSize()
+qint64 OwncloudPropagator::smallFileSize()
 {
-    const quint64 smallFileSize = 100 * 1024; //default to 1 MB. Not dynamic right now.
+    const qint64 smallFileSize = 100 * 1024; //default to 1 MB. Not dynamic right now.
     return smallFileSize;
 }
 
@@ -621,7 +621,7 @@ void OwncloudPropagator::scheduleNextJobImpl()
     }
 }
 
-void OwncloudPropagator::reportProgress(const SyncFileItem &item, quint64 bytes)
+void OwncloudPropagator::reportProgress(const SyncFileItem &item, qint64 bytes)
 {
     emit progress(item, bytes);
 }
@@ -414,7 +414,7 @@ public:
      *
      * This allows skipping of uploads that have a very high likelihood of failure.
      */
-    QHash<QString, quint64> _folderQuota;
+    QHash<QString, qint64> _folderQuota;
 
     /* the maximum number of jobs using bandwidth (uploads or downloads, in parallel) */
     int maximumActiveTransferJob();
@@ -425,8 +425,8 @@ public:
      * if Capabilities::desiredChunkUploadDuration has a target
      * chunk-upload duration set.
      */
-    quint64 _chunkSize;
-    quint64 smallFileSize();
+    qint64 _chunkSize;
+    qint64 smallFileSize();
 
     /* The maximum number of active jobs in parallel */
     int hardMaximumActiveJob();
@@ -455,7 +455,7 @@ public:
     PropagateItemJob *createJob(const SyncFileItemPtr &item);
 
     void scheduleNextJob();
-    void reportProgress(const SyncFileItem &, quint64 bytes);
+    void reportProgress(const SyncFileItem &, qint64 bytes);
 
     void abort()
     {
@@ -539,7 +539,7 @@ private slots:
 signals:
     void newItem(const SyncFileItemPtr &);
     void itemCompleted(const SyncFileItemPtr &);
-    void progress(const SyncFileItem &, quint64 bytes);
+    void progress(const SyncFileItem &, qint64 bytes);
     void finished(bool success);
 
     /** Emitted when propagation has problems with a locked file. */
@@ -200,27 +200,27 @@ void ProgressInfo::adjustTotalsForFile(const SyncFileItem &item)
     }
 }
 
-quint64 ProgressInfo::totalFiles() const
+qint64 ProgressInfo::totalFiles() const
 {
     return _fileProgress._total;
 }
 
-quint64 ProgressInfo::completedFiles() const
+qint64 ProgressInfo::completedFiles() const
 {
     return _fileProgress._completed;
 }
 
-quint64 ProgressInfo::currentFile() const
+qint64 ProgressInfo::currentFile() const
 {
     return completedFiles() + _currentItems.size();
 }
 
-quint64 ProgressInfo::totalSize() const
+qint64 ProgressInfo::totalSize() const
 {
     return _sizeProgress._total;
 }
 
-quint64 ProgressInfo::completedSize() const
+qint64 ProgressInfo::completedSize() const
 {
     return _sizeProgress._completed;
 }
@@ -240,7 +240,7 @@ void ProgressInfo::setProgressComplete(const SyncFileItem &item)
     _lastCompletedItem = item;
 }
 
-void ProgressInfo::setProgressItem(const SyncFileItem &item, quint64 completed)
+void ProgressInfo::setProgressItem(const SyncFileItem &item, qint64 completed)
 {
     if (!shouldCountProgress(item)) {
         return;
@@ -309,8 +309,8 @@ ProgressInfo::Estimates ProgressInfo::totalProgress() const
         1.0);
 
     double beOptimistic = nearMaxFps * slowTransfer;
-    size.estimatedEta = (1.0 - beOptimistic) * size.estimatedEta
-        + beOptimistic * optimisticEta();
+    size.estimatedEta = quint64((1.0 - beOptimistic) * size.estimatedEta
+        + beOptimistic * optimisticEta());
 
     return size;
 }
@@ -355,7 +355,7 @@ void ProgressInfo::updateEstimates()
 
 void ProgressInfo::recomputeCompletedSize()
 {
-    quint64 r = _totalSizeOfCompletedJobs;
+    qint64 r = _totalSizeOfCompletedJobs;
     foreach (const ProgressItem &i, _currentItems) {
         if (isSizeDependent(i._item))
             r += i._progress._completed;
@@ -370,17 +370,17 @@ ProgressInfo::Estimates ProgressInfo::Progress::estimates() const
     if (_progressPerSec != 0) {
         est.estimatedEta = qRound64(static_cast<double>(_total - _completed) / _progressPerSec) * 1000;
     } else {
-        est.estimatedEta = 0; // looks better than quint64 max
+        est.estimatedEta = 0; // looks better than qint64 max
     }
     return est;
 }
 
-quint64 ProgressInfo::Progress::completed() const
+qint64 ProgressInfo::Progress::completed() const
 {
     return _completed;
 }
 
-quint64 ProgressInfo::Progress::remaining() const
+qint64 ProgressInfo::Progress::remaining() const
 {
     return _total - _completed;
 }
@@ -401,7 +401,7 @@ void ProgressInfo::Progress::update()
     _prevCompleted = _completed;
 }
 
-void ProgressInfo::Progress::setCompleted(quint64 completed)
+void ProgressInfo::Progress::setCompleted(qint64 completed)
 {
     _completed = qMin(completed, _total);
     _prevCompleted = qMin(_prevCompleted, _completed);
@@ -91,14 +91,14 @@ public:
      */
     void adjustTotalsForFile(const SyncFileItem &item);
 
-    quint64 totalFiles() const;
-    quint64 completedFiles() const;
+    qint64 totalFiles() const;
+    qint64 completedFiles() const;
 
-    quint64 totalSize() const;
-    quint64 completedSize() const;
+    qint64 totalSize() const;
+    qint64 completedSize() const;
 
     /** Number of a file that is currently in progress. */
-    quint64 currentFile() const;
+    qint64 currentFile() const;
 
     /** Return true if the size needs to be taken in account in the total amount of time */
     static inline bool isSizeDependent(const SyncFileItem &item)
@@ -118,7 +118,7 @@ public:
     struct Estimates
     {
         /// Estimated completion amount per second. (of bytes or files)
-        quint64 estimatedBandwidth;
+        qint64 estimatedBandwidth;
 
         /// Estimated time remaining in milliseconds.
         quint64 estimatedEta;
@@ -133,8 +133,8 @@ public:
         /** Returns the estimates about progress per second and eta. */
        Estimates estimates() const;
 
-        quint64 completed() const;
-        quint64 remaining() const;
+        qint64 completed() const;
+        qint64 remaining() const;
 
    private:
         /**
@@ -146,19 +146,19 @@ public:
         * Changes the _completed value and does sanity checks on
         * _prevCompleted and _total.
         */
-        void setCompleted(quint64 completed);
+        void setCompleted(qint64 completed);
 
        // Updated by update()
        double _progressPerSec = 0;
-        quint64 _prevCompleted = 0;
+        qint64 _prevCompleted = 0;
 
        // Used to get to a good value faster when
        // progress measurement stats. See update().
        double _initialSmoothing = 1.0;
 
       // Set and updated by ProgressInfo
-        quint64 _completed = 0;
-        quint64 _total = 0;
+        qint64 _completed = 0;
+        qint64 _total = 0;
 
        friend class ProgressInfo;
    };
@@ -180,7 +180,7 @@ public:
 
    void setProgressComplete(const SyncFileItem &item);
 
-    void setProgressItem(const SyncFileItem &item, quint64 completed);
+    void setProgressItem(const SyncFileItem &item, qint64 completed);
 
    /**
     * Get the total completion estimate
@@ -227,7 +227,7 @@ private:
    Progress _fileProgress;
 
    // All size from completed jobs only.
-    quint64 _totalSizeOfCompletedJobs;
+    qint64 _totalSizeOfCompletedJobs;
 
    // The fastest observed rate of files per second in this sync.
    double _maxFilesPerSecond;
@@ -71,7 +71,7 @@ QString OWNCLOUDSYNC_EXPORT createDownloadTmpFileName(const QString &previous)
 // DOES NOT take ownership of the device.
 GETFileJob::GETFileJob(AccountPtr account, const QString &path, QIODevice *device,
     const QMap<QByteArray, QByteArray> &headers, const QByteArray &expectedEtagForResume,
-    quint64 resumeStart, QObject *parent)
+    qint64 resumeStart, QObject *parent)
     : AbstractNetworkJob(account, path, parent)
     , _device(device)
     , _headers(headers)
@@ -91,8 +91,7 @@ GETFileJob::GETFileJob(AccountPtr account, const QString &path, QIODevice *devic
 
 GETFileJob::GETFileJob(AccountPtr account, const QUrl &url, QIODevice *device,
     const QMap<QByteArray, QByteArray> &headers, const QByteArray &expectedEtagForResume,
-    quint64 resumeStart, QObject *parent)
-
+    qint64 resumeStart, QObject *parent)
     : AbstractNetworkJob(account, url.toEncoded(), parent)
     , _device(device)
     , _headers(headers)
@@ -216,12 +215,12 @@ void GETFileJob::slotMetaDataChanged()
         return;
     }
 
-    quint64 start = 0;
+    qint64 start = 0;
     QByteArray ranges = reply()->rawHeader("Content-Range");
     if (!ranges.isEmpty()) {
         QRegExp rx("bytes (\\d+)-");
         if (rx.indexIn(ranges) >= 0) {
-            start = rx.cap(1).toULongLong();
+            start = rx.cap(1).toLongLong();
         }
     }
     if (start != _resumeStart) {
@@ -610,7 +609,7 @@ void PropagateDownloadFile::startDownload()
 qint64 PropagateDownloadFile::committedDiskSpace() const
 {
     if (_state == Running) {
-        return qBound(0ULL, _item->_size - _resumeStart - _downloadProgress, _item->_size);
+        return qBound(0LL, _item->_size - _resumeStart - _downloadProgress, _item->_size);
     }
     return 0;
 }
@@ -724,7 +723,7 @@ void PropagateDownloadFile::slotGetFinished()
      * truncated, as described here: https://github.com/owncloud/mirall/issues/2528
      */
     const QByteArray sizeHeader("Content-Length");
-    quint64 bodySize = job->reply()->rawHeader(sizeHeader).toULongLong();
+    qint64 bodySize = job->reply()->rawHeader(sizeHeader).toLongLong();
     bool hasSizeHeader = !job->reply()->rawHeader(sizeHeader).isEmpty();
 
     // Qt removes the content-length header for transparently decompressed HTTP1 replies
@@ -36,7 +36,7 @@ class GETFileJob : public AbstractNetworkJob
     QByteArray _expectedEtagForResume;
     qint64 _expectedContentLength;
     qint64 _contentLength;
-    quint64 _resumeStart;
+    qint64 _resumeStart;
     SyncFileItem::Status _errorStatus;
     QUrl _directDownloadUrl;
     QByteArray _etag;
@@ -54,11 +54,11 @@ public:
     // DOES NOT take ownership of the device.
     explicit GETFileJob(AccountPtr account, const QString &path, QIODevice *device,
         const QMap<QByteArray, QByteArray> &headers, const QByteArray &expectedEtagForResume,
-        quint64 resumeStart, QObject *parent = nullptr);
+        qint64 resumeStart, QObject *parent = nullptr);
     // For directDownloadUrl:
     explicit GETFileJob(AccountPtr account, const QUrl &url, QIODevice *device,
         const QMap<QByteArray, QByteArray> &headers, const QByteArray &expectedEtagForResume,
-        quint64 resumeStart, QObject *parent = nullptr);
+        qint64 resumeStart, QObject *parent = nullptr);
     virtual ~GETFileJob()
     {
         if (_bandwidthManager) {
@@ -100,7 +100,7 @@ public:
     void onTimedOut() override;
 
     QByteArray &etag() { return _etag; }
-    quint64 resumeStart() { return _resumeStart; }
+    qint64 resumeStart() { return _resumeStart; }
     time_t lastModified() { return _lastModified; }
 
     qint64 contentLength() const { return _contentLength; }
@@ -205,7 +205,7 @@ private:
     void startAfterIsEncryptedIsChecked();
     void deleteExistingFolder();
 
-    quint64 _resumeStart;
+    qint64 _resumeStart;
     qint64 _downloadProgress;
     QPointer<GETFileJob> _job;
     QFile _tmpFile;
@@ -284,8 +284,8 @@ void PropagateUploadFileCommon::startUploadFile() {
     }
 
     // Check if we believe that the upload will fail due to remote quota limits
-    const quint64 quotaGuess = propagator()->_folderQuota.value(
-        QFileInfo(_fileToUpload._file).path(), std::numeric_limits<quint64>::max());
+    const qint64 quotaGuess = propagator()->_folderQuota.value(
+        QFileInfo(_fileToUpload._file).path(), std::numeric_limits<qint64>::max());
     if (_fileToUpload._size > quotaGuess) {
         // Necessary for blacklisting logic
         _item->_httpErrorCode = 507;
@@ -417,7 +417,7 @@ void PropagateUploadFileCommon::slotStartUpload(const QByteArray &transmissionCh
         return;
     }
 
-    quint64 fileSize = FileSystem::getSize(fullFilePath);
+    qint64 fileSize = FileSystem::getSize(fullFilePath);
     _item->_size = fileSize;
     _fileToUpload._size = fileSize;
 
@@ -676,7 +676,7 @@ void PropagateUploadFileCommon::commonErrorHandling(AbstractNetworkJob *job)
     abortWithError(status, errorString);
 }
 
-void PropagateUploadFileCommon::adjustLastJobTimeout(AbstractNetworkJob *job, quint64 fileSize)
+void PropagateUploadFileCommon::adjustLastJobTimeout(AbstractNetworkJob *job, qint64 fileSize)
 {
     constexpr double threeMinutes = 3.0 * 60 * 1000;
 
@@ -229,7 +229,7 @@ protected:
     struct UploadFileInfo {
         QString _file; /// I'm still unsure if I should use a SyncFilePtr here.
         QString _path; /// the full path on disk.
-        quint64 _size;
+        qint64 _size;
     };
     UploadFileInfo _fileToUpload;
     QByteArray _transmissionChecksumHeader;
@@ -308,7 +308,7 @@ protected:
      *
      * See #6527, enterprise#2480
      */
-    static void adjustLastJobTimeout(AbstractNetworkJob *job, quint64 fileSize);
+    static void adjustLastJobTimeout(AbstractNetworkJob *job, qint64 fileSize);
 
     // Bases headers that need to be sent with every chunk
     QMap<QByteArray, QByteArray> headers();
@@ -341,9 +341,9 @@ private:
      */
     int _currentChunk = 0;
     int _chunkCount = 0; /// Total number of chunks for this file
-    quint64 _transferId = 0; /// transfer id (part of the url)
+    uint _transferId = 0; /// transfer id (part of the url)
 
-    quint64 chunkSize() const {
+    qint64 chunkSize() const {
        // Old chunking does not use dynamic chunking algorithm, and does not adjusts the chunk size respectively,
        // thus this value should be used as the one classifing item to be chunked
        return propagator()->syncOptions()._initialChunkSize;
@@ -374,20 +374,20 @@ class PropagateUploadFileNG : public PropagateUploadFileCommon
 {
     Q_OBJECT
 private:
-    quint64 _sent = 0; /// amount of data (bytes) that was already sent
+    qint64 _sent = 0; /// amount of data (bytes) that was already sent
     uint _transferId = 0; /// transfer id (part of the url)
     int _currentChunk = 0; /// Id of the next chunk that will be sent
-    quint64 _currentChunkSize = 0; /// current chunk size
+    qint64 _currentChunkSize = 0; /// current chunk size
     bool _removeJobError = false; /// If not null, there was an error removing the job
 
     // Map chunk number with its size from the PROPFIND on resume.
     // (Only used from slotPropfindIterate/slotPropfindFinished because the LsColJob use signals to report data.)
     struct ServerChunkInfo
     {
-        quint64 size;
+        qint64 size;
         QString originalName;
     };
-    QMap<int, ServerChunkInfo> _serverChunks;
+    QMap<qint64, ServerChunkInfo> _serverChunks;
 
     /**
      * Return the URL of a chunk.
@@ -42,7 +42,7 @@ QUrl PropagateUploadFileNG::chunkUrl(int chunk)
         + QLatin1Char('/') + QString::number(_transferId);
     if (chunk >= 0) {
         // We need to do add leading 0 because the server orders the chunk alphabetically
-        path += QLatin1Char('/') + QString::number(chunk).rightJustified(8, '0');
+        path += QLatin1Char('/') + QString::number(chunk).rightJustified(16, '0'); // 1e16 is 10 petabyte
     }
     return Utility::concatUrlPath(propagator()->account()->url(), path);
 }
@@ -84,7 +84,7 @@ void PropagateUploadFileNG::doStartUpload()
 
     const SyncJournalDb::UploadInfo progressInfo = propagator()->_journal->getUploadInfo(_item->_file);
     if (progressInfo._valid && progressInfo.isChunked() && progressInfo._modtime == _item->_modtime
-        && progressInfo._size == qint64(_item->_size)) {
+        && progressInfo._size == _item->_size) {
         _transferId = progressInfo._transferid;
         auto url = chunkUrl();
         auto job = new LsColJob(propagator()->account(), url, this);
@@ -117,9 +117,9 @@ void PropagateUploadFileNG::slotPropfindIterate(const QString &name, const QMap<
     }
     bool ok = false;
     QString chunkName = name.mid(name.lastIndexOf('/') + 1);
-    auto chunkId = chunkName.toUInt(&ok);
+    auto chunkId = chunkName.toLongLong(&ok);
     if (ok) {
-        ServerChunkInfo chunkinfo = { properties["getcontentlength"].toULongLong(), chunkName };
+        ServerChunkInfo chunkinfo = { properties["getcontentlength"].toLongLong(), chunkName };
         _serverChunks[chunkId] = chunkinfo;
     }
 }
@@ -229,7 +229,7 @@ void PropagateUploadFileNG::slotDeleteJobFinished()
 void PropagateUploadFileNG::startNewUpload()
 {
     ASSERT(propagator()->_activeJobList.count(this) == 1);
-    _transferId = qrand() ^ _item->_modtime ^ (_fileToUpload._size << 16) ^ qHash(_fileToUpload._file);
+    _transferId = uint(qrand() ^ uint(_item->_modtime) ^ (uint(_fileToUpload._size) << 16) ^ qHash(_fileToUpload._file));
     _sent = 0;
     _currentChunk = 0;
 
@@ -278,7 +278,7 @@ void PropagateUploadFileNG::startNextChunk()
     if (propagator()->_abortRequested.fetchAndAddRelaxed(0))
        return;
 
-    quint64 fileSize = _fileToUpload._size;
+    qint64 fileSize = _fileToUpload._size;
     ENFORCE(fileSize >= _sent, "Sent data exceeds file size");
 
     // prevent situation that chunk size is bigger then required one to send
@@ -393,7 +393,7 @@ void PropagateUploadFileNG::slotPutFinished()
         //
         // We use an exponential moving average here as a cheap way of smoothing
         // the chunk sizes a bit.
-        quint64 targetSize = (propagator()->_chunkSize + predictedGoodSize) / 2;
+        qint64 targetSize = propagator()->_chunkSize / 2 + predictedGoodSize / 2;
 
         // Adjust the dynamic chunk size _chunkSize used for sizing of the item's chunks to be send
         propagator()->_chunkSize = qBound(
@@ -37,13 +37,13 @@ namespace OCC {
 
 void PropagateUploadFileV1::doStartUpload()
 {
-    _chunkCount = std::ceil(_fileToUpload._size / double(chunkSize()));
+    _chunkCount = int(std::ceil(_fileToUpload._size / double(chunkSize())));
     _startChunk = 0;
-    _transferId = qrand() ^ _item->_modtime ^ (_fileToUpload._size << 16);
+    _transferId = uint(qrand()) ^ uint(_item->_modtime) ^ (uint(_fileToUpload._size) << 16);
 
     const SyncJournalDb::UploadInfo progressInfo = propagator()->_journal->getUploadInfo(_item->_file);
 
-    if (progressInfo._valid && progressInfo.isChunked() && progressInfo._modtime == _item->_modtime && progressInfo._size == qint64(_item->_size)
+    if (progressInfo._valid && progressInfo.isChunked() && progressInfo._modtime == _item->_modtime && progressInfo._size == _item->_size
         && (progressInfo._contentChecksum == _item->_checksumHeader || progressInfo._contentChecksum.isEmpty() || _item->_checksumHeader.isEmpty())) {
         _startChunk = progressInfo._chunk;
         _transferId = progressInfo._transferid;
@@ -83,10 +83,10 @@ void PropagateUploadFileV1::startNextChunk()
         // is sent last.
         return;
     }
-    quint64 fileSize = _fileToUpload._size;
+    qint64 fileSize = _fileToUpload._size;
     auto headers = PropagateUploadFileCommon::headers();
     headers[QByteArrayLiteral("OC-Total-Length")] = QByteArray::number(fileSize);
-    headers[QByteArrayLiteral("OC-Chunk-Size")] = QByteArray::number(quint64(chunkSize()));
+    headers[QByteArrayLiteral("OC-Chunk-Size")] = QByteArray::number(chunkSize());
 
     QString path = _fileToUpload._file;
 
@@ -97,13 +97,13 @@ void PropagateUploadFileV1::startNextChunk()
     if (_chunkCount > 1) {
         int sendingChunk = (_currentChunk + _startChunk) % _chunkCount;
         // XOR with chunk size to make sure everything goes well if chunk size changes between runs
-        uint transid = _transferId ^ chunkSize();
+        uint transid = _transferId ^ uint(chunkSize());
         qCInfo(lcPropagateUpload) << "Upload chunk" << sendingChunk << "of" << _chunkCount << "transferid(remote)=" << transid;
         path += QString("-chunking-%1-%2-%3").arg(transid).arg(_chunkCount).arg(sendingChunk);
 
         headers[QByteArrayLiteral("OC-Chunked")] = QByteArrayLiteral("1");
 
-        chunkStart = chunkSize() * quint64(sendingChunk);
+        chunkStart = chunkSize() * sendingChunk;
         currentChunkSize = chunkSize();
         if (sendingChunk == _chunkCount - 1) { // last chunk
             currentChunkSize = (fileSize % chunkSize());
@@ -337,7 +337,7 @@ void PropagateUploadFileV1::slotUploadProgress(qint64 sent, qint64 total)
     // not including this one.
     // FIXME: this assumes all chunks have the same size, which is true only if the last chunk
     // has not been finished (which should not happen because the last chunk is sent sequentially)
-    quint64 amount = progressChunk * chunkSize();
+    qint64 amount = progressChunk * chunkSize();
 
     sender()->setProperty("byteWritten", sent);
     if (_jobs.count() > 1) {
@@ -877,7 +877,7 @@ void SyncEngine::finalize(bool success)
     _clearTouchedFilesTimer.start();
 }
 
-void SyncEngine::slotProgress(const SyncFileItem &item, quint64 current)
+void SyncEngine::slotProgress(const SyncFileItem &item, qint64 current)
 {
     _progressInfo->setProgressItem(item, current);
     emit transmissionProgress(*_progressInfo);
@@ -191,7 +191,7 @@ private slots:
     void slotItemCompleted(const SyncFileItemPtr &item);
     void slotDiscoveryFinished();
     void slotPropagationFinished(bool success);
-    void slotProgress(const SyncFileItem &item, quint64 curent);
+    void slotProgress(const SyncFileItem &item, qint64 curent);
     void slotCleanPollsJobAborted(const QString &error);
 
     /** Records that a file was touched by a job. */
@@ -251,7 +251,7 @@ public:
     csync_instructions_e _instruction = CSYNC_INSTRUCTION_NONE;
     time_t _modtime = 0;
     QByteArray _etag;
-    quint64 _size = 0;
+    qint64 _size = 0;
     quint64 _inode = 0;
     QByteArray _fileId;
 
@@ -264,7 +264,7 @@ public:
     QByteArray _checksumHeader;
 
     // The size and modtime of the file getting overwritten (on the disk for downloads, on the server for uploads).
-    quint64 _previousSize = 0;
+    qint64 _previousSize = 0;
     time_t _previousModtime = 0;
 
     QString _directDownloadUrl;
@@ -51,13 +51,13 @@ struct SyncOptions
      * starting value and is then gradually adjusted within the
      * minChunkSize / maxChunkSize bounds.
      */
-    quint64 _initialChunkSize = 10 * 1000 * 1000; // 10MB
+    qint64 _initialChunkSize = 10 * 1000 * 1000; // 10MB
 
     /** The minimum chunk size in bytes for chunked uploads */
-    quint64 _minChunkSize = 1 * 1000 * 1000; // 1MB
+    qint64 _minChunkSize = 1 * 1000 * 1000; // 1MB
 
     /** The maximum chunk size in bytes for chunked uploads */
-    quint64 _maxChunkSize = 100 * 1000 * 1000; // 100MB
+    qint64 _maxChunkSize = 100 * 1000 * 1000; // 100MB
 
     /** The target duration of chunk uploads for dynamic chunk sizing.
      *
@@ -53,7 +53,7 @@ bool VfsSuffix::isHydrating() const
     return false;
 }
 
-bool VfsSuffix::updateMetadata(const QString &filePath, time_t modtime, quint64, const QByteArray &, QString *)
+bool VfsSuffix::updateMetadata(const QString &filePath, time_t modtime, qint64, const QByteArray &, QString *)
 {
     FileSystem::setModTime(filePath, modtime);
     return true;
@@ -38,7 +38,7 @@ public:
     bool socketApiPinStateActionsShown() const override { return true; }
     bool isHydrating() const override;
 
-    bool updateMetadata(const QString &filePath, time_t modtime, quint64 size, const QByteArray &fileId, QString *error) override;
+    bool updateMetadata(const QString &filePath, time_t modtime, qint64 size, const QByteArray &fileId, QString *error) override;
 
     void createPlaceholder(const SyncFileItem &item) override;
     void dehydratePlaceholder(const SyncFileItem &item) override;
@@ -647,12 +647,12 @@ public:
         auto sourceFolder = uploadsFileInfo.find(source);
         Q_ASSERT(sourceFolder);
         Q_ASSERT(sourceFolder->isDir);
-        int count = 0;
-        int size = 0;
+        qint64 count = 0;
+        qint64 size = 0;
         char payload = '\0';
 
         do {
-            QString chunkName = QString::number(count).rightJustified(8, '0');
+            QString chunkName = QString::number(count).rightJustified(16, '0');
             if (!sourceFolder->children.contains(chunkName))
                 break;
             auto &x = sourceFolder->children[chunkName];
@@ -41,7 +41,7 @@ static void partialUpload(FakeFolder &fakeFolder, const QString &name, int size)
 }
 
 // Reduce max chunk size a bit so we get more chunks
-static void setChunkSize(SyncEngine &engine, quint64 size)
+static void setChunkSize(SyncEngine &engine, qint64 size)
 {
     SyncOptions options;
     options._maxChunkSize = size;
@@ -86,7 +86,7 @@ private slots:
         QCOMPARE(fakeFolder.uploadState().children.count(), 1);
         auto chunkingId = fakeFolder.uploadState().children.first().name;
         const auto &chunkMap = fakeFolder.uploadState().children.first().children;
-        quint64 uploadedSize = std::accumulate(chunkMap.begin(), chunkMap.end(), 0LL, [](quint64 s, const FileInfo &f) { return s + f.size; });
+        qint64 uploadedSize = std::accumulate(chunkMap.begin(), chunkMap.end(), 0LL, [](qint64 s, const FileInfo &f) { return s + f.size; });
         QVERIFY(uploadedSize > 2 * 1000 * 1000); // at least 2 MB
 
         // Add a fake chunk to make sure it gets deleted
@@ -95,7 +95,7 @@ private slots:
         fakeFolder.setServerOverride([&](QNetworkAccessManager::Operation op, const QNetworkRequest &request, QIODevice *) -> QNetworkReply * {
             if (op == QNetworkAccessManager::PutOperation) {
                 // Test that we properly resuming and are not sending past data again.
-                Q_ASSERT(request.rawHeader("OC-Chunk-Offset").toULongLong() >= uploadedSize);
+                Q_ASSERT(request.rawHeader("OC-Chunk-Offset").toLongLong() >= uploadedSize);
             } else if (op == QNetworkAccessManager::DeleteOperation) {
                 Q_ASSERT(request.url().path().endsWith("/10000"));
             }
@@ -121,7 +121,7 @@ private slots:
         QCOMPARE(fakeFolder.uploadState().children.count(), 1);
         auto chunkingId = fakeFolder.uploadState().children.first().name;
         const auto &chunkMap = fakeFolder.uploadState().children.first().children;
-        quint64 uploadedSize = std::accumulate(chunkMap.begin(), chunkMap.end(), 0LL, [](quint64 s, const FileInfo &f) { return s + f.size; });
+        qint64 uploadedSize = std::accumulate(chunkMap.begin(), chunkMap.end(), 0LL, [](qint64 s, const FileInfo &f) { return s + f.size; });
         QVERIFY(uploadedSize > 2 * 1000 * 1000); // at least 50 MB
         QVERIFY(chunkMap.size() >= 3); // at least three chunks
 
@@ -177,12 +177,12 @@ private slots:
         QCOMPARE(fakeFolder.uploadState().children.count(), 1);
         auto chunkingId = fakeFolder.uploadState().children.first().name;
         const auto &chunkMap = fakeFolder.uploadState().children.first().children;
-        quint64 uploadedSize = std::accumulate(chunkMap.begin(), chunkMap.end(), 0LL, [](quint64 s, const FileInfo &f) { return s + f.size; });
+        qint64 uploadedSize = std::accumulate(chunkMap.begin(), chunkMap.end(), 0LL, [](qint64 s, const FileInfo &f) { return s + f.size; });
         QVERIFY(uploadedSize > 5 * 1000 * 1000); // at least 5 MB
 
         // Add a chunk that makes the file completely uploaded
         fakeFolder.uploadState().children.first().insert(
-            QString::number(chunkMap.size()).rightJustified(8, '0'), size - uploadedSize);
+            QString::number(chunkMap.size()).rightJustified(16, '0'), size - uploadedSize);
 
         bool sawPut = false;
         bool sawDelete = false;
@@ -222,12 +222,12 @@ private slots:
        QCOMPARE(fakeFolder.uploadState().children.count(), 1);
        auto chunkingId = fakeFolder.uploadState().children.first().name;
        const auto &chunkMap = fakeFolder.uploadState().children.first().children;
-        quint64 uploadedSize = std::accumulate(chunkMap.begin(), chunkMap.end(), 0LL, [](quint64 s, const FileInfo &f) { return s + f.size; });
+        qint64 uploadedSize = std::accumulate(chunkMap.begin(), chunkMap.end(), 0LL, [](qint64 s, const FileInfo &f) { return s + f.size; });
        QVERIFY(uploadedSize > 5 * 1000 * 1000); // at least 5 MB
 
        // Add a chunk that makes the file more than completely uploaded
        fakeFolder.uploadState().children.first().insert(
-            QString::number(chunkMap.size()).rightJustified(8, '0'), size - uploadedSize + 100);
+            QString::number(chunkMap.size()).rightJustified(16, '0'), size - uploadedSize + 100);
 
        QVERIFY(fakeFolder.syncOnce());
 
@@ -396,28 +396,28 @@ private slots:
             QVERIFY(a1);
             QCOMPARE(a1->_instruction, CSYNC_INSTRUCTION_SYNC);
             QCOMPARE(a1->_direction, SyncFileItem::Up);
-            QCOMPARE(a1->_size, quint64(5));
+            QCOMPARE(a1->_size, qint64(5));
 
             QCOMPARE(Utility::qDateTimeFromTime_t(a1->_modtime), changedMtime);
-            QCOMPARE(a1->_previousSize, quint64(4));
+            QCOMPARE(a1->_previousSize, qint64(4));
             QCOMPARE(Utility::qDateTimeFromTime_t(a1->_previousModtime), initialMtime);
 
             // b2: should have remote size and modtime
             QVERIFY(b1);
             QCOMPARE(b1->_instruction, CSYNC_INSTRUCTION_SYNC);
             QCOMPARE(b1->_direction, SyncFileItem::Down);
-            QCOMPARE(b1->_size, quint64(17));
+            QCOMPARE(b1->_size, qint64(17));
             QCOMPARE(Utility::qDateTimeFromTime_t(b1->_modtime), changedMtime);
-            QCOMPARE(b1->_previousSize, quint64(16));
+            QCOMPARE(b1->_previousSize, qint64(16));
             QCOMPARE(Utility::qDateTimeFromTime_t(b1->_previousModtime), initialMtime);
 
             // c1: conflicts are downloads, so remote size and modtime
             QVERIFY(c1);
             QCOMPARE(c1->_instruction, CSYNC_INSTRUCTION_CONFLICT);
             QCOMPARE(c1->_direction, SyncFileItem::None);
-            QCOMPARE(c1->_size, quint64(25));
+            QCOMPARE(c1->_size, qint64(25));
             QCOMPARE(Utility::qDateTimeFromTime_t(c1->_modtime), changedMtime2);
-            QCOMPARE(c1->_previousSize, quint64(26));
+            QCOMPARE(c1->_previousSize, qint64(26));
             QCOMPARE(Utility::qDateTimeFromTime_t(c1->_previousModtime), changedMtime);
         });
 
@@ -46,28 +46,28 @@ private slots:
 
         uploadInfo = fakeFolder.syncEngine().journal()->getUploadInfo("A/a0");
         QCOMPARE(uploadInfo._errorCount, 1);
-        QCOMPARE(uploadInfo._transferid, 1);
+        QCOMPARE(uploadInfo._transferid, 1U);
 
         fakeFolder.syncEngine().journal()->wipeErrorBlacklist();
         QVERIFY(!fakeFolder.syncOnce());
 
         uploadInfo = fakeFolder.syncEngine().journal()->getUploadInfo("A/a0");
         QCOMPARE(uploadInfo._errorCount, 2);
-        QCOMPARE(uploadInfo._transferid, 1);
+        QCOMPARE(uploadInfo._transferid, 1U);
 
         fakeFolder.syncEngine().journal()->wipeErrorBlacklist();
         QVERIFY(!fakeFolder.syncOnce());
 
         uploadInfo = fakeFolder.syncEngine().journal()->getUploadInfo("A/a0");
         QCOMPARE(uploadInfo._errorCount, 3);
-        QCOMPARE(uploadInfo._transferid, 1);
+        QCOMPARE(uploadInfo._transferid, 1U);
 
         fakeFolder.syncEngine().journal()->wipeErrorBlacklist();
         QVERIFY(!fakeFolder.syncOnce());
 
         uploadInfo = fakeFolder.syncEngine().journal()->getUploadInfo("A/a0");
         QCOMPARE(uploadInfo._errorCount, 0);
-        QCOMPARE(uploadInfo._transferid, 0);
+        QCOMPARE(uploadInfo._transferid, 0U);
         QVERIFY(!uploadInfo._valid);
     }
 };