refactor: organize the code in launcher/net/ a little more
This also reduces some code duplication by reusing Task logic in NetAction.
This commit is contained in:
parent 649b8ac7c6
commit 8c8eabf7ac
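In short, the refactor makes NetAction a subclass of Task: the separate JobStatus enum, the m_status field, and the start()/startImpl() pair are dropped in favour of the shared Task::State enum, the (now protected) m_state field, and executeTask(). The sketch below condenses that before/after shape; it is assembled from the diff that follows rather than copied from the repository, and it leaves out the Q_OBJECT/signal plumbing and the project's shared_qobject_ptr wrapper.

#include <QNetworkAccessManager>
#include <QObject>

// Simplified stand-in for launcher/tasks/Task.h: Task::State is what now
// replaces the old JobStatus values (Job_NotStarted, Job_InProgress, ...).
class Task : public QObject {
public:
    enum class State { Inactive, Running, Succeeded, Failed, AbortedByUser };
    State getState() const { return m_state; }

protected:
    virtual void executeTask() = 0;
    State m_state = State::Inactive;  // made protected by this commit so subclasses can set it
};

// Simplified stand-in for launcher/net/NetAction.h after the refactor:
// it derives from Task instead of plain QObject.
class NetAction : public Task {
public:
    void startAction(QNetworkAccessManager* network)  // replaces the old start()/startImpl() pair
    {
        m_network = network;
        executeTask();  // concrete actions (Download, ImgurUpload, ...) override executeTask()
    }

protected:
    QNetworkAccessManager* m_network = nullptr;  // the real code holds a shared_qobject_ptr
};

Concrete actions such as Net::Download then set m_state to Task::State::Succeeded or Task::State::Failed where they used to assign Job_Finished or Job_Failed, which is what most of the hunks below do.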
@@ -14,27 +14,25 @@
*/

#include "InstanceImportTask.h"
#include <QtConcurrentRun>
#include "Application.h"
#include "BaseInstance.h"
#include "FileSystem.h"
#include "Application.h"
#include "MMCZip.h"
#include "NullInstance.h"
#include "settings/INISettingsObject.h"
#include "icons/IconList.h"
#include "icons/IconUtils.h"
#include <QtConcurrentRun>
#include "settings/INISettingsObject.h"

// FIXME: this does not belong here, it's Minecraft/Flame specific
#include <quazip/quazipdir.h>
#include "Json.h"
#include "minecraft/MinecraftInstance.h"
#include "minecraft/PackProfile.h"
#include "modplatform/flame/FileResolvingTask.h"
#include "modplatform/flame/PackManifest.h"
#include "Json.h"
#include <quazip/quazipdir.h>
#include "modplatform/technic/TechnicPackProcessor.h"

#include "icons/IconList.h"
#include "Application.h"

InstanceImportTask::InstanceImportTask(const QUrl sourceUrl)
{
m_sourceUrl = sourceUrl;

@@ -297,7 +297,7 @@ NetAction::Ptr AssetObject::getDownloadAction()
auto rawHash = QByteArray::fromHex(hash.toLatin1());
objectDL->addValidator(new Net::ChecksumValidator(QCryptographicHash::Sha1, rawHash));
}
objectDL->m_total_progress = size;
objectDL->setProgress(objectDL->getProgress(), size);
return objectDL;
}
return nullptr;

@@ -3,60 +3,59 @@
#include "Sink.h"

namespace Net {

/*
* Sink object for downloads that uses an external QByteArray it doesn't own as a target.
*/
class ByteArraySink : public Sink
{
public:
ByteArraySink(QByteArray *output)
:m_output(output)
{
// nil
};
class ByteArraySink : public Sink {
public:
ByteArraySink(QByteArray* output) : m_output(output){};

virtual ~ByteArraySink()
{
// nil
}
virtual ~ByteArraySink() = default;

public:
JobStatus init(QNetworkRequest & request) override
public:
auto init(QNetworkRequest& request) -> Task::State override
{
if(!m_output)
return Task::State::Failed;

m_output->clear();
if(initAllValidators(request))
return Job_InProgress;
return Job_Failed;
if (initAllValidators(request))
return Task::State::Running;
return Task::State::Failed;
};

JobStatus write(QByteArray & data) override
auto write(QByteArray& data) -> Task::State override
{
if(!m_output)
return Task::State::Failed;

m_output->append(data);
if(writeAllValidators(data))
return Job_InProgress;
return Job_Failed;
if (writeAllValidators(data))
return Task::State::Running;
return Task::State::Failed;
}

JobStatus abort() override
auto abort() -> Task::State override
{
if(!m_output)
return Task::State::Failed;

m_output->clear();
failAllValidators();
return Job_Failed;
return Task::State::Failed;
}

JobStatus finalize(QNetworkReply &reply) override
auto finalize(QNetworkReply& reply) -> Task::State override
{
if(finalizeAllValidators(reply))
return Job_Finished;
return Job_Failed;
if (finalizeAllValidators(reply))
return Task::State::Succeeded;
return Task::State::Failed;
}

bool hasLocalData() override
{
return false;
}
auto hasLocalData() -> bool override { return false; }

private:
QByteArray * m_output;
private:
QByteArray* m_output;
};
}
} // namespace Net

@@ -30,7 +30,7 @@ namespace Net {

Download::Download() : NetAction()
{
m_status = Job_NotStarted;
m_state = State::Inactive;
}

Download::Ptr Download::makeCached(QUrl url, MetaEntryPtr entry, Options options)

@@ -68,29 +68,29 @@ void Download::addValidator(Validator* v)
m_sink->addValidator(v);
}

void Download::startImpl()
void Download::executeTask()
{
if (m_status == Job_Aborted) {
if (getState() == Task::State::AbortedByUser) {
qWarning() << "Attempt to start an aborted Download:" << m_url.toString();
emit aborted(m_index_within_job);
return;
}

QNetworkRequest request(m_url);
m_status = m_sink->init(request);
switch (m_status) {
case Job_Finished:
m_state = m_sink->init(request);
switch (m_state) {
case State::Succeeded:
emit succeeded(m_index_within_job);
qDebug() << "Download cache hit " << m_url.toString();
return;
case Job_InProgress:
case State::Running:
qDebug() << "Downloading " << m_url.toString();
break;
case Job_Failed_Proceed: // this is meaningless in this context. We do need a sink.
case Job_NotStarted:
case Job_Failed:
case State::Inactive:
case State::Failed:
emit failed(m_index_within_job);
return;
case Job_Aborted:
case State::AbortedByUser:
return;
}

@@ -111,8 +111,7 @@ void Download::startImpl()

void Download::downloadProgress(qint64 bytesReceived, qint64 bytesTotal)
{
m_total_progress = bytesTotal;
m_progress = bytesReceived;
setProgress(bytesReceived, bytesTotal);
emit netActionProgress(m_index_within_job, bytesReceived, bytesTotal);
}

@@ -120,17 +119,17 @@ void Download::downloadError(QNetworkReply::NetworkError error)
{
if (error == QNetworkReply::OperationCanceledError) {
qCritical() << "Aborted " << m_url.toString();
m_status = Job_Aborted;
m_state = State::AbortedByUser;
} else {
if (m_options & Option::AcceptLocalFiles) {
if (m_sink->hasLocalData()) {
m_status = Job_Failed_Proceed;
m_state = State::Succeeded;
return;
}
}
// error happened during download.
qCritical() << "Failed " << m_url.toString() << " with reason " << error;
m_status = Job_Failed;
m_state = State::Failed;
}
}

@@ -194,7 +193,8 @@ bool Download::handleRedirect()

m_url = QUrl(redirect.toString());
qDebug() << "Following redirect to " << m_url.toString();
start(m_network);
startAction(m_network);

return true;
}

@@ -207,19 +207,20 @@ void Download::downloadFinished()
}

// if the download failed before this point ...
if (m_status == Job_Failed_Proceed) {
if (m_state == State::Succeeded) // pretend to succeed so we continue processing :)
{
qDebug() << "Download failed but we are allowed to proceed:" << m_url.toString();
m_sink->abort();
m_reply.reset();
emit succeeded(m_index_within_job);
return;
} else if (m_status == Job_Failed) {
} else if (m_state == State::Failed) {
qDebug() << "Download failed in previous step:" << m_url.toString();
m_sink->abort();
m_reply.reset();
emit failed(m_index_within_job);
return;
} else if (m_status == Job_Aborted) {
} else if (m_state == State::AbortedByUser) {
qDebug() << "Download aborted in previous step:" << m_url.toString();
m_sink->abort();
m_reply.reset();

@@ -231,12 +232,12 @@ void Download::downloadFinished()
auto data = m_reply->readAll();
if (data.size()) {
qDebug() << "Writing extra" << data.size() << "bytes to" << m_target_path;
m_status = m_sink->write(data);
m_state = m_sink->write(data);
}

// otherwise, finalize the whole graph
m_status = m_sink->finalize(*m_reply.get());
if (m_status != Job_Finished) {
m_state = m_sink->finalize(*m_reply.get());
if (m_state != State::Succeeded) {
qDebug() << "Download failed to finalize:" << m_url.toString();
m_sink->abort();
m_reply.reset();

@@ -250,10 +251,10 @@ void Download::downloadFinished()

void Download::downloadReadyRead()
{
if (m_status == Job_InProgress) {
if (m_state == State::Running) {
auto data = m_reply->readAll();
m_status = m_sink->write(data);
if (m_status == Job_Failed) {
m_state = m_sink->write(data);
if (m_state == State::Failed) {
qCritical() << "Failed to process response chunk for " << m_target_path;
}
// qDebug() << "Download" << m_url.toString() << "gained" << data.size() << "bytes";

@@ -269,12 +270,7 @@ bool Net::Download::abort()
if (m_reply) {
m_reply->abort();
} else {
m_status = Job_Aborted;
m_state = State::AbortedByUser;
}
return true;
}

bool Net::Download::canAbort()
{
return true;
}

@@ -27,7 +27,7 @@ class Download : public NetAction
{
Q_OBJECT

public: /* types */
public:
typedef shared_qobject_ptr<class Download> Ptr;
enum class Option
{

@@ -36,7 +36,7 @@ public: /* types */
};
Q_DECLARE_FLAGS(Options, Option)

protected: /* con/des */
protected:
explicit Download();
public:
virtual ~Download(){};

@@ -44,16 +44,16 @@ public:
static Download::Ptr makeByteArray(QUrl url, QByteArray *output, Options options = Option::NoOptions);
static Download::Ptr makeFile(QUrl url, QString path, Options options = Option::NoOptions);

public: /* methods */
public:
QString getTargetFilepath()
{
return m_target_path;
}
void addValidator(Validator * v);
bool abort() override;
bool canAbort() override;
bool canAbort() const override { return true; };

private: /* methods */
private:
bool handleRedirect();

protected slots:

@@ -64,9 +64,9 @@ protected slots:
void downloadReadyRead() override;

public slots:
void startImpl() override;
void executeTask() override;

private: /* data */
private:
// FIXME: remove this, it has no business being here.
QString m_target_path;
std::unique_ptr<Sink> m_sink;

@@ -1,25 +1,15 @@
#include "FileSink.h"

#include <QFile>
#include <QFileInfo>

#include "FileSystem.h"

namespace Net {

FileSink::FileSink(QString filename)
:m_filename(filename)
{
// nil
}

FileSink::~FileSink()
{
// nil
}

JobStatus FileSink::init(QNetworkRequest& request)
Task::State FileSink::init(QNetworkRequest& request)
{
auto result = initCache(request);
if(result != Job_InProgress)
if(result != Task::State::Running)
{
return result;
}

@@ -27,27 +17,27 @@ JobStatus FileSink::init(QNetworkRequest& request)
if (!FS::ensureFilePathExists(m_filename))
{
qCritical() << "Could not create folder for " + m_filename;
return Job_Failed;
return Task::State::Failed;
}
wroteAnyData = false;
m_output_file.reset(new QSaveFile(m_filename));
if (!m_output_file->open(QIODevice::WriteOnly))
{
qCritical() << "Could not open " + m_filename + " for writing";
return Job_Failed;
return Task::State::Failed;
}

if(initAllValidators(request))
return Job_InProgress;
return Job_Failed;
return Task::State::Running;
return Task::State::Failed;
}

JobStatus FileSink::initCache(QNetworkRequest &)
Task::State FileSink::initCache(QNetworkRequest &)
{
return Job_InProgress;
return Task::State::Running;
}

JobStatus FileSink::write(QByteArray& data)
Task::State FileSink::write(QByteArray& data)
{
if (!writeAllValidators(data) || m_output_file->write(data) != data.size())
{

@@ -55,20 +45,20 @@ JobStatus FileSink::write(QByteArray& data)
m_output_file->cancelWriting();
m_output_file.reset();
wroteAnyData = false;
return Job_Failed;
return Task::State::Failed;
}
wroteAnyData = true;
return Job_InProgress;
return Task::State::Running;
}

JobStatus FileSink::abort()
Task::State FileSink::abort()
{
m_output_file->cancelWriting();
failAllValidators();
return Job_Failed;
return Task::State::Failed;
}

JobStatus FileSink::finalize(QNetworkReply& reply)
Task::State FileSink::finalize(QNetworkReply& reply)
{
bool gotFile = false;
QVariant statusCodeV = reply.attribute(QNetworkRequest::HttpStatusCodeAttribute);

@@ -86,13 +76,13 @@ JobStatus FileSink::finalize(QNetworkReply& reply)
// ask validators for data consistency
// we only do this for actual downloads, not 'your data is still the same' cache hits
if(!finalizeAllValidators(reply))
return Job_Failed;
return Task::State::Failed;
// nothing went wrong...
if (!m_output_file->commit())
{
qCritical() << "Failed to commit changes to " << m_filename;
m_output_file->cancelWriting();
return Job_Failed;
return Task::State::Failed;
}
}
// then get rid of the save file

@@ -101,9 +91,9 @@ JobStatus FileSink::finalize(QNetworkReply& reply)
return finalizeCache(reply);
}

JobStatus FileSink::finalizeCache(QNetworkReply &)
Task::State FileSink::finalizeCache(QNetworkReply &)
{
return Job_Finished;
return Task::State::Succeeded;
}

bool FileSink::hasLocalData()

@@ -1,28 +1,30 @@
#pragma once
#include "Sink.h"

#include <QSaveFile>

#include "Sink.h"

namespace Net {
class FileSink : public Sink
{
public: /* con/des */
FileSink(QString filename);
virtual ~FileSink();
class FileSink : public Sink {
public:
FileSink(QString filename) : m_filename(filename){};
virtual ~FileSink() = default;

public: /* methods */
JobStatus init(QNetworkRequest & request) override;
JobStatus write(QByteArray & data) override;
JobStatus abort() override;
JobStatus finalize(QNetworkReply & reply) override;
bool hasLocalData() override;
public:
auto init(QNetworkRequest& request) -> Task::State override;
auto write(QByteArray& data) -> Task::State override;
auto abort() -> Task::State override;
auto finalize(QNetworkReply& reply) -> Task::State override;

protected: /* methods */
virtual JobStatus initCache(QNetworkRequest &);
virtual JobStatus finalizeCache(QNetworkReply &reply);
auto hasLocalData() -> bool override;

protected: /* data */
protected:
virtual auto initCache(QNetworkRequest&) -> Task::State;
virtual auto finalizeCache(QNetworkReply& reply) -> Task::State;

protected:
QString m_filename;
bool wroteAnyData = false;
std::unique_ptr<QSaveFile> m_output_file;
};
}
} // namespace Net

@@ -12,17 +12,13 @@ MetaCacheSink::MetaCacheSink(MetaEntryPtr entry, ChecksumValidator * md5sum)
addValidator(md5sum);
}

MetaCacheSink::~MetaCacheSink()
{
// nil
}

JobStatus MetaCacheSink::initCache(QNetworkRequest& request)
Task::State MetaCacheSink::initCache(QNetworkRequest& request)
{
if (!m_entry->isStale())
{
return Job_Finished;
return Task::State::Succeeded;
}

// check if file exists, if it does, use its information for the request
QFile current(m_filename);
if(current.exists() && current.size() != 0)

@@ -36,25 +32,31 @@ JobStatus MetaCacheSink::initCache(QNetworkRequest& request)
request.setRawHeader(QString("If-None-Match").toLatin1(), m_entry->getETag().toLatin1());
}
}
return Job_InProgress;

return Task::State::Running;
}

JobStatus MetaCacheSink::finalizeCache(QNetworkReply & reply)
Task::State MetaCacheSink::finalizeCache(QNetworkReply & reply)
{
QFileInfo output_file_info(m_filename);

if(wroteAnyData)
{
m_entry->setMD5Sum(m_md5Node->hash().toHex().constData());
}

m_entry->setETag(reply.rawHeader("ETag").constData());

if (reply.hasRawHeader("Last-Modified"))
{
m_entry->setRemoteChangedTimestamp(reply.rawHeader("Last-Modified").constData());
}

m_entry->setLocalChangedTimestamp(output_file_info.lastModified().toUTC().toMSecsSinceEpoch());
m_entry->setStale(false);
APPLICATION->metacache()->updateEntry(m_entry);
return Job_Finished;

return Task::State::Succeeded;
}

bool MetaCacheSink::hasLocalData()

@@ -1,22 +1,23 @@
#pragma once
#include "FileSink.h"

#include "ChecksumValidator.h"
#include "FileSink.h"
#include "net/HttpMetaCache.h"

namespace Net {
class MetaCacheSink : public FileSink
{
public: /* con/des */
MetaCacheSink(MetaEntryPtr entry, ChecksumValidator * md5sum);
virtual ~MetaCacheSink();
bool hasLocalData() override;
class MetaCacheSink : public FileSink {
public:
MetaCacheSink(MetaEntryPtr entry, ChecksumValidator* md5sum);
virtual ~MetaCacheSink() = default;

protected: /* methods */
JobStatus initCache(QNetworkRequest & request) override;
JobStatus finalizeCache(QNetworkReply & reply) override;
auto hasLocalData() -> bool override;

private: /* data */
protected:
auto initCache(QNetworkRequest& request) -> Task::State override;
auto finalizeCache(QNetworkReply& reply) -> Task::State override;

private:
MetaEntryPtr m_entry;
ChecksumValidator * m_md5Node;
ChecksumValidator* m_md5Node;
};
}
} // namespace Net

@@ -1,4 +1,23 @@
/* Copyright 2013-2021 MultiMC Contributors
// SPDX-License-Identifier: GPL-3.0-only
/*
* PolyMC - Minecraft Launcher
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 3.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
*
* Copyright 2013-2021 MultiMC Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

@@ -15,94 +34,48 @@

#pragma once

#include <QObject>
#include <QUrl>
#include <memory>
#include <QNetworkReply>
#include <QObjectPtr.h>
#include <QUrl>

enum JobStatus
{
Job_NotStarted,
Job_InProgress,
Job_Finished,
Job_Failed,
Job_Aborted,
/*
* FIXME: @NUKE this confuses the task failing with us having a fallback in the form of local data. Clear up the confusion.
* Same could be true for aborted task - the presence of pre-existing result is a separate concern
*/
Job_Failed_Proceed
};
#include "QObjectPtr.h"
#include "tasks/Task.h"

class NetAction : public QObject
{
class NetAction : public Task {
Q_OBJECT
protected:
explicit NetAction() : QObject(nullptr) {};
protected:
explicit NetAction() : Task(nullptr) {};

public:
public:
using Ptr = shared_qobject_ptr<NetAction>;

virtual ~NetAction() {};
virtual ~NetAction() = default;

bool isRunning() const
{
return m_status == Job_InProgress;
}
bool isFinished() const
{
return m_status >= Job_Finished;
}
bool wasSuccessful() const
{
return m_status == Job_Finished || m_status == Job_Failed_Proceed;
}
QUrl url() { return m_url; }

qint64 totalProgress() const
{
return m_total_progress;
}
qint64 currentProgress() const
{
return m_progress;
}
virtual bool abort()
{
return false;
}
virtual bool canAbort()
{
return false;
}
QUrl url()
{
return m_url;
}

signals:
signals:
void started(int index);
void netActionProgress(int index, qint64 current, qint64 total);
void succeeded(int index);
void failed(int index);
void aborted(int index);

protected slots:
protected slots:
virtual void downloadProgress(qint64 bytesReceived, qint64 bytesTotal) = 0;
virtual void downloadError(QNetworkReply::NetworkError error) = 0;
virtual void downloadFinished() = 0;
virtual void downloadReadyRead() = 0;

public slots:
void start(shared_qobject_ptr<QNetworkAccessManager> network) {
public slots:
void startAction(shared_qobject_ptr<QNetworkAccessManager> network)
{
m_network = network;
startImpl();
executeTask();
}

protected:
virtual void startImpl() = 0;
protected:
void executeTask() override {};

public:
public:
shared_qobject_ptr<QNetworkAccessManager> m_network;

/// index within the parent job, FIXME: nuke

@@ -113,10 +86,4 @@ public:

/// source URL
QUrl m_url;

qint64 m_progress = 0;
qint64 m_total_progress = 1;

protected:
JobStatus m_status = Job_NotStarted;
};

@@ -1,4 +1,23 @@
/* Copyright 2013-2021 MultiMC Contributors
// SPDX-License-Identifier: GPL-3.0-only
/*
* PolyMC - Minecraft Launcher
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 3.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
*
* Copyright 2013-2021 MultiMC Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

@@ -16,64 +35,136 @@
#include "NetJob.h"
#include "Download.h"

#include <QDebug>
auto NetJob::addNetAction(NetAction::Ptr action) -> bool
{
action->m_index_within_job = m_downloads.size();
m_downloads.append(action);
part_info pi;
m_parts_progress.append(pi);

partProgress(m_parts_progress.count() - 1, action->getProgress(), action->getTotalProgress());

if (action->isRunning()) {
connect(action.get(), &NetAction::succeeded, this, &NetJob::partSucceeded);
connect(action.get(), &NetAction::failed, this, &NetJob::partFailed);
connect(action.get(), &NetAction::netActionProgress, this, &NetJob::partProgress);
} else {
m_todo.append(m_parts_progress.size() - 1);
}

return true;
}

auto NetJob::canAbort() const -> bool
{
bool canFullyAbort = true;

// can abort the downloads on the queue?
for (auto index : m_todo) {
auto part = m_downloads[index];
canFullyAbort &= part->canAbort();
}
// can abort the active downloads?
for (auto index : m_doing) {
auto part = m_downloads[index];
canFullyAbort &= part->canAbort();
}

return canFullyAbort;
}

void NetJob::executeTask()
{
// hack that delays early failures so they can be caught easier
QMetaObject::invokeMethod(this, "startMoreParts", Qt::QueuedConnection);
}

auto NetJob::getFailedFiles() -> QStringList
{
QStringList failed;
for (auto index : m_failed) {
failed.push_back(m_downloads[index]->url().toString());
}
failed.sort();
return failed;
}

auto NetJob::abort() -> bool
{
bool fullyAborted = true;

// fail all downloads on the queue
m_failed.unite(m_todo.toSet());
m_todo.clear();

// abort active downloads
auto toKill = m_doing.toList();
for (auto index : toKill) {
auto part = m_downloads[index];
fullyAborted &= part->abort();
}

return fullyAborted;
}

void NetJob::partSucceeded(int index)
{
// do progress. all slots are 1 in size at least
auto &slot = parts_progress[index];
auto& slot = m_parts_progress[index];
partProgress(index, slot.total_progress, slot.total_progress);

m_doing.remove(index);
m_done.insert(index);
downloads[index].get()->disconnect(this);
m_downloads[index].get()->disconnect(this);

startMoreParts();
}

void NetJob::partFailed(int index)
{
m_doing.remove(index);
auto &slot = parts_progress[index];
if (slot.failures == 3)
{

auto& slot = m_parts_progress[index];
// Can try 3 times before failing by definitive
if (slot.failures == 3) {
m_failed.insert(index);
}
else
{
} else {
slot.failures++;
m_todo.enqueue(index);
}
downloads[index].get()->disconnect(this);

m_downloads[index].get()->disconnect(this);

startMoreParts();
}

void NetJob::partAborted(int index)
{
m_aborted = true;

m_doing.remove(index);
m_failed.insert(index);
downloads[index].get()->disconnect(this);
m_downloads[index].get()->disconnect(this);

startMoreParts();
}

void NetJob::partProgress(int index, qint64 bytesReceived, qint64 bytesTotal)
{
auto &slot = parts_progress[index];
auto& slot = m_parts_progress[index];
slot.current_progress = bytesReceived;
slot.total_progress = bytesTotal;

int done = m_done.size();
int doing = m_doing.size();
int all = parts_progress.size();
int all = m_parts_progress.size();

qint64 bytesAll = 0;
qint64 bytesTotalAll = 0;
for(auto & partIdx: m_doing)
{
auto part = parts_progress[partIdx];
for (auto& partIdx : m_doing) {
auto part = m_parts_progress[partIdx];
// do not count parts with unknown/nonsensical total size
if(part.total_progress <= 0)
{
if (part.total_progress <= 0) {
continue;
}
bytesAll += part.current_progress;

@@ -85,134 +176,53 @@ void NetJob::partProgress(int index, qint64 bytesReceived, qint64 bytesTotal)
auto current_total = all * 1000;
// HACK: make sure it never jumps backwards.
// FAIL: This breaks if the size is not known (or is it something else?) and jumps to 1000, so if it is 1000 reset it to inprogress
if(m_current_progress == 1000) {
if (m_current_progress == 1000) {
m_current_progress = inprogress;
}
if(m_current_progress > current)
{
if (m_current_progress > current) {
current = m_current_progress;
}
m_current_progress = current;
setProgress(current, current_total);
}

void NetJob::executeTask()
{
// hack that delays early failures so they can be caught easier
QMetaObject::invokeMethod(this, "startMoreParts", Qt::QueuedConnection);
}

void NetJob::startMoreParts()
{
if(!isRunning())
{
// this actually makes sense. You can put running downloads into a NetJob and then not start it until much later.
if (!isRunning()) {
// this actually makes sense. You can put running m_downloads into a NetJob and then not start it until much later.
return;
}

// OK. We are actively processing tasks, proceed.
// Check for final conditions if there's nothing in the queue.
if(!m_todo.size())
{
if(!m_doing.size())
{
if(!m_failed.size())
{
if (!m_todo.size()) {
if (!m_doing.size()) {
if (!m_failed.size()) {
emitSucceeded();
}
else if(m_aborted)
{
} else if (m_aborted) {
emitAborted();
}
else
{
} else {
emitFailed(tr("Job '%1' failed to process:\n%2").arg(objectName()).arg(getFailedFiles().join("\n")));
}
}
return;
}
// There's work to do, try to start more parts.
while (m_doing.size() < 6)
{
if(!m_todo.size())

// There's work to do, try to start more parts, to a maximum of 6 concurrent ones.
while (m_doing.size() < 6) {
if (m_todo.size() == 0)
return;
int doThis = m_todo.dequeue();
m_doing.insert(doThis);
auto part = downloads[doThis];

auto part = m_downloads[doThis];

// connect signals :D
connect(part.get(), SIGNAL(succeeded(int)), SLOT(partSucceeded(int)));
connect(part.get(), SIGNAL(failed(int)), SLOT(partFailed(int)));
connect(part.get(), SIGNAL(aborted(int)), SLOT(partAborted(int)));
connect(part.get(), SIGNAL(netActionProgress(int, qint64, qint64)),
SLOT(partProgress(int, qint64, qint64)));
part->start(m_network);
connect(part.get(), &NetAction::succeeded, this, &NetJob::partSucceeded);
connect(part.get(), &NetAction::failed, this, &NetJob::partFailed);
connect(part.get(), &NetAction::aborted, this, &NetJob::partAborted);
connect(part.get(), &NetAction::netActionProgress, this, &NetJob::partProgress);

part->startAction(m_network);
}
}


QStringList NetJob::getFailedFiles()
{
QStringList failed;
for (auto index: m_failed)
{
failed.push_back(downloads[index]->url().toString());
}
failed.sort();
return failed;
}

bool NetJob::canAbort() const
{
bool canFullyAbort = true;
// can abort the waiting?
for(auto index: m_todo)
{
auto part = downloads[index];
canFullyAbort &= part->canAbort();
}
// can abort the active?
for(auto index: m_doing)
{
auto part = downloads[index];
canFullyAbort &= part->canAbort();
}
return canFullyAbort;
}

bool NetJob::abort()
{
bool fullyAborted = true;
// fail all waiting
m_failed.unite(m_todo.toSet());
m_todo.clear();
// abort active
auto toKill = m_doing.toList();
for(auto index: toKill)
{
auto part = downloads[index];
fullyAborted &= part->abort();
}
return fullyAborted;
}

bool NetJob::addNetAction(NetAction::Ptr action)
{
action->m_index_within_job = downloads.size();
downloads.append(action);
part_info pi;
parts_progress.append(pi);
partProgress(parts_progress.count() - 1, action->currentProgress(), action->totalProgress());

if(action->isRunning())
{
connect(action.get(), SIGNAL(succeeded(int)), SLOT(partSucceeded(int)));
connect(action.get(), SIGNAL(failed(int)), SLOT(partFailed(int)));
connect(action.get(), SIGNAL(netActionProgress(int, qint64, qint64)), SLOT(partProgress(int, qint64, qint64)));
}
else
{
m_todo.append(parts_progress.size() - 1);
}
return true;
}

NetJob::~NetJob() = default;

@@ -1,4 +1,23 @@
/* Copyright 2013-2021 MultiMC Contributors
// SPDX-License-Identifier: GPL-3.0-only
/*
* PolyMC - Minecraft Launcher
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 3.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
* This file incorporates work covered by the following copyright and
* permission notice:
*
* Copyright 2013-2021 MultiMC Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

@@ -14,75 +33,65 @@
*/

#pragma once

#include <QtNetwork>

#include <QObject>
#include "NetAction.h"
#include "Download.h"
#include "HttpMetaCache.h"
#include "tasks/Task.h"
#include "QObjectPtr.h"

class NetJob;
// Those are included so that they are also included by anyone using NetJob
#include "net/Download.h"
#include "net/HttpMetaCache.h"

class NetJob : public Task
{
class NetJob : public Task {
Q_OBJECT
public:

public:
using Ptr = shared_qobject_ptr<NetJob>;

explicit NetJob(QString job_name, shared_qobject_ptr<QNetworkAccessManager> network) : Task(), m_network(network)
{
setObjectName(job_name);
}
virtual ~NetJob();
virtual ~NetJob() = default;

bool addNetAction(NetAction::Ptr action);
void executeTask() override;

NetAction::Ptr operator[](int index)
{
return downloads[index];
}
const NetAction::Ptr at(const int index)
{
return downloads.at(index);
}
NetAction::Ptr first()
{
if (downloads.size())
return downloads[0];
return NetAction::Ptr();
}
int size() const
{
return downloads.size();
}
QStringList getFailedFiles();
auto canAbort() const -> bool override;

bool canAbort() const override;
auto addNetAction(NetAction::Ptr action) -> bool;

private slots:
auto operator[](int index) -> NetAction::Ptr { return m_downloads[index]; }
auto at(int index) -> const NetAction::Ptr { return m_downloads.at(index); }
auto size() const -> int { return m_downloads.size(); }
auto first() -> NetAction::Ptr { return m_downloads.size() != 0 ? m_downloads[0] : NetAction::Ptr{}; }

auto getFailedFiles() -> QStringList;

public slots:
// Qt can't handle auto at the start for some reason?
bool abort() override;

private slots:
void startMoreParts();

public slots:
virtual void executeTask() override;
virtual bool abort() override;

private slots:
void partProgress(int index, qint64 bytesReceived, qint64 bytesTotal);
void partSucceeded(int index);
void partFailed(int index);
void partAborted(int index);

private:
private:
shared_qobject_ptr<QNetworkAccessManager> m_network;

struct part_info
{
struct part_info {
qint64 current_progress = 0;
qint64 total_progress = 1;
int failures = 0;
};
QList<NetAction::Ptr> downloads;
QList<part_info> parts_progress;

QList<NetAction::Ptr> m_downloads;
QList<part_info> m_parts_progress;
QQueue<int> m_todo;
QSet<int> m_doing;
QSet<int> m_done;

@@ -5,33 +5,30 @@
#include "Validator.h"

namespace Net {
class Sink
{
public: /* con/des */
Sink() {};
virtual ~Sink() {};
class Sink {
public:
Sink() = default;
virtual ~Sink(){};

public: /* methods */
virtual JobStatus init(QNetworkRequest & request) = 0;
virtual JobStatus write(QByteArray & data) = 0;
virtual JobStatus abort() = 0;
virtual JobStatus finalize(QNetworkReply & reply) = 0;
public:
virtual Task::State init(QNetworkRequest& request) = 0;
virtual Task::State write(QByteArray& data) = 0;
virtual Task::State abort() = 0;
virtual Task::State finalize(QNetworkReply& reply) = 0;
virtual bool hasLocalData() = 0;

void addValidator(Validator * validator)
{
if(validator)
void addValidator(Validator* validator)
{
if (validator) {
validators.push_back(std::shared_ptr<Validator>(validator));
}
}

protected: /* methods */
bool finalizeAllValidators(QNetworkReply & reply)
protected: /* methods */
bool finalizeAllValidators(QNetworkReply& reply)
{
for(auto & validator: validators)
{
if(!validator->validate(reply))
for (auto& validator : validators) {
if (!validator->validate(reply))
return false;
}
return true;

@@ -39,32 +36,29 @@ protected: /* methods */
bool failAllValidators()
{
bool success = true;
for(auto & validator: validators)
{
for (auto& validator : validators) {
success &= validator->abort();
}
return success;
}
bool initAllValidators(QNetworkRequest & request)
bool initAllValidators(QNetworkRequest& request)
{
for(auto & validator: validators)
{
if(!validator->init(request))
for (auto& validator : validators) {
if (!validator->init(request))
return false;
}
return true;
}
bool writeAllValidators(QByteArray & data)
bool writeAllValidators(QByteArray& data)
{
for(auto & validator: validators)
{
if(!validator->write(data))
for (auto& validator : validators) {
if (!validator->write(data))
return false;
}
return true;
}

protected: /* data */
protected: /* data */
std::vector<std::shared_ptr<Validator>> validators;
};
}
} // namespace Net

@@ -13,12 +13,12 @@
ImgurAlbumCreation::ImgurAlbumCreation(QList<ScreenShot::Ptr> screenshots) : NetAction(), m_screenshots(screenshots)
{
m_url = BuildConfig.IMGUR_BASE_URL + "album.json";
m_status = Job_NotStarted;
m_state = State::Inactive;
}

void ImgurAlbumCreation::startImpl()
void ImgurAlbumCreation::executeTask()
{
m_status = Job_InProgress;
m_state = State::Running;
QNetworkRequest request(m_url);
request.setHeader(QNetworkRequest::UserAgentHeader, BuildConfig.USER_AGENT_UNCACHED);
request.setHeader(QNetworkRequest::ContentTypeHeader, "application/x-www-form-urlencoded");

@@ -43,11 +43,11 @@ void ImgurAlbumCreation::startImpl()
void ImgurAlbumCreation::downloadError(QNetworkReply::NetworkError error)
{
qDebug() << m_reply->errorString();
m_status = Job_Failed;
m_state = State::Failed;
}
void ImgurAlbumCreation::downloadFinished()
{
if (m_status != Job_Failed)
if (m_state != State::Failed)
{
QByteArray data = m_reply->readAll();
m_reply.reset();

@@ -68,7 +68,7 @@ void ImgurAlbumCreation::downloadFinished()
}
m_deleteHash = object.value("data").toObject().value("deletehash").toString();
m_id = object.value("data").toObject().value("id").toString();
m_status = Job_Finished;
m_state = State::Succeeded;
emit succeeded(m_index_within_job);
return;
}

@@ -82,7 +82,6 @@ void ImgurAlbumCreation::downloadFinished()
}
void ImgurAlbumCreation::downloadProgress(qint64 bytesReceived, qint64 bytesTotal)
{
m_total_progress = bytesTotal;
m_progress = bytesReceived;
setProgress(bytesReceived, bytesTotal);
emit netActionProgress(m_index_within_job, bytesReceived, bytesTotal);
}

@@ -24,16 +24,14 @@ public:

protected
slots:
virtual void downloadProgress(qint64 bytesReceived, qint64 bytesTotal);
virtual void downloadError(QNetworkReply::NetworkError error);
virtual void downloadFinished();
virtual void downloadReadyRead()
{
}
void downloadProgress(qint64 bytesReceived, qint64 bytesTotal) override;
void downloadError(QNetworkReply::NetworkError error) override;
void downloadFinished() override;
void downloadReadyRead() override {}

public
slots:
virtual void startImpl();
void executeTask() override;

private:
QList<ScreenShot::Ptr> m_screenshots;

@@ -13,13 +13,13 @@
ImgurUpload::ImgurUpload(ScreenShot::Ptr shot) : NetAction(), m_shot(shot)
{
m_url = BuildConfig.IMGUR_BASE_URL + "upload.json";
m_status = Job_NotStarted;
m_state = State::Inactive;
}

void ImgurUpload::startImpl()
void ImgurUpload::executeTask()
{
finished = false;
m_status = Job_InProgress;
m_state = Task::State::Running;
QNetworkRequest request(m_url);
request.setHeader(QNetworkRequest::UserAgentHeader, BuildConfig.USER_AGENT_UNCACHED);
request.setRawHeader("Authorization", QString("Client-ID %1").arg(BuildConfig.IMGUR_CLIENT_ID).toStdString().c_str());

@@ -63,7 +63,7 @@ void ImgurUpload::downloadError(QNetworkReply::NetworkError error)
qCritical() << "Double finished ImgurUpload!";
return;
}
m_status = Job_Failed;
m_state = Task::State::Failed;
finished = true;
m_reply.reset();
emit failed(m_index_within_job);

@@ -99,14 +99,13 @@ void ImgurUpload::downloadFinished()
m_shot->m_imgurId = object.value("data").toObject().value("id").toString();
m_shot->m_url = object.value("data").toObject().value("link").toString();
m_shot->m_imgurDeleteHash = object.value("data").toObject().value("deletehash").toString();
m_status = Job_Finished;
m_state = Task::State::Succeeded;
finished = true;
emit succeeded(m_index_within_job);
return;
}
void ImgurUpload::downloadProgress(qint64 bytesReceived, qint64 bytesTotal)
{
m_total_progress = bytesTotal;
m_progress = bytesReceived;
setProgress(bytesReceived, bytesTotal);
emit netActionProgress(m_index_within_job, bytesReceived, bytesTotal);
}

@@ -21,7 +21,7 @@ slots:

public
slots:
void startImpl() override;
void executeTask() override;

private:
ScreenShot::Ptr m_shot;

@@ -52,6 +52,8 @@ class Task : public QObject {

virtual bool canAbort() const { return false; }

auto getState() const -> State { return m_state; }

QString getStatus() { return m_status; }
virtual auto getStepStatus() const -> QString { return m_status; }

@@ -90,7 +92,7 @@ class Task : public QObject {
void setStatus(const QString& status);
void setProgress(qint64 current, qint64 total);

private:
protected:
State m_state = State::Inactive;
QStringList m_Warnings;
QString m_failReason = "";

@@ -667,7 +667,7 @@ void TranslationsModel::downloadTranslation(QString key)
auto dl = Net::Download::makeCached(QUrl(BuildConfig.TRANSLATIONS_BASE_URL + lang->file_name), entry);
auto rawHash = QByteArray::fromHex(lang->file_sha1.toLatin1());
dl->addValidator(new Net::ChecksumValidator(QCryptographicHash::Sha1, rawHash));
dl->m_total_progress = lang->file_size;
dl->setProgress(dl->getProgress(), lang->file_size);

d->m_dl_job = new NetJob("Translation for " + key, APPLICATION->network());
d->m_dl_job->addNetAction(dl);