Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

always save chats to disk, but save them as text by default #1495

Merged
merged 1 commit into from
Oct 12, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 10 additions & 2 deletions gpt4all-chat/chat.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -385,7 +385,11 @@ bool Chat::serialize(QDataStream &stream, int version) const
stream << m_modelInfo.filename();
if (version > 2)
stream << m_collections;
if (!m_llmodel->serialize(stream, version, true /*serializeKV*/))

const bool serializeKV = MySettings::globalInstance()->saveChatsContext();
if (version > 5)
stream << serializeKV;
if (!m_llmodel->serialize(stream, version, serializeKV))
return false;
if (!m_chatModel->serialize(stream, version))
return false;
Expand Down Expand Up @@ -413,7 +417,6 @@ bool Chat::deserialize(QDataStream &stream, int version)
if (!m_modelInfo.id().isEmpty())
emit modelInfoChanged();

bool deserializeKV = true; // make this a setting
bool discardKV = m_modelInfo.id().isEmpty();

// Prior to version 2 gptj models had a bug that fixed the kv_cache to F32 instead of F16 so
Expand All @@ -425,6 +428,11 @@ bool Chat::deserialize(QDataStream &stream, int version)
stream >> m_collections;
emit collectionListChanged(m_collections);
}

bool deserializeKV = true;
if (version > 5)
stream >> deserializeKV;

m_llmodel->setModelInfo(m_modelInfo);
if (!m_llmodel->deserialize(stream, version, deserializeKV, discardKV))
return false;
Expand Down
2 changes: 2 additions & 0 deletions gpt4all-chat/chat.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,8 @@ class Chat : public QObject
}
ChatModel *chatModel() { return m_chatModel; }

// True while this chat is still pristine: it carries the default
// (localized) "New Chat" title and its chat model holds no messages yet.
bool isNewChat() const
{
    const bool hasDefaultTitle = (m_name == tr("New Chat"));
    const bool hasNoMessages = (m_chatModel->count() == 0);
    return hasDefaultTitle && hasNoMessages;
}
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why do we need to check the name of the chat here?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

doubly weird to match on a localized string - does it not work without that?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We probably don't need to. But if someone took the time to rename the chat, I'd guess we want to serialize it anyway: the rename is work done by the user, so presumably they'd want it saved to disk.


Q_INVOKABLE void reset();
Q_INVOKABLE void processSystemPrompt();
Q_INVOKABLE bool isModelLoaded() const;
Expand Down
108 changes: 41 additions & 67 deletions gpt4all-chat/chatlistmodel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
#include <QDataStream>

#define CHAT_FORMAT_MAGIC 0xF5D553CC
#define CHAT_FORMAT_VERSION 5
#define CHAT_FORMAT_VERSION 6

class MyChatListModel: public ChatListModel { };
Q_GLOBAL_STATIC(MyChatListModel, chatListModelInstance)
Expand All @@ -17,11 +17,10 @@ ChatListModel *ChatListModel::globalInstance()
ChatListModel::ChatListModel()
: QAbstractListModel(nullptr)
, m_newChat(nullptr)
, m_dummyChat(nullptr)
, m_serverChat(nullptr)
, m_currentChat(nullptr)
{
addDummyChat();
addChat();

ChatsRestoreThread *thread = new ChatsRestoreThread;
connect(thread, &ChatsRestoreThread::chatRestored, this, &ChatListModel::restoreChat);
Expand Down Expand Up @@ -59,10 +58,7 @@ void ChatListModel::saveChats()
for (Chat *chat : m_chats) {
if (chat == m_serverChat)
continue;
const bool isChatGPT = chat->modelInfo().isChatGPT;
if (!isChatGPT && !MySettings::globalInstance()->saveChats())
continue;
if (isChatGPT && !MySettings::globalInstance()->saveChatGPTChats())
if (chat->isNewChat())
continue;
toSave.append(chat);
}
Expand Down Expand Up @@ -197,47 +193,47 @@ void ChatsRestoreThread::run()
});

for (FileInfo &f : files) {
QFile file(f.file);
bool success = file.open(QIODevice::ReadOnly);
if (!success) {
qWarning() << "ERROR: Couldn't restore chat from file:" << file.fileName();
QFile file(f.file);
bool success = file.open(QIODevice::ReadOnly);
if (!success) {
qWarning() << "ERROR: Couldn't restore chat from file:" << file.fileName();
continue;
}
QDataStream in(&file);

qint32 version = 0;
if (!f.oldFile) {
// Read and check the header
quint32 magic;
in >> magic;
if (magic != CHAT_FORMAT_MAGIC) {
qWarning() << "ERROR: Chat file has bad magic:" << file.fileName();
continue;
}
QDataStream in(&file);

qint32 version = 0;
if (!f.oldFile) {
// Read and check the header
quint32 magic;
in >> magic;
if (magic != CHAT_FORMAT_MAGIC) {
qWarning() << "ERROR: Chat file has bad magic:" << file.fileName();
continue;
}

// Read the version
in >> version;
if (version < 1) {
qWarning() << "ERROR: Chat file has non supported version:" << file.fileName();
continue;
}

if (version <= 1)
in.setVersion(QDataStream::Qt_6_2);
// Read the version
in >> version;
if (version < 1) {
qWarning() << "ERROR: Chat file has non supported version:" << file.fileName();
continue;
}

qDebug() << "deserializing chat" << f.file;
if (version <= 1)
in.setVersion(QDataStream::Qt_6_2);
}

Chat *chat = new Chat;
chat->moveToThread(qApp->thread());
if (!chat->deserialize(in, version)) {
qWarning() << "ERROR: Couldn't deserialize chat from file:" << file.fileName();
} else {
emit chatRestored(chat);
}
if (f.oldFile)
file.remove(); // No longer storing in this directory
file.close();
qDebug() << "deserializing chat" << f.file;

Chat *chat = new Chat;
chat->moveToThread(qApp->thread());
if (!chat->deserialize(in, version)) {
qWarning() << "ERROR: Couldn't deserialize chat from file:" << file.fileName();
} else {
emit chatRestored(chat);
}
if (f.oldFile)
file.remove(); // No longer storing in this directory
file.close();
}

qint64 elapsedTime = timer.elapsed();
Expand All @@ -249,35 +245,13 @@ void ChatListModel::restoreChat(Chat *chat)
chat->setParent(this);
connect(chat, &Chat::nameChanged, this, &ChatListModel::nameChanged);

if (m_dummyChat) {
beginResetModel();
m_chats = QList<Chat*>({chat});
setCurrentChat(chat);
delete m_dummyChat;
m_dummyChat = nullptr;
endResetModel();
} else {
beginInsertRows(QModelIndex(), m_chats.size(), m_chats.size());
m_chats.append(chat);
endInsertRows();
}
beginInsertRows(QModelIndex(), m_chats.size(), m_chats.size());
m_chats.append(chat);
endInsertRows();
}

void ChatListModel::chatsRestoredFinished()
{
if (m_dummyChat) {
beginResetModel();
Chat *dummy = m_dummyChat;
m_dummyChat = nullptr;
m_chats.clear();
addChat();
delete dummy;
endResetModel();
}

if (m_chats.isEmpty())
addChat();

addServerChat();
}

Expand Down
15 changes: 1 addition & 14 deletions gpt4all-chat/chatlistmodel.h
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ class ChatListModel : public QAbstractListModel
Q_INVOKABLE void addChat()
{
// Don't add a new chat if we already have one
if (m_newChat || m_dummyChat)
if (m_newChat)
return;

// Create a new chat pointer and connect it to determine when it is populated
Expand All @@ -101,18 +101,6 @@ class ChatListModel : public QAbstractListModel
setCurrentChat(m_newChat);
}

// Insert a placeholder chat at the front of the model and make it current.
// NOTE(review): presumably shown while saved chats are being restored on a
// background thread, so the UI has something to display — TODO confirm
// against the restore path that later replaces/deletes m_dummyChat.
Q_INVOKABLE void addDummyChat()
{
// Create a new dummy chat pointer and don't connect it
m_dummyChat = new Chat(this);
// Bracket the list mutation with begin/endInsertRows so attached views
// are notified; the prepend MUST happen between these two calls.
beginInsertRows(QModelIndex(), 0, 0);
m_chats.prepend(m_dummyChat);
endInsertRows();
emit countChanged();
// Select the dummy directly (no setCurrentChat call) and announce it.
m_currentChat = m_dummyChat;
emit currentChatChanged();
}

Q_INVOKABLE void addServerChat()
{
// Create a new dummy chat pointer and don't connect it
Expand Down Expand Up @@ -252,7 +240,6 @@ private Q_SLOTS:

private:
Chat* m_newChat;
Chat* m_dummyChat;
Chat* m_serverChat;
Chat* m_currentChat;
QList<Chat*> m_chats;
Expand Down
36 changes: 8 additions & 28 deletions gpt4all-chat/mysettings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,7 @@
#include <QUrl>

static int default_threadCount = std::min(4, (int32_t) std::thread::hardware_concurrency());
static bool default_saveChats = false;
static bool default_saveChatGPTChats = true;
static bool default_saveChatsContext = false;
static bool default_serverChat = false;
static QString default_userDefaultModel = "Application default";
static bool default_forceMetal = false;
Expand Down Expand Up @@ -103,8 +102,7 @@ void MySettings::restoreApplicationDefaults()
setFontSize(default_fontSize);
setDevice(default_device);
setThreadCount(default_threadCount);
setSaveChats(default_saveChats);
setSaveChatGPTChats(default_saveChatGPTChats);
setSaveChatsContext(default_saveChatsContext);
setServerChat(default_serverChat);
setModelPath(defaultLocalModelsPath());
setUserDefaultModel(default_userDefaultModel);
Expand Down Expand Up @@ -397,40 +395,22 @@ void MySettings::setThreadCount(int c)
emit threadCountChanged();
}

bool MySettings::saveChats() const
bool MySettings::saveChatsContext() const
{
QSettings setting;
setting.sync();
return setting.value("saveChats", default_saveChats).toBool();
return setting.value("saveChatsContext", default_saveChatsContext).toBool();
}

void MySettings::setSaveChats(bool b)
void MySettings::setSaveChatsContext(bool b)
{
if (saveChats() == b)
if (saveChatsContext() == b)
return;

QSettings setting;
setting.setValue("saveChats", b);
setting.setValue("saveChatsContext", b);
setting.sync();
emit saveChatsChanged();
}

bool MySettings::saveChatGPTChats() const
{
QSettings setting;
setting.sync();
return setting.value("saveChatGPTChats", default_saveChatGPTChats).toBool();
}

void MySettings::setSaveChatGPTChats(bool b)
{
if (saveChatGPTChats() == b)
return;

QSettings setting;
setting.setValue("saveChatGPTChats", b);
setting.sync();
emit saveChatGPTChatsChanged();
emit saveChatsContextChanged();
}

bool MySettings::serverChat() const
Expand Down
12 changes: 4 additions & 8 deletions gpt4all-chat/mysettings.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,7 @@ class MySettings : public QObject
{
Q_OBJECT
Q_PROPERTY(int threadCount READ threadCount WRITE setThreadCount NOTIFY threadCountChanged)
Q_PROPERTY(bool saveChats READ saveChats WRITE setSaveChats NOTIFY saveChatsChanged)
Q_PROPERTY(bool saveChatGPTChats READ saveChatGPTChats WRITE setSaveChatGPTChats NOTIFY saveChatGPTChatsChanged)
Q_PROPERTY(bool saveChatsContext READ saveChatsContext WRITE setSaveChatsContext NOTIFY saveChatsContextChanged)
Q_PROPERTY(bool serverChat READ serverChat WRITE setServerChat NOTIFY serverChatChanged)
Q_PROPERTY(QString modelPath READ modelPath WRITE setModelPath NOTIFY modelPathChanged)
Q_PROPERTY(QString userDefaultModel READ userDefaultModel WRITE setUserDefaultModel NOTIFY userDefaultModelChanged)
Expand Down Expand Up @@ -64,10 +63,8 @@ class MySettings : public QObject
// Application settings
int threadCount() const;
void setThreadCount(int c);
bool saveChats() const;
void setSaveChats(bool b);
bool saveChatGPTChats() const;
void setSaveChatGPTChats(bool b);
bool saveChatsContext() const;
void setSaveChatsContext(bool b);
bool serverChat() const;
void setServerChat(bool b);
QString modelPath() const;
Expand Down Expand Up @@ -122,8 +119,7 @@ class MySettings : public QObject
void promptTemplateChanged(const ModelInfo &model);
void systemPromptChanged(const ModelInfo &model);
void threadCountChanged();
void saveChatsChanged();
void saveChatGPTChatsChanged();
void saveChatsContextChanged();
void serverChatChanged();
void modelPathChanged();
void userDefaultModelChanged();
Expand Down
10 changes: 0 additions & 10 deletions gpt4all-chat/network.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -317,16 +317,6 @@ void Network::sendNetworkToggled(bool isActive)
sendMixpanelEvent("network_toggled", QVector<KeyValue>{kv});
}

void Network::sendSaveChatsToggled(bool isActive)
{
if (!MySettings::globalInstance()->networkUsageStatsActive())
return;
KeyValue kv;
kv.key = QString("isActive");
kv.value = QJsonValue(isActive);
sendMixpanelEvent("savechats_toggled", QVector<KeyValue>{kv});
}

void Network::sendNewChat(int count)
{
if (!MySettings::globalInstance()->networkUsageStatsActive())
Expand Down
1 change: 0 additions & 1 deletion gpt4all-chat/network.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,6 @@ public Q_SLOTS:
void sendDownloadFinished(const QString &model, bool success);
Q_INVOKABLE void sendSettingsDialog();
Q_INVOKABLE void sendNetworkToggled(bool active);
Q_INVOKABLE void sendSaveChatsToggled(bool active);
Q_INVOKABLE void sendNewChat(int count);
Q_INVOKABLE void sendRemoveChat();
Q_INVOKABLE void sendRenameChat();
Expand Down
Loading