diff --git a/src/CallManager.cpp b/src/CallManager.cpp
index 4678131..4589080 100644
--- a/src/CallManager.cpp
+++ b/src/CallManager.cpp
@@ -264,6 +264,7 @@ CallManager::handleEvent(const RoomEvent<CallInvite> &callInviteEvent)
                   caller.display_name,
                   QString::fromStdString(roomInfo.name),
                   QString::fromStdString(roomInfo.avatar_url),
+                  settings_,
                   MainWindow::instance());
                 connect(dialog, &dialogs::AcceptCall::accept, this, [this, callInviteEvent]() {
                         MainWindow::instance()->hideOverlay();
diff --git a/src/ChatPage.cpp b/src/ChatPage.cpp
index 589aa3c..84a5e4d 100644
--- a/src/ChatPage.cpp
+++ b/src/ChatPage.cpp
@@ -474,6 +474,7 @@ ChatPage::ChatPage(QSharedPointer<UserSettings> userSettings, QWidget *parent)
                   callee.display_name,
                   QString::fromStdString(roomInfo.name),
                   QString::fromStdString(roomInfo.avatar_url),
+                  userSettings_,
                   MainWindow::instance());
                 connect(dialog, &dialogs::PlaceCall::voice, this, [this]() {
                         callManager_.sendInvite(current_room_);
diff --git a/src/UserSettingsPage.cpp b/src/UserSettingsPage.cpp
index e67da99..ab5658a 100644
--- a/src/UserSettingsPage.cpp
+++ b/src/UserSettingsPage.cpp
@@ -77,7 +77,8 @@ UserSettings::load()
         presence_ =
           settings.value("user/presence", QVariant::fromValue(Presence::AutomaticPresence))
             .value<Presence>();
-        useStunServer_ = settings.value("user/use_stun_server", false).toBool();
+        useStunServer_      = settings.value("user/use_stun_server", false).toBool();
+        defaultAudioSource_ = settings.value("user/default_audio_source", QString()).toString();
 
         applyTheme();
 }
@@ -290,6 +291,16 @@ UserSettings::setUseStunServer(bool useStunServer)
         save();
 }
 
+void
+UserSettings::setDefaultAudioSource(const QString &defaultAudioSource)
+{
+        if (defaultAudioSource == defaultAudioSource_)
+                return;
+        defaultAudioSource_ = defaultAudioSource;
+        emit defaultAudioSourceChanged(defaultAudioSource);
+        save();
+}
+
 void
 UserSettings::applyTheme()
 {
@@ -376,6 +387,7 @@ UserSettings::save()
         settings.setValue("emoji_font_family", emojiFont_);
         settings.setValue("presence", QVariant::fromValue(presence_));
         settings.setValue("use_stun_server", useStunServer_);
+        settings.setValue("default_audio_source", defaultAudioSource_);
 
         settings.endGroup();
 
@@ -501,6 +513,9 @@ UserSettingsPage::UserSettingsPage(QSharedPointer<UserSettings> settings, QWidge
         callsLabel->setFont(font);
         useStunServer_ = new Toggle{this};
 
+        defaultAudioSourceValue_ = new QLabel(this);
+        defaultAudioSourceValue_->setFont(font);
+
         auto encryptionLabel_ = new QLabel{tr("ENCRYPTION"), this};
         encryptionLabel_->setFixedHeight(encryptionLabel_->minimumHeight() + LayoutTopMargin);
         encryptionLabel_->setAlignment(Qt::AlignBottom);
@@ -634,9 +649,10 @@ UserSettingsPage::UserSettingsPage(QSharedPointer<UserSettings> settings, QWidge
 
         formLayout_->addRow(callsLabel);
         formLayout_->addRow(new HorizontalLine{this});
-        boxWrap(tr("Allow Fallback Call Assist Server"),
+        boxWrap(tr("Allow fallback call assist server"),
                 useStunServer_,
                 tr("Will use turn.matrix.org as assist when your home server does not offer one."));
+        boxWrap(tr("Default audio source device"), defaultAudioSourceValue_);
 
         formLayout_->addRow(encryptionLabel_);
         formLayout_->addRow(new HorizontalLine{this});
@@ -797,6 +813,7 @@ UserSettingsPage::showEvent(QShowEvent *)
         deviceIdValue_->setText(QString::fromStdString(http::client()->device_id()));
         timelineMaxWidthSpin_->setValue(settings_->timelineMaxWidth());
         useStunServer_->setState(!settings_->useStunServer());
+        defaultAudioSourceValue_->setText(settings_->defaultAudioSource());
 
         deviceFingerprintValue_->setText(
           utils::humanReadableFingerprint(olm::client()->identity_keys().ed25519));
diff --git a/src/UserSettingsPage.h b/src/UserSettingsPage.h
index 567a752..52ff946 100644
--- a/src/UserSettingsPage.h
+++ b/src/UserSettingsPage.h
@@ -73,6 +73,8 @@ class UserSettings : public QObject
         Q_PROPERTY(Presence presence READ presence WRITE setPresence NOTIFY presenceChanged)
         Q_PROPERTY(
           bool useStunServer READ useStunServer WRITE setUseStunServer NOTIFY useStunServerChanged)
+        Q_PROPERTY(QString defaultAudioSource READ defaultAudioSource WRITE setDefaultAudioSource
+                     NOTIFY defaultAudioSourceChanged)
 
 public:
         UserSettings();
@@ -110,6 +112,7 @@ public:
         void setDecryptSidebar(bool state);
         void setPresence(Presence state);
         void setUseStunServer(bool state);
+        void setDefaultAudioSource(const QString &deviceName);
 
         QString theme() const { return !theme_.isEmpty() ? theme_ : defaultTheme_; }
         bool messageHoverHighlight() const { return messageHoverHighlight_; }
@@ -136,6 +139,7 @@ public:
         QString emojiFont() const { return emojiFont_; }
         Presence presence() const { return presence_; }
         bool useStunServer() const { return useStunServer_; }
+        QString defaultAudioSource() const { return defaultAudioSource_; }
 
 signals:
         void groupViewStateChanged(bool state);
@@ -159,6 +163,7 @@ signals:
         void emojiFontChanged(QString state);
         void presenceChanged(Presence state);
         void useStunServerChanged(bool state);
+        void defaultAudioSourceChanged(const QString &deviceName);
 
 private:
         // Default to system theme if QT_QPA_PLATFORMTHEME var is set.
@@ -187,6 +192,7 @@ private:
         QString emojiFont_;
         Presence presence_;
         bool useStunServer_;
+        QString defaultAudioSource_;
 };
 
 class HorizontalLine : public QFrame
@@ -244,6 +250,7 @@ private:
         Toggle *decryptSidebar_;
         QLabel *deviceFingerprintValue_;
        QLabel *deviceIdValue_;
+        QLabel *defaultAudioSourceValue_;
 
         QComboBox *themeCombo_;
         QComboBox *scaleFactorCombo_;
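The UserSettings changes above follow the file's existing persistence pattern: a private member, a setter that compares, assigns, emits the NOTIFY signal, and persists, plus a QSettings round-trip under the "user" group. A minimal standalone sketch of that pattern (the Settings class name and scaffolding are illustrative; only the "user/default_audio_source" key comes from the patch):

    #include <QObject>
    #include <QSettings>
    #include <QString>

    // Illustrative reduction of the UserSettings pattern; not the full class.
    class Settings : public QObject
    {
            Q_OBJECT
            Q_PROPERTY(QString defaultAudioSource READ defaultAudioSource
                         WRITE setDefaultAudioSource NOTIFY defaultAudioSourceChanged)
    public:
            QString defaultAudioSource() const { return defaultAudioSource_; }

            void setDefaultAudioSource(const QString &device)
            {
                    if (device == defaultAudioSource_)
                            return; // no redundant disk writes or signals
                    defaultAudioSource_ = device;
                    emit defaultAudioSourceChanged(device);
                    QSettings settings;
                    settings.setValue("user/default_audio_source", defaultAudioSource_);
            }

            void load()
            {
                    QSettings settings;
                    defaultAudioSource_ =
                      settings.value("user/default_audio_source", QString()).toString();
            }

    signals:
            void defaultAudioSourceChanged(const QString &device);

    private:
            QString defaultAudioSource_;
    };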
diff --git a/src/WebRTCSession.cpp b/src/WebRTCSession.cpp
index 07dfaac..5638c60 100644
--- a/src/WebRTCSession.cpp
+++ b/src/WebRTCSession.cpp
@@ -487,23 +487,74 @@ WebRTCSession::startPipeline(int opusPayloadType)
         return true;
 }
 
-#define RTP_CAPS_OPUS "application/x-rtp,media=audio,encoding-name=OPUS,payload="
-
 bool
 WebRTCSession::createPipeline(int opusPayloadType)
 {
-        std::string pipeline("webrtcbin bundle-policy=max-bundle name=webrtcbin "
-                             "autoaudiosrc ! volume name=srclevel ! audioconvert ! "
-                             "audioresample ! queue ! opusenc ! rtpopuspay ! "
-                             "queue ! " RTP_CAPS_OPUS +
-                             std::to_string(opusPayloadType) + " ! webrtcbin.");
+        int nSources = audioSources_ ? g_list_length(audioSources_) : 0;
+        if (nSources == 0) {
+                nhlog::ui()->error("WebRTC: no audio sources");
+                return false;
+        }
 
-        webrtc_       = nullptr;
-        GError *error = nullptr;
-        pipe_         = gst_parse_launch(pipeline.c_str(), &error);
-        if (error) {
-                nhlog::ui()->error("WebRTC: failed to parse pipeline: {}", error->message);
-                g_error_free(error);
+        if (audioSourceIndex_ < 0 || audioSourceIndex_ >= nSources) {
+                nhlog::ui()->error("WebRTC: invalid audio source index");
+                return false;
+        }
+
+        GstElement *source = gst_device_create_element(
+          GST_DEVICE_CAST(g_list_nth_data(audioSources_, audioSourceIndex_)), nullptr);
+        GstElement *volume     = gst_element_factory_make("volume", "srclevel");
+        GstElement *convert    = gst_element_factory_make("audioconvert", nullptr);
+        GstElement *resample   = gst_element_factory_make("audioresample", nullptr);
+        GstElement *queue1     = gst_element_factory_make("queue", nullptr);
+        GstElement *opusenc    = gst_element_factory_make("opusenc", nullptr);
+        GstElement *rtp        = gst_element_factory_make("rtpopuspay", nullptr);
+        GstElement *queue2     = gst_element_factory_make("queue", nullptr);
+        GstElement *capsfilter = gst_element_factory_make("capsfilter", nullptr);
+
+        GstCaps *rtpcaps = gst_caps_new_simple("application/x-rtp",
+                                               "media",
+                                               G_TYPE_STRING,
+                                               "audio",
+                                               "encoding-name",
+                                               G_TYPE_STRING,
+                                               "OPUS",
+                                               "payload",
+                                               G_TYPE_INT,
+                                               opusPayloadType,
+                                               nullptr);
+        g_object_set(capsfilter, "caps", rtpcaps, nullptr);
+        gst_caps_unref(rtpcaps);
+
+        GstElement *webrtcbin = gst_element_factory_make("webrtcbin", "webrtcbin");
+        g_object_set(webrtcbin, "bundle-policy", GST_WEBRTC_BUNDLE_POLICY_MAX_BUNDLE, nullptr);
+
+        pipe_ = gst_pipeline_new(nullptr);
+        gst_bin_add_many(GST_BIN(pipe_),
+                         source,
+                         volume,
+                         convert,
+                         resample,
+                         queue1,
+                         opusenc,
+                         rtp,
+                         queue2,
+                         capsfilter,
+                         webrtcbin,
+                         nullptr);
+
+        if (!gst_element_link_many(source,
+                                   volume,
+                                   convert,
+                                   resample,
+                                   queue1,
+                                   opusenc,
+                                   rtp,
+                                   queue2,
+                                   capsfilter,
+                                   webrtcbin,
+                                   nullptr)) {
+                nhlog::ui()->error("WebRTC: failed to link pipeline elements");
                 end();
                 return false;
         }
@@ -541,3 +592,42 @@
         if (state_ != State::DISCONNECTED)
                 emit stateChanged(State::DISCONNECTED);
 }
+
+void
+WebRTCSession::refreshDevices()
+{
+        if (!initialised_)
+                return;
+
+        static GstDeviceMonitor *monitor = nullptr;
+        if (!monitor) {
+                monitor = gst_device_monitor_new();
+                GstCaps *caps = gst_caps_new_empty_simple("audio/x-raw");
+                gst_device_monitor_add_filter(monitor, "Audio/Source", caps);
+                gst_caps_unref(caps);
+        }
+        g_list_free_full(audioSources_, g_object_unref);
+        audioSources_ = gst_device_monitor_get_devices(monitor);
+}
+
+std::vector<std::string>
+WebRTCSession::getAudioSourceNames(const std::string &defaultDevice)
+{
+        if (!initialised_)
+                return {};
+
+        refreshDevices();
+        std::vector<std::string> ret;
+        ret.reserve(g_list_length(audioSources_));
+        for (GList *l = audioSources_; l != nullptr; l = l->next) {
+                gchar *name = gst_device_get_display_name(GST_DEVICE_CAST(l->data));
+                ret.emplace_back(name);
+                g_free(name);
+                if (ret.back() == defaultDevice) {
+                        // move default device to top of the list
+                        std::swap(audioSources_->data, l->data);
+                        std::swap(ret.front(), ret.back());
+                }
+        }
+        return ret;
+}
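Context for the createPipeline() rewrite: the old code handed gst_parse_launch() a textual pipeline whose source was autoaudiosrc, which always resolves to a system default and cannot be pointed at a specific capture device. Building the graph element-by-element lets the source be produced by gst_device_create_element() for the GstDevice the user selected, at the cost of the manual add/link boilerplate above. A self-contained sketch of that enumerate-then-capture flow using standard GStreamer 1.x API (the fakesink tail and one-second run are illustrative only; error handling trimmed):

    #include <gst/gst.h>

    // Enumerate audio capture devices, then build a trivial pipeline around the
    // first one: device-source ! audioconvert ! fakesink.
    int
    main(int argc, char *argv[])
    {
            gst_init(&argc, &argv);

            GstDeviceMonitor *monitor = gst_device_monitor_new();
            GstCaps *caps = gst_caps_new_empty_simple("audio/x-raw");
            gst_device_monitor_add_filter(monitor, "Audio/Source", caps);
            gst_caps_unref(caps);

            // get_devices() probes the hardware even without starting the
            // monitor, which is also what the patch relies on.
            GList *devices = gst_device_monitor_get_devices(monitor);
            for (GList *l = devices; l != nullptr; l = l->next) {
                    gchar *name = gst_device_get_display_name(GST_DEVICE(l->data));
                    g_print("found: %s\n", name);
                    g_free(name);
            }
            if (!devices)
                    return 1;

            // Turn the chosen device (here: the first) into a source element.
            GstElement *source =
              gst_device_create_element(GST_DEVICE(devices->data), nullptr);
            GstElement *convert = gst_element_factory_make("audioconvert", nullptr);
            GstElement *sink    = gst_element_factory_make("fakesink", nullptr);

            GstElement *pipe = gst_pipeline_new(nullptr);
            gst_bin_add_many(GST_BIN(pipe), source, convert, sink, nullptr);
            gst_element_link_many(source, convert, sink, nullptr);

            gst_element_set_state(pipe, GST_STATE_PLAYING);
            g_usleep(G_USEC_PER_SEC); // capture for one second
            gst_element_set_state(pipe, GST_STATE_NULL);

            gst_object_unref(pipe);
            g_list_free_full(devices, g_object_unref);
            gst_object_unref(monitor);
            return 0;
    }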
diff --git a/src/WebRTCSession.h b/src/WebRTCSession.h
index 6b54f37..56d76fa 100644
--- a/src/WebRTCSession.h
+++ b/src/WebRTCSession.h
@@ -7,6 +7,7 @@
 
 #include "mtx/events/voip.hpp"
 
+typedef struct _GList GList;
 typedef struct _GstElement GstElement;
 
 class WebRTCSession : public QObject
@@ -46,6 +47,9 @@ public:
         void setStunServer(const std::string &stunServer) { stunServer_ = stunServer; }
         void setTurnServers(const std::vector<std::string> &uris) { turnServers_ = uris; }
 
+        std::vector<std::string> getAudioSourceNames(const std::string &defaultDevice);
+        void setAudioSource(int audioDeviceIndex) { audioSourceIndex_ = audioDeviceIndex; }
+
 signals:
         void offerCreated(const std::string &sdp,
                           const std::vector<mtx::events::msg::CallCandidates::Candidate> &);
@@ -66,9 +70,12 @@ private:
         GstElement *webrtc_ = nullptr;
         std::string stunServer_;
         std::vector<std::string> turnServers_;
+        GList *audioSources_ = nullptr;
+        int audioSourceIndex_ = -1;
 
         bool startPipeline(int opusPayloadType);
         bool createPipeline(int opusPayloadType);
+        void refreshDevices();
 
 public:
         WebRTCSession(WebRTCSession const &) = delete;
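A note on the new `typedef struct _GList GList;` line: GLib itself declares GList as a typedef of struct _GList, so repeating that typedef is a legal redeclaration that lets this header hold GList* members without pulling GLib/GStreamer headers into every translation unit that includes WebRTCSession.h. The same trick was already in place for GstElement. In miniature (file and class names hypothetical):

    // widget.h -- no GLib/GStreamer includes needed for pointer members.
    typedef struct _GList GList;           // matches GLib's own declaration
    typedef struct _GstElement GstElement;

    class AudioWidget
    {
    public:
            void refresh(); // defined in widget.cpp, which includes <gst/gst.h>

    private:
            GList *sources_ = nullptr;     // pointers to incomplete types are fine
            GstElement *pipeline_ = nullptr;
    };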
diff --git a/src/dialogs/AcceptCall.cpp b/src/dialogs/AcceptCall.cpp
index fd6565e..be1eb0c 100644
--- a/src/dialogs/AcceptCall.cpp
+++ b/src/dialogs/AcceptCall.cpp
@@ -1,11 +1,14 @@
+#include <QComboBox>
 #include <QLabel>
-#include <QString>
 #include <QPushButton>
 #include <QVBoxLayout>
 
+#include "ChatPage.h"
 #include "Config.h"
+#include "UserSettingsPage.h"
 #include "Utils.h"
+#include "WebRTCSession.h"
 #include "dialogs/AcceptCall.h"
 #include "ui/Avatar.h"
 
 namespace dialogs {
@@ -15,9 +18,25 @@ AcceptCall::AcceptCall(const QString &caller,
                        const QString &displayName,
                        const QString &roomName,
                        const QString &avatarUrl,
+                       QSharedPointer<UserSettings> settings,
                        QWidget *parent)
   : QWidget(parent)
 {
+        std::string errorMessage;
+        if (!WebRTCSession::instance().init(&errorMessage)) {
+                emit ChatPage::instance()->showNotification(QString::fromStdString(errorMessage));
+                emit close();
+                return;
+        }
+        audioDevices_ = WebRTCSession::instance().getAudioSourceNames(
+          settings->defaultAudioSource().toStdString());
+        if (audioDevices_.empty()) {
+                emit ChatPage::instance()->showNotification(
+                  "Incoming call: No audio sources found.");
+                emit close();
+                return;
+        }
+
         setAutoFillBackground(true);
         setWindowFlags(Qt::Tool | Qt::WindowStaysOnTopHint);
         setWindowModality(Qt::WindowModal);
@@ -55,7 +74,7 @@ AcceptCall::AcceptCall(const QString &caller,
         else
                 avatar->setLetter(utils::firstChar(roomName));
 
-        const int iconSize = 24;
+        const int iconSize = 22;
         QLabel *callTypeIndicator = new QLabel(this);
         callTypeIndicator->setPixmap(
           QIcon(":/icons/icons/ui/place-call.png").pixmap(QSize(iconSize * 2, iconSize * 2)));
@@ -66,7 +85,7 @@ AcceptCall::AcceptCall(const QString &caller,
         callTypeLabel->setAlignment(Qt::AlignCenter);
 
         auto buttonLayout = new QHBoxLayout;
-        buttonLayout->setSpacing(20);
+        buttonLayout->setSpacing(18);
         acceptBtn_ = new QPushButton(tr("Accept"), this);
         acceptBtn_->setDefault(true);
         acceptBtn_->setIcon(QIcon(":/icons/icons/ui/place-call.png"));
@@ -78,6 +97,19 @@ AcceptCall::AcceptCall(const QString &caller,
         buttonLayout->addWidget(acceptBtn_);
         buttonLayout->addWidget(rejectBtn_);
 
+        auto deviceLayout = new QHBoxLayout;
+        auto audioLabel = new QLabel(this);
+        audioLabel->setPixmap(
+          QIcon(":/icons/icons/ui/microphone-unmute.png").pixmap(QSize(iconSize, iconSize)));
+
+        auto deviceList = new QComboBox(this);
+        for (const auto &d : audioDevices_)
+                deviceList->addItem(QString::fromStdString(d));
+
+        deviceLayout->addStretch();
+        deviceLayout->addWidget(audioLabel);
+        deviceLayout->addWidget(deviceList);
+
         if (displayNameLabel)
                 layout->addWidget(displayNameLabel, 0, Qt::AlignCenter);
         layout->addWidget(callerLabel, 0, Qt::AlignCenter);
@@ -85,8 +117,12 @@ AcceptCall::AcceptCall(const QString &caller,
         layout->addWidget(callTypeIndicator, 0, Qt::AlignCenter);
         layout->addWidget(callTypeLabel, 0, Qt::AlignCenter);
         layout->addLayout(buttonLayout);
+        layout->addLayout(deviceLayout);
 
-        connect(acceptBtn_, &QPushButton::clicked, this, [this]() {
+        connect(acceptBtn_, &QPushButton::clicked, this, [this, deviceList, settings]() {
+                WebRTCSession::instance().setAudioSource(deviceList->currentIndex());
+                settings->setDefaultAudioSource(
+                  QString::fromStdString(audioDevices_[deviceList->currentIndex()]));
                 emit accept();
                 emit close();
         });
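AcceptCall and PlaceCall (further down) now open with the same bootstrap: initialise the WebRTC session, fetch the device names with the saved default first, and bail out via close() when either step fails. A sketch of how that duplicated block could be hoisted into one helper; initAudioDevices() is hypothetical and not part of this patch:

    #include <string>
    #include <vector>

    #include "ChatPage.h"
    #include "WebRTCSession.h"

    // Hypothetical refactor, not in the patch: shared bootstrap for both call
    // dialogs. Returns the selectable devices, or an empty vector after
    // notifying the user that the call cannot proceed.
    static std::vector<std::string>
    initAudioDevices(const QString &savedDefault)
    {
            std::string errorMessage;
            if (!WebRTCSession::instance().init(&errorMessage)) {
                    emit ChatPage::instance()->showNotification(
                      QString::fromStdString(errorMessage));
                    return {};
            }
            auto devices = WebRTCSession::instance().getAudioSourceNames(
              savedDefault.toStdString());
            if (devices.empty())
                    emit ChatPage::instance()->showNotification("No audio sources found.");
            return devices;
    }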
diff --git a/src/dialogs/AcceptCall.h b/src/dialogs/AcceptCall.h
index 5d2251f..909605d 100644
--- a/src/dialogs/AcceptCall.h
+++ b/src/dialogs/AcceptCall.h
@@ -1,9 +1,14 @@
 #pragma once
 
+#include <string>
+#include <vector>
+
+#include <QSharedPointer>
 #include <QWidget>
 
 class QPushButton;
 class QString;
+class UserSettings;
 
 namespace dialogs {
@@ -16,6 +21,7 @@ public:
                    const QString &displayName,
                    const QString &roomName,
                    const QString &avatarUrl,
+                   QSharedPointer<UserSettings> settings,
                    QWidget *parent = nullptr);
 
 signals:
@@ -25,6 +31,7 @@ signals:
 
 private:
         QPushButton *acceptBtn_;
         QPushButton *rejectBtn_;
+        std::vector<std::string> audioDevices_;
 };
 }
diff --git a/src/dialogs/PlaceCall.cpp b/src/dialogs/PlaceCall.cpp
index 0fda179..4e70370 100644
--- a/src/dialogs/PlaceCall.cpp
+++ b/src/dialogs/PlaceCall.cpp
@@ -1,10 +1,14 @@
+#include <QComboBox>
 #include <QLabel>
 #include <QPushButton>
 #include <QString>
 #include <QVBoxLayout>
 
+#include "ChatPage.h"
 #include "Config.h"
+#include "UserSettingsPage.h"
 #include "Utils.h"
+#include "WebRTCSession.h"
 #include "dialogs/PlaceCall.h"
 #include "ui/Avatar.h"
@@ -14,9 +18,24 @@ PlaceCall::PlaceCall(const QString &callee,
                      const QString &displayName,
                      const QString &roomName,
                      const QString &avatarUrl,
+                     QSharedPointer<UserSettings> settings,
                      QWidget *parent)
   : QWidget(parent)
 {
+        std::string errorMessage;
+        if (!WebRTCSession::instance().init(&errorMessage)) {
+                emit ChatPage::instance()->showNotification(QString::fromStdString(errorMessage));
+                emit close();
+                return;
+        }
+        audioDevices_ = WebRTCSession::instance().getAudioSourceNames(
+          settings->defaultAudioSource().toStdString());
+        if (audioDevices_.empty()) {
+                emit ChatPage::instance()->showNotification("No audio sources found.");
+                emit close();
+                return;
+        }
+
         setAutoFillBackground(true);
         setWindowFlags(Qt::Tool | Qt::WindowStaysOnTopHint);
         setWindowModality(Qt::WindowModal);
@@ -37,25 +56,42 @@ PlaceCall::PlaceCall(const QString &callee,
         if (!avatarUrl.isEmpty())
                 avatar->setImage(avatarUrl);
         else
                 avatar->setLetter(utils::firstChar(roomName));
-        const int iconSize = 24;
+        const int iconSize = 18;
         voiceBtn_ = new QPushButton(tr("Voice"), this);
         voiceBtn_->setIcon(QIcon(":/icons/icons/ui/place-call.png"));
         voiceBtn_->setIconSize(QSize(iconSize, iconSize));
         voiceBtn_->setDefault(true);
         cancelBtn_ = new QPushButton(tr("Cancel"), this);
-        buttonLayout->addStretch(1);
         buttonLayout->addWidget(avatar);
+        buttonLayout->addStretch();
         buttonLayout->addWidget(voiceBtn_);
         buttonLayout->addWidget(cancelBtn_);
 
         QString name  = displayName.isEmpty() ? callee : displayName;
         QLabel *label = new QLabel("Place a call to " + name + "?", this);
 
+        auto deviceLayout = new QHBoxLayout;
+        auto audioLabel = new QLabel(this);
+        audioLabel->setPixmap(QIcon(":/icons/icons/ui/microphone-unmute.png")
+                                .pixmap(QSize(iconSize * 1.2, iconSize * 1.2)));
+
+        auto deviceList = new QComboBox(this);
+        for (const auto &d : audioDevices_)
+                deviceList->addItem(QString::fromStdString(d));
+
+        deviceLayout->addStretch();
+        deviceLayout->addWidget(audioLabel);
+        deviceLayout->addWidget(deviceList);
+
         layout->addWidget(label);
         layout->addLayout(buttonLayout);
+        layout->addLayout(deviceLayout);
 
-        connect(voiceBtn_, &QPushButton::clicked, this, [this]() {
+        connect(voiceBtn_, &QPushButton::clicked, this, [this, deviceList, settings]() {
+                WebRTCSession::instance().setAudioSource(deviceList->currentIndex());
+                settings->setDefaultAudioSource(
+                  QString::fromStdString(audioDevices_[deviceList->currentIndex()]));
                 emit voice();
                 emit close();
         });
diff --git a/src/dialogs/PlaceCall.h b/src/dialogs/PlaceCall.h
index f6db9ab..5a1e982 100644
--- a/src/dialogs/PlaceCall.h
+++ b/src/dialogs/PlaceCall.h
@@ -1,9 +1,14 @@
 #pragma once
 
+#include <string>
+#include <vector>
+
+#include <QSharedPointer>
 #include <QWidget>
 
 class QPushButton;
 class QString;
+class UserSettings;
 
 namespace dialogs {
@@ -16,6 +21,7 @@ public:
                   const QString &displayName,
                   const QString &roomName,
                   const QString &avatarUrl,
+                  QSharedPointer<UserSettings> settings,
                   QWidget *parent = nullptr);
 
 signals:
@@ -25,6 +31,7 @@ signals:
 
 private:
         QPushButton *voiceBtn_;
         QPushButton *cancelBtn_;
+        std::vector<std::string> audioDevices_;
 };
 }
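Both dialogs rely on an index invariant established by getAudioSourceNames(): the saved default device is swapped to the front of the returned name list and of the session's internal GstDevice list in lockstep, so the combo box opens on the default (index 0) and currentIndex() is always a valid argument to setAudioSource(). The rotation in isolation, with ints standing in for GstDevice pointers (data hypothetical):

    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    int
    main()
    {
            // Stand-ins for the parallel lists: display names and device handles.
            std::vector<std::string> names   = {"Monitor", "Headset", "Webcam Mic"};
            std::vector<int>         devices = {0, 1, 2};
            const std::string defaultDevice  = "Headset";

            for (std::size_t i = 0; i < names.size(); ++i) {
                    if (names[i] == defaultDevice) {
                            // The same swap on both lists keeps indices aligned.
                            std::swap(names.front(), names[i]);
                            std::swap(devices.front(), devices[i]);
                    }
            }

            assert(names.front() == "Headset");
            assert(devices.front() == 1); // still the handle for "Headset"
            return 0;
    }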