commit 2cdf0b3883d47926e82f24bb599ba693fd050e59 Author: coral Date: Fri Mar 20 16:08:41 2026 -0700 big one diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..34bc06e --- /dev/null +++ b/.env.example @@ -0,0 +1,12 @@ +# Required +SIP_PUBLIC_HOST=0.0.0.0 +DISCORD_BOT_TOKEN=your_bot_token_here + +# Optional (defaults shown) +# DATA_DIR=/var/lib/sipcord +# CONFIG_PATH=./config.toml +# SOUNDS_DIR=./wav +# SIP_PORT=5060 +# RTP_PORT_START=10000 +# RTP_PORT_END=15000 +# DEV_MODE=false diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..bd66c5b --- /dev/null +++ b/.gitignore @@ -0,0 +1,27 @@ +# Cargo build output +target/ +debug/ + +# Rustfmt backup files +**/*.rs.bk + +# MSVC debug info +*.pdb + +# Environment variables +.env +.env.local +.env.*.local + +# OS +.DS_Store +Thumbs.db + +# IDE +.idea/ +.vscode/ +*.swp +*.swo + +# Temporary fax files +tmp/ diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..164c106 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,5572 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + +[[package]] +name = "aes" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + +[[package]] +name = "aes-gcm" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" +dependencies = [ + "aead", + "aes", + "cipher", + "ctr", + "ghash", + "subtle", +] + +[[package]] +name = "aformat" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f387c59d52324934bdd3586fe904051338ce4583a9bb921982a3dbb060a26e6f" +dependencies = [ + "aformat-macros", + "to-arraystring", + "typenum", + "typenum_mappings", +] + +[[package]] +name = "aformat-macros" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "254adeba6d67e7e6706f01ffdf1787cdad41e361be5b7c1e3265bba54dca7d8f" +dependencies = [ + "bytestring", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anyhow" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +dependencies = [ + "serde", +] + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "audio-core" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ebbf82d06013f4c41fe71303feb980cddd78496d904d06be627972de51a24" + +[[package]] +name = "audioadapter" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e25c5bb54993ad4693d8b68b6f29f872c5fd9f92a6469d0acb0cbaf80a13d0f9" +dependencies = [ + "audio-core", + "num-traits", +] + +[[package]] +name = "audioadapter-buffers" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6af89882334c4e501faa08992888593ada468f9e1ab211635c32f9ada7786e0" +dependencies = [ + "audioadapter", + "audioadapter-sample", + "num-traits", +] + +[[package]] +name = "audioadapter-sample" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e9a3d502fec0b21aa420febe0b110875cf8a7057c49e83a0cace1df6a73e03e" +dependencies = [ + "audio-core", + "num-traits", 
+] + +[[package]] +name = "audiopus" +version = "0.3.0-rc.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab55eb0e56d7c6de3d59f544e5db122d7725ec33be6a276ee8241f3be6473955" +dependencies = [ + "audiopus_sys", +] + +[[package]] +name = "audiopus_sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62314a1546a2064e033665d658e88c620a62904be945f8147e6b16c3db9f8651" +dependencies = [ + "cmake", + "log", + "pkg-config", +] + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "aws-lc-rs" +version = "1.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a054912289d18629dc78375ba2c3726a3afe3ff71b4edba9dedfca0e3446d1fc" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa7e52a4c5c547c741610a2c6f123f3881e409b714cd27e6798ef020c514f0a" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" + +[[package]] +name = "bindgen" +version = "0.71.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +dependencies = [ + "bitflags 2.11.0", + "cexpr", + "clang-sys", + "itertools", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.117", +] + +[[package]] +name = "bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags 2.11.0", + "cexpr", + "clang-sys", + "itertools", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.117", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bool_to_bitflags" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c039d9bc676b768f6d59556e99f95f5e47c811b672f8b2b2b606eb28527a2f" +dependencies = [ + "darling 0.20.11", + "proc-macro2", + "quote", + "syn 2.0.117", + "to-arraystring", +] + +[[package]] +name = "bumpalo" +version = "3.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" + +[[package]] +name = "bytemuck" +version = "1.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c8efb64bd706a16a1bdde310ae86b351e4d21550d98d056f22f8a7f7a2183fec" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "byteorder-lite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" + +[[package]] +name = "bytes" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" + +[[package]] +name = "bytestring" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "113b4343b5f6617e7ad401ced8de3cc8b012e73a594347c307b90db3e9271289" +dependencies = [ + "bytes", +] + +[[package]] +name = "cc" +version = "1.2.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423" +dependencies = [ + "find-msvc-tools", + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + 
+[[package]] +name = "chacha20" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + +[[package]] +name = "chacha20poly1305" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" +dependencies = [ + "aead", + "chacha20", + "cipher", + "poly1305", + "zeroize", +] + +[[package]] +name = "chrono" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" +dependencies = [ + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-link", +] + +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", + "zeroize", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "claxon" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bfbf56724aa9eca8afa4fcfadeb479e722935bb2a0900c2d37e0cc477af0688" + +[[package]] +name = "cmake" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +dependencies = [ + "cc", +] + +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" 
+dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "core-models" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94950e87ea550d6d68f1993f3e7bebc8cb7235157bff84337d46195c3aa0b3f0" +dependencies = [ + "hax-lib", + "pastey", + "rand 0.9.2", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "typenum", +] + +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + 
"fiat-crypto", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "darling" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d706e75d87e35569db781a9b5e2416cff1236a47ed380831f959382ccd5f858" +dependencies = [ + "darling_core 0.10.2", + "darling_macro 0.10.2", +] + +[[package]] +name = "darling" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core 0.20.11", + "darling_macro 0.20.11", +] + +[[package]] +name = "darling_core" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.9.3", + "syn 1.0.109", +] + +[[package]] +name = "darling_core" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.11.1", + "syn 2.0.117", +] + +[[package]] +name = "darling_macro" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" +dependencies = [ + "darling_core 0.10.2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" 
+dependencies = [ + "darling_core 0.20.11", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core 0.9.12", + "serde", +] + +[[package]] +name = "data-encoding" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" + +[[package]] +name = "davey" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20e05bae3e741c249912d39c6cec28d2ef3c1a196e9acac025582182a8f9fbb7" +dependencies = [ + "aead", + "aes", + "cipher", + "ctr", + "ghash", + "hex-literal", + "hmac", + "num-derive", + "num-traits", + "openmls", + "openmls_basic_credential", + "openmls_rust_crypto", + "p256", + "rand 0.8.5", + "scrypt", + "sha2", + "subtle", + "thiserror 2.0.18", + "tracing", +] + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "deranged" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" +dependencies = [ + "powerfmt", + "serde_core", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder" +version = "0.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2658621297f2cf68762a6f7dc0bb7e1ff2cfd6583daef8ee0fed6f7ec468ec0" +dependencies = [ + "darling 0.10.2", + "derive_builder_core", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder_core" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2791ea3e372c8495c0bc2033991d76b512cd799d07491fbd6890124db9458bef" +dependencies = [ + "darling 0.10.2", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_more" +version = "0.99.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "rustc_version", + "syn 2.0.117", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "discortp" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c793408a15d361754613fa68123ffa60424c2617fafdf82127b4bedf37d3f5d" +dependencies = [ + "pnet_macros", + "pnet_macros_support", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest", + "elliptic-curve", + "rfc6979", + "signature", + "spki", +] + +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core 0.6.4", + "serde", + "sha2", + "subtle", + "zeroize", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest", + "ff", + "generic-array", + "group", + "hkdf", + "pem-rfc7468", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "envy" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f47e0157f2cb54f5ae1bd371b30a2ae4311e1c028f575cd4e81de7353215965" +dependencies = [ + "serde", +] + 
+[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "extract_map" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8855baff5d450715f5d34c1d291a8c77363bd5a20ddacf560d7d6ea2a07f2c3" +dependencies = [ + "hashbrown 0.15.5", + "serde", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fdeflate" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e6853b52649d4ac5c0bd02320cddc5ba956bdb407c4b75a2c6b75bf51500f8c" +dependencies = [ + "simd-adler32", +] + +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + +[[package]] +name = "flate2" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" 
+dependencies = [ + "crc32fast", + "miniz_oxide", + "zlib-rs", +] + +[[package]] +name = "flume" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" +dependencies = [ + "futures-core", + "futures-sink", + "nanorand", + "spin", +] + +[[package]] +name = "fluvio-wasm-timer" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b768c170dc045fa587a8f948c91f9bcfb87f774930477c6215addf54317f137f" +dependencies = [ + "futures", + "js-sys", + "parking_lot 0.11.2", + "pin-utils", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "from_map" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99f31122ab0445ff8cee420b805f24e07683073815de1dd276ee7d588d301700" +dependencies = [ + "hashmap_derive", +] + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "futures" 
+version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" + +[[package]] +name = "futures-executor" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" + +[[package]] +name = "futures-macro" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "futures-sink" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" + +[[package]] +name = "futures-task" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" + +[[package]] +name = "futures-util" +version = "0.3.32" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "slab", +] + +[[package]] +name = "generator" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cc16584ff22b460a382b7feec54b23d2908d858152e5739a120b949293bd74e" +dependencies = [ + "cc", + "libc", + "log", + "rustversion", + "windows", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getrandom" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi 5.3.0", + "wasip2", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" +dependencies = [ + "cfg-if", + "libc", + "r-efi 6.0.0", + "wasip2", + "wasip3", +] + +[[package]] +name = "ghash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" +dependencies = [ + "opaque-debug", + "polyval", +] + +[[package]] 
+name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash 0.1.5", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "hashmap_derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb30bf173e72cc31b5265dac095423ca14e7789ff7c3b0e6096a37a996f12883" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "hax-lib" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "543f93241d32b3f00569201bfce9d7a93c92c6421b23c77864ac929dc947b9fc" +dependencies = [ + "hax-lib-macros", + "num-bigint", + "num-traits", +] + +[[package]] +name = "hax-lib-macros" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8755751e760b11021765bb04cb4a6c4e24742688d9f3aa14c2079638f537b0f" +dependencies = [ + "hax-lib-macros-types", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "hax-lib-macros-types" +version = 
"0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f177c9ae8ea456e2f71ff3c1ea47bf4464f772a05133fcbba56cd5ba169035a2" +dependencies = [ + "proc-macro2", + "quote", + "serde", + "serde_json", + "uuid", +] + +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac", +] + +[[package]] +name = "hls_m3u8" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b906521a5b0e6d2ec07ea0bb855d92a1db30b48812744a645a3b2a1405cb8159" +dependencies = [ + "derive_builder", + "derive_more", + "hex", + "shorthand", + "stable-vec", + "strum 0.17.1", + "thiserror 1.0.69", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "hpke-rs" +version = "0.3.0-alpha.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b245758dea58531acbdd0e9a20d73a93561a78f78531a2bed0ef9b5a39cc0ff2" +dependencies = [ + "hpke-rs-crypto", + "hpke-rs-libcrux", + "hpke-rs-rust-crypto", + "libcrux-sha3", + "log", + "rand_core 0.9.5", + "serde", + "tls_codec", + "zeroize", +] + +[[package]] +name = "hpke-rs-crypto" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d51ffd304e06803f90f2e56a24a6910f19b8516f842d7b72a436c51026279876" +dependencies = [ + "rand_core 0.9.5", +] + +[[package]] +name = "hpke-rs-libcrux" +version = "0.3.0-alpha.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96fa708a147e2068a04ec209f5d94f2446f89a754e2556a4c14b88101aa26ff8" +dependencies = [ + "hpke-rs-crypto", + "libcrux-chacha20poly1305", + "libcrux-ecdh", + "libcrux-hkdf", + "libcrux-kem", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_core 0.9.5", +] + +[[package]] +name = "hpke-rs-rust-crypto" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff7dc0df494528a0b90005bb511c117453c6a89cd8819f6cf311d0f4446dcf45" +dependencies = [ + "aes-gcm", + "chacha20poly1305", + "hkdf", + "hpke-rs-crypto", + "k256", + "p256", + "p384", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "sha2", + "x25519-dalek", +] + +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", 
+ "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "hyper" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "http", + "http-body", + "httparse", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots 1.0.6", +] + +[[package]] +name = "hyper-util" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" +dependencies = [ + "base64", + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2 0.6.3", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +dependencies = [ + "android_system_properties", + 
"core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "image" +version = "0.25.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85ab80394333c02fe689eaf900ab500fbd0c2213da414687ebf995a65d5a6104" +dependencies = [ + "bytemuck", + "byteorder-lite", + "moxcms", + "num-traits", + "png", + "zune-core", + "zune-jpeg", +] + +[[package]] +name = "indexmap" +version = "2.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +dependencies = [ + "equivalent", + "hashbrown 0.16.1", + "serde", + "serde_core", +] + +[[package]] +name = "inout" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" +dependencies = [ + "generic-array", +] + +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "ipnet" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" + +[[package]] +name = "iri-string" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "elliptic-curve", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + +[[package]] +name = "libc" +version = "0.2.183" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d" + +[[package]] +name = "libcrux-chacha20poly1305" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e0683aedd9048bead90863fa83f56fc224ea545762fdd108c845d5c15391413" +dependencies = [ + "libcrux-hacl-rs", + "libcrux-macros", + "libcrux-poly1305", +] + +[[package]] +name = "libcrux-curve25519" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a39960483f24efea15b1aa111bb8668dc671f808598793104ccc4fec9f5e28b" +dependencies = [ + "libcrux-hacl-rs", + "libcrux-macros", +] + +[[package]] +name = "libcrux-ecdh" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9e5ecef729c99bb2f751133b89186a636cd8b3e8320d094131d21ea8c82348ca" +dependencies = [ + "libcrux-curve25519", + "libcrux-p256", + "rand 0.9.2", +] + +[[package]] +name = "libcrux-hacl-rs" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a141e79dcefa1a91b68831783114232ed6a69b8c8c853c6e6b1cf2af231c3c" +dependencies = [ + "libcrux-macros", +] + +[[package]] +name = "libcrux-hkdf" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2663b258d1a4a023a03e946bb949cf30f862d2da1e68fe9a1d3e6103c1d4a6a5" +dependencies = [ + "libcrux-hacl-rs", + "libcrux-hmac", +] + +[[package]] +name = "libcrux-hmac" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c8d021153affaad2aba7c6dd4c23e7304e77198080ce9b949c725682912154" +dependencies = [ + "libcrux-hacl-rs", + "libcrux-macros", + "libcrux-sha2", +] + +[[package]] +name = "libcrux-intrinsics" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d3b41dcbc21a5fb7efbbb5af7405b2e79c4bfe443924e90b13afc0080318d31" +dependencies = [ + "core-models", + "hax-lib", +] + +[[package]] +name = "libcrux-kem" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc932402ccd803c064e228ff2a4d2aef5b5a0b03b461518d29046e01ebc2cf98" +dependencies = [ + "libcrux-ecdh", + "libcrux-ml-kem", + "libcrux-sha3", + "libcrux-traits", + "rand 0.9.2", +] + +[[package]] +name = "libcrux-macros" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc8e38ec9c49ba83cb7e72d278c3537552afbc67728f22e567c21725cdd8b3ba" +dependencies = [ + "quote", + "syn 2.0.117", +] + +[[package]] +name = "libcrux-ml-kem" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6206bb81fc3e51bd94d4b847760039d44a9a8e77bac841df8ed9320f79a6f3be" +dependencies = [ + "hax-lib", + "libcrux-intrinsics", + "libcrux-platform", + "libcrux-secrets", + "libcrux-sha3", + "rand 0.9.2", +] + +[[package]] +name = "libcrux-p256" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fb56de31fa136bdaa838401547c3644f3e11c7929818dfb45d934a2db7ab521" +dependencies = [ + "libcrux-hacl-rs", + "libcrux-macros", + "libcrux-sha2", +] + +[[package]] +name = "libcrux-platform" +version = "0.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db82d058aa76ea315a3b2092f69dfbd67ddb0e462038a206e1dcd73f058c0778" +dependencies = [ + "libc", +] + +[[package]] +name = "libcrux-poly1305" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a8907194cd2d35dd763519189036c6062f5464ac9b63fb968b10abcb09feef3" +dependencies = [ + "libcrux-hacl-rs", + "libcrux-macros", +] + +[[package]] +name = "libcrux-secrets" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "332737e629fe6ba7547f5c0f90559eac865d5dbecf98138ffae8f16ab8cbe33f" +dependencies = [ + "hax-lib", +] + +[[package]] +name = "libcrux-sha2" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df0c0266cc2b0920f3b1540bb1268ea5dae2cfff9aa0e92b316d2c73e618fb64" +dependencies = [ + "libcrux-hacl-rs", + "libcrux-macros", + "libcrux-traits", +] + +[[package]] +name = "libcrux-sha3" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c076a07a2df2cc8f6603042823e752c0057bce51beb4e0b2cbf0b3dfb7f73d" +dependencies = [ + "hax-lib", + "libcrux-intrinsics", + "libcrux-platform", +] + +[[package]] +name = "libcrux-traits" +version = "0.0.3-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"477d39395a82293e079313c288f313bcbb62501ae4c31588e471344eea1a77da" +dependencies = [ + "rand 0.9.2", +] + +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "loom" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff50ecb28bb86013e935fb6683ab1f6d3a20016f123c76fd4c27470076ac30f5" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "serde", + "serde_json", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest", +] + +[[package]] 
+name = "memchr" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mime_guess" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "mio" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "moka" +version = "0.12.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85f8024e1c8e71c778968af91d43700ce1d11b219d127d79fb2934153b82b42b" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "equivalent", + "parking_lot 0.12.5", + "portable-atomic", + "smallvec", + "tagptr", + "uuid", +] + +[[package]] +name = "moxcms" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb85c154ba489f01b25c0d36ae69a87e4a1c73a72631fc6c0eb6dde34a73e44b" +dependencies = [ + "num-traits", + "pxfm", +] + +[[package]] +name = "nanorand" +version 
= "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" +dependencies = [ + "getrandom 0.2.17", +] + +[[package]] +name = "no-std-net" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" + +[[package]] +name = "nohash-hasher" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nonmax" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "610a5acd306ec67f907abe5567859a3c693fb9886eb1f012ab8f2a47bef3db51" +dependencies = [ + "serde", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" + +[[package]] +name = "num-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" + +[[package]] +name = "opaque-debug" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" + +[[package]] +name = "openmls" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692e9c6b7d725e53cc42d1fa870066770fd67df35cf1cfbbd047dc0355e06b67" +dependencies = [ + "fluvio-wasm-timer", + "getrandom 0.2.17", + "log", + "openmls_traits", + "rayon", + "serde", + "serde_bytes", + "thiserror 2.0.18", + "tls_codec", + "zeroize", +] + +[[package]] +name = "openmls_basic_credential" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e3e6454b2b1b6749fc2f142d7f74eb387f7793be88187ed372e9f5f4cf10c34c" +dependencies = [ + "ed25519-dalek", + "openmls_traits", + "p256", + "rand 0.8.5", + "serde", + "tls_codec", +] + +[[package]] +name = "openmls_memory_storage" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e7b071ea5573a97efaa72b7c53e81cebc644b62ef0fe992bad685cc0f7dd4ea" +dependencies = [ + "log", + "openmls_traits", + "serde", + "serde_json", + "thiserror 2.0.18", +] + +[[package]] +name = "openmls_rust_crypto" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3faef09e17a15c8065b9ec6b1e150c19dcb0c4cb810a636b6f010a94a189678e" +dependencies = [ + "aes-gcm", + "chacha20poly1305", + "ed25519-dalek", + "hkdf", + "hmac", + "hpke-rs", + "hpke-rs-crypto", + "hpke-rs-rust-crypto", + "openmls_memory_storage", + "openmls_traits", + "p256", + "rand 0.8.5", + "rand_chacha 0.3.1", + "serde", + "sha2", + "thiserror 2.0.18", + "tls_codec", +] + +[[package]] +name = "openmls_traits" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e21d8877bacdbc407060df29bf59b145bb886a8fa0099b87ae8067a34b902a13" +dependencies = [ + "serde", + "tls_codec", +] + +[[package]] +name = "openssl-probe" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" + +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2", +] + +[[package]] +name = 
"p384" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe42f1670a52a47d448f14b6a5c61dd78fce51856e68edaa38f7ae3a46b8d6b6" +dependencies = [ + "elliptic-curve", + "primeorder", +] + +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core 0.8.6", +] + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core 0.9.12", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if", + "instant", + "libc", + "redox_syscall 0.2.16", + "smallvec", + "winapi", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall 0.5.18", + "smallvec", + "windows-link", +] + +[[package]] +name = "pastey" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35fb2e5f958ec131621fdd531e9fc186ed768cbe395337403ae56c17a74c68ec" + +[[package]] +name = "patricia_tree" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31f2f4539bffe53fc4b4da301df49d114b845b077bd5727b7fe2bd9d8df2ae68" +dependencies = [ + "bitflags 2.11.0", +] + +[[package]] +name = "pbkdf2" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +dependencies = [ + "digest", + "hmac", +] + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pin-project" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1749c7ed4bcaf4c3d0a3efc28538844fb29bcdd7d2b67b2be7e20ba861ff517" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pjsua" +version = "0.1.0" +dependencies = [ + "bindgen 0.71.1", + "cmake", + "pkg-config", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "pnet_base" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe4cf6fb3ab38b68d01ab2aea03ed3d1132b4868fa4e06285f29f16da01c5f4c" +dependencies = [ + "no-std-net", +] + +[[package]] +name = "pnet_macros" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688b17499eee04a0408aca0aa5cba5fc86401d7216de8a63fdf7a4c227871804" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 2.0.117", +] + +[[package]] +name = "pnet_macros_support" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eea925b72f4bd37f8eab0f221bbe4c78b63498350c983ffa9dd4bcde7e030f56" +dependencies = [ + "pnet_base", +] + +[[package]] +name = "png" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60769b8b31b2a9f263dae2776c37b1b28ae246943cf719eb6946a1db05128a61" +dependencies = [ + "bitflags 2.11.0", + "crc32fast", + "fdeflate", + "flate2", + "miniz_oxide", +] + +[[package]] +name = "poise" +version = "0.6.1" +source = "git+https://github.com/serenity-rs/poise?branch=serenity-next#8807088ee077f81c2b88d54d065b82b4c7dce28d" +dependencies = [ + "async-trait", + "derivative", + "futures-util", + "indexmap", + "parking_lot 0.12.5", + "poise_macros", + "regex", + "serenity", + "tokio", + "tracing", + "trim-in-place", +] + +[[package]] +name = "poise_macros" +version = "0.6.1" +source = "git+https://github.com/serenity-rs/poise?branch=serenity-next#8807088ee077f81c2b88d54d065b82b4c7dce28d" +dependencies = [ + "darling 0.20.11", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures", + "opaque-debug", + 
"universal-hash", +] + +[[package]] +name = "polyval" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" +dependencies = [ + "cfg-if", + "cpufeatures", + "opaque-debug", + "universal-hash", +] + +[[package]] +name = "portable-atomic" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" + +[[package]] +name = "portable-atomic-util" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "091397be61a01d4be58e7841595bd4bfedb15f1cd54977d79b8271e94ed799a3" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.117", +] + +[[package]] +name = "primal-check" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc0d895b311e3af9902528fbb8f928688abbd95872819320517cc24ca6b2bd08" +dependencies = [ + "num-integer", +] + +[[package]] +name = "primeorder" +version = "0.13.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "proc-macro2" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "pxfm" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5a041e753da8b807c9255f28de81879c78c876392ff2469cde94799b2896b9d" + +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2 0.6.3", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098" +dependencies = [ + "aws-lc-rs", + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + 
"web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.6.3", + "tracing", + "windows-sys 0.60.2", +] + +[[package]] +name = "quote" +version = "1.0.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.5", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 
0.9.5", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.17", +] + +[[package]] +name = "rand_core" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" +dependencies = [ + "getrandom 0.3.4", +] + +[[package]] +name = "rayon" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "realfft" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f821338fddb99d089116342c46e9f1fbf3828dba077674613e734e01d6ea8677" +dependencies = [ + "rustfft", +] + +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags 2.11.0", +] + +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = 
"ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "regex" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" + +[[package]] +name = "reqwest" +version = "0.12.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +dependencies = [ + "base64", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tokio-util", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams 0.4.2", + "web-sys", + "webpki-roots 1.0.6", +] + +[[package]] +name = "reqwest" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" +dependencies = [ + "base64", + "bytes", 
+ "futures-core", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "mime_guess", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "rustls-platform-verifier", + "serde", + "serde_json", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tokio-util", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams 0.5.0", + "web-sys", +] + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.17", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "ringbuf" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe47b720588c8702e34b5979cb3271a8b1842c7cb6f57408efa70c779363488c" +dependencies = [ + "crossbeam-utils", + "portable-atomic", + "portable-atomic-util", +] + +[[package]] +name = "rtrb" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7204ed6420f698836b76d4d5c2ec5dec7585fd5c3a788fd1cde855d1de598239" + +[[package]] +name = "rubato" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5258099699851cfd0082aeb645feb9c084d9a5e1f1b8d5372086b989fc5e56a1" +dependencies = [ + "num-complex", + "num-integer", + "num-traits", + "realfft", +] + +[[package]] +name = "rubato" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"90173154a8a14e6adb109ea641743bc95ec81c093d94e70c6763565f7108ebeb" +dependencies = [ + "audioadapter", + "audioadapter-buffers", + "num-complex", + "num-integer", + "num-traits", + "realfft", + "visibility", + "windowfunctions", +] + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rustfft" +version = "6.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21db5f9893e91f41798c88680037dba611ca6674703c1a18601b01a72c8adb89" +dependencies = [ + "num-complex", + "num-integer", + "num-traits", + "primal-check", + "strength_reduce", + "transpose", +] + +[[package]] +name = "rustls" +version = "0.23.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" +dependencies = [ + "aws-lc-rs", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pki-types" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-platform-verifier" +version = "0.6.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" +dependencies = [ + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + +[[package]] +name = "rustls-webpki" +version = "0.103.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" +dependencies = [ + "aws-lc-rs", + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "rusty_pool" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ed36cdb20de66d89a17ea04b8883fc7a386f2cf877aaedca5005583ce4876ff" +dependencies = [ + "crossbeam-channel", + "futures", + "futures-channel", + "futures-executor", + "num_cpus", +] + +[[package]] +name = "ryu" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" + +[[package]] +name = "salsa20" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" +dependencies = [ + "cipher", +] + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91c1b7e4904c873ef0710c1f407dde2e6287de2bebc1bbbf7d430bb7cbffd939" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "scrypt" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0516a385866c09368f0b5bcd1caff3366aace790fcd46e2bb032697bb172fd1f" +dependencies = [ + "pbkdf2", + "salsa20", + "sha2", +] + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", +] + +[[package]] +name = "secrecy" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" +dependencies = [ + "zeroize", +] + +[[package]] +name = "security-framework" +version = "3.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" +dependencies = [ + "bitflags 2.11.0", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.17.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde-aux" +version = "4.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "207f67b28fe90fb596503a9bf0bf1ea5e831e21307658e177c5dfcdfc3ab8a0a" +dependencies = [ + "chrono", + "serde", + "serde-value", + "serde_json", +] + +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + +[[package]] +name = "serde_bytes" +version = "0.11.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8" +dependencies = [ + "serde", + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_cow" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7bbbec7196bfde255ab54b65e34087c0849629280028238e67ee25d6a4b7da" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "serde_repr" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "serde_spanned" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" +dependencies = [ + "serde_core", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serenity" +version = "0.12.5" +source = "git+https://github.com/serenity-rs/serenity?branch=next#4e1cd66c8fb64354c293a5991e287f26efb94006" +dependencies = [ + "aformat", + "arrayvec", + "async-trait", + "base64", + "bitflags 2.11.0", + "bool_to_bitflags", + "bytes", + "chrono", + "dashmap", + "extract_map", + "flate2", + "foldhash 0.2.0", + "futures", + "mime", + "mime_guess", + "nonmax", + "parking_lot 0.12.5", + "percent-encoding", + "ref-cast", + "reqwest 0.13.2", + "serde", + "serde_cow", + "serde_json", + "small-fixed-array", + "strum 0.27.2", + "time", + "to-arraystring", + "tokio", + "tokio-tungstenite 0.28.0", + "tracing", + "typesize", + "url", + "zeroize", + "zstd", +] + +[[package]] 
+name = "serenity-voice-model" +version = "0.3.0" +source = "git+https://github.com/serenity-rs/voice-model.git?branch=next#cef32674d5ad6c539b1ee2ff9dcda39b6d71e18f" +dependencies = [ + "bitflags 2.11.0", + "num-traits", + "serde", + "serde_json", + "serde_repr", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha1_smol" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "shorthand" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "474f77f985d8212610f170332eaf173e768404c0c1d4deb041f32c297cf18931" +dependencies = [ + "from_map", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "signature" +version = "2.2.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest", + "rand_core 0.6.4", +] + +[[package]] +name = "simd-adler32" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" + +[[package]] +name = "simdutf8" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" + +[[package]] +name = "sipcord-bridge" +version = "1.5.5" +dependencies = [ + "anyhow", + "async-trait", + "audioadapter", + "audioadapter-buffers", + "audiopus", + "base64", + "byteorder", + "bytes", + "chrono", + "claxon", + "crossbeam-channel", + "crossbeam-queue", + "dashmap", + "dotenvy", + "envy", + "image", + "ipnet", + "md-5", + "moka", + "parking_lot 0.12.5", + "pjsua", + "poise", + "rand 0.9.2", + "reqwest 0.13.2", + "rtrb", + "rubato 1.0.1", + "rustls", + "serde", + "serde_json", + "serenity", + "songbird", + "spandsp", + "symphonia", + "symphonia-core", + "thiserror 2.0.18", + "tokio", + "tokio-util", + "toml", + "tracing", + "tracing-subscriber", + "udptl", +] + +[[package]] +name = "slab" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" + +[[package]] +name = "small-fixed-array" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f47eb472ef0994fb63d68ce4851eef89fa0faaf0dc4088c941b4015ce32c083f" +dependencies = [ + "serde", +] + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.5.10" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "songbird" +version = "0.5.0" +source = "git+https://github.com/jtscuba/songbird?branch=davey#2ee686af37a692cd189a17771beda426ae7f051f" +dependencies = [ + "aead", + "aes-gcm", + "async-trait", + "audiopus", + "byteorder", + "bytes", + "chacha20poly1305", + "crypto-common", + "dashmap", + "davey", + "derivative", + "discortp", + "either", + "flume", + "futures", + "nohash-hasher", + "nonmax", + "parking_lot 0.12.5", + "pin-project", + "rand 0.9.2", + "reqwest 0.12.28", + "ringbuf", + "rubato 0.16.2", + "rusty_pool", + "serde", + "serde-aux", + "serde_json", + "serenity", + "serenity-voice-model", + "socket2 0.5.10", + "stream_lib", + "streamcatcher", + "symphonia", + "symphonia-core", + "tokio", + "tokio-tungstenite 0.26.2", + "tokio-util", + "tokio-websockets", + "tracing", + "tracing-futures", + "twilight-gateway", + "typenum", + "url", + "uuid", +] + +[[package]] +name = "spandsp" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5f076b6e56f1a1062d6950dcd1c6c1df281ae2828db271929c50c191ec8c79e" +dependencies = [ + "bitflags 2.11.0", + "spandsp-sys", + "thiserror 2.0.18", +] + +[[package]] +name = "spandsp-sys" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c05ab99051230293dded61ba3cd32f06eb15b437a8135be21f560f72bab713db" +dependencies = [ + "bindgen 0.72.1", + "cc", + "pkg-config", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stable-vec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dac7bc0f7d0d44329b200020effbc25a534d89fa142af95e3ddf76113412a5e" + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "stream_lib" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e106dd009a0dfd2cf57777c39cad08f852debd366df6e841b250d956cec3277e" +dependencies = [ + "bytes", + "futures-core", + "futures-util", + "hls_m3u8", + "patricia_tree", + "reqwest 0.12.28", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "streamcatcher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71664755c349abb0758fda6218fb2d2391ca2a73f9302c03b145491db4fcea29" +dependencies = [ + "crossbeam-utils", + "futures-util", + "loom", +] + +[[package]] +name = "strength_reduce" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe895eb47f22e2ddd4dabc02bce419d2e643c8e3b585c78158b349195bc24d82" + +[[package]] +name = "strsim" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = 
"strum" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "530efb820d53b712f4e347916c5e7ed20deb76a4f0457943b3182fb889b06d2c" +dependencies = [ + "strum_macros 0.17.1", +] + +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" +dependencies = [ + "strum_macros 0.27.2", +] + +[[package]] +name = "strum_macros" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6e163a520367c465f59e0a61a23cfae3b10b6546d78b6f672a382be79f7110" +dependencies = [ + "heck 0.3.3", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "symphonia" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5773a4c030a19d9bfaa090f49746ff35c75dfddfa700df7a5939d5e076a57039" +dependencies = [ + "lazy_static", + "symphonia-bundle-flac", + "symphonia-codec-pcm", + "symphonia-core", + "symphonia-metadata", +] + +[[package]] +name = "symphonia-bundle-flac" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c91565e180aea25d9b80a910c546802526ffd0072d0b8974e3ebe59b686c9976" +dependencies = [ + "log", + "symphonia-core", + "symphonia-metadata", + "symphonia-utils-xiph", +] + +[[package]] +name = "symphonia-codec-pcm" +version = "0.5.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e89d716c01541ad3ebe7c91ce4c8d38a7cf266a3f7b2f090b108fb0cb031d95" +dependencies = [ + "log", + "symphonia-core", +] + +[[package]] +name = "symphonia-core" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea00cc4f79b7f6bb7ff87eddc065a1066f3a43fe1875979056672c9ef948c2af" +dependencies = [ + "arrayvec", + "bitflags 1.3.2", + "bytemuck", + "lazy_static", + "log", +] + +[[package]] +name = "symphonia-metadata" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36306ff42b9ffe6e5afc99d49e121e0bd62fe79b9db7b9681d48e29fa19e6b16" +dependencies = [ + "encoding_rs", + "lazy_static", + "log", + "symphonia-core", +] + +[[package]] +name = "symphonia-utils-xiph" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27c85ab799a338446b68eec77abf42e1a6f1bb490656e121c6e27bfbab9f16" +dependencies = [ + "symphonia-core", + "symphonia-metadata", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +dependencies = [ + "thiserror-impl 2.0.18", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "time" +version = "0.3.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde_core", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" + +[[package]] +name = "time-macros" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e61e67053d25a4e82c844e8424039d9745781b3fc4f32b8d55ed50f5f667ef3" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tls_codec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de2e01245e2bb89d6f05801c564fa27624dbd7b1846859876c7dad82e90bf6b" +dependencies = [ + "serde", + "tls_codec_derive", + "zeroize", +] + +[[package]] +name = "tls_codec_derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2e76690929402faae40aebdda620a2c0e25dd6d3b9afe48867dfd95991f4bd" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "to-arraystring" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fafaa22f176928fb926345e78eb2ec404603c878b274e6ab1f76de1f6dde1b1" +dependencies = [ + "arrayvec", + "itoa", + "ryu", +] + +[[package]] +name = "tokio" +version = "1.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot 0.12.5", + "pin-project-lite", + "signal-hook-registry", + "socket2 0.6.3", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" +dependencies = [ + "futures-util", + "log", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tungstenite 0.26.2", + "webpki-roots 0.26.11", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25a406cddcc431a75d3d9afc6a7c0f7428d4891dd973e4d54c56b46127bf857" +dependencies = [ + "futures-util", + "log", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tungstenite 0.28.0", + "webpki-roots 0.26.11", +] + +[[package]] +name = "tokio-util" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-websockets" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9fcaf159b4e7a376b05b5bfd77bfd38f3324f5fce751b4213bfc7eaa47affb4e" +dependencies = [ + "base64", + "bytes", + "fastrand", + "futures-core", + "futures-sink", + "http", + "httparse", + "ring", + "rustls-native-certs", + "rustls-pki-types", + "sha1_smol", + "simdutf8", + "tokio", + "tokio-rustls", + "tokio-util", +] + +[[package]] +name = "toml" +version = "0.9.12+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" +dependencies = [ + "indexmap", + "serde_core", + "serde_spanned", + "toml_datetime", + "toml_parser", + "toml_writer", + "winnow 0.7.15", +] + +[[package]] +name = "toml_datetime" +version = "0.7.5+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_parser" +version = "1.0.10+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7df25b4befd31c4816df190124375d5a20c6b6921e2cad937316de3fccd63420" +dependencies = [ + "winnow 1.0.0", +] + +[[package]] +name = "toml_writer" +version = "1.0.7+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17aaa1c6e3dc22b1da4b6bba97d066e354c7945cac2f7852d4e4e7ca7a6b56d" + +[[package]] +name = "tower" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags 2.11.0", + "bytes", + "futures-util", + "http", + 
"http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project", + "tracing", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "transpose" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ad61aed86bc3faea4300c7aee358b4c6d0c8d6ccc36524c96e4c92ccf26e77e" +dependencies = [ + "num-integer", + "strength_reduce", +] + +[[package]] +name = "trim-in-place" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343e926fc669bc8cde4fa3129ab681c63671bae288b1f1081ceee6d9d37904fc" + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "tungstenite" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4793cb5e56680ecbb1d843515b23b6de9a75eb04b66643e256a396d43be33c13" +dependencies = [ + "bytes", + "data-encoding", + "http", + "httparse", + "log", + "rand 0.9.2", + "rustls", + "rustls-pki-types", + "sha1", + "thiserror 2.0.18", + "url", + "utf-8", +] + +[[package]] +name = "tungstenite" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8628dcc84e5a09eb3d8423d6cb682965dea9133204e8fb3efee74c2a0c259442" +dependencies = [ + "bytes", + "data-encoding", + "http", + "httparse", + "log", + "rand 0.9.2", + "rustls", + "rustls-pki-types", + "sha1", + "thiserror 2.0.18", + "url", + "utf-8", +] + +[[package]] +name = "twilight-gateway" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "863ef55467bcf6a2958162766fd0b72f33112e36971b37f9ec09b86d4fd0f7d1" +dependencies = [ + "bitflags 2.11.0", + "fastrand", + "futures-core", + "futures-sink", + "serde", + 
"serde_json", + "tokio", + "tokio-websockets", + "tracing", + "twilight-gateway-queue", + "twilight-model", +] + +[[package]] +name = "twilight-gateway-queue" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27c962bd4693da0a215abe6b431fd73eb87111f49c431b87951780f9f7985002" +dependencies = [ + "tokio", + "tracing", +] + +[[package]] +name = "twilight-model" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "191e2efa051dfbd9bed4c9f6bd3f5e007bda909c687a1db2760371a3d566617d" +dependencies = [ + "bitflags 2.11.0", + "serde", + "serde-value", + "serde_repr", + "time", +] + +[[package]] +name = "typenum" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + +[[package]] +name = "typenum_mappings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cbc2d8952dd1e08b0164a5b51549e80631ac9da4107669d26c8ea89cb0b5545" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", + "to-arraystring", +] + +[[package]] +name = "typesize" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da66c62c5b7017a2787e77373c03e6a5aafde77a73bff1ff96e91cd2e128179" +dependencies = [ + "chrono", + "nonmax", + "parking_lot 0.12.5", + "secrecy", + "serde_json", + "time", + "typesize-derive", + "url", +] + +[[package]] +name = "typesize-derive" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "536b6812192bda8551cfa0e52524e328c6a951b48e66529ee4522d6c721243d6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "udptl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b255ad0ff36582a8a453c42a2bcc16c72d00f0ab16a14a4a7aeacb55ccb2a351" +dependencies = [ + "thiserror 
2.0.18", + "tokio", + "tracing", +] + +[[package]] +name = "unicase" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" + +[[package]] +name = "unicode-ident" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "universal-hash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", + "serde_derive", +] + +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + 
+[[package]] +name = "uuid" +version = "1.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a68d3c8f01c0cfa54a75291d83601161799e4a89a39e0929f4b0354d88757a37" +dependencies = [ + "getrandom 0.4.2", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "visibility" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.2+wasi-0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8" +dependencies = [ + "cfg-if", + "futures-util", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn 2.0.117", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "wasm-streams" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1ec4f6517c9e11ae630e200b2b65d193279042e28edd4a2cda233e46670bbb" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags 2.11.0", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + +[[package]] +name = "web-sys" +version = "0.3.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-root-certs" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.6", +] + +[[package]] +name = "webpki-roots" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windowfunctions" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90628d739333b7c5d2ee0b70210b97b8cddc38440c682c96fd9e2c24c2db5f3a" +dependencies = [ + "num-traits", +] + +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] 
+ +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = 
"windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "winnow" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df79d97927682d2fd8adb29682d1140b343be4ac0f08fd68b7765d9c059d3945" + +[[package]] +name = "winnow" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8" + +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck 0.5.0", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck 0.5.0", + "indexmap", 
+ "prettyplease", + "syn 2.0.117", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.117", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags 2.11.0", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "x25519-dalek" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" +dependencies = [ + "curve25519-dalek", + "rand_core 0.6.4", + "serde", + "zeroize", +] + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = 
"yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efbb2a062be311f2ba113ce66f697a4dc589f85e78a4aea276200804cea0ed87" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "zlib-rs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be3d40e40a133f9c916ee3f9f4fa2d9d63435b5fbe1bfc6d9dae0aa0ada1513" + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.16+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" +dependencies = [ + "cc", + "pkg-config", +] + +[[package]] +name = "zune-core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb8a0807f7c01457d0379ba880ba6322660448ddebc890ce29bb64da71fb40f9" + +[[package]] +name = 
"zune-jpeg" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec5f41c76397b7da451efd19915684f727d7e1d516384ca6bd0ec43ec94de23c" +dependencies = [ + "zune-core", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..cd8458a --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,3 @@ +[workspace] +members = ["sipcord-bridge", "pjsua"] +resolver = "2" diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..a2b8ccb --- /dev/null +++ b/Dockerfile @@ -0,0 +1,115 @@ +# Stage 0: Shared base with build dependencies +FROM debian:trixie AS build-base + +RUN apt-get update && apt-get install -y \ + cmake \ + pkg-config \ + build-essential \ + libssl-dev \ + libasound2-dev \ + uuid-dev \ + libclang-dev \ + curl \ + libopencore-amrnb-dev \ + libopencore-amrwb-dev \ + libopus-dev \ + libtiff-dev \ + libjpeg-dev \ + && rm -rf /var/lib/apt/lists/* + +# Stage 1: Build pjproject C library (slow, cached unless pjsua/pjproject changes) +FROM build-base AS pjproject-builder + +WORKDIR /build + +COPY pjsua/pjproject/ pjproject-src/ + +RUN mkdir -p pjproject-build pjproject-install && \ + cd pjproject-build && \ + cmake \ + -G "Unix Makefiles" \ + -DCMAKE_INSTALL_PREFIX=/build/pjproject-install \ + -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_SHARED_LIBS=OFF \ + -DPJ_SKIP_EXPERIMENTAL_NOTICE=ON \ + -DPJ_ENABLE_TESTS=OFF \ + -DBUILD_TESTING=OFF \ + -DPJMEDIA_WITH_VIDEO=OFF \ + -DPJMEDIA_WITH_FFMPEG=OFF \ + -DPJMEDIA_WITH_LIBYUV=OFF \ + -DPJMEDIA_WITH_OPENCORE_AMRNB_CODEC=ON \ + -DPJMEDIA_WITH_OPENCORE_AMRWB_CODEC=ON \ + -DPJMEDIA_WITH_OPUS_CODEC=ON \ + -DPJLIB_WITH_SSL=openssl \ + "-DCMAKE_C_FLAGS=-DPJSUA_MAX_CALLS=128" \ + "-DCMAKE_CXX_FLAGS=-DPJSUA_MAX_CALLS=128" \ + ../pjproject-src && \ + cmake --build . -j$(nproc) \ + --target pjlib pjlib-util pjnath pjmedia pjmedia-audiodev \ + pjmedia-codec pjsip pjsip-simple pjsip-ua pjsua-lib pjsua2 \ + resample srtp speex g7221 gsm ilbc && \ + cmake --install . 
|| true + +# Collect all .a files into a single flat lib directory +RUN mkdir -p /build/pjproject-install/lib && \ + find /build/pjproject-build /build/pjproject-install -name '*.a' -exec cp -n {} /build/pjproject-install/lib/ \; && \ + echo "Libraries collected:" && ls /build/pjproject-install/lib/ + +# Stage 2: Build Rust dependencies (cached unless Cargo.toml/lock changes) +FROM build-base AS deps-builder + +# Install Rust nightly (required for portable_simd) +RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain nightly +ENV PATH="/root/.cargo/bin:${PATH}" + +WORKDIR /build + +# Copy pre-built pjproject from stage 1 +COPY --from=pjproject-builder /build/pjproject-install /pjproject +ENV PJPROJECT_DIR=/pjproject + +# Copy only what cargo needs for dependency resolution +COPY Cargo.toml Cargo.lock ./ +COPY pjsua/ pjsua/ +COPY sipcord-bridge/Cargo.toml sipcord-bridge/Cargo.toml + +# Create dummy source files to build dependencies only +RUN mkdir -p sipcord-bridge/src && \ + echo '#![feature(portable_simd)] fn main() {}' > sipcord-bridge/src/main.rs && \ + echo '#![feature(portable_simd)]' > sipcord-bridge/src/lib.rs + +RUN cargo build --release -p sipcord-bridge + +# Stage 3: Build application (fast, only rebuilds when src/ changes) +FROM deps-builder AS builder + +RUN rm -rf sipcord-bridge/src +COPY sipcord-bridge/src/ sipcord-bridge/src/ +COPY wav/ wav/ +COPY config.toml config.toml + +RUN touch sipcord-bridge/src/main.rs sipcord-bridge/src/lib.rs +RUN cargo build --release -p sipcord-bridge + +# Stage 4: Minimal runtime image +FROM debian:trixie-slim + +RUN apt-get update && apt-get install -y \ + ca-certificates \ + libasound2 \ + libssl3 \ + libuuid1 \ + libopencore-amrnb0 \ + libopencore-amrwb0 \ + libopus0 \ + libtiff6 \ + libjpeg62-turbo \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /app + +COPY --from=builder /build/target/release/sipcord-bridge /app/sipcord-bridge +COPY --from=builder /build/config.toml 
/app/config.toml +COPY --from=builder /build/wav/ /app/wav/ + +ENTRYPOINT ["/app/sipcord-bridge"] diff --git a/README.md b/README.md new file mode 100644 index 0000000..8783f3c --- /dev/null +++ b/README.md @@ -0,0 +1,20 @@ +# SIPcord Bridge + +This is a slice of the code that powers [SIPcord](https://sipcord.net/) that you can use to self host something similar. It's not the full SIPcord package but rather the core functionality used in SIPcord with ways to build your own backend adapter. SIPcord itself uses this as a component of the full build so the code is the same that runs on the public bridges. + +## Help! + +I am providing 0 support for this, my goal is to run [sipcord.net](https://sipcord.net/), not support self hosting. If you want to run this self hosted, feel free to use this code but do not ask me for support. + +## I have a feature request! + +**PR's welcome**. No really, feel free to implement it and contribute. + +## Acknowledgements + +- Thanks to [dusthillguy](https://www.youtube.com/watch?v=IK1ydvw3xkU) for letting me use the song *"Joona Kouvolalainen buttermilk"* as hold music and distribute it. +- Thanks to chrischrome for hosting bridge-use1 + +## License + +GPLv3 \ No newline at end of file diff --git a/config.toml b/config.toml new file mode 100644 index 0000000..7ca420b --- /dev/null +++ b/config.toml @@ -0,0 +1,43 @@ +# Sipcord Bridge — sample configuration +# +# Copy this file to your working directory and adjust as needed. +# All sections except [sounds] are optional (defaults shown). 
+ +[sounds] +# System sounds (preloaded into memory at startup) +discord_join = { src = "discord_join.wav", preload = true } +connecting = { src = "connecting.wav", preload = true } +unknown_error = { src = "unknown.wav", preload = true } +no_channel_mapping = { src = "no_channel_mapping.wav", preload = true } +no_permissions = { src = "no_permissions.wav", preload = true } +server_is_busy = { src = "serverisbusy.wav", preload = true } + +# Easter eggs (streamed from disk on demand) +easteregg = { src = "nokia.flac", preload = false, extension = 11111 } +hold = { src = "JoonaKouvolalainen.flac", preload = false, extension = 10000 } + +# Test tone — 440Hz sine wave, generated dynamically (no file needed) +test_tone = { extension = 0 } + +[bridge] +# rtp_inactivity_timeout_secs = 60 +# no_audio_timeout_secs = 10 +# empty_bridge_grace_period_secs = 30 +# max_channel_buffer_samples = 32000 +# api_timeout_secs = 10 +# health_check_interval_secs = 5 +# voice_join_max_retries = 2 +# voice_join_retry_delay_secs = 5 +# pjsip_log_level = 4 + +[audio] +# ring_buffer_samples = 96000 +# pre_buffer_samples = 14400 +# vad_silence_threshold = 200 +# vad_mute_threshold = 50 +# vad_silence_frames_before_stop = 15 + +[fax] +# tmp_folder = "/tmp/sipcord-fax" +# prefix = "fax_" +# output_format = "png" diff --git a/pjsua/Cargo.toml b/pjsua/Cargo.toml new file mode 100644 index 0000000..b827add --- /dev/null +++ b/pjsua/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "pjsua" +version = "0.1.0" +edition = "2021" +description = "Rust bindings for pjsua (pjproject SIP library)" + +[dependencies] + +[build-dependencies] +bindgen = "0.71" +pkg-config = "0.3" +cmake = "0.1" diff --git a/pjsua/build.rs b/pjsua/build.rs new file mode 100644 index 0000000..e6c0d51 --- /dev/null +++ b/pjsua/build.rs @@ -0,0 +1,535 @@ +//! Build script for pjsua bindings +//! +//! This script builds pjproject from source if not found, then generates +//! Rust bindings using bindgen. +//! +//! 
Set PJPROJECT_DIR to a pre-built pjproject install prefix to skip the +//! cmake build (used in Docker to separate the slow C build into its own layer). + +use std::env; +use std::path::PathBuf; +use std::process::Command; + +fn main() { + println!("cargo:rerun-if-changed=build.rs"); + println!("cargo:rerun-if-env-changed=PJPROJECT_DIR"); + + let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); + + // If PJPROJECT_DIR is set, use pre-built pjproject (e.g. from a separate Docker stage). + // Otherwise build from source via cmake. + let include_paths = if let Ok(prefix) = env::var("PJPROJECT_DIR") { + let prefix = PathBuf::from(&prefix); + println!("cargo:warning=Using pre-built pjproject from: {}", prefix.display()); + + let lib_dir = prefix.join("lib"); + println!("cargo:rustc-link-search=native={}", lib_dir.display()); + + // Link libraries in the correct dependency order (same as build-from-source path) + let pj_libs = [ + "pjsua-lib", "pjsua2", "pjsip-ua", "pjsip-simple", "pjsip", + "pjmedia-codec", "pjmedia", "pjmedia-audiodev", "pjnath", + "pjlib-util", "pjlib", + "srtp", "resample", "speex", "g7221", "gsm", "ilbc", + ]; + for lib in &pj_libs { + println!("cargo:rustc-link-lib=static={}", lib); + } + + vec![prefix.join("include")] + } else { + build_from_source(&out_dir) + }; + + // ---- System libraries (common to both paths) ---- + + #[cfg(target_os = "macos")] + { + println!("cargo:rustc-link-lib=framework=AudioToolbox"); + println!("cargo:rustc-link-lib=framework=AudioUnit"); + println!("cargo:rustc-link-lib=framework=CoreAudio"); + println!("cargo:rustc-link-lib=framework=CoreServices"); + println!("cargo:rustc-link-lib=framework=Foundation"); + println!("cargo:rustc-link-lib=framework=AVFoundation"); + println!("cargo:rustc-link-lib=framework=CoreMedia"); + println!("cargo:rustc-link-lib=framework=CoreVideo"); + println!("cargo:rustc-link-lib=framework=VideoToolbox"); + println!("cargo:rustc-link-lib=framework=Security"); + } + + #[cfg(target_os = 
"linux")] + { + println!("cargo:rustc-link-lib=asound"); + println!("cargo:rustc-link-lib=pthread"); + println!("cargo:rustc-link-lib=m"); + println!("cargo:rustc-link-lib=rt"); + println!("cargo:rustc-link-lib=uuid"); + println!("cargo:rustc-link-lib=opencore-amrnb"); + println!("cargo:rustc-link-lib=opencore-amrwb"); + println!("cargo:rustc-link-lib=opus"); + } + + // OpenSSL + #[cfg(target_os = "macos")] + { + let openssl_paths = [ + "/opt/homebrew/opt/openssl@3/lib", + "/opt/homebrew/opt/openssl/lib", + "/usr/local/opt/openssl@3/lib", + "/usr/local/opt/openssl/lib", + ]; + for path in &openssl_paths { + if std::path::Path::new(path).exists() { + println!("cargo:rustc-link-search=native={}", path); + break; + } + } + + let amr_paths = [ + "/opt/homebrew/opt/opencore-amr/lib", + "/usr/local/opt/opencore-amr/lib", + ]; + for path in &amr_paths { + if std::path::Path::new(path).exists() { + println!("cargo:rustc-link-search=native={}", path); + println!("cargo:rustc-link-lib=opencore-amrnb"); + println!("cargo:rustc-link-lib=opencore-amrwb"); + break; + } + } + + let opus_paths = [ + "/opt/homebrew/opt/opus/lib", + "/usr/local/opt/opus/lib", + ]; + for path in &opus_paths { + if std::path::Path::new(path).exists() { + println!("cargo:rustc-link-search=native={}", path); + println!("cargo:rustc-link-lib=opus"); + break; + } + } + } + + println!("cargo:rustc-link-lib=ssl"); + println!("cargo:rustc-link-lib=crypto"); + + #[cfg(target_os = "macos")] + println!("cargo:rustc-link-lib=c++"); + #[cfg(target_os = "linux")] + println!("cargo:rustc-link-lib=stdc++"); + + // ---- Generate bindings ---- + + let mut clang_args = Vec::new(); + + for path in &include_paths { + clang_args.push(format!("-I{}", path.display())); + } + + #[cfg(target_endian = "little")] + { + clang_args.push("-DPJ_IS_LITTLE_ENDIAN=1".to_string()); + clang_args.push("-DPJ_IS_BIG_ENDIAN=0".to_string()); + } + #[cfg(target_endian = "big")] + { + clang_args.push("-DPJ_IS_LITTLE_ENDIAN=0".to_string()); + 
clang_args.push("-DPJ_IS_BIG_ENDIAN=1".to_string()); + } + + #[cfg(target_os = "macos")] + { + clang_args.push("-DPJ_DARWINOS=1".to_string()); + clang_args.push("-DPJ_HAS_LIMITS_H=1".to_string()); + } + #[cfg(target_os = "linux")] + { + clang_args.push("-DPJ_LINUX=1".to_string()); + clang_args.push("-DPJ_HAS_LIMITS_H=1".to_string()); + } + + #[cfg(target_pointer_width = "64")] + clang_args.push("-DPJ_HAS_INT64=1".to_string()); + + clang_args.push("-DPJ_AUTOCONF=1".to_string()); + + let pjsua_header = include_paths.iter() + .find_map(|p| { + let header = p.join("pjsua-lib/pjsua.h"); + if header.exists() { + return Some(header); + } + let header = p.join("pjsua.h"); + if header.exists() { Some(header) } else { None } + }) + .expect("Could not find pjsua.h header in installed location"); + + println!("cargo:warning=Using pjsua.h from: {}", pjsua_header.display()); + println!("cargo:warning=Include paths: {:?}", include_paths); + + let bindings = bindgen::Builder::default() + .header(pjsua_header.to_str().unwrap()) + .clang_args(&clang_args) + .generate_comments(false) + .allowlist_type(r"pj.*") + .allowlist_type(r"PJ.*") + .allowlist_var(r"pj.*") + .allowlist_var(r"PJ.*") + .allowlist_function(r"pj.*") + .allowlist_function(r"PJ.*") + .generate() + .expect("Unable to generate bindings"); + + let bindings_path = out_dir.join("bindings.rs"); + bindings + .write_to_file(&bindings_path) + .expect("Couldn't write bindings!"); + + println!("cargo:warning=Bindings written to: {}", bindings_path.display()); +} + +/// Build pjproject from source and return include paths. 
+fn build_from_source(out_dir: &PathBuf) -> Vec { + let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()); + let pjproject_src = manifest_dir.join("pjproject"); + + let pjproject_build = out_dir.join("pjproject-build"); + let pjproject_install = out_dir.join("pjproject-install"); + + std::fs::create_dir_all(&pjproject_build).expect("Failed to create build directory"); + std::fs::create_dir_all(&pjproject_install).expect("Failed to create install directory"); + + let include_dir = pjproject_install.join("include"); + let lib_dir = pjproject_install.join("lib"); + + build_pjproject(&pjproject_src, &pjproject_build, &pjproject_install); + + let include_paths = vec![include_dir.clone()]; + let lib_paths = vec![lib_dir.clone()]; + + // Set up library paths + for path in &lib_paths { + println!("cargo:rustc-link-search=native={}", path.display()); + } + + // For built-from-source pjproject, libraries are in the build directory subdirs + let pjproject_build_for_libs = out_dir.join("pjproject-build"); + if pjproject_build_for_libs.exists() { + let lib_subdirs = [ + "pjlib", + "pjlib-util", + "pjmedia", + "pjnath", + "pjsip", + "third_party/resample", + "third_party/speex", + "third_party/g7221", + "third_party/yuv", + "third_party/gsm", + "third_party/srtp", + "third_party/ilbc", + ]; + + for subdir in &lib_subdirs { + let lib_path = pjproject_build_for_libs.join(subdir); + if lib_path.exists() { + println!("cargo:rustc-link-search=native={}", lib_path.display()); + } + } + + // Link libraries in the correct order (dependencies matter!) 
+ let pj_libs = [ + "pjsua-lib", // main pjsua library + "pjsua2", // C++ wrapper (may be needed) + "pjsip-ua", // SIP user agent + "pjsip-simple", // SIP SIMPLE presence + "pjsip", // Core SIP + "pjmedia-codec",// Media codecs + "pjmedia", // Media framework + "pjmedia-audiodev", // Audio device + "pjnath", // NAT traversal + "pjlib-util", // Utility functions + "pjlib", // Core library + // Third party + "srtp", + "resample", + "speex", + "g7221", + "gsm", + "ilbc", + ]; + + for lib in &pj_libs { + println!("cargo:rustc-link-lib=static={}", lib); + } + } else { + // Link against pjproject libraries from install directory (static) + for lib_path in &lib_paths { + if let Ok(entries) = std::fs::read_dir(lib_path) { + for entry in entries.flatten() { + let path = entry.path(); + if let Some(ext) = path.extension() { + if ext == "a" { + if let Some(name) = path.file_stem() { + let name = name.to_string_lossy(); + if name.starts_with("lib") { + let lib_name = name.strip_prefix("lib").unwrap(); + println!("cargo:rustc-link-lib=static={}", lib_name); + } + } + } + } + } + } + } + } + + include_paths +} + +fn build_pjproject(pjproject_src: &std::path::Path, pjproject_build: &std::path::Path, pjproject_install: &std::path::Path) { + // Check for .pc file in build dir (CMake install doesn't always copy it to install dir) + let pc_file = pjproject_build.join("libpjproject.pc"); + + if !pc_file.exists() { + println!("cargo:warning=Building pjproject from source (this may take several minutes)..."); + + // Detect cross-compilation target + let target = env::var("TARGET").unwrap_or_default(); + let host = env::var("HOST").unwrap_or_default(); + let is_cross = target != host; + + // Collect C/CXX flags — merged at the end into CMAKE_C_FLAGS/CMAKE_CXX_FLAGS. + // pjsua.h guards PJSUA_MAX_CALLS with #ifndef, so -D on the command line wins. 
+ let mut c_flags: Vec<&str> = vec!["-DPJSUA_MAX_CALLS=128"]; + + let mut cmake_args = vec![ + "-G".to_string(), "Unix Makefiles".to_string(), + format!("-DCMAKE_INSTALL_PREFIX={}", pjproject_install.display()), + "-DCMAKE_BUILD_TYPE=Release".to_string(), + "-DBUILD_SHARED_LIBS=OFF".to_string(), + "-DPJ_SKIP_EXPERIMENTAL_NOTICE=ON".to_string(), + // Disable tests to avoid linking issues with cross-compilation + "-DPJ_ENABLE_TESTS=OFF".to_string(), + "-DBUILD_TESTING=OFF".to_string(), + // Disable video support + "-DPJMEDIA_WITH_VIDEO=OFF".to_string(), + "-DPJMEDIA_WITH_FFMPEG=OFF".to_string(), + "-DPJMEDIA_WITH_LIBYUV=OFF".to_string(), + // Enable AMR codecs (IMS/MR-NB support) + "-DPJMEDIA_WITH_OPENCORE_AMRNB_CODEC=ON".to_string(), + "-DPJMEDIA_WITH_OPENCORE_AMRWB_CODEC=ON".to_string(), + // Enable Opus codec + "-DPJMEDIA_WITH_OPUS_CODEC=ON".to_string(), + // Enable TLS/SSL support with OpenSSL + "-DPJLIB_WITH_SSL=openssl".to_string(), + ]; + + // Configure cross-compilation toolchain + if is_cross { + println!("cargo:warning=Cross-compiling for {} from {}", target, host); + + // Map Rust target to cross-compiler prefix + let cross_prefix = match target.as_str() { + "aarch64-unknown-linux-gnu" => "aarch64-linux-gnu", + "x86_64-unknown-linux-gnu" => "x86_64-linux-gnu", + _ => "", + }; + + if !cross_prefix.is_empty() { + let cc = format!("{}-gcc", cross_prefix); + let cxx = format!("{}-g++", cross_prefix); + + // Check if cross-compiler exists + if std::process::Command::new("which").arg(&cc).output().map(|o| o.status.success()).unwrap_or(false) { + cmake_args.push(format!("-DCMAKE_C_COMPILER={}", cc)); + cmake_args.push(format!("-DCMAKE_CXX_COMPILER={}", cxx)); + println!("cargo:warning=Using cross-compiler: {}", cc); + + // ARM64: Fix atomic alignment issues + // 1. -mno-outline-atomics: Use inline atomics instead of helper functions + // 2. 
-DPJ_POOL_ALIGNMENT=8: Force pjlib pool to use 8-byte alignment (C define) + if target.contains("aarch64") { + c_flags.push("-mno-outline-atomics"); + c_flags.push("-DPJ_POOL_ALIGNMENT=8"); + println!("cargo:warning=ARM64: Using inline atomics with 8-byte pool alignment"); + } + + // The cross-compiler (from crossbuild-essential-arm64) has --sysroot=/usr/aarch64-linux-gnu + // baked into its specs, but the actual libraries are in /usr/lib/aarch64-linux-gnu/ + // via Debian's multiarch. We must override the sysroot to "/" so the linker + // finds libc at /lib/aarch64-linux-gnu/ instead of the non-existent + // /usr/aarch64-linux-gnu/lib/libc.so.6 + cmake_args.push("-DCMAKE_SYSROOT=/".to_string()); + println!("cargo:warning=Overriding sysroot to / for multiarch compatibility"); + + // Help CMake find cross-compiled libraries in multiarch paths + let multiarch_lib = format!("/usr/lib/{}", cross_prefix); + if std::path::Path::new(&multiarch_lib).exists() { + cmake_args.push(format!("-DCMAKE_FIND_ROOT_PATH=/usr;{}", multiarch_lib)); + cmake_args.push("-DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=BOTH".to_string()); + cmake_args.push("-DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=BOTH".to_string()); + cmake_args.push("-DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=NEVER".to_string()); + println!("cargo:warning=Using multiarch library path: {}", multiarch_lib); + + // Explicitly set OpenSSL paths for cross-compilation + let openssl_ssl = format!("{}/libssl.so", multiarch_lib); + let openssl_crypto = format!("{}/libcrypto.so", multiarch_lib); + if std::path::Path::new(&openssl_ssl).exists() { + cmake_args.push("-DOPENSSL_ROOT_DIR=/usr".to_string()); + cmake_args.push("-DOPENSSL_INCLUDE_DIR=/usr/include".to_string()); + cmake_args.push(format!("-DOPENSSL_SSL_LIBRARY={}", openssl_ssl)); + cmake_args.push(format!("-DOPENSSL_CRYPTO_LIBRARY={}", openssl_crypto)); + println!("cargo:warning=Using cross-compiled OpenSSL from {}", multiarch_lib); + } + + // Explicitly set Opus paths for cross-compilation + let 
opus_lib = format!("{}/libopus.so", multiarch_lib); + if std::path::Path::new(&opus_lib).exists() { + cmake_args.push("-DOPUS_INCLUDE_DIR=/usr/include".to_string()); + cmake_args.push(format!("-DOPUS_LIBRARY={}", opus_lib)); + println!("cargo:warning=Using cross-compiled Opus from {}", multiarch_lib); + } + } + } + } + } else { + // Native build - find OpenSSL in standard locations + let openssl_prefixes = if cfg!(target_os = "macos") { + vec![ + "/opt/homebrew/opt/openssl@3", + "/opt/homebrew/opt/openssl", + "/usr/local/opt/openssl@3", + "/usr/local/opt/openssl", + ] + } else { + vec!["/usr", "/usr/local"] + }; + + for prefix in &openssl_prefixes { + let include_path = format!("{}/include", prefix); + if std::path::Path::new(&include_path).join("openssl/ssl.h").exists() { + println!("cargo:warning=Found OpenSSL at: {}", prefix); + cmake_args.push(format!("-DOPENSSL_ROOT_DIR={}", prefix)); + if cfg!(target_os = "macos") { + cmake_args.push(format!("-DOPENSSL_INCLUDE_DIR={}", include_path)); + let lib_path = format!("{}/lib", prefix); + let static_crypto = format!("{}/libcrypto.a", lib_path); + let static_ssl = format!("{}/libssl.a", lib_path); + if std::path::Path::new(&static_crypto).exists() { + cmake_args.push(format!("-DOPENSSL_CRYPTO_LIBRARY={}", static_crypto)); + cmake_args.push(format!("-DOPENSSL_SSL_LIBRARY={}", static_ssl)); + } + } + break; + } + } + + // Native build - find Opus codec library + let opus_prefixes = if cfg!(target_os = "macos") { + vec![ + "/opt/homebrew/opt/opus", + "/usr/local/opt/opus", + ] + } else { + vec!["/usr", "/usr/local"] + }; + + for prefix in &opus_prefixes { + let include_path = format!("{}/include", prefix); + if std::path::Path::new(&include_path).join("opus/opus.h").exists() { + println!("cargo:warning=Found Opus at: {}", prefix); + cmake_args.push(format!("-DOPUS_INCLUDE_DIR={}", include_path)); + let lib_path = format!("{}/lib", prefix); + let opus_lib = if cfg!(target_os = "macos") { + format!("{}/libopus.a", lib_path) 
+ } else { + format!("{}/libopus.so", lib_path) + }; + if std::path::Path::new(&opus_lib).exists() { + cmake_args.push(format!("-DOPUS_LIBRARY={}", opus_lib)); + } + break; + } + } + } + + // Merge all collected C/CXX flags into cmake args + if !c_flags.is_empty() { + let flags = c_flags.join(" "); + println!("cargo:warning=C flags: {}", flags); + cmake_args.push(format!("-DCMAKE_C_FLAGS={}", flags)); + cmake_args.push(format!("-DCMAKE_CXX_FLAGS={}", flags)); + } + + cmake_args.push(pjproject_src.to_str().unwrap().to_string()); + + // Run CMake configure + let cmake_result = Command::new("cmake") + .current_dir(pjproject_build) + .args(&cmake_args) + .output() + .expect("Failed to run cmake configure"); + + if !cmake_result.status.success() { + eprintln!("CMake configure stdout: {}", String::from_utf8_lossy(&cmake_result.stdout)); + eprintln!("CMake configure stderr: {}", String::from_utf8_lossy(&cmake_result.stderr)); + panic!("CMake configure failed"); + } + + // Get number of CPUs for parallel build + let num_cpus = std::thread::available_parallelism() + .map(|n| n.get()) + .unwrap_or(4); + + // Run CMake build - only build the libraries we need, not sample apps + println!("cargo:warning=Compiling pjproject with {} threads...", num_cpus); + let mut build_args = vec![ + "--build".to_string(), ".".to_string(), + "--config".to_string(), "Release".to_string(), + ]; + + // Specify only the library targets we need + let targets = [ + "pjlib", "pjlib-util", "pjnath", "pjmedia", "pjmedia-audiodev", + "pjmedia-codec", "pjsip", "pjsip-simple", "pjsip-ua", "pjsua-lib", "pjsua2", + "resample", "srtp", "speex", "g7221", "gsm", "ilbc", + ]; + for target in &targets { + build_args.push("--target".to_string()); + build_args.push(target.to_string()); + } + build_args.push("-j".to_string()); + build_args.push(num_cpus.to_string()); + + let build_result = Command::new("cmake") + .current_dir(pjproject_build) + .args(&build_args) + .output() + .expect("Failed to run cmake build"); 
+ + if !build_result.status.success() { + eprintln!("CMake build stdout: {}", String::from_utf8_lossy(&build_result.stdout)); + eprintln!("CMake build stderr: {}", String::from_utf8_lossy(&build_result.stderr)); + panic!("CMake build failed"); + } + println!("cargo:warning=Library builds complete"); + + // Run CMake install - may fail for sample apps but that's OK + let install_result = Command::new("cmake") + .current_dir(pjproject_build) + .args(["--install", "."]) + .output() + .expect("Failed to run cmake install"); + + if !install_result.status.success() { + // Install might fail for sample apps we didn't build, but libraries are installed + println!("cargo:warning=CMake install had errors (OK if only sample apps failed)"); + } + + println!("cargo:warning=pjproject build complete!"); + } else { + println!("cargo:warning=Using cached pjproject build"); + } +} diff --git a/pjsua/pjproject b/pjsua/pjproject new file mode 160000 index 0000000..9caa8d4 --- /dev/null +++ b/pjsua/pjproject @@ -0,0 +1 @@ +Subproject commit 9caa8d4ef5374650cb4d6e06080e854ea5ea339b diff --git a/pjsua/src/lib.rs b/pjsua/src/lib.rs new file mode 100644 index 0000000..5a87e31 --- /dev/null +++ b/pjsua/src/lib.rs @@ -0,0 +1,17 @@ +//! Rust bindings for pjsua (pjproject SIP library) +//! +//! This crate provides low-level FFI bindings to pjsua, generated via bindgen. +//! The pjproject library is built from source automatically if not found on the system. 
+ +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(improper_ctypes)] +#![allow(clippy::all)] + +mod pjsua { + #![allow(unnecessary_transmutes)] + include!(concat!(env!("OUT_DIR"), "/bindings.rs")); +} + +pub use pjsua::*; diff --git a/sipcord-bridge/Cargo.toml b/sipcord-bridge/Cargo.toml new file mode 100644 index 0000000..e3a20ec --- /dev/null +++ b/sipcord-bridge/Cargo.toml @@ -0,0 +1,113 @@ +[package] +name = "sipcord-bridge" +version = "1.5.5" +edition = "2021" +publish = false + +[[bin]] +name = "sipcord-bridge" +path = "src/main.rs" + +[lib] +name = "sipcord_bridge" +path = "src/lib.rs" + +[dependencies] +# Async runtime +tokio = { version = "1.49.0", features = ["full"] } +tokio-util = "0.7" + +# Discord +serenity = { git = "https://github.com/serenity-rs/serenity", default-features = false, features = [ + "default_no_backend", + "rustls_backend", + "voice", + "model", + "framework", +] , branch = "next"} +songbird = { version = "0.5.0", git = "https://github.com/jtscuba/songbird", features = ["driver", "gateway", "receive", "tungstenite"] , branch = "davey" } +poise = { version = "0.6.1", git = "https://github.com/serenity-rs/poise", branch = "serenity-next"} + +# HTTP client for API calls +reqwest = { version = "0.13.1", default-features = false, features = [ + "json", + "multipart", + "rustls", +] } + +# Serialization +serde = { version = "1.0.228", features = ["derive"] } +serde_json = "1.0.149" + +# Audio processing +audiopus = "0.3.0-rc.0" +rubato = "1.0.1" +audioadapter = "2.0.0" +audioadapter-buffers = "2.0.0" + +# Symphonia with PCM codec for RawAdapter support in Songbird +symphonia = { version = "0.5.5", default-features = false, features = [ + "pcm", + "flac", +] } + +# FLAC decoder +claxon = "0.4.3" + +# SIP - using pjsua bindings (builds pjproject from pjsua/pjproject submodule) +pjsua = { path = "../pjsua" } + +# SpanDSP - fax demodulation +spandsp = "0.1.5" + +# UDPTL transport for T.38 
fax +udptl = "0.1.0" + +# Image conversion (fax TIFF -> PNG for Discord) +image = { version = "0.25", default-features = false, features = [ + "png", + "jpeg", +] } + +# Networking +ipnet = "2.11.0" + +# Lock-free ring buffer for real-time audio +rtrb = "0.3.2" + +# Async trait for dyn Backend +async-trait = "0.1" + +# Utilities +anyhow = "1.0.100" +thiserror = "2.0.18" +tracing = "0.1.44" +tracing-subscriber = { version = "0.3.22", features = ["env-filter"] } +parking_lot = "0.12.5" +crossbeam-channel = "0.5.15" +bytes = "1.11.1" +byteorder = "1.5.0" +symphonia-core = "0.5.5" +dashmap = "6.1.0" +moka = { version = "0.12.13", features = ["sync"] } + +# Crypto (MD5 for SIP digest auth cache verification, rand for nonces) +md-5 = "0.10" +rand = "0.9" + +# Configuration +dotenvy = "0.15.7" +envy = "0.4" +base64 = "0.22.1" +toml = "0.9.11" + +# Date/time handling +chrono = { version = "0.4.43", features = ["serde"] } + +# TLS - explicit crypto provider for rustls 0.23+ +rustls = { version = "0.23", default-features = false, features = [ + "ring", + "std", + "tls12", +] } +crossbeam-queue = "0.3.12" diff --git a/sipcord-bridge/src/audio/flac.rs b/sipcord-bridge/src/audio/flac.rs new file mode 100644 index 0000000..f8cbfc6 --- /dev/null +++ b/sipcord-bridge/src/audio/flac.rs @@ -0,0 +1,73 @@ +//! FLAC file parsing +//! +//! Parses FLAC file bytes to extract raw PCM i16 samples. + +use anyhow::{bail, Context}; +use tracing::debug; + +/// Parse a FLAC file and return the raw PCM i16 samples (mono). 
+///
+/// Handles:
+/// - Standard FLAC files
+/// - Stereo to mono conversion (if needed)
+/// - Various bit depths (converted to 16-bit)
+pub fn parse_flac(data: &[u8]) -> anyhow::Result<(Vec<i16>, u32)> {
+    let cursor = std::io::Cursor::new(data);
+    let mut reader = claxon::FlacReader::new(cursor).context("Failed to create FLAC reader")?;
+
+    let info = reader.streaminfo();
+    let sample_rate = info.sample_rate;
+    let num_channels = info.channels as usize;
+    let bits_per_sample = info.bits_per_sample;
+
+    debug!(
+        "FLAC format: {}Hz, {} channels, {} bits per sample",
+        sample_rate, num_channels, bits_per_sample
+    );
+
+    // Read all samples (claxon yields i32 regardless of the source bit depth)
+    let mut raw_samples: Vec<i32> = Vec::new();
+    for sample in reader.samples() {
+        raw_samples.push(sample.context("Failed to read FLAC sample")?);
+    }
+
+    // Convert to i16 based on bit depth: shift up for 8-bit, down for 24/32-bit
+    let samples_i16: Vec<i16> = match bits_per_sample {
+        8 => raw_samples.iter().map(|&s| (s << 8) as i16).collect(),
+        16 => raw_samples.iter().map(|&s| s as i16).collect(),
+        24 => raw_samples.iter().map(|&s| (s >> 8) as i16).collect(),
+        32 => raw_samples.iter().map(|&s| (s >> 16) as i16).collect(),
+        _ => bail!("Unsupported FLAC bit depth: {}", bits_per_sample),
+    };
+
+    // Convert to mono if stereo (samples are interleaved L, R, L, R, ...)
+    let mono_samples = if num_channels == 2 {
+        samples_i16
+            .chunks(2)
+            .map(|chunk| {
+                if chunk.len() == 2 {
+                    ((chunk[0] as i32 + chunk[1] as i32) / 2) as i16
+                } else {
+                    chunk[0]
+                }
+            })
+            .collect()
+    } else if num_channels > 2 {
+        // For more than 2 channels, take first channel only
+        samples_i16
+            .chunks(num_channels)
+            .map(|chunk| chunk[0])
+            .collect()
+    } else {
+        samples_i16
+    };
+
+    debug!(
+        "FLAC data: {} samples ({}Hz, {} channels -> mono)",
+        mono_samples.len(),
+        sample_rate,
+        num_channels
+    );
+
+    Ok((mono_samples, sample_rate))
+}
diff --git a/sipcord-bridge/src/audio/mod.rs b/sipcord-bridge/src/audio/mod.rs
new file mode 100644
index 0000000..5fa5b20
--- /dev/null
+++ b/sipcord-bridge/src/audio/mod.rs
@@ -0,0 +1,8 @@ +//! Audio parsing utilities +//! +//! This module provides audio file parsing for WAV and FLAC formats. +//! Used by the `sound` module for loading audio files from disk. + +pub mod flac; +pub mod simd; +pub mod wav; diff --git a/sipcord-bridge/src/audio/simd.rs b/sipcord-bridge/src/audio/simd.rs new file mode 100644 index 0000000..afd5132 --- /dev/null +++ b/sipcord-bridge/src/audio/simd.rs @@ -0,0 +1,260 @@ +//! SIMD-accelerated audio processing utilities +//! +//! Uses portable_simd for cross-platform support (x86_64 SSE/AVX, aarch64 NEON). +//! Falls back to scalar code for unsupported platforms. + +use std::simd::{cmp::SimdOrd, i16x8, i32x8, num::SimdInt}; + +/// SIMD-accelerated max absolute value for i16 samples. +/// +/// Processes 8 samples at a time using SIMD, with scalar fallback for remainder. +/// This is the hot path for Voice Activity Detection (VAD). +/// +/// # Performance +/// - x86_64: Uses SSE2/AVX2 instructions (vpabsw, pmaxsw) +/// - aarch64: Uses NEON instructions +/// - Expected speedup: 4-8x vs scalar +#[inline] +pub fn max_abs_i16(samples: &[i16]) -> i16 { + if samples.is_empty() { + return 0; + } + + let chunks = samples.chunks_exact(8); + let remainder = chunks.remainder(); + + let mut max_vec = i16x8::splat(0); + for chunk in chunks { + let v = i16x8::from_slice(chunk); + // Handle i16::MIN specially since abs(i16::MIN) overflows + // For audio samples this is rare, but we handle it correctly + let abs_v = v.abs(); + max_vec = max_vec.simd_max(abs_v); + } + + // Horizontal max reduction + let mut result = max_vec.reduce_max(); + + // Process remainder with scalar code + for &s in remainder { + result = result.max(s.saturating_abs()); + } + + result +} + +/// SIMD-accelerated widen i16 to i32 (first speaker — overwrites dst). +/// +/// Processes 8 samples at a time. Used for the first speaker in mixing. 
+#[inline] +pub fn widen_i16_to_i32(src: &[i16], dst: &mut [i32]) { + let len = src.len().min(dst.len()); + let chunks_src = src[..len].chunks_exact(8); + let chunks_dst = dst[..len].chunks_exact_mut(8); + let remainder_start = chunks_src.remainder().len(); + + for (src_chunk, dst_chunk) in chunks_src.zip(chunks_dst) { + let v = i16x8::from_slice(src_chunk); + // Widen i16 -> i32 by casting each lane + let wide: [i32; 8] = [ + v[0] as i32, + v[1] as i32, + v[2] as i32, + v[3] as i32, + v[4] as i32, + v[5] as i32, + v[6] as i32, + v[7] as i32, + ]; + dst_chunk.copy_from_slice(&wide); + } + + // Scalar remainder + let start = len - remainder_start; + for i in start..len { + dst[i] = src[i] as i32; + } +} + +/// SIMD-accelerated accumulate i16 into i32 (mix additional speakers — adds to dst). +/// +/// Processes 8 samples at a time. Used for mixing additional speakers. +#[inline] +pub fn accumulate_i16_to_i32(src: &[i16], dst: &mut [i32]) { + let len = src.len().min(dst.len()); + let chunks_src = src[..len].chunks_exact(8); + let chunks_dst = dst[..len].chunks_exact_mut(8); + let remainder_start = chunks_src.remainder().len(); + + for (src_chunk, dst_chunk) in chunks_src.zip(chunks_dst) { + let v = i16x8::from_slice(src_chunk); + let dst_v = i32x8::from_slice(dst_chunk); + let wide = i32x8::from_array([ + v[0] as i32, + v[1] as i32, + v[2] as i32, + v[3] as i32, + v[4] as i32, + v[5] as i32, + v[6] as i32, + v[7] as i32, + ]); + let sum = dst_v + wide; + dst_chunk.copy_from_slice(sum.as_array()); + } + + // Scalar remainder + let start = len - remainder_start; + for i in start..len { + dst[i] += src[i] as i32; + } +} + +/// SIMD-accelerated clamp i32 to i16 with saturation. +/// +/// Processes 8 samples at a time. Clamps values to i16 range [-32768, 32767]. 
+#[inline] +pub fn clamp_i32_to_i16(src: &[i32], dst: &mut [i16]) { + let len = src.len().min(dst.len()); + let chunks_src = src[..len].chunks_exact(8); + let chunks_dst = dst[..len].chunks_exact_mut(8); + let remainder_start = chunks_src.remainder().len(); + + let min_val = i32x8::splat(-32768); + let max_val = i32x8::splat(32767); + + for (src_chunk, dst_chunk) in chunks_src.zip(chunks_dst) { + let v = i32x8::from_slice(src_chunk); + let clamped = v.simd_max(min_val).simd_min(max_val); + let narrow: [i16; 8] = [ + clamped[0] as i16, + clamped[1] as i16, + clamped[2] as i16, + clamped[3] as i16, + clamped[4] as i16, + clamped[5] as i16, + clamped[6] as i16, + clamped[7] as i16, + ]; + dst_chunk.copy_from_slice(&narrow); + } + + // Scalar remainder + let start = len - remainder_start; + for i in start..len { + dst[i] = src[i].clamp(-32768, 32767) as i16; + } +} + +/// SIMD-accelerated stereo to mono conversion. +/// +/// Averages adjacent sample pairs (L, R) -> (L+R)/2. +/// `stereo` length must be even. `mono` must be at least `stereo.len() / 2`. 
+#[inline] +pub fn stereo_to_mono_i16(stereo: &[i16], mono: &mut [i16]) { + let mono_len = (stereo.len() / 2).min(mono.len()); + + // Process 8 mono samples at a time (16 stereo samples) + let mut i = 0; + while i + 8 <= mono_len { + let si = i * 2; + // Load 16 stereo samples as two i16x8 vectors + let v0 = i16x8::from_slice(&stereo[si..si + 8]); + let v1 = i16x8::from_slice(&stereo[si + 8..si + 16]); + + // Deinterleave: extract even (left) and odd (right) samples + let left = i16x8::from_array([v0[0], v0[2], v0[4], v0[6], v1[0], v1[2], v1[4], v1[6]]); + let right = i16x8::from_array([v0[1], v0[3], v0[5], v0[7], v1[1], v1[3], v1[5], v1[7]]); + + // Average: (l + r) / 2 — use arithmetic shift to avoid overflow + // (l >> 1) + (r >> 1) + ((l & r) & 1) for exact rounding + let avg = (left >> i16x8::splat(1)) + (right >> i16x8::splat(1)); + mono[i..i + 8].copy_from_slice(avg.as_array()); + i += 8; + } + + // Scalar remainder + while i < mono_len { + let l = stereo[i * 2] as i32; + let r = stereo[i * 2 + 1] as i32; + mono[i] = ((l + r) / 2) as i16; + i += 1; + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_max_abs_i16_basic() { + let samples = [1, -5, 3, -2, 4, -10, 7, -8, 100]; + assert_eq!(max_abs_i16(&samples), 100); + } + + #[test] + fn test_max_abs_i16_negative_max() { + let samples = [1, -500, 3, -2]; + assert_eq!(max_abs_i16(&samples), 500); + } + + #[test] + fn test_max_abs_i16_empty() { + let samples: [i16; 0] = []; + assert_eq!(max_abs_i16(&samples), 0); + } + + #[test] + fn test_max_abs_i16_aligned() { + // Exactly 8 samples (one SIMD vector) + let samples = [100, -200, 300, -400, 500, -600, 700, -800]; + assert_eq!(max_abs_i16(&samples), 800); + } + + #[test] + fn test_widen_i16_to_i32() { + let src: Vec = (0..20).map(|i| (i * 100 - 1000) as i16).collect(); + let mut dst = vec![0i32; 20]; + widen_i16_to_i32(&src, &mut dst); + for i in 0..20 { + assert_eq!(dst[i], src[i] as i32, "mismatch at index {}", i); + } + } + + #[test] + fn 
test_accumulate_i16_to_i32() { + let src = [100i16, -200, 300, -400, 500, -600, 700, -800, 900]; + let mut dst = [1i32, 2, 3, 4, 5, 6, 7, 8, 9]; + accumulate_i16_to_i32(&src, &mut dst); + assert_eq!(dst, [101, -198, 303, -396, 505, -594, 707, -792, 909]); + } + + #[test] + fn test_clamp_i32_to_i16() { + let src = [0i32, 32767, -32768, 40000, -40000, 100, -100, 0, 12345]; + let mut dst = [0i16; 9]; + clamp_i32_to_i16(&src, &mut dst); + assert_eq!(dst, [0, 32767, -32768, 32767, -32768, 100, -100, 0, 12345]); + } + + #[test] + fn test_stereo_to_mono() { + // 20 stereo samples -> 10 mono + let stereo: Vec = (0..20).map(|i| (i * 100) as i16).collect(); + let mut mono = vec![0i16; 10]; + stereo_to_mono_i16(&stereo, &mut mono); + for i in 0..10 { + let l = stereo[i * 2] as i32; + let r = stereo[i * 2 + 1] as i32; + let expected = ((l + r) / 2) as i16; + // Allow +-1 for rounding differences between SIMD and scalar + assert!( + (mono[i] as i32 - expected as i32).abs() <= 1, + "mismatch at {}: got {} expected {}", + i, + mono[i], + expected + ); + } + } +} diff --git a/sipcord-bridge/src/audio/wav.rs b/sipcord-bridge/src/audio/wav.rs new file mode 100644 index 0000000..f2efe16 --- /dev/null +++ b/sipcord-bridge/src/audio/wav.rs @@ -0,0 +1,206 @@ +//! WAV file parsing +//! +//! Parses WAV file bytes to extract raw PCM i16 samples. +//! Supports standard PCM WAV files (format code 1). + +use anyhow::ensure; +use tracing::debug; + +/// WAV format chunk data +#[derive(Debug)] +struct WavFormat { + /// Audio format (1 = PCM) + audio_format: u16, + /// Number of channels + num_channels: u16, + /// Sample rate in Hz + sample_rate: u32, + /// Bits per sample (typically 16) + bits_per_sample: u16, +} + +/// Parse a WAV file and return the raw PCM i16 samples (mono). 
+/// +/// Handles: +/// - Standard PCM WAV files (format code 1) +/// - Stereo to mono conversion (if needed) +/// - 16-bit samples +pub fn parse_wav(data: &[u8]) -> anyhow::Result<(Vec, u32)> { + // Validate RIFF header + ensure!(data.len() >= 12, "WAV file too short for header"); + ensure!(&data[0..4] == b"RIFF", "Missing RIFF header"); + ensure!(&data[8..12] == b"WAVE", "Missing WAVE format"); + + let mut pos = 12; + let mut format: Option = None; + let mut samples: Vec = Vec::new(); + + // Parse chunks + while pos + 8 <= data.len() { + let chunk_id = &data[pos..pos + 4]; + let chunk_size = + u32::from_le_bytes([data[pos + 4], data[pos + 5], data[pos + 6], data[pos + 7]]) + as usize; + pos += 8; + + match chunk_id { + b"fmt " => { + ensure!(chunk_size >= 16, "fmt chunk too small"); + format = Some(WavFormat { + audio_format: u16::from_le_bytes([data[pos], data[pos + 1]]), + num_channels: u16::from_le_bytes([data[pos + 2], data[pos + 3]]), + sample_rate: u32::from_le_bytes([ + data[pos + 4], + data[pos + 5], + data[pos + 6], + data[pos + 7], + ]), + // Skip byte rate (4 bytes) and block align (2 bytes) + bits_per_sample: u16::from_le_bytes([data[pos + 14], data[pos + 15]]), + }); + debug!("WAV format: {:?}", format); + } + b"data" => { + let fmt = format.as_ref().ok_or_else(|| anyhow::anyhow!("data chunk before fmt chunk"))?; + ensure!(fmt.audio_format == 1, "Only PCM format supported"); + ensure!(fmt.bits_per_sample == 16, "Only 16-bit samples supported"); + + let data_end = (pos + chunk_size).min(data.len()); + let sample_data = &data[pos..data_end]; + + // Parse i16 samples + let raw_samples: Vec = sample_data + .chunks_exact(2) + .map(|chunk| i16::from_le_bytes([chunk[0], chunk[1]])) + .collect(); + + // Convert to mono if stereo + samples = if fmt.num_channels == 2 { + raw_samples + .chunks(2) + .map(|chunk| { + if chunk.len() == 2 { + ((chunk[0] as i32 + chunk[1] as i32) / 2) as i16 + } else { + chunk[0] + } + }) + .collect() + } else { + raw_samples + }; + 
+ debug!( + "WAV data: {} samples ({}Hz, {} channels -> mono)", + samples.len(), + fmt.sample_rate, + fmt.num_channels + ); + } + _ => { + // Skip unknown chunks + debug!("Skipping WAV chunk: {:?}", std::str::from_utf8(chunk_id)); + } + } + + // Move to next chunk (chunks are word-aligned) + pos += chunk_size; + if !chunk_size.is_multiple_of(2) { + pos += 1; + } + } + + let sample_rate = format + .as_ref() + .map(|f| f.sample_rate) + .ok_or_else(|| anyhow::anyhow!("No fmt chunk found"))?; + + Ok((samples, sample_rate)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_simple_wav() { + // Minimal valid WAV with 4 samples of silence + let wav = [ + // RIFF header + b'R', b'I', b'F', b'F', // "RIFF" + 0x2C, 0x00, 0x00, 0x00, // File size - 8 = 44 + b'W', b'A', b'V', b'E', // "WAVE" + // fmt chunk + b'f', b'm', b't', b' ', // "fmt " + 0x10, 0x00, 0x00, 0x00, // Chunk size = 16 + 0x01, 0x00, // Audio format = 1 (PCM) + 0x01, 0x00, // Num channels = 1 (mono) + 0x80, 0x3E, 0x00, 0x00, // Sample rate = 16000 + 0x00, 0x7D, 0x00, 0x00, // Byte rate = 32000 + 0x02, 0x00, // Block align = 2 + 0x10, 0x00, // Bits per sample = 16 + // data chunk + b'd', b'a', b't', b'a', // "data" + 0x08, 0x00, 0x00, 0x00, // Chunk size = 8 bytes = 4 samples + 0x00, 0x00, // Sample 0 = 0 + 0x00, 0x10, // Sample 1 = 4096 + 0x00, 0x20, // Sample 2 = 8192 + 0x00, 0x30, // Sample 3 = 12288 + ]; + + let (samples, rate) = parse_wav(&wav).unwrap(); + assert_eq!(rate, 16000); + assert_eq!(samples.len(), 4); + assert_eq!(samples[0], 0); + assert_eq!(samples[1], 4096); + assert_eq!(samples[2], 8192); + assert_eq!(samples[3], 12288); + } + + #[test] + fn test_parse_wav_too_short() { + let result = parse_wav(&[0u8; 4]); + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("too short")); + } + + #[test] + fn test_parse_wav_wrong_magic() { + let mut data = [0u8; 44]; + data[0..4].copy_from_slice(b"NOPE"); + let result = parse_wav(&data); + 
assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("RIFF")); + } + + #[test] + fn test_parse_stereo_wav() { + // Stereo WAV: 2 stereo sample frames = 4 raw samples -> 2 mono samples + let wav = [ + // RIFF header + b'R', b'I', b'F', b'F', 0x2C, 0x00, 0x00, 0x00, // File size - 8 + b'W', b'A', b'V', b'E', // fmt chunk + b'f', b'm', b't', b' ', 0x10, 0x00, 0x00, 0x00, // Chunk size = 16 + 0x01, 0x00, // PCM + 0x02, 0x00, // 2 channels (stereo) + 0x80, 0x3E, 0x00, 0x00, // 16000 Hz + 0x00, 0xFA, 0x00, 0x00, // Byte rate = 64000 + 0x04, 0x00, // Block align = 4 + 0x10, 0x00, // 16 bits + // data chunk + b'd', b'a', b't', b'a', 0x08, 0x00, 0x00, 0x00, // 8 bytes = 2 stereo frames + // Frame 1: L=1000, R=3000 -> mono = 2000 + 0xE8, 0x03, // 1000 LE + 0xB8, 0x0B, // 3000 LE + // Frame 2: L=-100, R=100 -> mono = 0 + 0x9C, 0xFF, // -100 LE + 0x64, 0x00, // 100 LE + ]; + + let (samples, rate) = parse_wav(&wav).unwrap(); + assert_eq!(rate, 16000); + assert_eq!(samples.len(), 2); + assert_eq!(samples[0], 2000); + assert_eq!(samples[1], 0); + } +} diff --git a/sipcord-bridge/src/call/mod.rs b/sipcord-bridge/src/call/mod.rs new file mode 100644 index 0000000..33ff721 --- /dev/null +++ b/sipcord-bridge/src/call/mod.rs @@ -0,0 +1,2078 @@ +//! Audio bridge between SIP and Discord +//! +//! Architecture: +//! - ChannelBridge: One per Discord voice channel, shared by multiple SIP callers +//! - SipCallInfo: Tracks which channel each SIP call is connected to +//! +//! New Call Flow (with 183 Session Progress): +//! 1. SIP call comes in with Digest auth → SipEvent::IncomingCall +//! 2. Send 183 Session Progress (establishes early media) +//! 3. Start playing "connecting" sound in loop +//! 4. Bridge routes call via Backend → gets channel_id and bot_token +//! 5. Connect to Discord +//! 6. Stop connecting loop, play discord_join sound, send 200 OK +//! 7. When caller hangs up, remove from bridge +//! 8. 
When last caller leaves, destroy the bridge (disconnect bot) + +use crate::fax::session::FaxSession; +use crate::fax::spandsp::FaxT38Receiver; +use crate::routing::{Backend, CallError, CallStartedInfo, OutboundCallRequest, RouteDecision}; +use crate::services::snowflake::Snowflake; +use crate::services::sound::{create_sound_manager, SoundManager}; +use crate::transport::discord::{ + register_discord_to_sip_producer, unregister_discord_to_sip_producer, DiscordEvent, + DiscordVoiceConnection, SharedDiscordClient, +}; +use crate::transport::sip::{ + cleanup_channel_port, clear_channel_stale_audio, empty_bridge_grace_period_secs, + register_call_channel, register_discord_to_sip, stop_loop, unregister_call_channel, + unregister_discord_to_sip, CallId, SipCommand, SipEvent, CONF_SAMPLE_RATE, +}; +use anyhow::Result; +use crossbeam_channel::{bounded, Receiver, Sender}; +use dashmap::{DashMap, DashSet}; +use std::collections::HashSet; +use std::path::PathBuf; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio::sync::Notify; +use tokio_util::sync::CancellationToken; +use tracing::{debug, error, info, trace, warn}; +use udptl::AsyncUdptlSocket; + +/// Ring buffer capacity for Discord→SIP audio (i16 mono @ 16kHz). +/// 3200 samples = 200ms of audio, enough for timing jitter. +const DISCORD_TO_SIP_RING_BUFFER_SIZE: usize = 3200; + +/// Create and register bidirectional ring buffers for a channel. +/// Call this when a new ChannelBridge is created (after Discord connects). +fn setup_channel_ring_buffers(channel_id: Snowflake) { + let (producer, consumer) = rtrb::RingBuffer::new(DISCORD_TO_SIP_RING_BUFFER_SIZE); + register_discord_to_sip_producer(channel_id, producer); + register_discord_to_sip(channel_id, consumer); + info!( + "Created Discord→SIP ring buffer for channel {} (capacity={})", + channel_id, DISCORD_TO_SIP_RING_BUFFER_SIZE + ); +} + +/// Tear down ring buffers for a channel. Call when a ChannelBridge is destroyed. 
+fn teardown_channel_ring_buffers(channel_id: Snowflake) { + unregister_discord_to_sip_producer(channel_id); + unregister_discord_to_sip(channel_id); + clear_channel_stale_audio(channel_id); + debug!("Removed Discord→SIP ring buffer for channel {}", channel_id); +} + +/// A bridge to a Discord voice channel (shared by multiple SIP callers) +pub struct ChannelBridge { + /// Guild ID (needed for API call on bridge destruction) + pub guild_id: Snowflake, + /// The Discord voice connection (one per channel) + pub discord_connection: DiscordVoiceConnection, + /// SIP call IDs currently connected to this bridge + pub sip_calls: HashSet, + /// Bot token (stored for reference, no longer used for per-call client creation) + pub bot_token: String, + /// Last time a SIP call was active on this bridge (for orphan detection) + pub last_call_time: Instant, + /// When this bridge was created + pub created_at: Instant, + /// Number of reconnection attempts for this channel + pub reconnect_attempts: u32, + /// When the last reconnection attempt was made + pub last_reconnect_at: Option, +} + +/// Info about an active SIP call +pub struct SipCallInfo { + /// Which Discord channel this call is connected to (None if still authenticating) + pub channel_id: Option, + /// User ID from API authentication (for call tracking) + pub _user_id: Option, + /// Guild ID (for call tracking) + pub _guild_id: Option, + /// Tracking ID for outbound calls (used to report no_audio status back to DO) + pub tracking_id: Option, +} + +/// Shared state passed to per-call task handlers +#[derive(Clone)] +struct BridgeContext { + backend: Arc, + bridges: Arc>, + pending_bridges: Arc>, + /// Notify waiters when a pending bridge completes (or fails) + bridge_ready_notifiers: Arc>>, + sip_calls: Arc>, + /// Active fax sessions keyed by SIP call ID. + /// Each entry holds the session and a cancellation token for the T.38 processing task. 
+ fax_sessions: Arc>, CancellationToken)>>, + discord_event_tx: Sender, + sip_cmd_tx: Sender, + sound_manager: Arc, + shared_discord: Arc, + /// Wakes the health check loop immediately when a Songbird driver disconnects unexpectedly. + health_check_notify: Arc, +} + +/// The main bridge coordinator +pub struct BridgeCoordinator { + backend: Arc, + sip_cmd_tx: Sender, + sip_event_rx: Receiver, + bridges: Arc>, + pending_bridges: Arc>, + bridge_ready_notifiers: Arc>>, + sip_calls: Arc>, + /// Active fax sessions keyed by SIP call ID. + /// Each entry holds the session and a cancellation token for the T.38 processing task. + fax_sessions: Arc>, CancellationToken)>>, + /// Stores outbound call requests by tracking_id so the answered handler can retrieve them. + /// Entries are cleaned on answer/fail and periodically swept for stale entries. + outbound_requests: Arc>, + discord_event_tx: Sender, + discord_event_rx: Receiver, + sound_manager: Arc, + shared_discord: Arc, +} + +impl BridgeCoordinator { + pub fn new( + backend: Arc, + sip_cmd_tx: Sender, + sip_event_rx: Receiver, + shared_discord: Arc, + ) -> Self { + let (discord_event_tx, discord_event_rx) = bounded(1000); + + // Load sounds from config.toml + let sounds_dir = PathBuf::from(&crate::config::EnvConfig::global().sounds_dir); + + let sound_manager = create_sound_manager(sounds_dir) + .expect("Failed to create SoundManager - check config.toml and sound files"); + + Self { + backend, + sip_cmd_tx, + sip_event_rx, + bridges: Arc::new(DashMap::new()), + pending_bridges: Arc::new(DashSet::new()), + bridge_ready_notifiers: Arc::new(DashMap::new()), + sip_calls: Arc::new(DashMap::new()), + fax_sessions: Arc::new(DashMap::new()), + outbound_requests: Arc::new(DashMap::new()), + discord_event_tx, + discord_event_rx, + sound_manager, + shared_discord, + } + } + + /// Run the bridge coordinator (consumes self) + pub async fn run(self) -> Result<()> { + info!("Bridge coordinator started"); + + // Shared notify: 
VoiceReceiver signals this on unexpected DriverDisconnect, + // waking the health check loop immediately instead of waiting for the next tick. + let health_check_notify = Arc::new(Notify::new()); + + // Build shared context for per-call task handlers + let ctx = BridgeContext { + backend: self.backend.clone(), + bridges: self.bridges.clone(), + pending_bridges: self.pending_bridges.clone(), + bridge_ready_notifiers: self.bridge_ready_notifiers.clone(), + sip_calls: self.sip_calls.clone(), + fax_sessions: self.fax_sessions.clone(), + discord_event_tx: self.discord_event_tx.clone(), + sip_cmd_tx: self.sip_cmd_tx.clone(), + sound_manager: self.sound_manager.clone(), + shared_discord: self.shared_discord.clone(), + health_check_notify: health_check_notify.clone(), + }; + + // Clone what we need for the SIP event handler + let backend_for_sip = ctx.backend.clone(); + let bridges = ctx.bridges.clone(); + let sip_calls = ctx.sip_calls.clone(); + let sip_cmd_tx = ctx.sip_cmd_tx.clone(); + let sip_event_rx = self.sip_event_rx.clone(); + let sound_manager = ctx.sound_manager.clone(); + let outbound_requests = self.outbound_requests.clone(); + + let sip_handle = tokio::spawn(async move { + let mut event_count: u64 = 0; + loop { + let Some(event) = poll_recv(&sip_event_rx, "SIP", &mut event_count).await else { + break; + }; + + match event { + SipEvent::IncomingCall { + call_id, + digest_auth, + extension, + source_ip, + } => { + info!( + "Incoming call {} from user={} to ext={} (IP: {:?})", + call_id, digest_auth.username, extension, source_ip + ); + + // Check for config-based extension sounds (easter eggs) + if let Ok(ext_num) = extension.parse::() { + if let Some(sound_name) = sound_manager.get_extension_sound(ext_num) { + info!( + "Extension {} maps to sound '{}' (call {})", + ext_num, sound_name, call_id + ); + + let sound_manager = sound_manager.clone(); + let sip_cmd_tx = sip_cmd_tx.clone(); + let sound_name = sound_name.to_string(); + + tokio::spawn(async move { + 
play_extension_sound_and_hangup( + call_id, + &sound_name, + &sound_manager, + &sip_cmd_tx, + ) + .await; + }); + continue; + } + } + + // Track this call + sip_calls.insert( + call_id, + SipCallInfo { + channel_id: None, + _user_id: None, + _guild_id: None, + tracking_id: None, + }, + ); + + // Verify auth with API and get channel info + let ctx = ctx.clone(); + + tokio::spawn(async move { + handle_incoming_call(ctx, call_id, *digest_auth, extension, source_ip) + .await; + }); + } + + SipEvent::CallEnded { call_id } => { + unregister_call_channel(call_id); + stop_loop(call_id); + + // Check if this was a fax call — clean up fax session + // Fax calls skip on_call_ended (no "hung up" notification) + if let Some((_, (fax_session, cancel_token))) = + ctx.fax_sessions.remove(&call_id) + { + // Cancel the T.38 processing task (if running) before locking + cancel_token.cancel(); + + // Clean up fax audio port + crate::fax::audio_port::remove_fax_audio_port(call_id); + + let mut session = fax_session.lock().await; + debug!( + "Fax call {} ended (channel={}, duration={:.1}s, audio={:.1}s)", + call_id, + session.text_channel_id, + session.created_at.elapsed().as_secs_f64(), + session.audio_duration_secs() + ); + if !session.is_finished() { + // If we received at least one page, the fax data is in the TIFF. + // The remote may have hung up after sending all pages but before + // the T.30 phase E disconnect handshake completed — this is normal. 
+ let pages = session.pages_received(); + if pages > 0 { + debug!( + "Fax call {} ended with {} page(s) received, converting", + call_id, pages + ); + session.state = crate::fax::session::FaxState::Received; + if let Err(e) = session.convert_and_post().await { + error!( + "Failed to convert/post fax for call {}: {}", + call_id, e + ); + session + .post_failure("Failed to process received fax") + .await; + } + } else { + session + .post_failure("Caller hung up before fax completed") + .await; + } + } + sip_calls.remove(&call_id); + continue; + } + + // Voice call ended — notify backend ("hung up" notification) + let backend = backend_for_sip.clone(); + let sip_call_id_str = call_id.to_string(); + tokio::spawn(async move { + backend.on_call_ended(&sip_call_id_str).await; + }); + + if let Some((_, call_info)) = sip_calls.remove(&call_id) { + if let Some(channel_id) = call_info.channel_id { + let should_destroy = { + if let Some(mut bridge) = bridges.get_mut(&channel_id) { + bridge.sip_calls.remove(&call_id); + info!( + "Removed call {} from bridge for channel {} ({} callers remaining)", + call_id, channel_id, bridge.sip_calls.len() + ); + bridge.sip_calls.is_empty() + } else { + false + } + }; + + if should_destroy { + info!( + "Last caller left, destroying bridge for channel {}", + channel_id + ); + cleanup_channel_port(channel_id); + teardown_channel_ring_buffers(channel_id); + + if let Some((_, bridge)) = bridges.remove(&channel_id) { + bridge.discord_connection.disconnect().await; + } + } + } + } + } + + SipEvent::CallTimeout { call_id, rx_count } => { + warn!( + "Call {} timed out due to RTP inactivity (rx_count={}), forcing hangup", + call_id, rx_count + ); + + // If no audio was ever received, report no_audio to the coordinator + // so the Discord embed can show a diagnostic message + if rx_count == 0 { + if let Some(call_info) = sip_calls.get(&call_id) { + if let Some(ref tracking_id) = call_info.tracking_id { + info!( + "Call {} had zero RTP packets, 
reporting no_audio (tracking_id={})", + call_id, tracking_id + ); + backend_for_sip.report_call_status(tracking_id, "no_audio"); + } + } + } + + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + } + + SipEvent::OutboundCallAnswered { + tracking_id, + call_id, + } => { + info!( + "Outbound call answered: tracking_id={}, call_id={}", + tracking_id, call_id + ); + + // Check fork group: cancel sibling legs + if let Some(siblings) = + crate::transport::sip::fork_group::mark_answered(&tracking_id, call_id) + { + for sib_id in siblings { + info!( + "Cancelling sibling fork leg: call_id={} (tracking_id={})", + sib_id, tracking_id + ); + // Remove from outbound tracking so its disconnect + // callback won't emit OutboundCallFailed + crate::transport::sip::remove_outbound_tracking(sib_id); + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id: sib_id }); + } + } + + backend_for_sip.report_call_status(&tracking_id, "answered"); + + let ctx = ctx.clone(); + let outbound_requests = outbound_requests.clone(); + tokio::spawn(async move { + handle_outbound_call_answered( + ctx, + outbound_requests, + tracking_id, + call_id, + ) + .await; + }); + } + + SipEvent::OutboundCallFailed { + tracking_id, + call_id: failed_call_id, + reason, + } => { + warn!( + "Outbound call failed: tracking_id={}, call_id={:?}, reason={}", + tracking_id, failed_call_id, reason + ); + + // Check fork group: only report failure when ALL legs fail + let all_failed = if let Some(cid) = failed_call_id { + crate::transport::sip::fork_group::mark_failed(&tracking_id, cid) + } else { + // No call_id means it never started - check if this was a single-contact call + true + }; + + if all_failed { + info!( + "All fork legs failed for tracking_id={}, reporting failure", + tracking_id + ); + outbound_requests.remove(&tracking_id); + backend_for_sip.report_call_status(&tracking_id, "failed"); + } else { + debug!( + "Fork leg failed but other legs still active for tracking_id={}", + tracking_id + ); + } + } 
+ + SipEvent::T38Offered { + call_id, + remote_ip, + remote_port, + t38_version, + max_bit_rate, + rate_management, + udp_ec, + local_port, + } => { + info!( + "T.38 re-INVITE for call {}: remote={}:{}, local_port={}, version={}, rate={}bps, mgmt={}, ec={}", + call_id, remote_ip, remote_port, local_port, t38_version, max_bit_rate, rate_management, udp_ec + ); + + // Check if this call has a fax session + if let Some(entry) = ctx.fax_sessions.get(&call_id) { + let (fax_session, cancel_token) = entry.value(); + let fax_session = fax_session.clone(); + let cancel_token = cancel_token.clone(); + let sip_cmd_tx = sip_cmd_tx.clone(); + + tokio::spawn(async move { + handle_t38_switch( + call_id, + remote_ip, + remote_port, + local_port, + fax_session, + cancel_token, + sip_cmd_tx, + ) + .await; + }); + } else { + warn!( + "T.38 re-INVITE for call {} but no fax session — rejecting", + call_id + ); + // Hang up since we can't handle T.38 without a fax session + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + } + } + } + } + }); + + // Handle outbound call requests from the backend + let outbound_backend = self.backend.clone(); + let outbound_sip_cmd_tx = self.sip_cmd_tx.clone(); + let outbound_registrar = crate::services::registrar::GLOBAL_REGISTRAR.get().cloned(); + let outbound_requests_for_handler = self.outbound_requests.clone(); + + let outbound_handle = tokio::spawn(async move { + while let Some(req) = outbound_backend.next_outbound_request().await { + info!( + "Processing outbound call request: call_id={}, user={}", + req.call_id, req.discord_username + ); + + // Look up the user's SIP contact from the registrar + let contacts = if let Some(ref registrar) = outbound_registrar { + registrar.get_contacts_for_discord_user(&req.discord_username) + } else { + Vec::new() + }; + + if contacts.is_empty() { + warn!( + "No SIP contacts for user {} (call_id={})", + req.discord_username, req.call_id + ); + outbound_backend.report_call_status(&req.call_id, "failed"); + 
continue; + } + + // Store the request so handle_outbound_call_answered can retrieve it + outbound_requests_for_handler.insert(req.call_id.clone(), req.clone()); + + let fork_total = contacts.len(); + info!( + "Forking outbound call to {} contacts for user {} (call_id={})", + fork_total, req.discord_username, req.call_id + ); + + // Ring ALL registered contacts simultaneously + for (contact_uri, source_addr, transport) in &contacts { + // Extract the user part from the Contact URI (e.g., "sip:3001@10.0.1.151:5060" -> "3001") + // The contact_uri has the correct SIP username/extension; source_addr is the NAT'd public address + let user_part = contact_uri + .strip_prefix("sip:") + .or_else(|| contact_uri.strip_prefix("sips:")) + .and_then(|rest| rest.split('@').next()) + .unwrap_or(&req.discord_username); + + let sip_uri = match transport { + crate::services::registrar::SipTransport::Tls => { + format!("sips:{}@{}", user_part, source_addr) + } + crate::services::registrar::SipTransport::Tcp => { + format!("sip:{}@{};transport=tcp", user_part, source_addr) + } + crate::services::registrar::SipTransport::Udp => { + format!("sip:{}@{};transport=udp", user_part, source_addr) + } + }; + + let _ = outbound_sip_cmd_tx.send(SipCommand::MakeOutboundCall { + tracking_id: req.call_id.clone(), + sip_uri, + caller_display_name: Some(req.caller_username.clone()), + fork_total, + }); + } + + outbound_backend.report_call_status(&req.call_id, "ringing"); + } + }); + + // Handle Discord events + let discord_event_rx = self.discord_event_rx.clone(); + + let discord_handle = tokio::spawn(async move { + let mut event_count: u64 = 0; + loop { + let Some(event) = poll_recv(&discord_event_rx, "Discord", &mut event_count).await + else { + break; + }; + + match event { + DiscordEvent::VoiceConnected { + bridge_id, + guild_id, + channel_id, + } => { + info!( + "Discord voice connected: bridge={}, guild={}, channel={}", + bridge_id, guild_id, channel_id + ); + } + + 
DiscordEvent::VoiceDisconnected { bridge_id } => { + debug!("Discord voice disconnected: bridge={}", bridge_id); + } + } + } + }); + + // Health check task + let bridges = self.bridges.clone(); + let pending_bridges = self.pending_bridges.clone(); + let bridge_ready_notifiers = self.bridge_ready_notifiers.clone(); + let discord_event_tx = self.discord_event_tx.clone(); + let backend_for_health = self.backend.clone(); + let sip_calls_for_health = self.sip_calls.clone(); + let shared_discord_for_health = self.shared_discord.clone(); + let outbound_requests_for_health = self.outbound_requests.clone(); + let sip_cmd_tx_for_health = self.sip_cmd_tx.clone(); + + let health_check_notify_for_loop = health_check_notify.clone(); + let health_check_handle = tokio::spawn(async move { + let mut check_count: u64 = 0; + loop { + let interval = crate::config::AppConfig::bridge().health_check_interval_secs; + tokio::select! { + _ = tokio::time::sleep(Duration::from_secs(interval)) => {}, + _ = health_check_notify_for_loop.notified() => { + info!("Health check woken early by driver disconnect"); + }, + } + check_count += 1; + + // Sweep stale outbound requests (leaked if fork group never resolves) + let before = outbound_requests_for_health.len(); + outbound_requests_for_health + .retain(|_, req| req.created_at.elapsed() < Duration::from_secs(60)); + let swept = before - outbound_requests_for_health.len(); + if swept > 0 { + warn!("Swept {} stale outbound requests (>60s old)", swept); + } + + let active_channel_ids: Vec = bridges + .iter() + .map(|entry| entry.key().to_string()) + .collect(); + + if !active_channel_ids.is_empty() { + let backend = backend_for_health.clone(); + tokio::spawn(async move { + backend.heartbeat(&active_channel_ids).await; + }); + } + + let bridge_cfg = crate::config::AppConfig::bridge(); + + // Collect unhealthy bridges with their reconnection state + // Tuple: (channel_id, guild_id, bridge_id, prev_attempts, prev_reconnect_at) + let mut 
unhealthy_bridges: Vec<( + Snowflake, + Snowflake, + String, + u32, + Option, + )> = Vec::new(); + // Bridges that exceeded max reconnection attempts — tear them down + let mut exhausted_bridges: Vec = Vec::new(); + + for entry in bridges.iter() { + let channel_id = *entry.key(); + let bridge = entry.value(); + + let is_healthy = bridge.discord_connection.is_healthy(); + let queue_fill = bridge.discord_connection.queue_fill_percent(); + let consecutive_overflows = bridge.discord_connection.consecutive_overflows(); + + if check_count.is_multiple_of(12) { + info!( + "Health check #{}: channel={}, healthy={}, queue={}%, overflows={}, reconnects={}", + check_count, channel_id, is_healthy, queue_fill, consecutive_overflows, + bridge.reconnect_attempts + ); + } + + let needs_reconnect = + !is_healthy || (queue_fill > 90 && consecutive_overflows > 50); + + if needs_reconnect { + // Cooldown: skip if bridge was created/reconnected too recently + let age_secs = bridge.created_at.elapsed().as_secs(); + if age_secs < bridge_cfg.reconnect_min_age_secs { + debug!( + "Bridge for channel {} is unhealthy but too young ({}s < {}s cooldown), skipping", + channel_id, age_secs, bridge_cfg.reconnect_min_age_secs + ); + continue; + } + + // Max attempts: if exceeded, tear down instead of reconnecting + if bridge.reconnect_attempts >= bridge_cfg.reconnect_max_attempts { + error!( + "Bridge for channel {} exceeded max reconnection attempts ({}/{}), tearing down", + channel_id, bridge.reconnect_attempts, bridge_cfg.reconnect_max_attempts + ); + exhausted_bridges.push(channel_id); + continue; + } + + // Exponential backoff: check if enough time has passed since last reconnect + if let Some(last_reconnect) = bridge.last_reconnect_at { + let backoff_secs = bridge_cfg.reconnect_base_delay_secs + * 2u64.saturating_pow(bridge.reconnect_attempts.saturating_sub(1)); + let backoff_secs = + backoff_secs.min(bridge_cfg.reconnect_max_delay_secs); + let elapsed = last_reconnect.elapsed().as_secs(); + 
if elapsed < backoff_secs { + debug!( + "Bridge for channel {} is unhealthy but in backoff ({}s < {}s), skipping", + channel_id, elapsed, backoff_secs + ); + continue; + } + } + + warn!( + "Bridge for channel {} is UNHEALTHY (attempt {}/{})", + channel_id, + bridge.reconnect_attempts + 1, + bridge_cfg.reconnect_max_attempts + ); + unhealthy_bridges.push(( + channel_id, + bridge.guild_id, + bridge.discord_connection.bridge_id().to_string(), + bridge.reconnect_attempts, + bridge.last_reconnect_at, + )); + } + } + + // Tear down bridges that exhausted reconnection attempts + for channel_id in exhausted_bridges { + if let Some((_, bridge)) = bridges.remove(&channel_id) { + let orphaned_count = bridge.sip_calls.len(); + error!( + "Destroying bridge for channel {} after {} failed reconnection attempts — hanging up {} orphaned calls", + channel_id, bridge.reconnect_attempts, orphaned_count + ); + // Hang up all SIP calls that were on this bridge + for &orphaned_call_id in &bridge.sip_calls { + warn!( + "Hanging up orphaned call {} (bridge for channel {} exhausted reconnects)", + orphaned_call_id, channel_id + ); + let _ = sip_cmd_tx_for_health.send(SipCommand::Hangup { + call_id: orphaned_call_id, + }); + } + cleanup_channel_port(channel_id); + teardown_channel_ring_buffers(channel_id); + bridge.discord_connection.disconnect().await; + } + } + + // Check for orphaned bridges (no SIP calls for grace period) + let mut orphaned_bridges: Vec = Vec::new(); + for entry in bridges.iter() { + let channel_id = *entry.key(); + let bridge = entry.value(); + + if bridge.sip_calls.is_empty() { + let empty_duration = bridge.last_call_time.elapsed().as_secs(); + if empty_duration > empty_bridge_grace_period_secs() { + warn!( + "Bridge for channel {} has no SIP calls for {}s, marking for cleanup", + channel_id, empty_duration + ); + orphaned_bridges.push(channel_id); + } + } else { + // Cross-reference: bridge has sip_calls entries, but do any + // of them actually exist in the 
coordinator's sip_calls map? + // If none exist, the entries are stale (calls ended without cleanup). + let any_call_exists = bridge + .sip_calls + .iter() + .any(|call_id| sip_calls_for_health.contains_key(call_id)); + + if !any_call_exists + && bridge.last_call_time.elapsed().as_secs() > 30 + && bridge.created_at.elapsed().as_secs() > 60 + { + warn!( + "Bridge for channel {} has {} stale sip_calls entries (none exist in coordinator), \ + last_call={}s ago, age={}s — marking for cleanup", + channel_id, + bridge.sip_calls.len(), + bridge.last_call_time.elapsed().as_secs(), + bridge.created_at.elapsed().as_secs(), + ); + orphaned_bridges.push(channel_id); + } + } + } + + // Destroy orphaned bridges + for channel_id in orphaned_bridges { + if let Some((_, bridge)) = bridges.remove(&channel_id) { + info!( + "Destroying orphaned bridge for channel {} (no SIP calls)", + channel_id + ); + cleanup_channel_port(channel_id); + teardown_channel_ring_buffers(channel_id); + bridge.discord_connection.disconnect().await; + } + } + + // Rate limit: cap reconnections per cycle + let max_per_cycle = bridge_cfg.reconnect_max_per_cycle; + if unhealthy_bridges.len() > max_per_cycle { + warn!( + "Rate limiting reconnections: {} unhealthy bridges but only processing {} per cycle", + unhealthy_bridges.len(), max_per_cycle + ); + unhealthy_bridges.truncate(max_per_cycle); + } + + for (channel_id, guild_id, bridge_id, prev_attempts, _prev_reconnect_at) in + unhealthy_bridges + { + if pending_bridges.contains(&channel_id) { + continue; + } + + let attempt_num = prev_attempts + 1; + warn!( + "Attempting reconnection for unhealthy bridge {} (channel {}, attempt {}/{})", + bridge_id, channel_id, attempt_num, bridge_cfg.reconnect_max_attempts + ); + pending_bridges.insert(channel_id); + + if let Some((_, old_bridge)) = bridges.remove(&channel_id) { + let sip_calls = old_bridge.sip_calls.clone(); + let bot_token = old_bridge.bot_token.clone(); + let old_last_call_time = 
old_bridge.last_call_time; + teardown_channel_ring_buffers(channel_id); + old_bridge.discord_connection.disconnect().await; + + let new_bridge_id = format!("bridge_{}", channel_id); + match DiscordVoiceConnection::connect( + new_bridge_id.clone(), + &shared_discord_for_health, + guild_id, + channel_id, + discord_event_tx.clone(), + health_check_notify_for_loop.clone(), + ) + .await + { + Ok(new_connection) => { + info!( + "Successfully reconnected bridge {} for channel {} (attempt {}/{})", + new_bridge_id, channel_id, attempt_num, bridge_cfg.reconnect_max_attempts + ); + // Set up fresh ring buffers for reconnected channel + setup_channel_ring_buffers(channel_id); + bridges.insert( + channel_id, + ChannelBridge { + guild_id, + discord_connection: new_connection, + sip_calls: sip_calls.clone(), + bot_token, + last_call_time: old_last_call_time, + created_at: Instant::now(), + reconnect_attempts: attempt_num, + last_reconnect_at: Some(Instant::now()), + }, + ); + + // Cross-reference carried-over sip_calls against the + // coordinator's sip_calls map. If CallEnded fired while + // the bridge was removed from the DashMap, entries will + // be stale — remove them now. + if let Some(mut bridge) = bridges.get_mut(&channel_id) { + let stale: Vec = bridge + .sip_calls + .iter() + .filter(|id| !sip_calls_for_health.contains_key(id)) + .copied() + .collect(); + for id in &stale { + bridge.sip_calls.remove(id); + } + if !stale.is_empty() { + warn!( + "Removed {} stale sip_calls from reconnected bridge {}: {:?}", + stale.len(), channel_id, stale + ); + } + } + } + Err(e) => { + error!( + "Failed to reconnect bridge for channel {} (attempt {}/{}): {}. \ + Bridge removed — {} SIP calls orphaned.", + channel_id, attempt_num, bridge_cfg.reconnect_max_attempts, e, + sip_calls.len() + ); + // Re-insert the bridge entry (without connection) so calls + // aren't silently orphaned — the next health check cycle + // will either retry or tear down after max attempts. 
                                // Since we can't re-insert without a connection, clean up
                                // the channel port so calls can detect the loss.
                                cleanup_channel_port(channel_id);
                            }
                        }

                        pending_bridges.remove(&channel_id);
                        notify_bridge_ready(&bridge_ready_notifiers, channel_id);
                    }
                }
            }
        });

        // First task to finish ends the coordinator; the others are dropped.
        tokio::select! {
            _ = sip_handle => { info!("SIP event handler finished"); }
            _ = discord_handle => { info!("Discord event handler finished"); }
            _ = health_check_handle => { info!("Health check handler finished"); }
            _ = outbound_handle => { info!("Outbound call handler finished"); }
        }

        Ok(())
    }
}

/// Handle an incoming authenticated call.
///
/// Routes the call through the backend, then acts on the routing decision:
/// redirect, reject (invalid credentials or an error sound), connect as a
/// fax session, or bridge into a Discord voice channel — creating a new
/// per-channel bridge or joining an existing one. Voice-call state lives in
/// `sip_calls`; fax state lives in `fax_sessions`.
// NOTE(review): generic parameters appear stripped by text extraction in this
// file (e.g. `Option,`, `parse::()`) — restore from the original source.
async fn handle_incoming_call(
    ctx: BridgeContext,
    call_id: CallId,
    digest_auth: crate::transport::sip::DigestAuthParams,
    extension: String,
    source_ip: Option,
) {
    let BridgeContext {
        backend,
        bridges,
        pending_bridges,
        bridge_ready_notifiers,
        sip_calls,
        fax_sessions,
        discord_event_tx,
        sip_cmd_tx,
        sound_manager,
        shared_discord,
        health_check_notify,
    } = ctx;
    // Route the call via the backend FIRST to determine call type
    let decision = backend.route_call(&digest_auth, &extension).await;

    // For non-fax calls: send 183 Session Progress and play connecting sound
    let is_fax = matches!(decision, RouteDecision::ConnectFax { .. });
    if !is_fax {
        let _ = sip_cmd_tx.send(SipCommand::Send183 { call_id });
        tokio::time::sleep(Duration::from_millis(100)).await;

        if let Some(connecting_samples) = sound_manager.get_connecting_samples() {
            let _ = sip_cmd_tx.send(SipCommand::StartConnectingLoop {
                call_id,
                samples: (*connecting_samples).clone(),
            });
        } else {
            warn!("No connecting sound configured - caller will hear silence during setup");
        }
    }

    match decision {
        RouteDecision::Redirect { domain, extension } => {
            info!("Call {} needs redirect to {}", call_id, domain);
            let _ = sip_cmd_tx.send(SipCommand::Redirect {
                call_id,
                domain,
                extension,
            });
            sip_calls.remove(&call_id);
        }

        RouteDecision::RejectInvalidCredentials => {
            warn!(
                "Invalid credentials for call {} (IP: {:?}) - hanging up",
                call_id, source_ip
            );
            let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id });
            sip_calls.remove(&call_id);
        }

        RouteDecision::RejectWithError { error } => {
            error!("Call {} rejected: {:?}", call_id, error);
            play_error_and_hangup(call_id, error, &sound_manager, &sip_cmd_tx).await;
            sip_calls.remove(&call_id);
        }

        RouteDecision::ConnectFax {
            text_channel_id,
            guild_id,
            user_id,
            bot_token,
        } => {
            debug!(
                "Fax route decision for call {}: text_channel={}, guild={}, user={}",
                call_id, text_channel_id, guild_id, user_id
            );

            // Fax calls: answer the SIP call but DON'T connect to Discord voice.
            // Instead, create a FaxSession that will receive audio and post to Discord text channel.

            let mut fax_session = match FaxSession::new(
                call_id,
                text_channel_id,
                guild_id,
                user_id.clone(),
                bot_token,
            ) {
                Ok(session) => session,
                Err(e) => {
                    error!("Failed to create fax session for call {}: {}", call_id, e);
                    let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id });
                    sip_calls.remove(&call_id);
                    return;
                }
            };

            // Answer the call to establish audio path
            let _ = sip_cmd_tx.send(SipCommand::Answer { call_id });

            // Post "Receiving fax..." message to Discord
            if let Err(e) = fax_session.post_receiving_message().await {
                error!("Failed to post fax receiving message: {}", e);
                let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id });
                sip_calls.remove(&call_id);
                return;
            }

            // Store fax session with cancellation token for T.38 task shutdown
            let fax_session = Arc::new(tokio::sync::Mutex::new(fax_session));
            let cancel_token = CancellationToken::new();
            fax_sessions.insert(call_id, (fax_session.clone(), cancel_token));

            // Wait briefly for PJSUA to establish media (conf_port assignment)
            tokio::time::sleep(Duration::from_millis(500)).await;

            // Create bidirectional fax audio port
            let audio_ports = crate::fax::audio_port::create_fax_audio_port(call_id).await;
            if audio_ports.is_none() {
                warn!(
                    "Could not create fax audio port for call {} — media may not be ready yet. \
                     Will retry when media becomes active.",
                    call_id
                );
            }

            // Spawn fax audio processing task
            let fax_session_clone = fax_session.clone();
            let sip_cmd_tx_clone = sip_cmd_tx.clone();
            tokio::spawn(async move {
                process_fax_audio(call_id, fax_session_clone, audio_ports, sip_cmd_tx_clone).await;
            });

            debug!(
                "Fax session created for call {} -> text channel {}",
                call_id, text_channel_id
            );

            // NOTE: No on_call_started notification for fax calls — the "called in" / "hung up"
            // Discord embeds are only relevant for voice calls. Fax has its own notifications.
        }

        RouteDecision::Connect {
            channel_id,
            guild_id,
            user_id,
            bot_token,
        } => {
            info!(
                "Route decision for call {}: channel={}, guild={}, user={}",
                call_id, channel_id, guild_id, user_id
            );

            // Check if bot is already connected to a DIFFERENT channel in the SAME guild
            // Discord bots can only be in one voice channel per guild
            let mut conflicting_channel: Option = None;
            for entry in bridges.iter() {
                let existing_channel_id = *entry.key();
                let existing_bridge = entry.value();

                if existing_bridge.guild_id == guild_id && existing_channel_id != channel_id {
                    conflicting_channel = Some(existing_channel_id);
                    break;
                }
            }

            if let Some(existing_channel_id) = conflicting_channel {
                warn!(
                    "Guild {} already has active bridge to channel {} (call {} tried to join channel {})",
                    guild_id, existing_channel_id, call_id, channel_id
                );
                play_error_and_hangup(call_id, CallError::ServerBusy, &sound_manager, &sip_cmd_tx)
                    .await;
                sip_calls.remove(&call_id);
                return;
            }

            // Check if bridge already exists
            let bridge_exists = bridges.contains_key(&channel_id);
            let bridge_pending = pending_bridges.contains(&channel_id);

            if bridge_pending && !bridge_exists {
                info!(
                    "Call {} waiting for pending bridge for channel {}",
                    call_id, channel_id
                );

                // Get or create a Notify for this channel (zero-cost when not waiting)
                let notify = bridge_ready_notifiers
                    .entry(channel_id)
                    .or_insert_with(|| Arc::new(Notify::new()))
                    .clone();

                // Wait for notification with timeout (instant wake-up when bridge is ready)
                let wait_result = tokio::time::timeout(Duration::from_secs(15), async {
                    loop {
                        // Check if bridge is ready or pending cleared
                        if bridges.contains_key(&channel_id)
                            || !pending_bridges.contains(&channel_id)
                        {
                            return true;
                        }
                        // Check if call ended while waiting
                        if !sip_calls.contains_key(&call_id) {
                            return false;
                        }
                        notify.notified().await;
                    }
                })
                .await;

                match wait_result {
                    Ok(true) => {
                        info!(
                            "Call {} finished waiting, bridge ready for channel {}",
                            call_id, channel_id
                        );
                    }
                    Ok(false) => {
                        warn!("Call {} ended while waiting for pending bridge", call_id);
                        return;
                    }
                    Err(_) => {
                        error!(
                            "Timeout waiting for pending bridge for channel {} (call {})",
                            channel_id, call_id
                        );
                        play_error_and_hangup(
                            call_id,
                            CallError::Unknown,
                            &sound_manager,
                            &sip_cmd_tx,
                        )
                        .await;
                        sip_calls.remove(&call_id);
                        return;
                    }
                }
            }

            // Re-check after the wait above: the pending bridge may have
            // materialized (joined below) or failed (created fresh below).
            let bridge_exists = bridges.contains_key(&channel_id);

            if bridge_exists {
                // Join existing bridge
                if !sip_calls.contains_key(&call_id) {
                    warn!("Call {} ended during routing, not joining bridge", call_id);
                    return;
                }

                info!(
                    "Call {} joining existing bridge for channel {}",
                    call_id, channel_id
                );

                if let Some(mut call) = sip_calls.get_mut(&call_id) {
                    call.channel_id = Some(channel_id);
                    call._user_id = Some(user_id.clone());
                    call._guild_id = Some(guild_id);
                }

                if let Some(mut bridge) = bridges.get_mut(&channel_id) {
                    bridge.sip_calls.insert(call_id);
                    bridge.last_call_time = Instant::now();
                    info!(
                        "Bridge for channel {} now has {} callers",
                        channel_id,
                        bridge.sip_calls.len()
                    );
                }

                register_call_channel(call_id, channel_id);

                // Notify backend
                let backend = backend.clone();
                let info = CallStartedInfo {
                    sip_call_id: call_id.to_string(),
                    user_id: user_id.clone(),
                    guild_id: guild_id.to_string(),
                    channel_id: channel_id.to_string(),
                    extension: extension.clone(),
                };
                tokio::spawn(async move {
                    backend.on_call_started(&info).await;
                });

                // Answer call first, then play join sound
                let _ = sip_cmd_tx.send(SipCommand::Answer { call_id });
                play_discord_join(call_id, &sound_manager, &sip_cmd_tx).await;
            } else {
                // Create new bridge
                if !sip_calls.contains_key(&call_id) {
                    warn!("Call {} ended during routing, not creating bridge", call_id);
                    return;
                }

                pending_bridges.insert(channel_id);
                info!(
                    "Creating new bridge for channel {} (call {})",
                    channel_id, call_id
                );

                let bridge_id = format!("bridge_{}", channel_id);
                match DiscordVoiceConnection::connect(
                    bridge_id.clone(),
                    &shared_discord,
                    guild_id,
                    channel_id,
                    discord_event_tx.clone(),
                    health_check_notify.clone(),
                )
                .await
                {
                    Ok(connection) => {
                        // The caller may have hung up during the (slow) Discord
                        // connect — tear the fresh connection back down if so.
                        if !sip_calls.contains_key(&call_id) {
                            warn!("Call {} ended while connecting to Discord", call_id);
                            connection.disconnect().await;
                            pending_bridges.remove(&channel_id);
                            notify_bridge_ready(&bridge_ready_notifiers, channel_id);
                            return;
                        }

                        info!("Discord connection established for channel {}", channel_id);

                        // Set up Discord→SIP ring buffers for this channel
                        setup_channel_ring_buffers(channel_id);

                        let mut sip_calls_set = HashSet::new();
                        sip_calls_set.insert(call_id);

                        bridges.insert(
                            channel_id,
                            ChannelBridge {
                                guild_id,
                                discord_connection: connection,
                                sip_calls: sip_calls_set,
                                bot_token: bot_token.clone(),
                                last_call_time: Instant::now(),
                                created_at: Instant::now(),
                                reconnect_attempts: 0,
                                last_reconnect_at: None,
                            },
                        );

                        pending_bridges.remove(&channel_id);
                        notify_bridge_ready(&bridge_ready_notifiers, channel_id);

                        if let Some(mut call) = sip_calls.get_mut(&call_id) {
                            call.channel_id = Some(channel_id);
                            call._user_id = Some(user_id.clone());
                            call._guild_id = Some(guild_id);
                        }

                        register_call_channel(call_id, channel_id);

                        // Notify backend
                        let backend = backend.clone();
                        let info = CallStartedInfo {
                            sip_call_id: call_id.to_string(),
                            user_id: user_id.clone(),
                            guild_id: guild_id.to_string(),
                            channel_id: channel_id.to_string(),
                            extension: extension.clone(),
                        };
                        tokio::spawn(async move {
                            backend.on_call_started(&info).await;
                        });

                        // Answer call first, then play join sound
                        let _ = sip_cmd_tx.send(SipCommand::Answer { call_id });
                        play_discord_join(call_id, &sound_manager, &sip_cmd_tx).await;
                    }
                    Err(e) => {
                        pending_bridges.remove(&channel_id);
                        notify_bridge_ready(&bridge_ready_notifiers, channel_id);
                        error!("Failed to connect to Discord for call {}: {}", call_id, e);

                        play_error_and_hangup(
                            call_id,
                            CallError::Unknown,
                            &sound_manager,
                            &sip_cmd_tx,
                        )
                        .await;
                        sip_calls.remove(&call_id);
                    }
                }
            }
        }
    }
}

/// Handle an outbound call that was answered (phone picked up)
///
/// This mirrors handle_incoming_call but skips authentication (already done by the DO)
/// and doesn't need 183/Answer (the SIP call is already established).
async fn handle_outbound_call_answered(
    ctx: BridgeContext,
    // NOTE(review): generic args stripped by extraction — likely
    // Arc<DashMap<String, OutboundRequest>> given the (key, value)
    // tuple returned by `.remove()` below; confirm against original.
    outbound_requests: Arc>,
    tracking_id: String,
    call_id: CallId,
) {
    let BridgeContext {
        backend,
        bridges,
        pending_bridges,
        bridge_ready_notifiers,
        sip_calls,
        fax_sessions: _,
        discord_event_tx,
        sip_cmd_tx,
        sound_manager,
        shared_discord,
        health_check_notify,
    } = ctx;

    // Step 1: Retrieve and consume the stored outbound request
    let req = match outbound_requests.remove(&tracking_id) {
        Some((_, req)) => req,
        None => {
            error!(
                "No stored outbound request for tracking_id={} (call {})",
                tracking_id, call_id
            );
            let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id });
            return;
        }
    };

    // Step 2: Parse guild_id and channel_id
    let guild_id: Snowflake = match req.guild_id.parse() {
        Ok(id) => id,
        Err(e) => {
            error!(
                "Invalid guild_id '{}' in outbound request: {}",
                req.guild_id, e
            );
            backend.report_call_status(&req.call_id, "failed");
            let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id });
            return;
        }
    };
    let channel_id: Snowflake = match req.channel_id.parse() {
        Ok(id) => id,
        Err(e) => {
            error!(
                "Invalid channel_id '{}' in outbound request: {}",
                req.channel_id, e
            );
            backend.report_call_status(&req.call_id, "failed");
            let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id });
            return;
        }
    };

    info!(
        "Outbound call {} answered, connecting to Discord: guild={}, channel={}",
        call_id, guild_id, channel_id
    );

    // Step 3: Track the SIP call
    sip_calls.insert(
        call_id,
        SipCallInfo {
            channel_id: None,
            _user_id: None,
            _guild_id: Some(guild_id),
            tracking_id: Some(tracking_id.clone()),
        },
    );

    // Step 4: Play connecting sound loop
    // NOTE(review): unlike the incoming-call path, a missing connecting sound
    // is silently skipped here (no warn!) — confirm this asymmetry is intended.
    if let Some(connecting_samples) = sound_manager.get_connecting_samples() {
        let _ = sip_cmd_tx.send(SipCommand::StartConnectingLoop {
            call_id,
            samples: (*connecting_samples).clone(),
        });
    }

    // Step 5: Check for guild conflict (bot already active in this guild)
    // For outbound calls, don't try to override the bot if it's already connected
    // to any channel in this guild (whether same or different channel).
    let mut conflicting_channel: Option = None;
    for entry in bridges.iter() {
        let existing_channel_id = *entry.key();
        let existing_bridge = entry.value();

        if existing_bridge.guild_id == guild_id {
            conflicting_channel = Some(existing_channel_id);
            break;
        }
    }
    // Also check pending bridges (bridge creation in progress)
    if conflicting_channel.is_none() && pending_bridges.contains(&channel_id) {
        conflicting_channel = Some(channel_id);
    }

    if let Some(existing_channel_id) = conflicting_channel {
        warn!(
            "Guild {} already has active bridge to channel {} (outbound call {} tried channel {})",
            guild_id, existing_channel_id, call_id, channel_id
        );
        backend.report_call_status(&req.call_id, "failed");
        let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id });
        sip_calls.remove(&call_id);
        return;
    }

    // Step 6: Create new bridge (no existing bridge in this guild — checked above)
    {
        pending_bridges.insert(channel_id);
        info!(
            "Creating new bridge for channel {} (outbound call {})",
            channel_id, call_id
        );

        let bridge_id = format!("bridge_{}", channel_id);
        match DiscordVoiceConnection::connect(
            bridge_id.clone(),
            &shared_discord,
            guild_id,
            channel_id,
            discord_event_tx.clone(),
            health_check_notify.clone(),
        )
        .await
        {
            Ok(connection) => {
                // The callee may have hung up during the (slow) Discord
                // connect — tear the fresh connection back down if so.
                if !sip_calls.contains_key(&call_id) {
                    warn!(
                        "Outbound call {} ended while connecting to Discord",
                        call_id
                    );
                    connection.disconnect().await;
                    pending_bridges.remove(&channel_id);
                    notify_bridge_ready(&bridge_ready_notifiers, channel_id);
                    return;
                }

                info!(
                    "Discord connection established for channel {} (outbound call {})",
                    channel_id, call_id
                );

                // Set up Discord→SIP ring buffers for this channel
                setup_channel_ring_buffers(channel_id);

                let mut sip_calls_set = HashSet::new();
                sip_calls_set.insert(call_id);

                bridges.insert(
                    channel_id,
                    ChannelBridge {
                        guild_id,
                        discord_connection: connection,
                        sip_calls: sip_calls_set,
                        bot_token: req.bot_token.clone(),
                        last_call_time: Instant::now(),
                        created_at: Instant::now(),
                        reconnect_attempts: 0,
                        last_reconnect_at: None,
                    },
                );

                pending_bridges.remove(&channel_id);
                notify_bridge_ready(&bridge_ready_notifiers, channel_id);

                if let Some(mut call) = sip_calls.get_mut(&call_id) {
                    call.channel_id = Some(channel_id);
                    call._guild_id = Some(guild_id);
                }

                register_call_channel(call_id, channel_id);
                // No Answer command here: the SIP leg is already established
                // for an answered outbound call; just play the join sound.
                play_discord_join(call_id, &sound_manager, &sip_cmd_tx).await;
            }
            Err(e) => {
                pending_bridges.remove(&channel_id);
                notify_bridge_ready(&bridge_ready_notifiers, channel_id);
                error!(
                    "Failed to connect to Discord for outbound call {}: {}",
                    call_id, e
                );
                backend.report_call_status(&req.call_id, "failed");
                let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id });
                sip_calls.remove(&call_id);
            }
        }
    }
}

/// Play the discord join sound
// NOTE(review): generic args stripped by extraction — `&Sender,` is
// presumably `&Sender<SipCommand>`; confirm against original source.
async fn play_discord_join(
    call_id: CallId,
    sound_manager: &SoundManager,
    sip_cmd_tx: &Sender,
) {
    if let Some(samples) = sound_manager.get_discord_join_samples() {
        info!("Playing Discord join sound for call {}", call_id);
        let _ = sip_cmd_tx.send(SipCommand::PlayDirectToCall {
            call_id,
            samples: (*samples).clone(),
        });
    } else {
        warn!("No discord_join sound configured");
    }
}

/// Play an
error sound and hangup +async fn play_error_and_hangup( + call_id: CallId, + error: CallError, + sound_manager: &SoundManager, + sip_cmd_tx: &Sender, +) { + info!("Playing error audio for call {}: {:?}", call_id, error); + + // The call was already answered with 183, so we can play audio + // Send 200 OK to fully answer before playing error + let _ = sip_cmd_tx.send(SipCommand::Answer { call_id }); + tokio::time::sleep(Duration::from_millis(200)).await; + + if let Some(samples) = sound_manager.get_error_samples(error.sound_name()) { + let _ = sip_cmd_tx.send(SipCommand::PlayDirectToCall { + call_id, + samples: (*samples).clone(), + }); + + // Wait for playback + let duration_ms = (samples.len() as u64 * 1000) / CONF_SAMPLE_RATE as u64; + tokio::time::sleep(Duration::from_millis(duration_ms + 200)).await; + } else { + warn!("No error sound '{}' configured", error.sound_name()); + } + + info!("Hanging up call {} after error audio", call_id); + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); +} + +/// Play an extension-based sound (easter egg) and hangup +/// +/// For streaming sounds (large files), this uses the port-based pull model +/// which provides precise timing controlled by the audio thread. The hangup +/// is handled automatically when playback completes. +/// +/// For test tones, this plays a 440Hz sine wave until the caller hangs up. +async fn play_extension_sound_and_hangup( + call_id: CallId, + sound_name: &str, + sound_manager: &SoundManager, + sip_cmd_tx: &Sender, +) { + info!( + "Playing extension sound '{}' for call {}", + sound_name, call_id + ); + + // Answer the call first + // NOTE: Previously had 200ms delay here which caused RTP timestamp debt + // and initial burst of packets. Now we start streaming immediately. 
+ let _ = sip_cmd_tx.send(SipCommand::Answer { call_id }); + + // Check if this is a test tone (virtual sound) + if sound_manager.is_test_tone(sound_name) { + info!("Starting 440Hz test tone for call {}", call_id); + let _ = sip_cmd_tx.send(SipCommand::StartTestTone { call_id }); + // Don't hangup - plays until caller hangs up + return; + } + + // Check if this is a streaming sound (large file) + if sound_manager.is_streaming(sound_name) { + if let Some(config) = sound_manager.get_streaming(sound_name) { + info!( + "Starting streaming playback '{}' from {} for call {}", + sound_name, + config.path.display(), + call_id + ); + + // Use the new port-based streaming approach + // The audio thread handles timing and the hangup happens automatically when done + let _ = sip_cmd_tx.send(SipCommand::StartStreaming { + call_id, + path: config.path.clone(), + }); + + // Don't hangup here - the streaming player will hangup when done + // or when the call ends (detected via CALL_CONF_PORTS check) + return; + } + } + + // Preloaded sound - play all at once + if let Some(sound) = sound_manager.get_preloaded(sound_name) { + let _ = sip_cmd_tx.send(SipCommand::PlayDirectToCall { + call_id, + samples: (*sound.samples).clone(), + }); + + // Wait for playback + tokio::time::sleep(Duration::from_millis(sound.duration_ms + 200)).await; + } else { + warn!("Sound '{}' not found", sound_name); + } + + info!("Hanging up call {} after extension sound", call_id); + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); +} + +/// Wake up any tasks waiting for a bridge to become ready for the given channel. +/// Also cleans up the Notify entry since it's no longer needed. +fn notify_bridge_ready(notifiers: &DashMap>, channel_id: Snowflake) { + if let Some((_, notify)) = notifiers.remove(&channel_id) { + notify.notify_waiters(); + } +} + +/// Poll a crossbeam channel for the next event, with queue monitoring and periodic logging. 
+///
+/// Returns `Some(event)` when an event is received, or `None` when the channel is disconnected.
+/// Sleeps 10ms when the channel is empty to avoid busy-waiting.
+async fn poll_recv<T>(rx: &Receiver<T>, name: &str, event_count: &mut u64) -> Option<T> {
+    loop {
+        let queue_len = rx.len();
+        if queue_len > 50 && event_count.is_multiple_of(50) {
+            warn!("{} event queue HIGH: {} events pending", name, queue_len);
+        }
+
+        match rx.try_recv() {
+            Ok(event) => {
+                *event_count += 1;
+
+                if event_count.is_multiple_of(500) {
+                    trace!(
+                        "{} event handler: processed {} events, queue depth: {}",
+                        name,
+                        event_count,
+                        queue_len
+                    );
+                }
+
+                return Some(event);
+            }
+            Err(crossbeam_channel::TryRecvError::Empty) => {
+                tokio::time::sleep(Duration::from_millis(10)).await;
+            }
+            Err(crossbeam_channel::TryRecvError::Disconnected) => return None,
+        }
+    }
+}
+
+/// Fax audio processing task.
+///
+/// Runs on a 20ms timer tick (matching the audio frame rate). Each tick:
+/// 1. Drains all available RX audio and feeds it to SpanDSP
+/// 2. Generates exactly one frame of TX audio from SpanDSP (CED, T.30 signaling)
+///
+/// The timer pacing is critical — SpanDSP's fax_tx() advances its internal clock
+/// by the number of samples generated. Without pacing, TX runs at >100x real-time
+/// and the T.30 state machine expires prematurely.
+async fn process_fax_audio( + call_id: CallId, + fax_session: Arc>, + audio_ports: Option, + sip_cmd_tx: Sender, +) { + use crate::transport::sip::CONF_SAMPLE_RATE; + + let samples_per_frame = (CONF_SAMPLE_RATE * 20 / 1000) as usize; // 320 samples = 20ms + let mut read_buf = vec![0i16; samples_per_frame]; + let mut tx_buf = vec![0i16; samples_per_frame]; + + let (mut rx_consumer, mut tx_producer) = match audio_ports { + Some(ports) => (ports.rx_consumer, ports.tx_producer), + None => { + // If we couldn't create the audio port initially, wait and retry + debug!( + "Fax call {} — waiting for audio port to become available...", + call_id + ); + tokio::time::sleep(Duration::from_secs(2)).await; + + match crate::fax::audio_port::create_fax_audio_port(call_id).await { + Some(ports) => (ports.rx_consumer, ports.tx_producer), + None => { + error!( + "Failed to create fax audio port for call {} after retry", + call_id + ); + let mut session = fax_session.lock().await; + session + .post_failure("Failed to establish audio path for fax reception") + .await; + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + return; + } + } + } + }; + + debug!("Fax audio processing started for call {}", call_id); + + // 20ms interval — matches the conference bridge frame rate. + // This paces TX generation at real-time so SpanDSP's internal clock stays in sync. + let mut interval = tokio::time::interval(Duration::from_millis(20)); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + let mut tx_audio_frames: u64 = 0; + let mut tx_silent_frames: u64 = 0; + let mut rx_frames: u64 = 0; + let mut tick_count: u64 = 0; + + loop { + interval.tick().await; + tick_count += 1; + + let mut session = fax_session.lock().await; + + // 1. 
Drain all available RX audio and feed to SpanDSP + loop { + if rx_consumer.slots() < samples_per_frame { + break; + } + match rx_consumer.read_chunk(samples_per_frame) { + Ok(chunk) => { + let (first, second) = chunk.as_slices(); + read_buf[..first.len()].copy_from_slice(first); + if !second.is_empty() { + read_buf[first.len()..first.len() + second.len()].copy_from_slice(second); + } + chunk.commit_all(); + session.feed_audio(&read_buf[..samples_per_frame]); + rx_frames += 1; + } + Err(_) => { + debug!("Fax RX ring buffer closed for call {}", call_id); + drop(session); + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + debug!("Fax audio processing ended for call {}", call_id); + return; + } + } + } + + // 2. Generate exactly one frame of TX audio (20ms at 16kHz = 320 samples) + let tx_generated = session.generate_tx_16k(&mut tx_buf); + if tx_generated > 0 { + tx_audio_frames += 1; + if tx_audio_frames == 1 { + debug!( + "Fax {} TX: first audio frame generated (tick {})", + call_id, tick_count + ); + } + let tx_available = tx_producer.slots(); + let to_write = tx_generated.min(tx_available); + if to_write > 0 { + if let Ok(mut chunk) = tx_producer.write_chunk(to_write) { + let (first, second) = chunk.as_mut_slices(); + let first_len = first.len().min(to_write); + first[..first_len].copy_from_slice(&tx_buf[..first_len]); + if first_len < to_write { + second[..to_write - first_len] + .copy_from_slice(&tx_buf[first_len..to_write]); + } + chunk.commit_all(); + } + } + } else { + tx_silent_frames += 1; + } + + // Log diagnostics every 5 seconds (250 ticks) + if tick_count.is_multiple_of(250) { + let rx_drops = crate::fax::audio_port::get_rx_drop_count(call_id); + if rx_drops > 0 { + warn!( + "Fax {} audio: tick={}, rx={} frames, tx={} audio/{} silent, RX DROPS={}", + call_id, tick_count, rx_frames, tx_audio_frames, tx_silent_frames, rx_drops + ); + } else { + debug!( + "Fax {} audio: tick={}, rx={} frames, tx={} audio/{} silent", + call_id, tick_count, 
rx_frames, tx_audio_frames, tx_silent_frames + ); + } + } + + // 3. Check for completion / errors / timeout + if session.is_finished() { + if matches!( + session.state, + crate::fax::session::FaxState::Received | crate::fax::session::FaxState::Complete + ) { + debug!("Fax {} reception complete, converting and posting", call_id); + if let Err(e) = session.convert_and_post().await { + error!("Failed to convert/post fax for call {}: {}", call_id, e); + session.post_failure("Failed to process received fax").await; + } + } + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + break; + } + + if session.is_timed_out() { + warn!("Fax {} timed out during processing", call_id); + session.post_failure("Fax reception timed out").await; + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + break; + } + } + + debug!("Fax audio processing ended for call {}", call_id); +} + +/// Handle switching a fax session from G.711 to T.38. +/// +/// The T.38 re-INVITE has already been answered synchronously in the PJSUA +/// callback. The pre-bound UDPTL socket is in T38_PRESOCKETS. +/// +/// 1. Takes pre-bound socket from T38_PRESOCKETS, converts to tokio +/// 2. Creates FaxT38Receiver +/// 3. Switches the FaxSession from audio to T.38 mode +/// 4. Removes fax audio port (stops audio capture) +/// 5. Spawns UDPTL processing tasks (rx, tx, timer) +async fn handle_t38_switch( + call_id: CallId, + remote_ip: String, + remote_port: u16, + local_port: u16, + fax_session: Arc>, + cancel_token: CancellationToken, + sip_cmd_tx: Sender, +) { + // 1. 
Take pre-bound socket from the global map (placed there by the PJSUA callback) + let std_socket = match crate::transport::sip::T38_PRESOCKETS.remove(&*call_id) { + Some((_key, socket)) => socket, + None => { + error!( + "No pre-bound UDPTL socket for call {} in T38_PRESOCKETS", + call_id + ); + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + return; + } + }; + + // Convert std::net::UdpSocket → tokio::net::UdpSocket + std_socket.set_nonblocking(true).ok(); + let tokio_socket = match tokio::net::UdpSocket::from_std(std_socket) { + Ok(s) => s, + Err(e) => { + error!( + "Failed to convert UDPTL socket to tokio for call {}: {}", + call_id, e + ); + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + return; + } + }; + let udptl_socket = AsyncUdptlSocket::new(tokio_socket); + + // Connect to remote UDPTL endpoint + let remote_addr = match format!("{}:{}", remote_ip, remote_port).parse() { + Ok(addr) => addr, + Err(e) => { + error!( + "Invalid remote UDPTL address {}:{} for call {}: {}", + remote_ip, remote_port, call_id, e + ); + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + return; + } + }; + udptl_socket.connect(remote_addr); + + // 2. Create T.38 IFP sender channel + let (tx_ifp_sender, tx_ifp_receiver) = tokio::sync::mpsc::unbounded_channel::>(); + + // 3. Create FaxT38Receiver + let t38_receiver = { + let session = fax_session.lock().await; + let tiff_path = session.tiff_dir.join("received.tiff"); + match FaxT38Receiver::new(&tiff_path, tx_ifp_sender) { + Ok(r) => r, + Err(e) => { + error!( + "Failed to create FaxT38Receiver for call {}: {}", + call_id, e + ); + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + return; + } + } + }; + + // 4. Switch the session from audio to T.38 + { + let mut session = fax_session.lock().await; + session.switch_to_t38(t38_receiver); + } + + // 5. 
Remove fax audio port (stop G.711 audio capture) + crate::fax::audio_port::remove_fax_audio_port(call_id); + + info!( + "T.38 switch complete for call {}: local_port={}, remote={}:{}", + call_id, local_port, remote_ip, remote_port + ); + + // 6. Spawn UDPTL processing task + let udptl_socket = Arc::new(udptl_socket); + process_fax_t38( + call_id, + fax_session, + udptl_socket, + tx_ifp_receiver, + cancel_token, + sip_cmd_tx, + ) + .await; +} + +/// T.38 fax processing task. +/// +/// Runs the UDPTL receive loop, timer loop, and TX loop concurrently. +/// Feeds IFP packets to FaxSession (which feeds SpanDSP T38Terminal), +/// and handles completion/errors. +async fn process_fax_t38( + call_id: CallId, + fax_session: Arc>, + udptl_socket: Arc, + mut tx_ifp_receiver: tokio::sync::mpsc::UnboundedReceiver>, + cancel_token: CancellationToken, + sip_cmd_tx: Sender, +) { + info!("T.38 fax processing started for call {}", call_id); + + // TX task: Send outgoing IFP packets from SpanDSP to the UDPTL socket + let udptl_tx = udptl_socket.clone(); + let tx_call_id = call_id; + let tx_handle = tokio::spawn(async move { + let mut tx_count: u64 = 0; + while let Some(ifp_data) = tx_ifp_receiver.recv().await { + tx_count += 1; + debug!( + "UDPTL TX #{} for call {}: {}B IFP", + tx_count, + tx_call_id, + ifp_data.len() + ); + if let Err(e) = udptl_tx.send_ifp(&ifp_data).await { + warn!("UDPTL TX error for call {}: {}", tx_call_id, e); + break; + } + } + info!( + "UDPTL TX task ended for call {} after {} packets", + tx_call_id, tx_count + ); + }); + + // RX + Timer loop (combined to avoid lock contention) + let udptl_rx = udptl_socket.clone(); + let mut timer_interval = tokio::time::interval(Duration::from_millis(20)); + timer_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + loop { + tokio::select! 
{ + // Cancelled by CallEnded handler — exit cleanly + _ = cancel_token.cancelled() => { + debug!("T.38 task for call {} cancelled by CallEnded", call_id); + break; + } + + // Receive UDPTL packets + result = udptl_rx.recv_packet() => { + match result { + Ok(packet) => { + debug!( + "UDPTL RX seq={} for call {}: {}B primary + {} redundant", + packet.seq_number, call_id, packet.primary_ifp.len(), packet.redundant_ifps().len() + ); + + let mut session = fax_session.lock().await; + + let completed = session.feed_t38_ifp( + &packet.primary_ifp, + packet.seq_number, + ); + + if completed { + debug!("Fax {} T.38 reception complete, converting and posting", call_id); + if let Err(e) = session.convert_and_post().await { + error!("Failed to convert/post fax for call {}: {}", call_id, e); + session.post_failure("Failed to process received fax").await; + } + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + break; + } + + if session.is_finished() { + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + break; + } + } + Err(e) => { + warn!("UDPTL RX error for call {}: {}", call_id, e); + // Single packet errors are OK — continue receiving + } + } + } + + // Timer tick: drive T.38 state machine + _ = timer_interval.tick() => { + let mut session = fax_session.lock().await; + + let completed = session.drive_t38_timer(); + + if completed { + debug!("Fax {} T.38 timer-driven completion", call_id); + if let Err(e) = session.convert_and_post().await { + error!("Failed to convert/post fax for call {}: {}", call_id, e); + session.post_failure("Failed to process received fax").await; + } + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + break; + } + + if session.is_finished() { + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + break; + } + + if session.is_timed_out() { + warn!("Fax {} T.38 timed out during processing", call_id); + session.post_failure("Fax reception timed out").await; + let _ = sip_cmd_tx.send(SipCommand::Hangup { call_id }); + 
break;
+            }
+        }
+    }
+}
+
+    // Clean up TX task
+    tx_handle.abort();
+
+    debug!("T.38 fax processing ended for call {}", call_id);
+}
diff --git a/sipcord-bridge/src/config.rs b/sipcord-bridge/src/config.rs
new file mode 100644
index 0000000..bce6cf5
--- /dev/null
+++ b/sipcord-bridge/src/config.rs
@@ -0,0 +1,535 @@
+use anyhow::{Context, Result};
+use std::collections::HashMap;
+use std::path::{Path, PathBuf};
+use std::sync::OnceLock;
+
+/// Global application config (loaded from config.toml)
+pub static APP_CONFIG: OnceLock<AppConfig> = OnceLock::new();
+
+/// Global environment config (parsed once at startup via `envy`)
+static ENV_CONFIG: OnceLock<EnvConfig> = OnceLock::new();
+
+fn default_data_dir() -> String {
+    "/var/lib/sipcord".to_string()
+}
+fn default_config_path() -> String {
+    "./config.toml".to_string()
+}
+fn default_bridge_id() -> String {
+    "br_unknown".to_string()
+}
+fn default_sounds_dir() -> String {
+    "./wav".to_string()
+}
+fn default_sip_port() -> u16 {
+    5060
+}
+fn default_rtp_port_start() -> u16 {
+    10000
+}
+fn default_rtp_port_end() -> u16 {
+    15000
+}
+fn default_tls_port() -> u16 {
+    5061
+}
+fn default_tls_refresh() -> u64 {
+    3600
+}
+fn default_dialplan_path() -> String {
+    "./dialplan.toml".to_string()
+}
+
+/// All environment variables consumed by the bridge, deserialized once at startup.
+#[derive(Debug, Clone, serde::Deserialize)] +pub struct EnvConfig { + // Paths & Identity + #[serde(default = "default_data_dir")] + pub data_dir: String, + #[serde(default = "default_config_path")] + pub config_path: String, + #[serde(default = "default_bridge_id")] + pub bridge_id: String, + #[serde(default = "default_sounds_dir")] + pub sounds_dir: String, + + // Mode + #[serde(default)] + pub dev_mode: bool, + + // SIP + pub sip_public_host: Option, + #[serde(default = "default_sip_port")] + pub sip_port: u16, + #[serde(default = "default_rtp_port_start")] + pub rtp_port_start: u16, + #[serde(default = "default_rtp_port_end")] + pub rtp_port_end: u16, + pub rtp_public_ip: Option, + pub sip_local_host: Option, + pub sip_local_cidr: Option, + + // TLS + pub tls_cert_dir: Option, + #[serde(default = "default_tls_port")] + pub tls_port: u16, + #[serde(default = "default_tls_refresh")] + pub tls_refresh_interval: u64, + + // Static router + pub discord_bot_token: Option, + #[serde(default = "default_dialplan_path")] + pub dialplan_path: String, +} + +impl EnvConfig { + /// Parse environment variables (via `envy`) and store in the global `OnceLock`. + /// Call once at the top of `main()`. + pub fn init() -> Result<()> { + dotenvy::dotenv().ok(); + let cfg: EnvConfig = + envy::from_env().context("Failed to parse environment variables into EnvConfig")?; + ENV_CONFIG + .set(cfg) + .ok() + .context("EnvConfig already initialized")?; + Ok(()) + } + + /// Access the global `EnvConfig` (panics if `init()` was not called). + pub fn global() -> &'static EnvConfig { + ENV_CONFIG + .get() + .expect("EnvConfig not initialized — call EnvConfig::init() first") + } + + /// Build a `SipConfig` from the parsed environment. 
+ pub fn to_sip_config(&self) -> Result { + let public_host = self + .sip_public_host + .clone() + .context("SIP_PUBLIC_HOST required")?; + + let local_net = match (&self.sip_local_host, &self.sip_local_cidr) { + (Some(host), Some(cidr)) => Some(LocalNetConfig { + host: host.clone(), + cidr: cidr.clone(), + }), + _ => None, + }; + + Ok(SipConfig { + public_host, + port: self.sip_port, + rtp_port_start: self.rtp_port_start, + rtp_port_end: self.rtp_port_end, + rtp_public_ip: self.rtp_public_ip.clone(), + local_net, + }) + } + + /// Build a `TlsConfig` from the parsed environment. + pub fn to_tls_config(&self) -> TlsConfig { + let cert_dir = self + .tls_cert_dir + .as_ref() + .map(PathBuf::from) + .unwrap_or_else(|| PathBuf::from(&self.data_dir).join("certs")); + + TlsConfig { + cert_dir, + port: self.tls_port, + refresh_interval_secs: self.tls_refresh_interval, + } + } + + /// Return the SIP public host, falling back to `"0.0.0.0"` when unset. + pub fn sip_public_host_or_default(&self) -> &str { + self.sip_public_host.as_deref().unwrap_or("0.0.0.0") + } + + /// Return the resolved DATA_DIR path, applying the smart fallback: + /// if the default `/var/lib/sipcord` doesn't exist on disk, fall back to `.`. 
+ pub fn resolved_data_dir(&self) -> String { + if self.data_dir == "/var/lib/sipcord" && !Path::new(&self.data_dir).exists() { + ".".to_string() + } else { + self.data_dir.clone() + } + } +} + +/// Application-level configuration from config.toml +#[derive(Debug, Clone, serde::Deserialize)] +pub struct AppConfig { + pub sounds: SoundsConfig, + #[serde(default)] + pub bridge: BridgeConfig, + #[serde(default)] + pub audio: AudioConfig, + #[serde(default)] + pub fax: FaxConfig, +} + +/// Bridge operational settings +#[derive(Debug, Clone, serde::Deserialize)] +#[serde(default)] +pub struct BridgeConfig { + /// Seconds without RTP before a call is considered dead + pub rtp_inactivity_timeout_secs: u64, + /// Seconds to wait for the first RTP packet before declaring no audio + /// (faster than rtp_inactivity_timeout for calls that never receive any audio) + pub no_audio_timeout_secs: u64, + /// Seconds before destroying a bridge with no SIP calls + pub empty_bridge_grace_period_secs: u64, + /// Maximum samples buffered per channel (Discord->SIP direction) + pub max_channel_buffer_samples: usize, + /// API request timeout in seconds + pub api_timeout_secs: u64, + /// Health check interval in seconds + pub health_check_interval_secs: u64, + /// Maximum voice join retry attempts + pub voice_join_max_retries: u32, + /// Delay between voice join retries in seconds + pub voice_join_retry_delay_secs: u64, + /// PJSIP internal log level (0-6, filtered via tracing) + pub pjsip_log_level: u32, + /// Maximum reconnection attempts before tearing down the bridge + pub reconnect_max_attempts: u32, + /// Base delay (seconds) for exponential backoff between reconnections + pub reconnect_base_delay_secs: u64, + /// Maximum backoff delay cap (seconds) + pub reconnect_max_delay_secs: u64, + /// Minimum bridge age (seconds) before it can be reconnected (cooldown) + pub reconnect_min_age_secs: u64, + /// Maximum reconnections allowed per health check cycle + pub reconnect_max_per_cycle: 
usize, +} + +impl Default for BridgeConfig { + fn default() -> Self { + Self { + rtp_inactivity_timeout_secs: 60, + no_audio_timeout_secs: 10, + empty_bridge_grace_period_secs: 30, + max_channel_buffer_samples: 32000, + api_timeout_secs: 10, + health_check_interval_secs: 5, + voice_join_max_retries: 2, + voice_join_retry_delay_secs: 5, + pjsip_log_level: 4, + reconnect_max_attempts: 5, + reconnect_base_delay_secs: 5, + reconnect_max_delay_secs: 300, + reconnect_min_age_secs: 30, + reconnect_max_per_cycle: 3, + } + } +} + +/// Audio pipeline settings +#[derive(Debug, Clone, serde::Deserialize)] +#[serde(default)] +pub struct AudioConfig { + /// Ring buffer size in samples for Discord audio streaming + pub ring_buffer_samples: usize, + /// Pre-buffer samples before starting Discord audio playback + pub pre_buffer_samples: usize, + /// Amplitude threshold above which audio is considered speech + pub vad_silence_threshold: i16, + /// Amplitude threshold below which audio is considered muted + pub vad_mute_threshold: i16, + /// Consecutive silence frames before stopping speaking state + pub vad_silence_frames_before_stop: u32, +} + +impl Default for AudioConfig { + fn default() -> Self { + Self { + ring_buffer_samples: 96000, + pre_buffer_samples: 14400, + vad_silence_threshold: 200, + vad_mute_threshold: 50, + vad_silence_frames_before_stop: 15, + } + } +} + +/// Fax reception settings +#[derive(Debug, Clone, serde::Deserialize)] +#[serde(default)] +pub struct FaxConfig { + /// Directory for temporary fax files. Defaults to system temp dir. + pub tmp_folder: Option, + /// Filename prefix for fax TIFF/output files (e.g. 
"fax_") + pub prefix: String, + /// Output image format: "png" or "jpg" + pub output_format: String, +} + +impl Default for FaxConfig { + fn default() -> Self { + Self { + tmp_folder: None, + prefix: "fax_".to_string(), + output_format: "png".to_string(), + } + } +} + +/// Sound configuration section +#[derive(Debug, Clone, serde::Deserialize)] +pub struct SoundsConfig { + #[serde(flatten)] + pub entries: HashMap, +} + +/// Individual sound entry configuration +#[derive(Debug, Clone, serde::Deserialize)] +pub struct SoundEntry { + /// Source file path (relative to sounds directory). None for generated tones. + pub src: Option, + /// Whether to preload into memory (true) or stream from disk (false) + #[serde(default)] + pub preload: bool, + /// Optional extension that triggers this sound (for easter eggs) + #[serde(default)] + pub extension: Option, +} + +impl AppConfig { + /// Load configuration from a TOML file + pub fn load(path: &Path) -> Result { + let contents = std::fs::read_to_string(path) + .with_context(|| format!("Failed to read config file: {}", path.display()))?; + toml::from_str(&contents) + .with_context(|| format!("Failed to parse config file: {}", path.display())) + } + + /// Get the global application config (panics if not initialized) + pub fn global() -> &'static AppConfig { + APP_CONFIG + .get() + .expect("AppConfig not initialized - call AppConfig::load() first") + } + + /// Get bridge config (with defaults if not loaded yet) + pub fn bridge() -> &'static BridgeConfig { + APP_CONFIG.get().map(|c| &c.bridge).unwrap_or_else(|| { + static DEFAULT: OnceLock = OnceLock::new(); + DEFAULT.get_or_init(BridgeConfig::default) + }) + } + + /// Get audio config (with defaults if not loaded yet) + pub fn audio() -> &'static AudioConfig { + APP_CONFIG.get().map(|c| &c.audio).unwrap_or_else(|| { + static DEFAULT: OnceLock = OnceLock::new(); + DEFAULT.get_or_init(AudioConfig::default) + }) + } + + /// Get fax config (with defaults if not loaded yet) + pub fn 
fax() -> &'static FaxConfig {
+        APP_CONFIG.get().map(|c| &c.fax).unwrap_or_else(|| {
+            static DEFAULT: OnceLock<FaxConfig> = OnceLock::new();
+            DEFAULT.get_or_init(FaxConfig::default)
+        })
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct TlsConfig {
+    pub cert_dir: PathBuf,
+    pub port: u16,
+    pub refresh_interval_secs: u64,
+}
+
+#[derive(Debug, Clone)]
+pub struct SipConfig {
+    pub public_host: String,
+    pub port: u16,
+    pub rtp_port_start: u16,
+    pub rtp_port_end: u16,
+    /// Public IP address to advertise in SDP for RTP media (c= line)
+    /// If not set, pjsua will use the local interface IP which won't work for NAT
+    pub rtp_public_ip: Option<String>,
+    /// Local network support: rewrite Contact headers for clients in local_network to use local_host
+    /// This allows the bridge to serve both public and local clients simultaneously
+    pub local_net: Option<LocalNetConfig>,
+}
+
+#[derive(Debug, Clone)]
+pub struct LocalNetConfig {
+    /// Local host IP to use in Contact headers for local clients (e.g., 192.168.10.1)
+    pub host: String,
+    /// Local network CIDR - clients in this range get local_host in Contact (e.g., 192.168.10.0/24)
+    pub cidr: String,
+}
+
+impl SipConfig {
+    /// Load SIP configuration from environment variables.
+    /// Standalone method for backends that don't need the full Config.
+ pub fn from_env() -> Result { + EnvConfig::global().to_sip_config() + } +} + +impl TlsConfig { + pub fn cert_path(&self) -> PathBuf { + self.cert_dir.join("bridge.crt") + } + + pub fn key_path(&self) -> PathBuf { + self.cert_dir.join("bridge.key") + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_bridge_config_default() { + let c = BridgeConfig::default(); + assert_eq!(c.rtp_inactivity_timeout_secs, 60); + assert_eq!(c.no_audio_timeout_secs, 10); + assert_eq!(c.empty_bridge_grace_period_secs, 30); + assert_eq!(c.max_channel_buffer_samples, 32000); + assert_eq!(c.api_timeout_secs, 10); + assert_eq!(c.pjsip_log_level, 4); + } + + #[test] + fn test_audio_config_default() { + let c = AudioConfig::default(); + assert_eq!(c.ring_buffer_samples, 96000); + assert_eq!(c.pre_buffer_samples, 14400); + assert_eq!(c.vad_silence_threshold, 200); + assert_eq!(c.vad_mute_threshold, 50); + assert_eq!(c.vad_silence_frames_before_stop, 15); + } + + #[test] + fn test_fax_config_default() { + let c = FaxConfig::default(); + assert!(c.tmp_folder.is_none()); + assert_eq!(c.prefix, "fax_"); + assert_eq!(c.output_format, "png"); + } + + #[test] + fn test_resolved_data_dir_default_missing() { + let env = EnvConfig { + data_dir: "/var/lib/sipcord".to_string(), + config_path: "./config.toml".to_string(), + bridge_id: "br_test".to_string(), + sounds_dir: "./wav".to_string(), + dev_mode: false, + sip_public_host: None, + sip_port: 5060, + rtp_port_start: 10000, + rtp_port_end: 15000, + rtp_public_ip: None, + sip_local_host: None, + sip_local_cidr: None, + tls_cert_dir: None, + tls_port: 5061, + tls_refresh_interval: 3600, + discord_bot_token: None, + dialplan_path: "./dialplan.toml".to_string(), + }; + assert_eq!(env.resolved_data_dir(), "."); + } + + #[test] + fn test_resolved_data_dir_custom() { + let env = EnvConfig { + data_dir: "/tmp".to_string(), + config_path: "./config.toml".to_string(), + bridge_id: "br_test".to_string(), + sounds_dir: "./wav".to_string(), + 
dev_mode: false, + sip_public_host: None, + sip_port: 5060, + rtp_port_start: 10000, + rtp_port_end: 15000, + rtp_public_ip: None, + sip_local_host: None, + sip_local_cidr: None, + tls_cert_dir: None, + tls_port: 5061, + tls_refresh_interval: 3600, + discord_bot_token: None, + dialplan_path: "./dialplan.toml".to_string(), + }; + assert_eq!(env.resolved_data_dir(), "/tmp"); + } + + #[test] + fn test_to_tls_config_cert_dir_fallback() { + let env = EnvConfig { + data_dir: "/data".to_string(), + config_path: "./config.toml".to_string(), + bridge_id: "br_test".to_string(), + sounds_dir: "./wav".to_string(), + dev_mode: false, + sip_public_host: None, + sip_port: 5060, + rtp_port_start: 10000, + rtp_port_end: 15000, + rtp_public_ip: None, + sip_local_host: None, + sip_local_cidr: None, + tls_cert_dir: None, + tls_port: 5061, + tls_refresh_interval: 3600, + discord_bot_token: None, + dialplan_path: "./dialplan.toml".to_string(), + }; + let tls = env.to_tls_config(); + assert_eq!(tls.cert_dir, PathBuf::from("/data/certs")); + assert_eq!(tls.port, 5061); + } + + #[test] + fn test_tls_config_paths() { + let tls = TlsConfig { + cert_dir: PathBuf::from("/etc/ssl/sipcord"), + port: 5061, + refresh_interval_secs: 3600, + }; + assert_eq!( + tls.cert_path(), + PathBuf::from("/etc/ssl/sipcord/bridge.crt") + ); + assert_eq!(tls.key_path(), PathBuf::from("/etc/ssl/sipcord/bridge.key")); + } + + #[test] + fn test_app_config_load_valid_toml() { + let toml_content = r#" +[sounds] +join = { src = "join.wav", preload = true } + +[bridge] +rtp_inactivity_timeout_secs = 120 + +[audio] +ring_buffer_samples = 48000 + +[fax] +prefix = "test_" +"#; + let dir = std::env::temp_dir().join("sipcord_test_config"); + std::fs::create_dir_all(&dir).ok(); + let path = dir.join("test_config.toml"); + std::fs::write(&path, toml_content).unwrap(); + + let config = AppConfig::load(&path).unwrap(); + assert_eq!(config.bridge.rtp_inactivity_timeout_secs, 120); + assert_eq!(config.audio.ring_buffer_samples, 
48000); + assert_eq!(config.fax.prefix, "test_"); + assert!(config.sounds.entries.contains_key("join")); + } +} diff --git a/sipcord-bridge/src/fax/audio_port.rs b/sipcord-bridge/src/fax/audio_port.rs new file mode 100644 index 0000000..e3ffd20 --- /dev/null +++ b/sipcord-bridge/src/fax/audio_port.rs @@ -0,0 +1,344 @@ +//! Fax audio port — bidirectional audio between SIP and SpanDSP. +//! +//! For each fax call, we create a custom conference port that: +//! - Receives audio from the SIP call via `put_frame` → RX ring buffer → fax processing task +//! - Sends SpanDSP transmit audio (CED, T.30) via TX ring buffer → `get_frame` → SIP call +//! +//! This is analogous to the channel_audio.rs ports used for Discord↔SIP audio. + +use crate::transport::sip::ffi::types::{ + ConfPort, SendablePool, SendablePort, CALL_CONF_PORTS, CONF_CHANNELS, CONF_SAMPLE_RATE, + SAMPLES_PER_FRAME, +}; +use crate::transport::sip::CallId; +use dashmap::DashMap; +use parking_lot::Mutex; +use pjsua::*; +use rtrb::{Consumer, Producer}; +use std::collections::HashMap; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::OnceLock; +use tracing::{debug, error, warn}; + +/// Ring buffer capacity for fax audio (i16 mono @ 16kHz). +/// 16000 samples = 1 second of audio, generous buffer for fax processing. +const FAX_AUDIO_RING_BUFFER_SIZE: usize = 16000; + +/// Ring buffer capacity for fax TX audio (SpanDSP → SIP). +/// 3200 samples = 200ms — enough for timing jitter. +const FAX_TX_RING_BUFFER_SIZE: usize = 3200; + +/// Map from CallId → RX ring buffer producer (SIP audio → fax processing task). +/// The put_frame callback pushes audio samples here. +static FAX_RX_PRODUCERS: OnceLock>>> = OnceLock::new(); + +fn get_fax_rx_producers() -> &'static DashMap>> { + FAX_RX_PRODUCERS.get_or_init(DashMap::new) +} + +/// Map from CallId → TX ring buffer consumer (fax processing task → SIP caller). +/// The get_frame callback reads SpanDSP transmit audio from here. 
+static FAX_TX_CONSUMERS: OnceLock>>> = OnceLock::new(); + +fn get_fax_tx_consumers() -> &'static DashMap>> { + FAX_TX_CONSUMERS.get_or_init(DashMap::new) +} + +/// Map from CallId → RX frame drop count (incremented in put_frame when buffer is full). +static FAX_RX_DROP_COUNTS: OnceLock> = OnceLock::new(); + +fn get_fax_rx_drop_counts() -> &'static DashMap { + FAX_RX_DROP_COUNTS.get_or_init(DashMap::new) +} + +/// Get the number of RX audio frames dropped for a call (buffer full). +/// Returns 0 if no drops have been recorded. +pub fn get_rx_drop_count(call_id: CallId) -> u64 { + get_fax_rx_drop_counts() + .get(&(*call_id as i64)) + .map(|c| c.load(Ordering::Relaxed)) + .unwrap_or(0) +} + +/// Map from CallId → conference slot (for cleanup). +static FAX_CONF_SLOTS: OnceLock>> = OnceLock::new(); + +fn get_fax_slots() -> &'static Mutex> { + FAX_CONF_SLOTS.get_or_init(|| Mutex::new(HashMap::new())) +} + +/// Memory pool for fax ports +static FAX_PORT_POOL: OnceLock> = OnceLock::new(); + +/// Bidirectional ring buffer handles for a fax audio port. +pub struct FaxAudioPorts { + /// RX: SIP audio from caller → fax processing task (feeds SpanDSP fax_rx) + pub rx_consumer: Consumer, + /// TX: SpanDSP transmit audio → SIP caller (CED tones, T.30 signaling) + pub tx_producer: Producer, +} + +/// Create a bidirectional fax audio port for a call and connect it to the call's conference slot. +/// +/// Port creation and conference addition happen on the calling thread. +/// The bidirectional `pjmedia_conf_connect_port` calls are queued to the audio thread +/// to avoid racing with `pjmedia_port_get_frame`. 
+/// +/// Returns `FaxAudioPorts` with: +/// - `rx_consumer`: reads SIP audio (16kHz mono, 320 samples/20ms frames) +/// - `tx_producer`: writes SpanDSP transmit audio back to the caller +pub async fn create_fax_audio_port(call_id: CallId) -> Option { + // Get the call's conference port + let call_conf_port = { + let ports = CALL_CONF_PORTS.get_or_init(DashMap::new); + ports.get(&call_id).map(|r| *r) + }; + + let call_conf_port: ConfPort = match call_conf_port { + Some(p) if p.is_valid() => p, + _ => { + warn!( + "Cannot create fax audio port for call {} — no valid conference port", + call_id + ); + return None; + } + }; + + // Create RX ring buffer (SIP → fax processing) + let (rx_producer, rx_consumer) = rtrb::RingBuffer::new(FAX_AUDIO_RING_BUFFER_SIZE); + + // Create TX ring buffer (fax processing → SIP) + let (tx_producer, tx_consumer) = rtrb::RingBuffer::new(FAX_TX_RING_BUFFER_SIZE); + + let conf_slot = unsafe { + // Get or create the memory pool for fax ports + let pool = FAX_PORT_POOL.get_or_init(|| { + let pool = pjsua_pool_create(c"fax_ports".as_ptr() as *const _, 4096, 4096); + Mutex::new(SendablePool(pool)) + }); + let pool_ptr = pool.lock().0; + + // Allocate pjmedia_port structure + let port_size = std::mem::size_of::(); + let port = pj_pool_alloc(pool_ptr, port_size) as *mut pjmedia_port; + if port.is_null() { + error!("Failed to allocate fax audio port for call {}", call_id); + return None; + } + std::ptr::write_bytes(port as *mut u8, 0, port_size); + + // Initialize port info + let port_name = format!("fax{}", *call_id); + let port_name_cstr = std::ffi::CString::new(port_name).ok()?; + let signature = 0x4641_5852; // "FAXR" in hex + + pjmedia_port_info_init( + &mut (*port).info, + &pj_str(port_name_cstr.as_ptr() as *mut _), + signature, + CONF_SAMPLE_RATE, + CONF_CHANNELS, + 16, // bits per sample + SAMPLES_PER_FRAME as u32, + ); + + // Set callbacks + (*port).get_frame = Some(fax_port_get_frame); // Sends SpanDSP TX audio back to caller + 
(*port).put_frame = Some(fax_port_put_frame); // Captures SIP audio for SpanDSP + (*port).on_destroy = Some(fax_port_on_destroy); + + // Store call_id in port_data.ldata for O(1) lookup in callbacks + (*port).port_data.ldata = *call_id as i64; + + // Add to conference bridge + let mut slot: i32 = 0; + let status = pjsua_conf_add_port(pool_ptr, port, &mut slot); + if status != pj_constants__PJ_SUCCESS as i32 { + error!( + "Failed to add fax port to conference for call {}: {}", + call_id, status + ); + return None; + } + + let conf_slot = ConfPort::new(slot); + + // Store ring buffer handles for callbacks + get_fax_rx_producers().insert(*call_id as i64, Mutex::new(rx_producer)); + get_fax_tx_consumers().insert(*call_id as i64, Mutex::new(tx_consumer)); + get_fax_rx_drop_counts().insert(*call_id as i64, AtomicU64::new(0)); + + // Store slot for cleanup + get_fax_slots() + .lock() + .insert(call_id, (SendablePort(port), conf_slot)); + + conf_slot + }; + + // Queue the bidirectional conference connection to the audio thread + // This avoids racing with pjmedia_port_get_frame + let (done_tx, done_rx) = tokio::sync::oneshot::channel(); + use crate::transport::sip::ffi::types::{queue_pjsua_op, PendingPjsuaOp}; + queue_pjsua_op(PendingPjsuaOp::ConnectFaxPort { + call_id, + fax_slot: conf_slot, + call_conf_port, + done_tx, + }); + + match done_rx.await { + Ok(true) => { + debug!( + "Created fax audio port for call {} at slot {} (bidirectional with call conf_port {})", + call_id, conf_slot, call_conf_port + ); + Some(FaxAudioPorts { + rx_consumer, + tx_producer, + }) + } + Ok(false) => { + error!( + "Audio thread failed to connect fax port for call {} — cleaning up", + call_id + ); + remove_fax_audio_port(call_id); + None + } + Err(_) => { + error!( + "Audio thread dropped fax port connection signal for call {} — cleaning up", + call_id + ); + remove_fax_audio_port(call_id); + None + } + } +} + +/// Remove and clean up the fax audio port for a call. 
+pub fn remove_fax_audio_port(call_id: CallId) { + // Remove ring buffer handles first (stops callbacks from reading/writing) + get_fax_rx_producers().remove(&(*call_id as i64)); + get_fax_tx_consumers().remove(&(*call_id as i64)); + get_fax_rx_drop_counts().remove(&(*call_id as i64)); + + // Remove and clean up the conference port + let removed = get_fax_slots().lock().remove(&call_id); + if let Some((port, slot)) = removed { + unsafe { + // Disconnect from conference + pjsua_conf_remove_port(*slot); + + // Destroy the port + if !port.0.is_null() { + pjmedia_port_destroy(port.0); + } + } + debug!( + "Removed fax audio port for call {} (slot {})", + call_id, slot + ); + } +} + +/// get_frame callback — sends SpanDSP transmit audio (CED, T.30) back to the SIP caller. +/// +/// Reads from the TX ring buffer filled by the fax processing task. +/// Returns silence if no TX audio is available. +unsafe extern "C" fn fax_port_get_frame( + this_port: *mut pjmedia_port, + frame: *mut pjmedia_frame, +) -> pj_status_t { + if this_port.is_null() || frame.is_null() { + return pj_constants__PJ_SUCCESS as pj_status_t; + } + + let call_id_ldata = (*this_port).port_data.ldata; + + if let Some(consumer_entry) = get_fax_tx_consumers().get(&call_id_ldata) { + if let Some(mut consumer) = consumer_entry.try_lock() { + let available = consumer.slots(); + if available >= SAMPLES_PER_FRAME { + if let Ok(chunk) = consumer.read_chunk(SAMPLES_PER_FRAME) { + let (first, second) = chunk.as_slices(); + let buf = (*frame).buf as *mut i16; + let out = std::slice::from_raw_parts_mut(buf, SAMPLES_PER_FRAME); + out[..first.len()].copy_from_slice(first); + if !second.is_empty() { + out[first.len()..first.len() + second.len()].copy_from_slice(second); + } + chunk.commit_all(); + (*frame).type_ = pjmedia_frame_type_PJMEDIA_FRAME_TYPE_AUDIO; + (*frame).size = SAMPLES_PER_FRAME * 2; + return pj_constants__PJ_SUCCESS as pj_status_t; + } + } + } + } + + // No TX audio available — return silence audio frame 
(not NONE). + // Returning FRAME_TYPE_NONE can cause PJSIP's conference bridge to + // exclude this port from the audio mix, breaking the TX path. + let buf = (*frame).buf as *mut i16; + let out = std::slice::from_raw_parts_mut(buf, SAMPLES_PER_FRAME); + out.fill(0); + (*frame).type_ = pjmedia_frame_type_PJMEDIA_FRAME_TYPE_AUDIO; + (*frame).size = SAMPLES_PER_FRAME * 2; + pj_constants__PJ_SUCCESS as pj_status_t +} + +/// on_destroy callback — no-op since cleanup is done in remove_fax_audio_port(). +/// Required by PJSIP to avoid "on_destroy() not found" warning. +unsafe extern "C" fn fax_port_on_destroy(_this_port: *mut pjmedia_port) -> pj_status_t { + pj_constants__PJ_SUCCESS as pj_status_t +} + +/// put_frame callback — captures SIP audio and pushes to RX ring buffer for SpanDSP. +unsafe extern "C" fn fax_port_put_frame( + this_port: *mut pjmedia_port, + frame: *mut pjmedia_frame, +) -> pj_status_t { + if this_port.is_null() || frame.is_null() { + return pj_constants__PJ_SUCCESS as pj_status_t; + } + + // Only process audio frames with data + if (*frame).type_ != pjmedia_frame_type_PJMEDIA_FRAME_TYPE_AUDIO || (*frame).size == 0 { + return pj_constants__PJ_SUCCESS as pj_status_t; + } + + let call_id_ldata = (*this_port).port_data.ldata; + + // View frame buffer as i16 slice + let num_samples = (*frame).size / 2; + let frame_buf = (*frame).buf as *const i16; + let samples = std::slice::from_raw_parts(frame_buf, num_samples); + + // Push to RX ring buffer + if let Some(producer_entry) = get_fax_rx_producers().get(&call_id_ldata) { + if let Some(mut producer) = producer_entry.try_lock() { + let available = producer.slots(); + if available >= samples.len() { + if let Ok(mut chunk) = producer.write_chunk(samples.len()) { + let (first, second) = chunk.as_mut_slices(); + let first_len = first.len().min(samples.len()); + first[..first_len].copy_from_slice(&samples[..first_len]); + if first_len < samples.len() { + second[..samples.len() - 
first_len].copy_from_slice(&samples[first_len..]); + } + chunk.commit_all(); + } + } else { + // Buffer full — fax processing is falling behind. Track the drop. + if let Some(counter) = get_fax_rx_drop_counts().get(&call_id_ldata) { + counter.fetch_add(1, Ordering::Relaxed); + } + } + } + } + + pj_constants__PJ_SUCCESS as pj_status_t +} diff --git a/sipcord-bridge/src/fax/discord_poster.rs b/sipcord-bridge/src/fax/discord_poster.rs new file mode 100644 index 0000000..63d0111 --- /dev/null +++ b/sipcord-bridge/src/fax/discord_poster.rs @@ -0,0 +1,224 @@ +//! Discord message poster for fax sessions using serenity's HTTP client. +//! +//! Posts embed messages through the fax lifecycle: +//! - "Receiving fax..." (blurple) when negotiation starts +//! - Replaced with "Fax Received" (green) with page image gallery on success +//! - Edited to "Fax Failed" (red) with reason on failure + +use crate::services::snowflake::Snowflake; +use anyhow::{Context, Result}; +use serenity::all::{ChannelId, MessageId, UserId}; +use serenity::builder::{ + CreateAttachment, CreateEmbed, CreateEmbedFooter, CreateMessage, EditMessage, +}; +use serenity::http::Http; +use serenity::secrets::Token; +use std::sync::Arc; +use tracing::{debug, error, warn}; + +const COLOR_RECEIVING: u32 = 0x5865F2; // Discord blurple +const COLOR_COMPLETE: u32 = 0x57F287; // Green +const COLOR_FAILED: u32 = 0xED4245; // Red +const GALLERY_URL: &str = "https://sipcord.net/fax"; + +pub struct DiscordPoster { + http: Arc, + channel_id: ChannelId, + user_id: String, + /// Cached display name, resolved on first use + display_name: Option, +} + +impl DiscordPoster { + pub fn new(bot_token: String, channel_id: Snowflake, user_id: String) -> Self { + let token: Token = bot_token.parse().expect("invalid Discord bot token"); + Self { + http: Arc::new(Http::new(token)), + channel_id: ChannelId::new(*channel_id), + user_id, + display_name: None, + } + } + + /// Resolve and cache the Discord display name for the user. 
+ async fn resolve_display_name(&mut self) { + if self.display_name.is_some() { + return; + } + let name = match self.user_id.parse::() { + Ok(id) => match UserId::new(id).to_user(&self.http).await { + Ok(user) => user + .global_name + .map(|n| n.to_string()) + .unwrap_or_else(|| user.name.to_string()), + Err(e) => { + warn!("Failed to resolve Discord user {}: {}", self.user_id, e); + self.user_id.clone() + } + }, + Err(_) => self.user_id.clone(), + }; + self.display_name = Some(name); + } + + fn footer(&self) -> CreateEmbedFooter<'_> { + let name = self + .display_name + .as_deref() + .unwrap_or(self.user_id.as_str()); + CreateEmbedFooter::new(format!("From: @{}", name)) + } + + /// Post a "Receiving fax..." status message. Returns the message ID for future edits. + pub async fn post_fax_receiving(&mut self) -> Result { + self.resolve_display_name().await; + + let embed = CreateEmbed::new() + .title("Incoming Fax") + .description("Receiving fax...") + .color(COLOR_RECEIVING) + .footer(self.footer()); + + let msg = self + .channel_id + .widen() + .send_message(&self.http, CreateMessage::new().embed(embed)) + .await + .context("Failed to post fax receiving message")?; + + debug!("Posted fax receiving message: {}", msg.id); + Ok(msg.id.get()) + } + + /// Replace the "Receiving fax..." message with the completed fax and image attachments. + /// + /// Deletes the original status message and posts a new one with embeds + images. + /// Uses one embed per page with a shared URL so Discord renders them as a gallery. + /// `file_ext` is the file extension without dot (e.g. "png" or "jpg"). + /// + /// Discord limits messages to 10 embeds. For faxes with >10 pages, the first 10 + /// pages are shown in the embed gallery, and remaining pages are attached as files. + pub async fn edit_fax_complete( + &self, + message_id: u64, + image_pages: Vec>, + page_count: u32, + file_ext: &str, + ) -> Result<()> { + /// Discord's maximum number of embeds per message. 
+ const MAX_EMBEDS: u32 = 10; + + let embed_count = page_count.min(MAX_EMBEDS); + let has_overflow = page_count > MAX_EMBEDS; + + let description = if page_count == 1 { + "Fax received — 1 page".to_string() + } else if has_overflow { + format!( + "Fax received — {} pages (showing first {})", + page_count, MAX_EMBEDS + ) + } else { + format!("Fax received — {} pages", page_count) + }; + + // One embed per page (up to MAX_EMBEDS) with a shared URL for gallery rendering + let mut embeds = Vec::with_capacity(embed_count as usize); + for i in 0..embed_count { + let filename = format!("fax_page_{}.{}", i + 1, file_ext); + let image_url = format!("attachment://{}", filename); + + let embed = if i == 0 { + CreateEmbed::new() + .title("Fax Received") + .description(description.clone()) + .color(COLOR_COMPLETE) + .url(GALLERY_URL) + .image(image_url) + .footer(self.footer()) + } else { + CreateEmbed::new() + .color(COLOR_COMPLETE) + .url(GALLERY_URL) + .image(image_url) + }; + embeds.push(embed); + } + + // All pages are attached as files (embed pages get rendered in gallery, + // overflow pages appear as plain file attachments) + let attachments: Vec = image_pages + .into_iter() + .enumerate() + .map(|(i, data)| { + CreateAttachment::bytes(data, format!("fax_page_{}.{}", i + 1, file_ext)) + }) + .collect(); + + let mut edit = EditMessage::new().embeds(embeds); + for attachment in attachments { + edit = edit.new_attachment(attachment); + } + + if let Err(e) = self + .channel_id + .widen() + .edit_message(&self.http, MessageId::new(message_id), edit) + .await + { + error!( + "Discord API error editing fax complete (msg={}, {} pages): {}", + message_id, page_count, e + ); + anyhow::bail!("Failed to edit fax complete message: {}", e); + } + + Ok(()) + } + + /// Edit the status message to show a failure reason. 
+ pub async fn edit_fax_failed(&self, message_id: u64, reason: &str) -> Result<()> { + let embed = CreateEmbed::new() + .title("Fax Failed") + .description(reason) + .color(COLOR_FAILED) + .footer(self.footer()); + + if let Err(e) = self + .channel_id + .widen() + .edit_message( + &self.http, + MessageId::new(message_id), + EditMessage::new().embed(embed), + ) + .await + { + error!("Discord API error editing fax failed: {}", e); + } + + Ok(()) + } + + /// Post a standalone failure message (when no "receiving" message was posted). + pub async fn post_fax_failed(&mut self, reason: &str) -> Result<()> { + self.resolve_display_name().await; + + let embed = CreateEmbed::new() + .title("Fax Failed") + .description(reason) + .color(COLOR_FAILED) + .footer(self.footer()); + + if let Err(e) = self + .channel_id + .widen() + .send_message(&self.http, CreateMessage::new().embed(embed)) + .await + { + error!("Discord API error posting fax failed: {}", e); + } + + Ok(()) + } +} diff --git a/sipcord-bridge/src/fax/example.tiff b/sipcord-bridge/src/fax/example.tiff new file mode 100644 index 0000000..d136f25 Binary files /dev/null and b/sipcord-bridge/src/fax/example.tiff differ diff --git a/sipcord-bridge/src/fax/mod.rs b/sipcord-bridge/src/fax/mod.rs new file mode 100644 index 0000000..3193140 --- /dev/null +++ b/sipcord-bridge/src/fax/mod.rs @@ -0,0 +1,18 @@ +//! Incoming fax support — receives faxes over SIP and posts images to Discord. +//! +//! Supports two transport modes: +//! - **G.711 passthrough**: Demodulates fax tones from audio samples (SpanDSP FaxState) +//! - **T.38 native**: Receives IFP packets via UDPTL (SpanDSP T38Terminal) +//! +//! Architecture: +//! - FaxSession: State machine managing a single fax reception (audio or T.38) +//! - DiscordPoster: Posts/edits messages in Discord text channels with fax images +//! - SpanDSP wrapper: FFI to SpanDSP for fax demodulation (FaxReceiver + FaxT38Receiver) +//! 
- audio_port: Conference bridge port for capturing SIP audio (G.711 mode) +//! - UDPTL: UDP transport for T.38 IFP packets + +pub mod audio_port; +pub mod discord_poster; +pub mod session; +pub mod spandsp; +pub mod tiff_decoder; diff --git a/sipcord-bridge/src/fax/session.rs b/sipcord-bridge/src/fax/session.rs new file mode 100644 index 0000000..8c87487 --- /dev/null +++ b/sipcord-bridge/src/fax/session.rs @@ -0,0 +1,649 @@ +//! FaxSession state machine — manages a single incoming fax reception. +//! +//! Lifecycle: +//! 1. Created when a fax call is answered (ConnectFax route decision) +//! 2. Audio frames are fed via `feed_audio()` +//! 3. SpanDSP demodulates the fax tones into a TIFF file +//! 4. On completion, TIFF is converted to PNG and posted to Discord +//! 5. On failure or timeout, an error message is posted to Discord + +use crate::fax::discord_poster::DiscordPoster; +use crate::fax::spandsp::{FaxReceiver, FaxRxStatus, FaxT38Receiver}; +use crate::fax::tiff_decoder; +use crate::services::snowflake::Snowflake; +use crate::transport::sip::CallId; +use anyhow::Result; +use std::io::Cursor; +use std::path::PathBuf; +use std::time::Instant; +use tracing::{debug, error, info, warn}; + +/// Maximum duration for a fax session before timeout (5 minutes) +const FAX_TIMEOUT_SECS: u64 = 300; + +/// How the fax audio is being received +pub enum FaxSource { + /// G.711 audio passthrough + G711Audio, + /// T.38 UDPTL + T38Udptl, +} + +/// The active receiver — either audio-based or T.38 IFP-based. 
+enum FaxReceiverKind { + /// G.711 audio passthrough (demodulates fax tones from audio samples) + Audio(FaxReceiver), + /// T.38 UDPTL (receives IFP packets directly) + T38(FaxT38Receiver), +} + +/// Current state of the fax reception +pub enum FaxState { + /// Answered, feeding audio to SpanDSP, waiting for fax negotiation + WaitingForData, + /// SpanDSP confirmed fax negotiation started + Receiving { + /// Number of pages received so far + pages_received: u32, + }, + /// SpanDSP signaled fax complete, awaiting conversion and Discord posting + Received, + /// Fax posted to Discord successfully + Complete, + /// Fax reception failed + Failed(String), +} + +/// A single fax reception session +pub struct FaxSession { + /// SIP call ID for this fax + pub call_id: CallId, + /// Discord text channel to post the fax to + pub text_channel_id: Snowflake, + /// Guild ID (for logging) + pub guild_id: Snowflake, + /// User ID who owns this mapping + pub user_id: String, + /// Current state + pub state: FaxState, + /// How we're receiving the fax + pub source: FaxSource, + /// When this session was created + pub created_at: Instant, + /// Discord poster for this session + pub poster: DiscordPoster, + /// SpanDSP fax receiver (audio or T.38 mode) + receiver: FaxReceiverKind, + /// Temp directory for this fax session's TIFF output + pub tiff_dir: PathBuf, + /// Discord message ID for the "Receiving fax..." status message. + /// Stored separately so it survives state transitions to Complete/Failed. + receiving_message_id: Option, +} + +impl FaxSession { + /// Create a new fax session. Initializes SpanDSP in receive mode. 
+ pub fn new( + call_id: CallId, + text_channel_id: Snowflake, + guild_id: Snowflake, + user_id: String, + bot_token: String, + ) -> Result { + let fax_config = crate::config::AppConfig::fax(); + + // Use configured tmp_folder or system temp dir + let base_dir = fax_config + .tmp_folder + .clone() + .unwrap_or_else(std::env::temp_dir); + + // Generate a unique session ID for the filename + let session_id = format!("{:016x}", { + use std::time::{SystemTime, UNIX_EPOCH}; + let t = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default(); + // Mix timestamp with call_id using a prime constant for a unique session ID + t.as_nanos() as u64 ^ (*call_id as u64).wrapping_mul(0x517cc1b727220a95) + }); + + let tiff_dir = base_dir.join(format!("{}{}", fax_config.prefix, session_id)); + std::fs::create_dir_all(&tiff_dir)?; + let tiff_path = tiff_dir.join(format!("{}{}.tiff", fax_config.prefix, session_id)); + + let receiver = FaxReceiver::new_audio_receiver(&tiff_path)?; + + let poster = DiscordPoster::new(bot_token, text_channel_id, user_id.clone()); + + Ok(Self { + call_id, + text_channel_id, + guild_id, + user_id, + state: FaxState::WaitingForData, + source: FaxSource::G711Audio, + created_at: Instant::now(), + poster, + receiver: FaxReceiverKind::Audio(receiver), + tiff_dir, + receiving_message_id: None, + }) + } + + /// Feed audio samples from the SIP call (16kHz mono i16). + /// Downsamples to 8kHz and feeds to SpanDSP's fax_rx(). + /// Returns true if the fax is complete and ready for post-processing. + /// Only works in Audio mode — logs a warning and returns false if called in T.38 mode. 
+ pub fn feed_audio(&mut self, samples: &[i16]) -> bool { + // Check for timeout + if self.created_at.elapsed().as_secs() > FAX_TIMEOUT_SECS { + warn!( + "Fax session {} timed out after {}s", + self.call_id, + self.created_at.elapsed().as_secs() + ); + self.state = FaxState::Failed("Fax reception timed out".to_string()); + return false; + } + + if self.is_finished() { + return matches!(self.state, FaxState::Received | FaxState::Complete); + } + + let receiver = match &mut self.receiver { + FaxReceiverKind::Audio(r) => r, + FaxReceiverKind::T38(_) => { + warn!("feed_audio called on T.38 session {}", self.call_id); + return false; + } + }; + + let status = receiver.feed_samples_16k(samples); + self.handle_rx_status(status) + } + + /// Feed a T.38 IFP packet from the UDPTL socket to SpanDSP. + /// Returns true if the fax is complete and ready for post-processing. + /// Only works in T.38 mode. + pub fn feed_t38_ifp(&mut self, data: &[u8], seq: u16) -> bool { + if self.is_finished() { + return matches!(self.state, FaxState::Received | FaxState::Complete); + } + + let receiver = match &mut self.receiver { + FaxReceiverKind::T38(r) => r, + FaxReceiverKind::Audio(_) => { + warn!("feed_t38_ifp called on audio session {}", self.call_id); + return false; + } + }; + + let status = receiver.feed_ifp_packet(data, seq); + self.handle_rx_status(status) + } + + /// Drive the T.38 terminal timer (call every 20ms). + /// Returns true if the fax is complete and ready for post-processing. + pub fn drive_t38_timer(&mut self) -> bool { + if self.is_finished() { + return matches!(self.state, FaxState::Received | FaxState::Complete); + } + + let receiver = match &mut self.receiver { + FaxReceiverKind::T38(r) => r, + FaxReceiverKind::Audio(_) => return false, + }; + + let status = receiver.drive_timer(); + self.handle_rx_status(status) + } + + /// Common handler for FaxRxStatus from either audio or T.38 receiver. 
+ fn handle_rx_status(&mut self, status: FaxRxStatus) -> bool { + // Log stats on completion/error before delegating to pure state transition + match &status { + FaxRxStatus::Complete => { + if let Some(stats) = self.get_stats() { + info!( + "Fax {} complete: {} pages, {}bps, {}x{}, ECM={}, bad_rows={}", + self.call_id, + stats.pages_rx, + stats.bit_rate, + stats.image_width, + stats.image_length, + stats.ecm, + stats.bad_rows + ); + } + } + FaxRxStatus::Error(msg) => { + if let Some(stats) = self.get_stats() { + warn!( + "Fax {} failed: {} ({}bps, {}x{}, ECM={}, pages_rx={}, bad_rows={}, audio={:.1}s)", + self.call_id, + msg, + stats.bit_rate, + stats.image_width, + stats.image_length, + stats.ecm, + stats.pages_rx, + stats.bad_rows, + self.audio_duration_secs() + ); + } else { + warn!( + "Fax {} failed: {} (no stats, audio={:.1}s)", + self.call_id, + msg, + self.audio_duration_secs() + ); + } + } + FaxRxStatus::InProgress => {} + } + + let page_count = self.pages_received(); + apply_rx_status(&mut self.state, status, page_count) + } + + /// Number of pages received so far. + pub fn pages_received(&self) -> u32 { + match &self.receiver { + FaxReceiverKind::Audio(r) => r.pages_received(), + FaxReceiverKind::T38(r) => r.pages_received(), + } + } + + /// Get transfer statistics from SpanDSP. + fn get_stats(&self) -> Option { + match &self.receiver { + FaxReceiverKind::Audio(r) => r.get_stats(), + FaxReceiverKind::T38(r) => r.get_stats(), + } + } + + /// Check if this session has timed out + pub fn is_timed_out(&self) -> bool { + self.created_at.elapsed().as_secs() > FAX_TIMEOUT_SECS + } + + /// Check if the session is in a terminal state + pub fn is_finished(&self) -> bool { + matches!( + self.state, + FaxState::Received | FaxState::Complete | FaxState::Failed(_) + ) + } + + /// Post the initial "Receiving fax..." message to Discord. + /// Called when fax negotiation is detected. 
+ pub async fn post_receiving_message(&mut self) -> Result<()> { + match self.poster.post_fax_receiving().await { + Ok(msg_id) => { + debug!( + "Posted 'Receiving fax...' message {} to channel {} (call {})", + msg_id, self.text_channel_id, self.call_id + ); + self.receiving_message_id = Some(msg_id); + self.state = FaxState::Receiving { pages_received: 0 }; + Ok(()) + } + Err(e) => { + error!( + "Failed to post receiving message to channel {}: {}", + self.text_channel_id, e + ); + self.state = FaxState::Failed(format!("Discord error: {}", e)); + Err(e) + } + } + } + + /// Post a failure message to Discord + pub async fn post_failure(&mut self, reason: &str) { + if let Some(discord_msg_id) = self.receiving_message_id { + if let Err(e) = self.poster.edit_fax_failed(discord_msg_id, reason).await { + error!("Failed to edit fax failure message: {}", e); + } + } else { + // No receiving message was posted — post a standalone failure + if let Err(e) = self.poster.post_fax_failed(reason).await { + error!("Failed to post fax failure message: {}", e); + } + } + self.state = FaxState::Failed(reason.to_string()); + } + + /// Convert the received TIFF to images and post to Discord. + /// Called after fax reception is complete. + pub async fn convert_and_post(&mut self) -> Result<()> { + // Guard against double-processing: if we've already posted (Complete) or failed, + // another caller (e.g., CallEnded racing with T.38 completion) already handled it. + // Note: FaxState::Received is NOT skipped — that's the normal entry state. 
+ if matches!(self.state, FaxState::Complete | FaxState::Failed(_)) { + debug!( + "convert_and_post called on already-finished session {} — skipping", + self.call_id + ); + return Ok(()); + } + + let (tiff_path, pages) = match &self.receiver { + FaxReceiverKind::Audio(r) => ( + r.tiff_output_path().to_path_buf(), + r.pages_received().max(1), + ), + FaxReceiverKind::T38(r) => ( + r.tiff_output_path().to_path_buf(), + r.pages_received().max(1), + ), + }; + let tiff_path = &tiff_path; + + let fax_config = crate::config::AppConfig::fax(); + let (output_format, file_ext) = match fax_config.output_format.as_str() { + "jpg" | "jpeg" => (OutputFormat::Jpeg, "jpg"), + _ => (OutputFormat::Png, "png"), + }; + + debug!( + "Converting TIFF to {} for call {}: {} ({} pages)", + output_format.label(), + self.call_id, + tiff_path.display(), + pages + ); + + let gray_images = tiff_decoder::decode_fax_tiff(tiff_path)?; + let image_pages: Vec> = gray_images + .into_iter() + .map(|img| { + let mut buf = Vec::new(); + image::DynamicImage::ImageLuma8(img) + .write_to(&mut Cursor::new(&mut buf), output_format.image_format()) + .map(|_| buf) + }) + .collect::, _>>()?; + + if image_pages.is_empty() { + self.post_failure("No pages in received fax").await; + anyhow::bail!("No pages in received fax"); + } + + let page_count = image_pages.len() as u32; + + if let Some(discord_msg_id) = self.receiving_message_id { + match self + .poster + .edit_fax_complete(discord_msg_id, image_pages, page_count, file_ext) + .await + { + Ok(()) => { + info!( + "Fax complete: {} pages posted to channel {} (call {})", + page_count, self.text_channel_id, self.call_id + ); + self.state = FaxState::Complete; + } + Err(e) => { + error!("Failed to post completed fax: {}", e); + self.state = FaxState::Failed(format!("Discord upload error: {}", e)); + return Err(e); + } + } + } else { + // If we never posted a "receiving" message (e.g., fast fax), post directly + // This shouldn't normally happen since we post receiving 
message early + warn!("Fax completed without a receiving message — posting directly"); + match self.poster.post_fax_receiving().await { + Ok(msg_id) => { + self.receiving_message_id = Some(msg_id); + self.poster + .edit_fax_complete(msg_id, image_pages, page_count, file_ext) + .await?; + self.state = FaxState::Complete; + } + Err(e) => { + error!("Failed to post fax: {}", e); + self.state = FaxState::Failed(format!("Discord error: {}", e)); + return Err(e); + } + } + } + + Ok(()) + } + + /// Switch from G.711 audio mode to T.38 UDPTL mode. + /// + /// Replaces the audio receiver with a T.38 receiver. The caller must: + /// 1. Stop feeding audio samples (remove fax audio port) + /// 2. Start the UDPTL processing tasks (rx, tx, timer) + pub fn switch_to_t38(&mut self, t38_receiver: FaxT38Receiver) { + debug!("Fax session {} switching from G.711 to T.38", self.call_id); + self.source = FaxSource::T38Udptl; + self.receiver = FaxReceiverKind::T38(t38_receiver); + } + + /// Generate transmit audio from SpanDSP (CED tones, T.30 signaling). + /// + /// Only works in Audio mode — T.38 uses IFP packets, not audio. + /// `out_buf` should be 320 samples (20ms at 16kHz). + /// Returns the number of 16kHz samples written. + pub fn generate_tx_16k(&mut self, out_buf: &mut [i16]) -> usize { + match &mut self.receiver { + FaxReceiverKind::Audio(r) => r.generate_tx_16k(out_buf), + FaxReceiverKind::T38(_) => 0, + } + } + + /// Get the total audio duration received so far (for debugging). + /// Returns 0 in T.38 mode (no audio samples). + pub fn audio_duration_secs(&self) -> f64 { + match &self.receiver { + FaxReceiverKind::Audio(r) => r.audio_duration_secs(), + FaxReceiverKind::T38(_) => 0.0, + } + } +} + +impl Drop for FaxSession { + fn drop(&mut self) { + let status = match &self.state { + FaxState::WaitingForData => "waiting_for_data", + FaxState::Receiving { .. 
} => "receiving", + FaxState::Received => "received", + FaxState::Complete => "complete", + FaxState::Failed(reason) => { + debug!("Fax failure reason: {}", reason); + "failed" + } + }; + debug!( + "FaxSession dropped: call={}, channel={}, guild={}, user={}, status={}, duration={:.1}s, audio={:.1}s", + self.call_id, + self.text_channel_id, + self.guild_id, + self.user_id, + status, + self.created_at.elapsed().as_secs_f64(), + self.audio_duration_secs() + ); + if let Err(e) = std::fs::remove_dir_all(&self.tiff_dir) { + debug!( + "Failed to clean up fax temp dir {}: {}", + self.tiff_dir.display(), + e + ); + } else { + debug!("Cleaned up fax temp dir: {}", self.tiff_dir.display()); + } + } +} + +// Pure state transition logic (extracted for testability) + +/// Apply a FaxRxStatus to a FaxState, returning whether the fax is complete. +/// This is the core state transition logic used by `FaxSession::handle_rx_status`. +fn apply_rx_status(state: &mut FaxState, status: FaxRxStatus, page_count: u32) -> bool { + match status { + FaxRxStatus::InProgress => { + if let FaxState::Receiving { pages_received, .. 
} = state { + *pages_received = page_count; + } + false + } + FaxRxStatus::Complete => { + *state = FaxState::Received; + true + } + FaxRxStatus::Error(msg) => { + *state = FaxState::Failed(msg); + false + } + } +} + +// Output format + +#[derive(Debug, Clone, Copy)] +enum OutputFormat { + Png, + Jpeg, +} + +impl OutputFormat { + fn image_format(self) -> image::ImageFormat { + match self { + OutputFormat::Png => image::ImageFormat::Png, + OutputFormat::Jpeg => image::ImageFormat::Jpeg, + } + } + + fn label(self) -> &'static str { + match self { + OutputFormat::Png => "PNG", + OutputFormat::Jpeg => "JPEG", + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // Helper: check if a FaxState is_finished (mirrors FaxSession::is_finished logic) + fn state_is_finished(state: &FaxState) -> bool { + matches!( + state, + FaxState::Received | FaxState::Complete | FaxState::Failed(_) + ) + } + + // is_finished tests + + #[test] + fn is_finished_waiting_for_data() { + assert!(!state_is_finished(&FaxState::WaitingForData)); + } + + #[test] + fn is_finished_receiving() { + assert!(!state_is_finished(&FaxState::Receiving { + pages_received: 0 + })); + } + + #[test] + fn is_finished_received() { + assert!(state_is_finished(&FaxState::Received)); + } + + #[test] + fn is_finished_complete() { + assert!(state_is_finished(&FaxState::Complete)); + } + + #[test] + fn is_finished_failed() { + assert!(state_is_finished(&FaxState::Failed("err".to_string()))); + } + + // is_timed_out tests + + #[test] + fn is_timed_out_fresh() { + // A fresh Instant should not be timed out + let created_at = Instant::now(); + let elapsed = created_at.elapsed().as_secs(); + assert!(elapsed <= FAX_TIMEOUT_SECS); + } + + #[test] + fn is_timed_out_old() { + // An instant created FAX_TIMEOUT_SECS+1 ago should be timed out + let created_at = Instant::now() - std::time::Duration::from_secs(FAX_TIMEOUT_SECS + 1); + assert!(created_at.elapsed().as_secs() > FAX_TIMEOUT_SECS); + } + + // apply_rx_status tests + 
    // apply_rx_status drives the FaxState machine from SpanDSP rx callbacks;
    // these tests pin its transitions, including that InProgress never moves
    // a terminal state (Received / Failed) backwards.

    #[test]
    fn apply_rx_status_in_progress_on_waiting() {
        // InProgress with no pages leaves WaitingForData untouched.
        let mut state = FaxState::WaitingForData;
        let result = apply_rx_status(&mut state, FaxRxStatus::InProgress, 0);
        assert!(!result);
        assert!(matches!(state, FaxState::WaitingForData));
    }

    #[test]
    fn apply_rx_status_in_progress_on_receiving_updates_pages() {
        // InProgress on Receiving copies the page count into the state.
        let mut state = FaxState::Receiving { pages_received: 0 };
        let result = apply_rx_status(&mut state, FaxRxStatus::InProgress, 3);
        assert!(!result);
        match state {
            FaxState::Receiving { pages_received } => assert_eq!(pages_received, 3),
            _ => panic!("Expected Receiving state"),
        }
    }

    #[test]
    fn apply_rx_status_complete_transitions_to_received() {
        // Complete is the only status whose return value signals "done".
        let mut state = FaxState::Receiving { pages_received: 1 };
        let result = apply_rx_status(&mut state, FaxRxStatus::Complete, 1);
        assert!(result);
        assert!(matches!(state, FaxState::Received));
    }

    #[test]
    fn apply_rx_status_error_transitions_to_failed() {
        // Error carries its message into FaxState::Failed.
        let mut state = FaxState::WaitingForData;
        let result = apply_rx_status(&mut state, FaxRxStatus::Error("timeout".to_string()), 0);
        assert!(!result);
        match state {
            FaxState::Failed(msg) => assert_eq!(msg, "timeout"),
            _ => panic!("Expected Failed state"),
        }
    }

    #[test]
    fn apply_rx_status_idempotent_on_terminal_complete() {
        // Once in Received, InProgress should not change the state
        let mut state = FaxState::Received;
        let result = apply_rx_status(&mut state, FaxRxStatus::InProgress, 0);
        assert!(!result);
        assert!(matches!(state, FaxState::Received));
    }

    #[test]
    fn apply_rx_status_idempotent_on_terminal_failed() {
        // A Failed state keeps its original reason through later InProgress ticks.
        let mut state = FaxState::Failed("original".to_string());
        let result = apply_rx_status(&mut state, FaxRxStatus::InProgress, 0);
        assert!(!result);
        match state {
            FaxState::Failed(msg) => assert_eq!(msg, "original"),
            _ => panic!("Expected Failed state"),
        }
    }
}
diff --git a/sipcord-bridge/src/fax/spandsp.rs new file mode 100644 index
0000000..cb2c173 --- /dev/null +++ b/sipcord-bridge/src/fax/spandsp.rs @@ -0,0 +1,672 @@ +//! SpanDSP wrapper for fax demodulation. +//! +//! Uses the `spandsp` safe wrapper crate to decode G.711 audio into TIFF images. +//! Audio arrives at 16kHz from PJSUA conference bridge; we downsample to 8kHz for SpanDSP. + +use anyhow::{Context, Result}; +use spandsp::fax::FaxState; +use spandsp::logging::{LogLevel, LogShowFlags}; +use spandsp::spandsp_sys; +use spandsp::t30::T30ModemSupport; +use spandsp::t38_terminal::T38Terminal; +use std::path::{Path, PathBuf}; +use tokio::sync::mpsc; +use tracing::{debug, error, info, trace, warn}; + +// T.4 image compression types (bitmask for t30_set_supported_compressions) +const T4_COMPRESSION_T4_1D: i32 = spandsp_sys::t4_image_compression_t_T4_COMPRESSION_T4_1D as i32; +const T4_COMPRESSION_T4_2D: i32 = spandsp_sys::t4_image_compression_t_T4_COMPRESSION_T4_2D as i32; +const T4_COMPRESSION_T6: i32 = spandsp_sys::t4_image_compression_t_T4_COMPRESSION_T6 as i32; + +// T.4 supported image widths (bitmask for t30_set_supported_image_sizes) +// These are #defines in the C header that bindgen doesn't capture as constants. 
// Values from spandsp/t4_rx.h: T4_SUPPORT_WIDTH_215MM=0x01, 255MM=0x02, 303MM=0x04
const T4_SUPPORT_WIDTH_215MM: i32 = 0x01;
const T4_SUPPORT_WIDTH_255MM: i32 = 0x02;
const T4_SUPPORT_WIDTH_303MM: i32 = 0x04;

// T.4 supported resolutions (bitmask, OR'd into the same sizes parameter)
// Values from spandsp/t4_rx.h
const T4_RESOLUTION_R8_STANDARD: i32 = 0x01; // 204×98 DPI
const T4_RESOLUTION_R8_FINE: i32 = 0x02; // 204×196 DPI
const T4_RESOLUTION_R8_SUPERFINE: i32 = 0x04; // 204×391 DPI
const T4_RESOLUTION_200_200: i32 = 0x40; // 200×200 DPI

/// Status returned after processing audio
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FaxRxStatus {
    /// Still processing, no state change
    InProgress,
    /// Fax reception completed successfully
    Complete,
    /// Error during reception
    Error(String),
}

/// Callbacks from SpanDSP via the T.30 phase handlers.
/// These track progress but don't drive control flow — FaxSession checks
/// the receiver's state after each feed_samples() call.
///
/// Mutated from C callback context (phase_b/d/e handlers receive a raw
/// pointer to this struct as user_data), read from Rust via
/// `check_completion` and the receivers' accessors.
struct FaxCallbackState {
    /// Whether phase B (negotiation) has been entered
    negotiation_started: bool,
    /// Number of pages received (phase D count)
    pages_received: u32,
    /// Final completion code from phase E (-1 = not yet completed)
    completion_code: i32,
    /// Whether phase E (completion) has fired
    completed: bool,
}

/// Summary statistics from a fax reception.
///
/// Populated by `get_fax_stats` from SpanDSP's T.30 transfer statistics;
/// used for logging only.
#[derive(Debug)]
pub struct FaxStats {
    /// Negotiated modem bit rate (as reported by T.30 statistics).
    pub bit_rate: i32,
    /// Number of pages received.
    pub pages_rx: i32,
    /// Image width of the received page(s), in pixels per T.30 stats.
    pub image_width: i32,
    /// Image length of the received page(s), per T.30 stats.
    pub image_length: i32,
    /// Count of rows flagged bad by the decoder.
    pub bad_rows: i32,
    /// True when error-correcting mode (ECM) was active for the transfer.
    pub ecm: bool,
}

// Shared helpers

/// Configure T.30 session parameters, set output TIFF, and register phase handlers.
fn configure_t30(
    t30: &spandsp::t30::T30State,
    tiff_path: &str,
    callback_state: &mut FaxCallbackState,
) -> Result<()> {
    // -1 — presumably "accept any page count" per t30_set_rx_file; TODO confirm
    // against the SpanDSP t30 API docs.
    t30.set_rx_file(tiff_path, -1)
        .map_err(|e| anyhow::anyhow!("Failed to set rx file: {}", e))?;

    t30.set_supported_modems(T30ModemSupport::default())
        .map_err(|e| anyhow::anyhow!("Failed to set supported modems: {}", e))?;

    // Advertise error-correcting mode so senders can use ECM.
    t30.set_ecm_capability(true)
        .map_err(|e| anyhow::anyhow!("Failed to set ECM: {}", e))?;

    // Accept all three T.4 compression families (MH / MR / MMR).
    let compressions = T4_COMPRESSION_T4_1D | T4_COMPRESSION_T4_2D | T4_COMPRESSION_T6;
    t30.set_supported_compressions(compressions)
        .map_err(|e| anyhow::anyhow!("Failed to set compressions: {}", e))?;

    // Widths and resolutions share one bitmask parameter (see the constants above).
    let sizes = T4_SUPPORT_WIDTH_215MM | T4_SUPPORT_WIDTH_255MM | T4_SUPPORT_WIDTH_303MM
        | T4_RESOLUTION_R8_STANDARD | T4_RESOLUTION_R8_FINE | T4_RESOLUTION_R8_SUPERFINE
        | T4_RESOLUTION_200_200;
    t30.set_supported_image_sizes(sizes)
        .map_err(|e| anyhow::anyhow!("Failed to set image sizes: {}", e))?;

    let user_data = callback_state as *mut FaxCallbackState as *mut std::ffi::c_void;
    // SAFETY: the registered handlers treat user_data as *mut FaxCallbackState.
    // Callers keep the state boxed inside the receiver struct, so the pointer
    // must stay valid for the life of the T.30 session — NOTE(review): this
    // relies on the Box never being moved out while SpanDSP holds the pointer.
    unsafe {
        t30.set_phase_b_handler_raw(Some(phase_b_handler), user_data);
        t30.set_phase_d_handler_raw(Some(phase_d_handler), user_data);
        t30.set_phase_e_handler_raw(Some(phase_e_handler), user_data);
    }

    Ok(())
}

/// Configure a SpanDSP logging state to route messages to tracing.
///
/// No-op on a null pointer, so it is safe to call with whatever the
/// `*_get_logging_state` accessors return.
unsafe fn configure_log_state(log_state: *mut spandsp_sys::logging_state_t) {
    if log_state.is_null() {
        return;
    }
    // Log up to Flow level, tagged so messages identify their source.
    let log_level = LogLevel::Flow as i32 | LogShowFlags::TAG.bits();
    spandsp_sys::span_log_set_level(log_state, log_level);
    spandsp_sys::span_log_set_message_handler(
        log_state,
        Some(spandsp_log_handler),
        std::ptr::null_mut(),
    );
}

/// Check fax reception completion status from callback state.
+fn check_completion(state: &FaxCallbackState) -> FaxRxStatus { + if state.completed { + match spandsp::t30::T30State::completion_code(state.completion_code) { + Some(err) if err.is_ok() => FaxRxStatus::Complete, + Some(err) => FaxRxStatus::Error(format!("Fax failed: {}", err)), + None => FaxRxStatus::Error(format!( + "Fax failed with unknown T.30 error code {}", + state.completion_code + )), + } + } else { + FaxRxStatus::InProgress + } +} + +/// Extract transfer statistics from a T.30 state. +fn get_fax_stats(t30: &spandsp::t30::T30State) -> FaxStats { + let stats = t30.get_transfer_statistics(); + FaxStats { + bit_rate: stats.bit_rate, + pages_rx: stats.pages_rx, + image_width: stats.image_width, + image_length: stats.image_length, + bad_rows: stats.bad_rows, + ecm: stats.error_correcting_mode != 0, + } +} + +// Pure resampling helpers (extracted for testability) + +/// Downsample 16kHz→8kHz by averaging consecutive pairs. +/// Appends `samples` to `buf` (accumulator), drains pairs, returns 8kHz samples. +/// Leftover odd samples remain in `buf` for the next call. +fn downsample_16k_to_8k(buf: &mut Vec, samples: &[i16]) -> Vec { + buf.extend_from_slice(samples); + let pairs = buf.len() / 2; + if pairs == 0 { + return Vec::new(); + } + let mut out = Vec::with_capacity(pairs); + for i in 0..pairs { + let a = buf[i * 2] as i32; + let b = buf[i * 2 + 1] as i32; + out.push(((a + b) / 2) as i16); + } + let consumed = pairs * 2; + buf.drain(..consumed); + out +} + +/// Upsample 8kHz→16kHz by duplicating each sample. +/// Writes to `out`, returns number of 16kHz samples written (= input_len * 2). +fn upsample_8k_to_16k(samples_8k: &[i16], out: &mut [i16]) -> usize { + for (i, &s) in samples_8k.iter().enumerate() { + out[i * 2] = s; + out[i * 2 + 1] = s; + } + samples_8k.len() * 2 +} + +// Audio-based fax receiver + +/// SpanDSP fax receiver — wraps `FaxState` for receiving faxes from audio. 
pub struct FaxReceiver {
    // SpanDSP fax modem state; owns the T.30 session.
    fax: FaxState,
    // Where SpanDSP writes the received TIFF (set via configure_t30).
    tiff_path: PathBuf,
    // Boxed so the raw pointer handed to SpanDSP's phase handlers in
    // configure_t30 has a stable heap address for the receiver's lifetime.
    callback_state: Box<FaxCallbackState>,
    /// Downsampling buffer: accumulates 16kHz samples, emits 8kHz
    downsample_buf: Vec<i16>,
    /// Total 8kHz samples fed to SpanDSP
    samples_fed: usize,
}

// FaxState is Send (via unsafe impl in the spandsp crate).
// Box and Vec are Send.
// We ensure exclusive access via tokio::sync::Mutex in FaxSession.
// SAFETY: relies on the above — no &self aliasing across threads without the mutex.
unsafe impl Send for FaxReceiver {}

impl FaxReceiver {
    /// Create a new fax receiver in audio mode.
    ///
    /// Initializes SpanDSP in receive mode and sets the output TIFF path.
    ///
    /// # Errors
    /// Fails if `tiff_path` is not valid UTF-8, or if any SpanDSP
    /// initialization/configuration call fails.
    pub fn new_audio_receiver(tiff_path: &Path) -> Result<Self> {
        let tiff_path_str = tiff_path.to_str().context("Invalid TIFF path")?;

        // false — receive side (not the calling/transmitting terminal).
        let fax = FaxState::new(false)
            .map_err(|e| anyhow::anyhow!("Failed to initialize SpanDSP fax state: {}", e))?;

        let t30 = fax
            .get_t30_state()
            .map_err(|e| anyhow::anyhow!("Failed to get T.30 state: {}", e))?;

        let mut callback_state = Box::new(FaxCallbackState {
            negotiation_started: false,
            pages_received: 0,
            completion_code: -1,
            completed: false,
        });

        // Registers phase handlers with a raw pointer into callback_state;
        // the Box is moved into Self below so the pointer stays valid.
        configure_t30(&t30, tiff_path_str, &mut callback_state)?;

        // Route SpanDSP log messages to tracing.
        // We use raw spandsp_sys functions since LoggingState doesn't
        // support borrowed pointers from parent objects safely yet.
        // SAFETY: fax/t30 are live; configure_log_state tolerates null.
        unsafe {
            configure_log_state(spandsp_sys::fax_get_logging_state(fax.as_ptr()));
            configure_log_state(spandsp_sys::t30_get_logging_state(t30.as_ptr()));
        }

        debug!(
            "SpanDSP fax receiver initialized, output: {}",
            tiff_path.display()
        );

        Ok(Self {
            fax,
            tiff_path: tiff_path.to_path_buf(),
            callback_state,
            downsample_buf: Vec::with_capacity(640), // 2 frames worth
            samples_fed: 0,
        })
    }

    /// Feed 16kHz mono i16 audio samples (from PJSUA conference bridge).
    ///
    /// Downsamples to 8kHz and passes to SpanDSP's `fax_rx()`.
    /// Returns the current reception status.
    pub fn feed_samples_16k(&mut self, samples: &[i16]) -> FaxRxStatus {
        let mut downsampled = downsample_16k_to_8k(&mut self.downsample_buf, samples);
        if downsampled.is_empty() {
            // Not enough accumulated input for a full 8kHz sample yet.
            return self.current_status();
        }
        self.feed_samples_8k(&mut downsampled)
    }

    /// Feed 8kHz mono i16 audio samples directly to SpanDSP.
    fn feed_samples_8k(&mut self, samples: &mut [i16]) -> FaxRxStatus {
        if samples.is_empty() {
            return self.current_status();
        }

        // Return value intentionally ignored; progress is observed through
        // the phase-handler callback state instead.
        let _result = self.fax.rx(samples);
        self.samples_fed += samples.len();

        if self.samples_fed.is_multiple_of(80000) {
            // Log every 10 seconds of audio
            trace!("SpanDSP fed {}s of audio", self.samples_fed as f64 / 8000.0,);
        }

        self.current_status()
    }

    /// Check the current status based on callback state.
    fn current_status(&self) -> FaxRxStatus {
        check_completion(&self.callback_state)
    }

    /// Number of pages received so far.
    pub fn pages_received(&self) -> u32 {
        self.callback_state.pages_received
    }

    /// Get the output TIFF file path.
    pub fn tiff_output_path(&self) -> &Path {
        &self.tiff_path
    }

    /// Generate transmit audio from SpanDSP (CED tones, T.30 signaling).
    ///
    /// SpanDSP generates at 8kHz; we upsample to 16kHz for the conference bridge.
    /// `out_buf` must be large enough for 16kHz samples (e.g., 320 for 20ms).
    /// Returns the number of 16kHz samples written.
    pub fn generate_tx_16k(&mut self, out_buf: &mut [i16]) -> usize {
        // Generate at most half of out_buf's capacity in 8kHz samples so the
        // 2× upsample below cannot overflow out_buf.
        let max_8k_samples = out_buf.len() / 2;
        let mut buf_8k = vec![0i16; max_8k_samples];
        let generated = self.fax.tx(&mut buf_8k);
        if generated == 0 {
            return 0;
        }
        upsample_8k_to_16k(&buf_8k[..generated], out_buf)
    }

    /// Total seconds of audio fed (at 8kHz).
    pub fn audio_duration_secs(&self) -> f64 {
        self.samples_fed as f64 / 8000.0
    }

    /// Get transfer statistics from SpanDSP (for logging).
    pub fn get_stats(&self) -> Option<FaxStats> {
        let t30 = self.fax.get_t30_state().ok()?;
        Some(get_fax_stats(&t30))
    }
}

// T.38 IFP-based receiver (UDPTL mode)

/// State passed to the T.38 TX packet handler callback.
/// When SpanDSP wants to send an IFP packet, we push it into the mpsc channel.
struct TxCallbackState {
    sender: mpsc::UnboundedSender<Vec<u8>>,
}

/// SpanDSP fax receiver using T.38 IFP packets (via T38Terminal).
///
/// Instead of demodulating audio, this receives IFP packets from the UDPTL
/// socket and feeds them to SpanDSP's T38Terminal, which handles the T.30
/// protocol directly over T.38.
pub struct FaxT38Receiver {
    terminal: T38Terminal,
    tiff_path: PathBuf,
    // Boxed for a stable address; SpanDSP phase handlers hold a raw pointer to it.
    callback_state: Box<FaxCallbackState>,
    // Kept alive (never read from Rust) because tx_packet_handler holds a raw
    // pointer to it for the terminal's lifetime.
    _tx_callback_state: Box<TxCallbackState>,
}

// T38Terminal is Send (via unsafe impl in spandsp-rs crate).
// Box and Box are Send.
// We ensure exclusive access via tokio::sync::Mutex in FaxSession.
// SAFETY: same reasoning as FaxReceiver's Send impl above.
unsafe impl Send for FaxT38Receiver {}

impl FaxT38Receiver {
    /// Create a new T.38 fax receiver.
    ///
    /// `tiff_path`: Where to write the received fax TIFF file.
    /// `tx_ifp_sender`: Channel for outgoing IFP packets (sent to UDPTL socket).
    ///
    /// # Errors
    /// Fails if `tiff_path` is not valid UTF-8 or any SpanDSP setup call fails.
    pub fn new(tiff_path: &Path, tx_ifp_sender: mpsc::UnboundedSender<Vec<u8>>) -> Result<Self> {
        let tiff_path_str = tiff_path.to_str().context("Invalid TIFF path")?;

        let tx_callback_state = Box::new(TxCallbackState {
            sender: tx_ifp_sender,
        });
        let tx_user_data = &*tx_callback_state as *const TxCallbackState as *mut std::ffi::c_void;

        // SAFETY: tx_user_data points into tx_callback_state, which is stored
        // in Self below and thus outlives the terminal.
        let terminal = unsafe {
            T38Terminal::new_raw(false, Some(tx_packet_handler), tx_user_data)
                .map_err(|e| anyhow::anyhow!("Failed to initialize T38Terminal: {}", e))?
        };

        let t30 = terminal
            .get_t30_state()
            .map_err(|e| anyhow::anyhow!("Failed to get T.30 state from T38Terminal: {}", e))?;

        let mut callback_state = Box::new(FaxCallbackState {
            negotiation_started: false,
            pages_received: 0,
            completion_code: -1,
            completed: false,
        });

        configure_t30(&t30, tiff_path_str, &mut callback_state)?;

        // SAFETY: terminal/t30/t38_core are live; configure_log_state tolerates null.
        unsafe {
            configure_log_state(spandsp_sys::t38_terminal_get_logging_state(
                terminal.as_ptr(),
            ));
            configure_log_state(spandsp_sys::t30_get_logging_state(t30.as_ptr()));

            let t38_core = terminal
                .get_t38_core_state()
                .map_err(|e| anyhow::anyhow!("Failed to get T38Core: {}", e))?;
            configure_log_state(spandsp_sys::t38_core_get_logging_state(t38_core.as_ptr()));
        }

        debug!(
            "T.38 fax receiver initialized, output: {}",
            tiff_path.display()
        );

        Ok(Self {
            terminal,
            tiff_path: tiff_path.to_path_buf(),
            callback_state,
            _tx_callback_state: tx_callback_state,
        })
    }

    /// Feed a received IFP packet from the UDPTL socket to SpanDSP.
    pub fn feed_ifp_packet(&self, data: &[u8], seq: u16) -> FaxRxStatus {
        let t38_core = match self.terminal.get_t38_core_state() {
            Ok(core) => core,
            Err(e) => {
                error!("Failed to get T38Core for rx: {}", e);
                return FaxRxStatus::Error(format!("T38Core error: {}", e));
            }
        };

        if let Err(e) = t38_core.rx_ifp_packet(data, seq) {
            warn!("T38Core rx_ifp_packet error: {} (seq={})", e, seq);
            // Don't return error — packet loss is expected in UDPTL
        }

        self.current_status()
    }

    /// Drive the T.38 terminal's timer. Call every 20ms (160 samples at 8kHz).
    ///
    /// This advances the T.30 state machine. Returns the current reception status.
    pub fn drive_timer(&self) -> FaxRxStatus {
        // 160 samples = 20ms at 8kHz
        let _result = self.terminal.send_timeout(160);
        self.current_status()
    }

    /// Check current status based on T.30 callback state.
    fn current_status(&self) -> FaxRxStatus {
        check_completion(&self.callback_state)
    }

    /// Number of pages received so far.
    pub fn pages_received(&self) -> u32 {
        self.callback_state.pages_received
    }

    /// Get the output TIFF file path.
    pub fn tiff_output_path(&self) -> &Path {
        &self.tiff_path
    }

    /// Get transfer statistics from SpanDSP.
    pub fn get_stats(&self) -> Option<FaxStats> {
        let t30 = self.terminal.get_t30_state().ok()?;
        Some(get_fax_stats(&t30))
    }
}

// SpanDSP C callbacks

/// T.38 TX packet handler callback.
///
/// Called by SpanDSP when it wants to send an IFP packet to the remote endpoint.
/// We push the packet into an mpsc channel, which the UDPTL socket task reads from.
///
/// Signature matches `t38_tx_packet_handler_t`:
/// `fn(s: *mut t38_core_state_t, user_data: *mut c_void, buf: *const u8, len: i32, count: i32) -> i32`
unsafe extern "C" fn tx_packet_handler(
    _s: *mut spandsp_sys::t38_core_state_t,
    user_data: *mut std::ffi::c_void,
    buf: *const u8,
    len: i32,
    count: i32,
) -> i32 {
    if user_data.is_null() || buf.is_null() || len <= 0 {
        return -1;
    }
    // SAFETY: user_data was registered as a pointer to TxCallbackState in
    // FaxT38Receiver::new, and buf/len were validated non-null/positive above.
    let state = &*(user_data as *const TxCallbackState);
    let data = std::slice::from_raw_parts(buf, len as usize);
    debug!("SpanDSP TX IFP: {}B (count={})", len, count);
    // Send the packet `count` times as SpanDSP requests.
    // For indicator packets (CNG, CED, DIS), count is typically 3 — these
    // must be sent multiple times because early packets have no UDPTL
    // redundancy history for error recovery.
    let send_count = count.max(1) as usize;
    for _ in 0..send_count {
        match state.sender.send(data.to_vec()) {
            Ok(()) => {}
            Err(_) => {
                // Channel closed — UDPTL socket task has ended
                warn!("SpanDSP TX IFP channel closed");
                return -1;
            }
        }
    }
    0
}

/// Phase B handler: called when T.30 negotiation starts.
+unsafe extern "C" fn phase_b_handler(user_data: *mut std::ffi::c_void, result: i32) -> i32 { + if !user_data.is_null() { + let state = &mut *(user_data as *mut FaxCallbackState); + state.negotiation_started = true; + info!( + "SpanDSP phase B: fax negotiation started (result={})", + result + ); + } + 0 // T30_ERR_OK +} + +/// Phase D handler: called when a page is received. +unsafe extern "C" fn phase_d_handler(user_data: *mut std::ffi::c_void, result: i32) -> i32 { + if !user_data.is_null() { + let state = &mut *(user_data as *mut FaxCallbackState); + state.pages_received += 1; + info!( + "SpanDSP phase D: page {} received (result={})", + state.pages_received, result + ); + } + 0 // T30_ERR_OK +} + +/// Phase E handler: called when fax reception completes (success or failure). +unsafe extern "C" fn phase_e_handler(user_data: *mut std::ffi::c_void, completion_code: i32) { + if !user_data.is_null() { + let state = &mut *(user_data as *mut FaxCallbackState); + state.completion_code = completion_code; + state.completed = true; + + let reason = match spandsp::t30::T30State::completion_code(completion_code) { + Some(err) if err.is_ok() => "OK".to_string(), + Some(err) => format!("{}", err), + None => format!("unknown code {}", completion_code), + }; + + if completion_code == 0 { + info!( + "SpanDSP phase E: fax complete, {} pages received", + state.pages_received + ); + } else { + warn!( + "SpanDSP phase E: fax failed after {} pages — T.30 error {}: {}", + state.pages_received, completion_code, reason + ); + } + } +} + +/// SpanDSP log handler: routes SpanDSP log messages to tracing. 
unsafe extern "C" fn spandsp_log_handler(
    _user_data: *mut std::ffi::c_void,
    level: i32,
    text: *const std::ffi::c_char,
) {
    if text.is_null() {
        return;
    }
    // SAFETY: text was checked non-null; SpanDSP passes a NUL-terminated C string.
    let msg = std::ffi::CStr::from_ptr(text).to_string_lossy();
    let msg = msg.trim_end(); // SpanDSP messages often have trailing newlines

    // Map SpanDSP's numeric severity onto tracing levels; lower values are
    // more severe, so the guards are ordered most-severe first.
    match level {
        l if l <= LogLevel::Error as i32 => error!(target: "spandsp", "{}", msg),
        l if l <= LogLevel::Warning as i32 => warn!(target: "spandsp", "{}", msg),
        l if l <= LogLevel::Flow as i32 => debug!(target: "spandsp", "{}", msg),
        _ => trace!(target: "spandsp", "{}", msg),
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // check_completion tests

    #[test]
    fn check_completion_not_completed_returns_in_progress() {
        // Before phase E fires, status is always InProgress.
        let state = FaxCallbackState {
            negotiation_started: false,
            pages_received: 0,
            completion_code: -1,
            completed: false,
        };
        assert_eq!(check_completion(&state), FaxRxStatus::InProgress);
    }

    #[test]
    fn check_completion_completed_code_0_returns_complete() {
        // Code 0 (T30_ERR_OK) maps to Complete.
        let state = FaxCallbackState {
            negotiation_started: true,
            pages_received: 1,
            completion_code: 0,
            completed: true,
        };
        assert_eq!(check_completion(&state), FaxRxStatus::Complete);
    }

    #[test]
    fn check_completion_completed_bad_code_returns_error() {
        // A non-OK code yields an Error whose message mentions the failure.
        let state = FaxCallbackState {
            negotiation_started: true,
            pages_received: 0,
            completion_code: 42,
            completed: true,
        };
        match check_completion(&state) {
            FaxRxStatus::Error(msg) => assert!(
                msg.contains("42") || msg.contains("failed") || msg.contains("Fax"),
                "Error message should reference the code: {}",
                msg
            ),
            other => panic!("Expected Error, got {:?}", other),
        }
    }

    // downsample_16k_to_8k tests

    #[test]
    fn downsample_even_count() {
        // Ten 16kHz samples average pairwise into five 8kHz samples.
        let mut buf = Vec::new();
        let samples: Vec<i16> = vec![100, 200, 300, 400, 500, 600, 700, 800, 900, 1000];
        let out = downsample_16k_to_8k(&mut buf, &samples);
        assert_eq!(out.len(), 5);
        assert_eq!(out[0], 150); // (100+200)/2
        assert_eq!(out[1], 350); // (300+400)/2
        assert_eq!(out[2], 550);
        assert_eq!(out[3], 750);
        assert_eq!(out[4], 950);
        assert!(buf.is_empty());
    }

    #[test]
    fn downsample_odd_count_preserves_leftover() {
        // An unpaired trailing sample stays in the accumulator.
        let mut buf = Vec::new();
        let samples: Vec<i16> = vec![100, 200, 300];
        let out = downsample_16k_to_8k(&mut buf, &samples);
        assert_eq!(out.len(), 1);
        assert_eq!(out[0], 150);
        assert_eq!(buf.len(), 1);
        assert_eq!(buf[0], 300);
    }

    #[test]
    fn downsample_sequential_calls_bridge_accumulator() {
        let mut buf = Vec::new();
        // First call: 3 samples → 1 output, 1 leftover
        let out1 = downsample_16k_to_8k(&mut buf, &[10, 20, 30]);
        assert_eq!(out1, vec![15]);
        assert_eq!(buf, vec![30]);

        // Second call: leftover 30 + new [40, 50] = [30, 40, 50] → 1 output, 1 leftover
        let out2 = downsample_16k_to_8k(&mut buf, &[40, 50]);
        assert_eq!(out2, vec![35]); // (30+40)/2
        assert_eq!(buf, vec![50]);
    }

    #[test]
    fn downsample_single_sample_returns_empty() {
        // One sample cannot form a pair: no output, sample retained.
        let mut buf = Vec::new();
        let out = downsample_16k_to_8k(&mut buf, &[42]);
        assert!(out.is_empty());
        assert_eq!(buf, vec![42]);
    }

    // upsample_8k_to_16k tests

    #[test]
    fn upsample_basic() {
        // Each 8kHz sample is duplicated into two 16kHz samples.
        let input: Vec<i16> = vec![100, 200, 300, 400];
        let mut out = vec![0i16; 8];
        let written = upsample_8k_to_16k(&input, &mut out);
        assert_eq!(written, 8);
        assert_eq!(out, vec![100, 100, 200, 200, 300, 300, 400, 400]);
    }

    #[test]
    fn upsample_empty_input() {
        // Empty input writes nothing.
        let mut out = vec![0i16; 8];
        let written = upsample_8k_to_16k(&[], &mut out);
        assert_eq!(written, 0);
    }
}
diff --git a/sipcord-bridge/src/fax/tiff_decoder.rs new file mode 100644 index 0000000..a138c08 --- /dev/null +++ b/sipcord-bridge/src/fax/tiff_decoder.rs @@ -0,0 +1,1300 @@
//! Self-contained fax TIFF decoder.
//!
//! Handles CCITT Group 3 (1D + 2D) and Group 4 compressed TIFFs as written
//! by SpanDSP, including FillOrder=2 (LSB-first) and T4Options with 2D encoding.
+//! +//! Huffman table data derived from the ITU-T T.4 standard. +//! Bit-reading approach inspired by the `fax` crate (MIT licensed). + +use anyhow::{bail, Result}; +use image::GrayImage; +use std::path::Path; +use std::sync::OnceLock; +use tracing::debug; + +// Public API + +/// Maximum TIFF file size (50 MB). Well above any reasonable fax output from SpanDSP, +/// but prevents OOM from malformed files. +const MAX_TIFF_SIZE: u64 = 50 * 1024 * 1024; + +/// Decode all pages of a fax TIFF file into grayscale images. +pub fn decode_fax_tiff(path: &Path) -> Result> { + if !path.exists() { + bail!("TIFF file not found: {}", path.display()); + } + let file_size = std::fs::metadata(path)?.len(); + if file_size > MAX_TIFF_SIZE { + bail!( + "TIFF file too large: {} bytes (max {} bytes)", + file_size, + MAX_TIFF_SIZE + ); + } + let data = std::fs::read(path)?; + let pages = parse_tiff_ifds(&data)?; + let mut images = Vec::with_capacity(pages.len()); + + for (i, page) in pages.iter().enumerate() { + debug!( + "TIFF page {}: {}x{}, compression={}, fill_order={}, t4_options={}", + i + 1, + page.width, + page.height, + page.compression, + page.fill_order, + page.t4_options + ); + + let mut strip_data = Vec::new(); + for (off, len) in page.strip_offsets.iter().zip(&page.strip_byte_counts) { + let start = *off as usize; + let end = start + *len as usize; + if end > data.len() { + bail!( + "TIFF strip extends past file: offset={}, count={}, file_len={}", + off, + len, + data.len() + ); + } + strip_data.extend_from_slice(&data[start..end]); + } + + // FillOrder=2: reverse bits in every byte + if page.fill_order == 2 { + for b in strip_data.iter_mut() { + *b = BIT_REVERSE_LUT[*b as usize]; + } + } + + let transitions_per_line = match page.compression { + 3 => decode_group3(&strip_data, page.width, page.height, page.t4_options)?, + 4 => decode_group4(&strip_data, page.width, page.height)?, + other => bail!("Unsupported TIFF compression: {}", other), + }; + + let img = 
assemble_image( + &transitions_per_line, + page.width, + page.height, + page.photometric, + ); + + // Correct aspect ratio for non-square pixels (e.g., 204×98 DPI standard fax) + let img = correct_aspect_ratio(img, page.x_resolution, page.y_resolution); + images.push(img); + } + + Ok(images) +} + +// TIFF IFD Parser + +struct TiffPage { + width: u32, + height: u32, + compression: u32, + fill_order: u32, + t4_options: u32, + photometric: u32, + strip_offsets: Vec, + strip_byte_counts: Vec, + x_resolution: Option<(u32, u32)>, // numerator, denominator (RATIONAL) + y_resolution: Option<(u32, u32)>, // numerator, denominator (RATIONAL) +} + +fn parse_tiff_ifds(data: &[u8]) -> Result> { + if data.len() < 8 { + bail!("TIFF file too short"); + } + let le = match (data[0], data[1]) { + (0x49, 0x49) => true, + (0x4D, 0x4D) => false, + _ => bail!("Not a TIFF file"), + }; + let magic = read_u16(data, 2, le); + if magic != 42 { + bail!("Bad TIFF magic: {}", magic); + } + + let mut ifd_offset = read_u32(data, 4, le) as usize; + let mut pages = Vec::new(); + + while ifd_offset != 0 { + if ifd_offset + 2 > data.len() { + break; + } + let num_entries = read_u16(data, ifd_offset, le) as usize; + let mut width = 0u32; + let mut height = 0u32; + let mut compression = 1u32; + let mut fill_order = 1u32; + let mut t4_options = 0u32; + let mut photometric = 0u32; + let mut strip_offsets = Vec::new(); + let mut strip_byte_counts = Vec::new(); + let mut x_resolution: Option<(u32, u32)> = None; + let mut y_resolution: Option<(u32, u32)> = None; + + for i in 0..num_entries { + let entry_off = ifd_offset + 2 + i * 12; + if entry_off + 12 > data.len() { + break; + } + let tag = read_u16(data, entry_off, le); + let typ = read_u16(data, entry_off + 2, le); + let count = read_u32(data, entry_off + 4, le); + let val_off = entry_off + 8; + + match tag { + 256 => width = read_ifd_value(data, val_off, typ, le), + 257 => height = read_ifd_value(data, val_off, typ, le), + 259 => compression = 
read_ifd_value(data, val_off, typ, le), + 262 => photometric = read_ifd_value(data, val_off, typ, le), + 266 => fill_order = read_ifd_value(data, val_off, typ, le), + 273 => strip_offsets = read_ifd_array(data, val_off, typ, count, le), + 278 => { /* rows_per_strip — not needed */ } + 279 => strip_byte_counts = read_ifd_array(data, val_off, typ, count, le), + 282 => x_resolution = read_ifd_rational(data, val_off, le), + 283 => y_resolution = read_ifd_rational(data, val_off, le), + 292 => t4_options = read_ifd_value(data, val_off, typ, le), + _ => {} + } + } + + pages.push(TiffPage { + width, + height, + compression, + fill_order, + t4_options, + photometric, + strip_offsets, + strip_byte_counts, + x_resolution, + y_resolution, + }); + + let next_off_pos = ifd_offset + 2 + num_entries * 12; + if next_off_pos + 4 > data.len() { + break; + } + ifd_offset = read_u32(data, next_off_pos, le) as usize; + } + + if pages.is_empty() { + bail!("No IFDs found in TIFF"); + } + Ok(pages) +} + +fn read_u16(data: &[u8], off: usize, le: bool) -> u16 { + if le { + u16::from_le_bytes([data[off], data[off + 1]]) + } else { + u16::from_be_bytes([data[off], data[off + 1]]) + } +} + +fn read_u32(data: &[u8], off: usize, le: bool) -> u32 { + if le { + u32::from_le_bytes([data[off], data[off + 1], data[off + 2], data[off + 3]]) + } else { + u32::from_be_bytes([data[off], data[off + 1], data[off + 2], data[off + 3]]) + } +} + +fn read_ifd_value(data: &[u8], val_off: usize, typ: u16, le: bool) -> u32 { + match typ { + 1 | 6 => data[val_off] as u32, // BYTE / SBYTE + 3 | 8 => read_u16(data, val_off, le) as u32, // SHORT / SSHORT + 4 | 9 => read_u32(data, val_off, le), // LONG / SLONG + _ => read_u32(data, val_off, le), + } +} + +fn read_ifd_array(data: &[u8], val_off: usize, typ: u16, count: u32, le: bool) -> Vec { + let item_size = match typ { + 1 | 6 => 1, + 3 | 8 => 2, + 4 | 9 => 4, + _ => 4, + }; + let total_bytes = count as usize * item_size; + // If data fits in the 4-byte value field, 
read inline; otherwise follow the pointer
    let base = if total_bytes <= 4 {
        val_off
    } else {
        read_u32(data, val_off, le) as usize
    };

    let mut result = Vec::with_capacity(count as usize);
    for i in 0..count as usize {
        let off = base + i * item_size;
        // Out-of-range reads yield 0 rather than panicking.
        let v = match typ {
            1 | 6 => data.get(off).copied().unwrap_or(0) as u32,
            3 | 8 => {
                if off + 2 <= data.len() {
                    read_u16(data, off, le) as u32
                } else {
                    0
                }
            }
            _ => {
                if off + 4 <= data.len() {
                    read_u32(data, off, le)
                } else {
                    0
                }
            }
        };
        result.push(v);
    }
    result
}

/// Read a TIFF RATIONAL value (type=5): two u32s (numerator, denominator).
/// The 8-byte payload never fits in the 4-byte value field, so the entry
/// always stores an offset pointer to it.
/// Returns `None` on a truncated file or a zero denominator.
fn read_ifd_rational(data: &[u8], val_off: usize, le: bool) -> Option<(u32, u32)> {
    let offset = read_u32(data, val_off, le) as usize;
    if offset + 8 > data.len() {
        return None;
    }
    let numerator = read_u32(data, offset, le);
    let denominator = read_u32(data, offset + 4, le);
    // A zero denominator would make the resolution meaningless downstream.
    (denominator != 0).then_some((numerator, denominator))
}

// FillOrder=2 bit-reversal LUT

/// Per-byte bit-reversal table, built at compile time (const context, hence
/// the `while` loop — `for` is not allowed in const evaluation).
const BIT_REVERSE_LUT: [u8; 256] = {
    let mut lut = [0u8; 256];
    let mut i = 0u16;
    while i < 256 {
        let b = i as u8;
        lut[i as usize] = ((b & 0x80) >> 7)
            | ((b & 0x40) >> 5)
            | ((b & 0x20) >> 3)
            | ((b & 0x10) >> 1)
            | ((b & 0x08) << 1)
            | ((b & 0x04) << 3)
            | ((b & 0x02) << 5)
            | ((b & 0x01) << 7);
        i += 1;
    }
    lut
};

// Bit Reader

/// MSB-first bit reader over a byte slice.
/// `partial` holds up to 32 bits left-aligned; `valid` counts how many of
/// its top bits are meaningful; `pos` is the next byte to pull from `data`.
struct BitReader<'a> {
    data: &'a [u8],
    pos: usize,
    partial: u32,
    valid: u8,
}

impl<'a> BitReader<'a> {
    /// Create a reader primed with the first bytes of `data`.
    fn new(data: &'a [u8]) -> Self {
        let mut reader = BitReader {
            data,
            pos: 0,
            partial: 0,
            valid: 0,
        };
        reader.fill();
        reader
    }

    /// Top up `partial` from `data` until it holds more than 24 valid bits
    /// (guaranteeing peeks of up to 13 bits succeed) or input is exhausted.
    fn fill(&mut self) {
        while self.valid <= 24 && self.pos < self.data.len() {
            self.partial |= (self.data[self.pos] as u32) << (24 - self.valid);
            self.valid += 8;
            self.pos += 1;
        }
    }

    /// Look at the next `n` bits (MSB-aligned, returned in 
lower bits).
    /// Returns `None` if fewer than `n` bits remain.
    fn peek(&self, n: u8) -> Option<u16> {
        if self.valid >= n {
            Some((self.partial >> (32 - n)) as u16)
        } else {
            None
        }
    }

    /// Consume `n` bits and refill.
    /// Precondition: `n <= self.valid` (guaranteed by a successful `peek(n)`);
    /// violating it underflows `valid`.
    fn consume(&mut self, n: u8) {
        self.partial <<= n;
        self.valid -= n;
        self.fill();
    }

    /// Scan for EOL pattern: at least 11 zero bits followed by a 1 bit.
    /// Returns true if found, false if data exhausted.
    fn scan_for_eol(&mut self) -> bool {
        loop {
            match self.peek(12) {
                // 000000000001 — exactly the T.4 EOL code.
                Some(1) => {
                    self.consume(12);
                    return true;
                }
                // Not yet aligned on an EOL: slide the window by one bit.
                Some(_) => self.consume(1),
                None => {
                    // Fewer than 12 bits left; drain the tail one bit at a time.
                    if self.valid > 0 {
                        self.consume(1);
                    } else {
                        return false;
                    }
                }
            }
        }
    }
}

// Huffman Lookup Tables

/// Entry in a flat LUT: (decoded_value, bits_to_consume). 0xFFFF = invalid.
type LutEntry = (u16, u8);

const INVALID_ENTRY: LutEntry = (0xFFFF, 0);

/// White terminating + makeup codes from T.4 standard.
/// Format: (bit_pattern, bit_length, run_length)
const WHITE_CODES: &[(u16, u8, u16)] = &[
    (0b00110101, 8, 0),
    (0b000111, 6, 1),
    (0b0111, 4, 2),
    (0b1000, 4, 3),
    (0b1011, 4, 4),
    (0b1100, 4, 5),
    (0b1110, 4, 6),
    (0b1111, 4, 7),
    (0b10011, 5, 8),
    (0b10100, 5, 9),
    (0b00111, 5, 10),
    (0b01000, 5, 11),
    (0b001000, 6, 12),
    (0b000011, 6, 13),
    (0b110100, 6, 14),
    (0b110101, 6, 15),
    (0b101010, 6, 16),
    (0b101011, 6, 17),
    (0b0100111, 7, 18),
    (0b0001100, 7, 19),
    (0b0001000, 7, 20),
    (0b0010111, 7, 21),
    (0b0000011, 7, 22),
    (0b0000100, 7, 23),
    (0b0101000, 7, 24),
    (0b0101011, 7, 25),
    (0b0010011, 7, 26),
    (0b0100100, 7, 27),
    (0b0011000, 7, 28),
    (0b00000010, 8, 29),
    (0b00000011, 8, 30),
    (0b00011010, 8, 31),
    (0b00011011, 8, 32),
    (0b00010010, 8, 33),
    (0b00010011, 8, 34),
    (0b00010100, 8, 35),
    (0b00010101, 8, 36),
    (0b00010110, 8, 37),
    (0b00010111, 8, 38),
    (0b00101000, 8, 39),
    (0b00101001, 8, 40),
    (0b00101010, 8, 41),
    (0b00101011, 8, 42),
    (0b00101100, 8, 43),
    (0b00101101, 8, 44),
    
(0b00000100, 8, 45), + (0b00000101, 8, 46), + (0b00001010, 8, 47), + (0b00001011, 8, 48), + (0b01010010, 8, 49), + (0b01010011, 8, 50), + (0b01010100, 8, 51), + (0b01010101, 8, 52), + (0b00100100, 8, 53), + (0b00100101, 8, 54), + (0b01011000, 8, 55), + (0b01011001, 8, 56), + (0b01011010, 8, 57), + (0b01011011, 8, 58), + (0b01001010, 8, 59), + (0b01001011, 8, 60), + (0b00110010, 8, 61), + (0b00110011, 8, 62), + (0b00110100, 8, 63), + // Makeup codes + (0b11011, 5, 64), + (0b10010, 5, 128), + (0b010111, 6, 192), + (0b0110111, 7, 256), + (0b00110110, 8, 320), + (0b00110111, 8, 384), + (0b01100100, 8, 448), + (0b01100101, 8, 512), + (0b01101000, 8, 576), + (0b01100111, 8, 640), + (0b011001100, 9, 704), + (0b011001101, 9, 768), + (0b011010010, 9, 832), + (0b011010011, 9, 896), + (0b011010100, 9, 960), + (0b011010101, 9, 1024), + (0b011010110, 9, 1088), + (0b011010111, 9, 1152), + (0b011011000, 9, 1216), + (0b011011001, 9, 1280), + (0b011011010, 9, 1344), + (0b011011011, 9, 1408), + (0b010011000, 9, 1472), + (0b010011001, 9, 1536), + (0b010011010, 9, 1600), + (0b011000, 6, 1664), + (0b010011011, 9, 1728), + // Extended makeup (shared with black) + (0b00000001000, 11, 1792), + (0b00000001100, 11, 1856), + (0b00000001101, 11, 1920), + (0b000000010010, 12, 1984), + (0b000000010011, 12, 2048), + (0b000000010100, 12, 2112), + (0b000000010101, 12, 2176), + (0b000000010110, 12, 2240), + (0b000000010111, 12, 2304), + (0b000000011100, 12, 2368), + (0b000000011101, 12, 2432), + (0b000000011110, 12, 2496), + (0b000000011111, 12, 2560), +]; + +const BLACK_CODES: &[(u16, u8, u16)] = &[ + (0b0000110111, 10, 0), + (0b010, 3, 1), + (0b11, 2, 2), + (0b10, 2, 3), + (0b011, 3, 4), + (0b0011, 4, 5), + (0b0010, 4, 6), + (0b00011, 5, 7), + (0b000101, 6, 8), + (0b000100, 6, 9), + (0b0000100, 7, 10), + (0b0000101, 7, 11), + (0b0000111, 7, 12), + (0b00000100, 8, 13), + (0b00000111, 8, 14), + (0b000011000, 9, 15), + (0b0000010111, 10, 16), + (0b0000011000, 10, 17), + (0b0000001000, 10, 18), + 
(0b00001100111, 11, 19), + (0b00001101000, 11, 20), + (0b00001101100, 11, 21), + (0b00000110111, 11, 22), + (0b00000101000, 11, 23), + (0b00000010111, 11, 24), + (0b00000011000, 11, 25), + (0b000011001010, 12, 26), + (0b000011001011, 12, 27), + (0b000011001100, 12, 28), + (0b000011001101, 12, 29), + (0b000001101000, 12, 30), + (0b000001101001, 12, 31), + (0b000001101010, 12, 32), + (0b000001101011, 12, 33), + (0b000011010010, 12, 34), + (0b000011010011, 12, 35), + (0b000011010100, 12, 36), + (0b000011010101, 12, 37), + (0b000011010110, 12, 38), + (0b000011010111, 12, 39), + (0b000001101100, 12, 40), + (0b000001101101, 12, 41), + (0b000011011010, 12, 42), + (0b000011011011, 12, 43), + (0b000001010100, 12, 44), + (0b000001010101, 12, 45), + (0b000001010110, 12, 46), + (0b000001010111, 12, 47), + (0b000001100100, 12, 48), + (0b000001100101, 12, 49), + (0b000001010010, 12, 50), + (0b000001010011, 12, 51), + (0b000000100100, 12, 52), + (0b000000110111, 12, 53), + (0b000000111000, 12, 54), + (0b000000100111, 12, 55), + (0b000000101000, 12, 56), + (0b000001011000, 12, 57), + (0b000001011001, 12, 58), + (0b000000101011, 12, 59), + (0b000000101100, 12, 60), + (0b000001011010, 12, 61), + (0b000001100110, 12, 62), + (0b000001100111, 12, 63), + // Makeup codes + (0b0000001111, 10, 64), + (0b000011001000, 12, 128), + (0b000011001001, 12, 192), + (0b000001011011, 12, 256), + (0b000000110011, 12, 320), + (0b000000110100, 12, 384), + (0b000000110101, 12, 448), + (0b0000001101100, 13, 512), + (0b0000001101101, 13, 576), + (0b0000001001010, 13, 640), + (0b0000001001011, 13, 704), + (0b0000001001100, 13, 768), + (0b0000001001101, 13, 832), + (0b0000001110010, 13, 896), + (0b0000001110011, 13, 960), + (0b0000001110100, 13, 1024), + (0b0000001110101, 13, 1088), + (0b0000001110110, 13, 1152), + (0b0000001110111, 13, 1216), + (0b0000001010010, 13, 1280), + (0b0000001010011, 13, 1344), + (0b0000001010100, 13, 1408), + (0b0000001010101, 13, 1472), + (0b0000001011010, 13, 1536), + 
(0b0000001011011, 13, 1600), + (0b0000001100100, 13, 1664), + (0b0000001100101, 13, 1728), + // Extended makeup (shared with white) + (0b00000001000, 11, 1792), + (0b00000001100, 11, 1856), + (0b00000001101, 11, 1920), + (0b000000010010, 12, 1984), + (0b000000010011, 12, 2048), + (0b000000010100, 12, 2112), + (0b000000010101, 12, 2176), + (0b000000010110, 12, 2240), + (0b000000010111, 12, 2304), + (0b000000011100, 12, 2368), + (0b000000011101, 12, 2432), + (0b000000011110, 12, 2496), + (0b000000011111, 12, 2560), +]; + +/// 2D mode codes from T.4 standard. +/// Format: (bit_pattern, bit_length, Mode) +#[derive(Copy, Clone, Debug)] +enum Mode { + Pass, + Horizontal, + Vertical(i8), +} + +const MODE_CODES: &[(u16, u8, Mode)] = &[ + (0b0001, 4, Mode::Pass), + (0b001, 3, Mode::Horizontal), + (0b1, 1, Mode::Vertical(0)), + (0b011, 3, Mode::Vertical(1)), + (0b000011, 6, Mode::Vertical(2)), + (0b0000011, 7, Mode::Vertical(3)), + (0b010, 3, Mode::Vertical(-1)), + (0b000010, 6, Mode::Vertical(-2)), + (0b0000010, 7, Mode::Vertical(-3)), +]; + +const WHITE_LUT_BITS: u8 = 12; +const BLACK_LUT_BITS: u8 = 13; +const MODE_LUT_BITS: u8 = 7; + +fn white_lut() -> &'static [LutEntry] { + static LUT: OnceLock> = OnceLock::new(); + LUT.get_or_init(|| build_lut(WHITE_CODES, WHITE_LUT_BITS)) +} + +fn black_lut() -> &'static [LutEntry] { + static LUT: OnceLock> = OnceLock::new(); + LUT.get_or_init(|| build_lut(BLACK_CODES, BLACK_LUT_BITS)) +} + +fn mode_lut() -> &'static [(u8, Mode)] { + static LUT: OnceLock> = OnceLock::new(); + LUT.get_or_init(|| { + let size = 1usize << MODE_LUT_BITS; + let mut lut = vec![(0u8, Mode::Pass); size]; + // Mark all as invalid first (len=0) + for entry in lut.iter_mut() { + entry.0 = 0; + } + for &(pattern, len, mode) in MODE_CODES { + let shift = MODE_LUT_BITS - len; + let base = (pattern as usize) << shift; + for suffix in 0..(1usize << shift) { + lut[base | suffix] = (len, mode); + } + } + lut + }) +} + +fn build_lut(codes: &[(u16, u8, u16)], lut_bits: 
u8) -> Vec<LutEntry> {
    let size = 1usize << lut_bits;
    let mut lut = vec![INVALID_ENTRY; size];
    for &(pattern, len, value) in codes {
        if len > lut_bits {
            continue;
        }
        let shift = lut_bits - len;
        let base = (pattern as usize) << shift;
        // Every index sharing this prefix decodes to the same code.
        for suffix in 0..(1usize << shift) {
            lut[base | suffix] = (value, len);
        }
    }
    lut
}

// Huffman Decoders

#[derive(Copy, Clone, PartialEq)]
enum Color {
    White,
    Black,
}

impl Color {
    /// The opposite pen color.
    fn flip(self) -> Self {
        match self {
            Color::White => Color::Black,
            Color::Black => Color::White,
        }
    }
}

/// Decode a single run-length code (terminating or makeup) for `color`.
/// Returns `None` on an invalid code or exhausted input.
fn decode_run(reader: &mut BitReader, color: Color) -> Option<u16> {
    let (lut, lut_bits) = match color {
        Color::White => (white_lut(), WHITE_LUT_BITS),
        Color::Black => (black_lut(), BLACK_LUT_BITS),
    };
    let peeked = reader.peek(lut_bits)?;
    let (val, len) = lut[peeked as usize];
    if val == 0xFFFF {
        return None;
    }
    reader.consume(len);
    Some(val)
}

/// Decode a full run: sum makeup codes until a terminating code (< 64).
fn decode_full_run(reader: &mut BitReader, color: Color) -> Option<u16> {
    let mut total = 0u16;
    loop {
        let n = decode_run(reader, color)?;
        // Saturate instead of `+=`: a malformed/malicious stream can chain
        // makeup codes indefinitely, which would panic in debug builds or
        // silently wrap in release builds.
        total = total.saturating_add(n);
        if n < 64 {
            return Some(total);
        }
    }
}

/// Decode a 2D mode code. Returns `None` on an invalid code or exhausted input.
fn decode_mode(reader: &mut BitReader) -> Option<Mode> {
    let bits = reader.peek(MODE_LUT_BITS)?;
    let (len, mode) = mode_lut()[bits as usize];
    if len == 0 {
        return None;
    }
    reader.consume(len);
    Some(mode)
}

// Reference Line Helpers (for 2D decoding)

/// Find b1: the next transition on the reference line of the opposite color,
/// at or after position `a0`.
fn find_b1(reference: &[u16], a0: u16, current_color: Color, width: u16) -> u16 {
    // Reference transitions alternate white->black (index 0), black->white (index 1), ... 
+ // We need the first transition in reference that is > a0 and corresponds to the opposite color. + // Color at position 0 is White. Transition at index i flips to: + // i even -> Black (end of white run) + // i odd -> White (end of black run) + // The color AFTER transition[i] is: even=Black, odd=White + // We want opposite of current_color. + // b1 is the first changing element on the reference line to the right of a0 + // whose color is opposite to the current color on the coding line. + + let want_black_transition = current_color == Color::White; + // If want_black_transition, we want an even-indexed transition (white->black) + // If want_white_transition, we want an odd-indexed transition (black->white) + + for (i, &t) in reference.iter().enumerate() { + if t <= a0 { + continue; + } + let is_even = i % 2 == 0; + if is_even == want_black_transition { + return t; + } + } + width +} + +/// Find b2: the next transition after b1 on the reference line. +fn find_b2(reference: &[u16], b1: u16, width: u16) -> u16 { + for &t in reference { + if t > b1 { + return t; + } + } + width +} + +// 1D Line Decoder (Modified Huffman) + +fn decode_line_1d(reader: &mut BitReader, width: u16) -> Option> { + let mut transitions = Vec::new(); + let mut a0 = 0u16; + let mut color = Color::White; + + while a0 < width { + let run = decode_full_run(reader, color)?; + a0 += run; + if a0 < width { + transitions.push(a0); + } + color = color.flip(); + } + Some(transitions) +} + +// 2D Line Decoder (Modified READ) + +fn decode_line_2d(reader: &mut BitReader, reference: &[u16], width: u16) -> Option> { + let mut transitions = Vec::new(); + let mut a0 = 0u16; + let mut color = Color::White; + + loop { + if a0 >= width { + break; + } + + let mode = decode_mode(reader)?; + match mode { + Mode::Pass => { + let b1 = find_b1(reference, a0, color, width); + let b2 = find_b2(reference, b1, width); + a0 = b2; + // Color doesn't change after pass + } + Mode::Vertical(delta) => { + let b1 = 
find_b1(reference, a0, color, width); + let a1 = (b1 as i32 + delta as i32).max(0) as u16; + if a1 >= width { + // Line ends + break; + } + transitions.push(a1); + a0 = a1; + color = color.flip(); + } + Mode::Horizontal => { + let run1 = decode_full_run(reader, color)?; + let run2 = decode_full_run(reader, color.flip())?; + let a1 = a0 + run1; + let a2 = a1 + run2; + transitions.push(a1); + if a2 >= width { + break; + } + transitions.push(a2); + a0 = a2; + // Color returns to original after horizontal + } + } + } + + Some(transitions) +} + +// Group 3 Image Driver + +fn decode_group3(data: &[u8], width: u32, height: u32, t4_options: u32) -> Result>> { + let w = width as u16; + let is_2d = (t4_options & 1) != 0; + let has_fill_bits = (t4_options & 4) != 0; + let mut reader = BitReader::new(data); + let mut lines: Vec> = Vec::with_capacity(height as usize); + let mut reference: Vec = Vec::new(); + + // Scan for the first EOL + if !reader.scan_for_eol() { + bail!("No EOL found at start of Group 3 data"); + } + + for _ in 0..height { + // After EOL, if 2D, read the tag bit: 1=1D, 0=2D + let use_2d = if is_2d { + match reader.peek(1) { + Some(tag) => { + reader.consume(1); + tag == 0 + } + None => break, + } + } else { + false + }; + + let line = if use_2d { + match decode_line_2d(&mut reader, &reference, w) { + Some(l) => l, + None => break, + } + } else { + match decode_line_1d(&mut reader, w) { + Some(l) => l, + None => break, + } + }; + + reference = line.clone(); + lines.push(line); + + // Skip fill bits (zero-pad to byte boundary before EOL) if enabled + if has_fill_bits { + // Consume zeros until we see the EOL pattern + loop { + match reader.peek(12) { + Some(0x001) => break, // Found EOL (000000000001) + Some(v) if (v >> 11) == 0 => reader.consume(1), // Leading zero + _ => break, + } + } + } + + // Try to read EOL + match reader.peek(12) { + Some(0x001) => { + reader.consume(12); + let mut consecutive_eols = 1u32; + + // Check for RTC (Return To Control): 6 
consecutive EOLs + // In 2D mode each EOL has a tag bit, so check EOL+tag sequences + loop { + if is_2d { + // Peek EOL (12 bits) + tag (1 bit) = 13 bits + match reader.peek(13) { + Some(v) if (v >> 1) == 0x001 => { + reader.consume(13); + consecutive_eols += 1; + if consecutive_eols >= 6 { + return Ok(lines); + } + } + _ => break, + } + } else { + match reader.peek(12) { + Some(0x001) => { + reader.consume(12); + consecutive_eols += 1; + if consecutive_eols >= 6 { + return Ok(lines); + } + } + _ => break, + } + } + } + } + _ => { + // No EOL found — might be end of data + } + } + } + + if lines.is_empty() { + bail!("Group 3 decoder produced no lines"); + } + Ok(lines) +} + +// Group 4 Image Driver + +fn decode_group4(data: &[u8], width: u32, height: u32) -> Result>> { + let w = width as u16; + let mut reader = BitReader::new(data); + let mut lines: Vec> = Vec::with_capacity(height as usize); + let mut reference: Vec = Vec::new(); + + for _ in 0..height { + // Check for EOFB (End Of Facsimile Block): two consecutive EOL codes + if let Some(v) = reader.peek(12) { + if v == 0x001 { + // Possible EOFB — check for second EOL + break; + } + } + + let line = match decode_line_2d(&mut reader, &reference, w) { + Some(l) => l, + None => break, + }; + + reference = line.clone(); + lines.push(line); + } + + if lines.is_empty() { + bail!("Group 4 decoder produced no lines"); + } + Ok(lines) +} + +// Pixel Assembly + +/// Scale image to correct for non-square pixel aspect ratios. +/// +/// Fax standard resolution uses 204×98 DPI (non-square pixels). Without correction, +/// the image appears vertically compressed (stretched when rendered at 1:1). 
fn correct_aspect_ratio(
    img: GrayImage,
    x_res: Option<(u32, u32)>,
    y_res: Option<(u32, u32)>,
) -> GrayImage {
    // Without both resolution tags we cannot know the pixel shape; pass through.
    let (Some((x_num, x_den)), Some((y_num, y_den))) = (x_res, y_res) else {
        return img;
    };

    let x_dpi = x_num as f64 / x_den as f64;
    let y_dpi = y_num as f64 / y_den as f64;
    let ratio = x_dpi / y_dpi;

    // Skip scaling if pixels are approximately square (within 5%)
    if ratio > 0.95 && ratio < 1.05 {
        return img;
    }

    let (w, h) = img.dimensions();
    if ratio > 1.0 {
        // X resolution higher than Y — scale height up to match
        let new_height = (h as f64 * ratio).round() as u32;
        debug!(
            "Correcting fax aspect ratio: {:.0}×{:.0} DPI, scaling {}×{} → {}×{}",
            x_dpi, y_dpi, w, h, w, new_height
        );
        image::imageops::resize(&img, w, new_height, image::imageops::FilterType::Lanczos3)
    } else {
        // Y resolution higher than X — scale width up to match
        let new_width = (w as f64 / ratio).round() as u32;
        debug!(
            "Correcting fax aspect ratio: {:.0}×{:.0} DPI, scaling {}×{} → {}×{}",
            x_dpi, y_dpi, w, h, new_width, h
        );
        image::imageops::resize(&img, new_width, h, image::imageops::FilterType::Lanczos3)
    }
}

/// Render transition lists into a grayscale image.
fn assemble_image(lines: &[Vec<u16>], width: u32, height: u32, photometric: u32) -> GrayImage {
    // photometric 0 = WhiteIsZero (normal for fax: 0=white, 1=black)
    // photometric 1 = BlackIsZero
    let invert = photometric == 1;

    // Use however many lines actually decoded, capped at the declared height.
    let actual_height = lines.len().min(height as usize);
    let white = if invert { 0u8 } else { 255u8 };
    let mut img = GrayImage::from_pixel(width, actual_height as u32, image::Luma([white]));
    let w = width as usize;

    for (y, transitions) in lines.iter().enumerate().take(actual_height) {
        let row_start = y * w;
        let row = &mut img.as_mut()[row_start..row_start + w];
        let mut color = white;
        let mut x = 0usize;
        for &t in transitions {
            let t = (t as usize).min(w);
            if t > x {
                row[x..t].fill(color);
                x = t;
            }
            
color = if color == 255 { 0 } else { 255 }; + } + if x < w { + row[x..].fill(color); + } + } + + img +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Test against the example fax TIFF bundled in src/fax/. + /// This is a real SpanDSP-produced TIFF: compression=3, FillOrder=2, T4Options=5 (2D + fill bits). + #[test] + fn test_decode_example_tiff() { + let path = Path::new(env!("CARGO_MANIFEST_DIR")).join("src/fax/example.tiff"); + let images = decode_fax_tiff(&path).expect("Failed to decode example.tiff"); + + assert_eq!(images.len(), 1, "Expected 1 page"); + let img = &images[0]; + + // Width stays at standard fax width; height may be scaled by aspect ratio correction + assert_eq!(img.width(), 1728, "Standard fax width"); + // Original pixel height is 2199. If resolution tags indicate non-square pixels + // (e.g., 204×98 DPI), the image will be scaled up vertically. + assert!( + img.height() >= 2199, + "Height should be >= original 2199 (may be scaled for aspect ratio), got {}", + img.height() + ); + + // Spot-check: top-left area should be mostly white (header region) + let white_count: usize = (0..100) + .flat_map(|y| (0..100).map(move |x| (x, y))) + .filter(|&(x, y)| img.get_pixel(x, y).0[0] == 255) + .count(); + assert!( + white_count > 9000, + "Top-left 100x100 should be mostly white, got {} white pixels", + white_count + ); + + // There should be some black pixels (the fax has content) + let total_black: usize = img.pixels().filter(|p| p.0[0] == 0).count(); + assert!( + total_black > 1000, + "Image should contain black pixels (fax content), got {}", + total_black + ); + } + + /// Verify that resolution tags are parsed from example.tiff. 
+ #[test] + fn test_parse_resolution_tags() { + let path = Path::new(env!("CARGO_MANIFEST_DIR")).join("src/fax/example.tiff"); + let data = std::fs::read(&path).expect("Failed to read example.tiff"); + let pages = parse_tiff_ifds(&data).expect("Failed to parse IFDs"); + + assert_eq!(pages.len(), 1); + let page = &pages[0]; + + // SpanDSP writes resolution tags for fax TIFFs + assert!( + page.x_resolution.is_some(), + "Expected XResolution tag in example.tiff" + ); + assert!( + page.y_resolution.is_some(), + "Expected YResolution tag in example.tiff" + ); + + let (x_num, x_den) = page.x_resolution.unwrap(); + let (y_num, y_den) = page.y_resolution.unwrap(); + let x_dpi = x_num as f64 / x_den as f64; + let y_dpi = y_num as f64 / y_den as f64; + + // Standard fax resolutions: 204 or 200 DPI horizontal, 98 or 196 or 200 DPI vertical + assert!( + x_dpi > 100.0 && x_dpi < 300.0, + "XResolution {:.1} DPI out of expected range", + x_dpi + ); + assert!( + y_dpi > 50.0 && y_dpi < 400.0, + "YResolution {:.1} DPI out of expected range", + y_dpi + ); + } + + // BIT_REVERSE_LUT tests + + #[test] + fn bit_reverse_lut_spot_checks() { + assert_eq!(BIT_REVERSE_LUT[0x00], 0x00); + assert_eq!(BIT_REVERSE_LUT[0xFF], 0xFF); + assert_eq!(BIT_REVERSE_LUT[0x80], 0x01); + assert_eq!(BIT_REVERSE_LUT[0x01], 0x80); + assert_eq!(BIT_REVERSE_LUT[0xAA], 0x55); + assert_eq!(BIT_REVERSE_LUT[0x55], 0xAA); + } + + #[test] + fn bit_reverse_lut_double_reverse_is_identity() { + for i in 0..=255u8 { + assert_eq!( + BIT_REVERSE_LUT[BIT_REVERSE_LUT[i as usize] as usize], i, + "Double reverse should be identity for {}", + i + ); + } + } + + // BitReader tests + + #[test] + fn bit_reader_peek_and_consume() { + // 0xA5 = 10100101 + let data = [0xA5]; + let mut reader = BitReader::new(&data); + + // Peek first 4 bits: 1010 = 10 + assert_eq!(reader.peek(4), Some(0b1010)); + + // Consume 4, then peek next 4: 0101 = 5 + reader.consume(4); + assert_eq!(reader.peek(4), Some(0b0101)); + } + + #[test] + fn 
bit_reader_peek_more_than_available() { + let data = [0xFF]; // 8 bits + let reader = BitReader::new(&data); + // 8 bits available, asking for 9 should fail + assert!(reader.peek(9).is_none()); + } + + #[test] + fn bit_reader_scan_for_eol_found() { + // EOL = 000000000001 (11 zeros + 1) + // Byte-aligned: 0x00 0x01 = 00000000 00000001 + // That's 15 zeros then 1 — contains the 11+1 EOL pattern + let data = [0x00, 0x01]; + let mut reader = BitReader::new(&data); + assert!(reader.scan_for_eol()); + } + + #[test] + fn bit_reader_scan_for_eol_not_found() { + // All ones — no EOL pattern + let data = [0xFF, 0xFF]; + let mut reader = BitReader::new(&data); + assert!(!reader.scan_for_eol()); + } + + #[test] + fn bit_reader_scan_for_eol_empty() { + let data: [u8; 0] = []; + let mut reader = BitReader::new(&data); + assert!(!reader.scan_for_eol()); + } + + // assemble_image tests + + #[test] + fn assemble_image_white_is_zero_transitions() { + // photometric=0 (WhiteIsZero): white=255, black=0 + // Width 300, transitions at [100, 200]: white 0-99, black 100-199, white 200-299 + let lines = vec![vec![100u16, 200u16]]; + let img = assemble_image(&lines, 300, 1, 0); + + assert_eq!(img.width(), 300); + assert_eq!(img.height(), 1); + + // White region: 0-99 + assert_eq!(img.get_pixel(0, 0).0[0], 255); + assert_eq!(img.get_pixel(99, 0).0[0], 255); + // Black region: 100-199 + assert_eq!(img.get_pixel(100, 0).0[0], 0); + assert_eq!(img.get_pixel(199, 0).0[0], 0); + // White region: 200-299 + assert_eq!(img.get_pixel(200, 0).0[0], 255); + assert_eq!(img.get_pixel(299, 0).0[0], 255); + } + + #[test] + fn assemble_image_black_is_zero_inverted() { + // photometric=1 (BlackIsZero): white=0, black=255 + // Same transitions — colors should be inverted + let lines = vec![vec![100u16, 200u16]]; + let img = assemble_image(&lines, 300, 1, 1); + + // "White" region (value 0): 0-99 + assert_eq!(img.get_pixel(0, 0).0[0], 0); + assert_eq!(img.get_pixel(99, 0).0[0], 0); + // "Black" region (value 
255): 100-199 + assert_eq!(img.get_pixel(100, 0).0[0], 255); + assert_eq!(img.get_pixel(199, 0).0[0], 255); + // "White" region (value 0): 200-299 + assert_eq!(img.get_pixel(200, 0).0[0], 0); + assert_eq!(img.get_pixel(299, 0).0[0], 0); + } + + #[test] + fn assemble_image_empty_transitions_all_white() { + // No transitions = entire row is white + let lines = vec![vec![]]; + let img = assemble_image(&lines, 100, 1, 0); + + for x in 0..100 { + assert_eq!(img.get_pixel(x, 0).0[0], 255, "Pixel {} should be white", x); + } + } + + // decode_fax_tiff error cases + + #[test] + fn decode_fax_tiff_missing_file() { + let path = Path::new("/tmp/nonexistent_fax_test_file_12345.tiff"); + let result = decode_fax_tiff(path); + assert!(result.is_err()); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("not found"), + "Error should mention file not found: {}", + err + ); + } + + #[test] + fn test_assemble_image_uses_actual_height_not_declared() { + // Simulate a short fax: declared height is 100, but only 30 lines decoded + let width = 10u32; + let declared_height = 100u32; + let lines: Vec> = (0..30) + .map(|_| vec![5, 10]) // each line: 5 white pixels, 5 black pixels + .collect(); + + let img = assemble_image(&lines, width, declared_height, 0); + + // Image height should be the actual line count, not the declared height + assert_eq!( + img.height(), + 30, + "Image height should match actual decoded lines (30), not declared height (100)" + ); + assert_eq!(img.width(), width); + } + + #[test] + fn test_assemble_image_full_page_unchanged() { + // When lines.len() == declared height, nothing changes + let width = 10u32; + let height = 50u32; + let lines: Vec> = (0..50) + .map(|_| vec![5, 10]) + .collect(); + + let img = assemble_image(&lines, width, height, 0); + assert_eq!(img.height(), 50); + assert_eq!(img.width(), width); + } +} diff --git a/sipcord-bridge/src/lib.rs b/sipcord-bridge/src/lib.rs new file mode 100644 index 0000000..bded370 --- /dev/null +++ 
b/sipcord-bridge/src/lib.rs @@ -0,0 +1,18 @@ +//! Sipcord Bridge - SIP to Discord Voice Bridge +//! +//! A generic SIP-to-Discord voice bridge library. Provides all the core +//! functionality for bridging SIP phone calls to Discord voice channels, +//! including fax (G.711 and T.38) support. +//! +//! Backends implement the `routing::Backend` trait to control call routing +//! and authentication. A built-in `StaticBackend` (TOML dialplan) is included. + +#![feature(portable_simd)] + +pub mod audio; +pub mod call; +pub mod config; +pub mod fax; +pub mod routing; +pub mod services; +pub mod transport; diff --git a/sipcord-bridge/src/main.rs b/sipcord-bridge/src/main.rs new file mode 100644 index 0000000..bc3edd1 --- /dev/null +++ b/sipcord-bridge/src/main.rs @@ -0,0 +1,126 @@ +//! Sipcord Bridge - Static Router Binary +//! +//! Standalone SIP-to-Discord voice bridge using a TOML dialplan. + +#![feature(portable_simd)] + +use std::path::PathBuf; +use std::sync::Arc; + +use anyhow::{Context, Result}; +use tracing::{error, info}; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; + +use sipcord_bridge::call::BridgeCoordinator; +use sipcord_bridge::config::{AppConfig, EnvConfig, SipConfig, APP_CONFIG}; +use sipcord_bridge::routing::static_router::StaticBackend; +use sipcord_bridge::transport::discord::SharedDiscordClient; +use sipcord_bridge::transport::sip::SipTransport; + +#[tokio::main] +async fn main() -> Result<()> { + rustls::crypto::ring::default_provider() + .install_default() + .expect("Failed to install rustls crypto provider"); + + tracing_subscriber::registry() + .with( + tracing_subscriber::EnvFilter::try_from_default_env() + .unwrap_or_else(|_| "sipcord_bridge=info,pjsip=warn".into()), + ) + .with(tracing_subscriber::fmt::layer()) + .init(); + + info!("Starting Sipcord Bridge v{}", env!("CARGO_PKG_VERSION")); + + EnvConfig::init()?; + + let config_path = PathBuf::from(&EnvConfig::global().config_path); + let app_config = 
AppConfig::load(&config_path)?; + APP_CONFIG + .set(app_config) + .expect("AppConfig already initialized"); + info!("Loaded config from {}", config_path.display()); + + run_static_router().await +} + +async fn run_static_router() -> Result<()> { + let bot_token = EnvConfig::global() + .discord_bot_token + .clone() + .context("DISCORD_BOT_TOKEN required")?; + let sip_config = SipConfig::from_env()?; + + // Load dialplan + let dialplan_path = PathBuf::from(&EnvConfig::global().dialplan_path); + let backend = Arc::new(StaticBackend::load(&dialplan_path, bot_token.clone())?); + + // Create SIP transport (no TLS for static router) + let sip_transport = SipTransport::new(sip_config.clone(), None); + let sip_event_tx = sip_transport.event_sender(); + + // Create channel for outbound call events (SIP callbacks still emit these) + let (outbound_event_tx, mut outbound_event_rx) = tokio::sync::mpsc::channel(100); + sipcord_bridge::transport::sip::set_outbound_event_sender(outbound_event_tx); + + // Forward outbound call events to the main SIP event channel + let sip_event_tx_for_outbound = sip_event_tx.clone(); + tokio::spawn(async move { + while let Some(event) = outbound_event_rx.recv().await { + let _ = sip_event_tx_for_outbound.send(event); + } + }); + + // Create shared Discord client + let shared_discord = SharedDiscordClient::new(&bot_token) + .await + .expect("Failed to create shared Discord client"); + info!("Shared Discord client initialized"); + + let bridge = BridgeCoordinator::new( + backend, + sip_transport.commands(), + sip_transport.events(), + shared_discord, + ); + + info!("Starting components..."); + + let mut sip_handle = tokio::spawn(async move { + if let Err(e) = sip_transport.run().await { + error!("SIP server error: {}", e); + } + }); + + let mut bridge_handle = tokio::spawn(async move { + if let Err(e) = bridge.run().await { + error!("Bridge coordinator error: {}", e); + } + }); + + info!( + "Static router running on {}:{}", + sip_config.public_host, 
sip_config.port + ); + + tokio::select! { + _ = tokio::signal::ctrl_c() => info!("Shutdown signal received"), + sip_res = &mut sip_handle => { if let Err(e) = sip_res { error!("SIP task failed: {}", e); } }, + bridge_res = &mut bridge_handle => { if let Err(e) = bridge_res { error!("Bridge task failed: {}", e); } }, + } + + info!("Shutting down..."); + + std::thread::spawn(|| { + std::thread::sleep(std::time::Duration::from_secs(2)); + std::process::exit(0); + }); + + sip_handle.abort(); + bridge_handle.abort(); + sipcord_bridge::transport::sip::shutdown_pjsua(); + + info!("Shutdown complete"); + Ok(()) +} diff --git a/sipcord-bridge/src/routing/mod.rs b/sipcord-bridge/src/routing/mod.rs new file mode 100644 index 0000000..3e4f708 --- /dev/null +++ b/sipcord-bridge/src/routing/mod.rs @@ -0,0 +1,101 @@ +pub mod static_router; + +use crate::services::snowflake::Snowflake; +use crate::transport::sip::DigestAuthParams; +use async_trait::async_trait; + +/// Outbound call request from the backend (e.g., Discord /call command) +#[derive(Debug, Clone)] +pub struct OutboundCallRequest { + pub call_id: String, + pub discord_username: String, + pub guild_id: String, + pub channel_id: String, + pub bot_token: String, + pub caller_username: String, + pub created_at: std::time::Instant, +} + +/// Result of routing an incoming SIP call +pub enum RouteDecision { + /// Connect to this Discord voice channel + Connect { + channel_id: Snowflake, + guild_id: Snowflake, + user_id: String, + bot_token: String, + }, + /// Handle as incoming fax — post to a Discord text channel + ConnectFax { + text_channel_id: Snowflake, + guild_id: Snowflake, + user_id: String, + bot_token: String, + }, + /// Redirect to another bridge server + Redirect { domain: String, extension: String }, + /// Reject with invalid credentials (no error sound, just hangup) + RejectInvalidCredentials, + /// Play an error sound and hangup + RejectWithError { error: CallError }, +} + +/// Errors that trigger audio 
playback before hangup +#[derive(Debug, Clone, Copy)] +pub enum CallError { + NoChannelMapping, + NoPermissions, + DiscordApiError, + ServerBusy, + Unknown, +} + +impl CallError { + /// Get the sound name for this error type + pub fn sound_name(&self) -> &'static str { + match self { + CallError::NoChannelMapping => "no_channel_mapping", + CallError::NoPermissions => "no_permissions", + CallError::DiscordApiError => "server_is_busy", + CallError::ServerBusy => "server_is_busy", + CallError::Unknown => "unknown_error", + } + } +} + +/// Info about a call that just started (for backend tracking) +pub struct CallStartedInfo { + pub sip_call_id: String, + pub user_id: String, + pub guild_id: String, + pub channel_id: String, + pub extension: String, +} + +/// The routing backend — tells the bridge who to connect and when. +/// +/// This is the open-source boundary: the core bridge knows how to connect +/// SIP <-> Discord audio. The Backend tells it *who* to connect and *when*. +#[async_trait] +pub trait Backend: Send + Sync { + /// Get the Discord bot token + fn bot_token(&self) -> &str; + + /// Route an incoming SIP call (authenticate + get destination) + async fn route_call(&self, digest_auth: &DigestAuthParams, extension: &str) -> RouteDecision; + + /// Notify that a call has started + async fn on_call_started(&self, info: &CallStartedInfo); + + /// Notify that a call has ended + async fn on_call_ended(&self, sip_call_id: &str); + + /// Send heartbeat for active channels + async fn heartbeat(&self, active_channel_ids: &[String]); + + /// Report outbound call status back to the backend + fn report_call_status(&self, call_id: &str, status: &str); + + /// Get the next outbound call request (None if backend doesn't support outbound) + async fn next_outbound_request(&self) -> Option; +} diff --git a/sipcord-bridge/src/routing/static_router.rs b/sipcord-bridge/src/routing/static_router.rs new file mode 100644 index 0000000..44afcc5 --- /dev/null +++ 
b/sipcord-bridge/src/routing/static_router.rs @@ -0,0 +1,208 @@ +//! Static dialplan router — routes calls based on a TOML file. +//! +//! This is the open-source-friendly backend that doesn't require the SIPcord API. +//! It reads a `dialplan.toml` file mapping extensions to Discord voice channels. +//! +//! Required env var: `DISCORD_BOT_TOKEN` +//! +//! Example `dialplan.toml`: +//! ```toml +//! [extensions] +//! 1000 = { guild = 123456789012345678, channel = 987654321012345678 } +//! 2000 = { guild = 123456789012345678, channel = 111222333444555666 } +//! ``` + +use std::collections::HashMap; +use std::path::Path; + +use async_trait::async_trait; +use serde::Deserialize; +use tracing::info; + +use crate::routing::{Backend, CallError, CallStartedInfo, OutboundCallRequest, RouteDecision}; +use crate::services::snowflake::Snowflake; +use crate::transport::sip::DigestAuthParams; + +#[derive(Deserialize, Clone)] +struct ExtensionTarget { + guild: Snowflake, + channel: Snowflake, +} + +#[derive(Deserialize)] +struct Dialplan { + extensions: HashMap, +} + +/// Static file-based routing backend. +/// +/// Routes calls by looking up the dialed extension in a TOML dialplan file. +/// No authentication is performed — any caller dialing a known extension is connected. +/// Outbound calls are not supported. +pub struct StaticBackend { + bot_token: String, + extensions: HashMap, +} + +impl StaticBackend { + /// Load the dialplan from a TOML file. `bot_token` comes from the environment. 
+ pub fn load(path: &Path, bot_token: String) -> anyhow::Result { + let content = std::fs::read_to_string(path) + .map_err(|e| anyhow::anyhow!("Failed to read {}: {}", path.display(), e))?; + let dialplan: Dialplan = toml::from_str(&content) + .map_err(|e| anyhow::anyhow!("Failed to parse {}: {}", path.display(), e))?; + + info!( + "Loaded dialplan from {} ({} extensions)", + path.display(), + dialplan.extensions.len(), + ); + for (ext, target) in &dialplan.extensions { + info!( + " ext {} -> guild {} channel {}", + ext, target.guild, target.channel + ); + } + + Ok(Self { + bot_token, + extensions: dialplan.extensions, + }) + } +} + +#[async_trait] +impl Backend for StaticBackend { + fn bot_token(&self) -> &str { + &self.bot_token + } + + async fn route_call(&self, _digest_auth: &DigestAuthParams, extension: &str) -> RouteDecision { + match self.extensions.get(extension) { + Some(target) => RouteDecision::Connect { + channel_id: target.channel, + guild_id: target.guild, + user_id: "static".to_string(), + bot_token: self.bot_token.clone(), + }, + None => { + tracing::warn!("Extension {} not found in dialplan", extension); + RouteDecision::RejectWithError { + error: CallError::NoChannelMapping, + } + } + } + } + + async fn on_call_started(&self, info: &CallStartedInfo) { + info!( + "Call started: {} -> channel {} (ext {})", + info.sip_call_id, info.channel_id, info.extension + ); + } + + async fn on_call_ended(&self, sip_call_id: &str) { + info!("Call ended: {}", sip_call_id); + } + + async fn heartbeat(&self, _active_channel_ids: &[String]) {} + + fn report_call_status(&self, _call_id: &str, _status: &str) {} + + async fn next_outbound_request(&self) -> Option { + // Static router doesn't support outbound calls — block forever + std::future::pending().await + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_load_valid_dialplan() { + let toml_content = r#" +[extensions] +1000 = { guild = 123456789012345678, channel = 987654321012345678 } +2000 
= { guild = 123456789012345678, channel = 111222333444555666 } +"#; + let dir = std::env::temp_dir().join("sipcord_test_dialplan"); + std::fs::create_dir_all(&dir).ok(); + let path = dir.join("test_dialplan.toml"); + std::fs::write(&path, toml_content).unwrap(); + + let backend = StaticBackend::load(&path, "test_token".to_string()).unwrap(); + assert_eq!(backend.extensions.len(), 2); + assert!(backend.extensions.contains_key("1000")); + assert!(backend.extensions.contains_key("2000")); + } + + #[test] + fn test_route_known_extension() { + let toml_content = r#" +[extensions] +1000 = { guild = 111, channel = 222 } +"#; + let dir = std::env::temp_dir().join("sipcord_test_dialplan"); + std::fs::create_dir_all(&dir).ok(); + let path = dir.join("test_route.toml"); + std::fs::write(&path, toml_content).unwrap(); + + let backend = StaticBackend::load(&path, "tok".to_string()).unwrap(); + + let rt = tokio::runtime::Builder::new_current_thread() + .build() + .unwrap(); + rt.block_on(async { + let decision = backend + .route_call(&DigestAuthParams::default(), "1000") + .await; + match decision { + RouteDecision::Connect { channel_id, .. 
} => { + assert_eq!(channel_id, Snowflake::new(222)); + } + _ => panic!("Expected Connect"), + } + }); + } + + #[test] + fn test_route_unknown_extension() { + let toml_content = r#" +[extensions] +1000 = { guild = 111, channel = 222 } +"#; + let dir = std::env::temp_dir().join("sipcord_test_dialplan"); + std::fs::create_dir_all(&dir).ok(); + let path = dir.join("test_route_unknown.toml"); + std::fs::write(&path, toml_content).unwrap(); + + let backend = StaticBackend::load(&path, "tok".to_string()).unwrap(); + + let rt = tokio::runtime::Builder::new_current_thread() + .build() + .unwrap(); + rt.block_on(async { + let decision = backend + .route_call(&DigestAuthParams::default(), "9999") + .await; + match decision { + RouteDecision::RejectWithError { error } => { + assert!(matches!(error, CallError::NoChannelMapping)); + } + _ => panic!("Expected RejectWithError"), + } + }); + } + + #[test] + fn test_load_malformed_toml() { + let dir = std::env::temp_dir().join("sipcord_test_dialplan"); + std::fs::create_dir_all(&dir).ok(); + let path = dir.join("test_bad.toml"); + std::fs::write(&path, "this is not valid toml [[[").unwrap(); + + let result = StaticBackend::load(&path, "tok".to_string()); + assert!(result.is_err()); + } +} diff --git a/sipcord-bridge/src/services/auth_cache.rs b/sipcord-bridge/src/services/auth_cache.rs new file mode 100644 index 0000000..e22fc8c --- /dev/null +++ b/sipcord-bridge/src/services/auth_cache.rs @@ -0,0 +1,274 @@ +//! SIP credential cache for local digest auth verification +//! +//! Caches HA1 hashes returned by the API so that repeat REGISTER requests +//! can be verified locally without an API round-trip. On cache miss or +//! verification failure, falls through to the API. +//! +//! Also tracks consecutive auth failures per username to rate-limit +//! users with bad credentials (429 cooldown after N failures). 
+ +use md5::{Digest, Md5}; +use moka::sync::Cache; +use std::sync::{Arc, OnceLock}; +use std::time::Duration; + +use crate::transport::sip::DigestAuthParams; + +/// Global auth cache instance accessible from C callbacks +static AUTH_CACHE: OnceLock> = OnceLock::new(); + +/// Result of checking digest auth against the cache +pub enum VerifyResult { + /// Cache hit and credentials verified successfully + Verified, + /// Cache had an entry but credentials didn't match (wrong password or stale cache) + Mismatch, + /// No cache entry for this username + Miss, +} + +/// Data returned from a successful REGISTER authentication +#[derive(Clone, Debug)] +pub struct RegisterData { + pub sip_username: String, + /// None if user has allow_inbound_calls disabled + pub discord_username: Option, + /// Pre-computed HA1 hash for caching + pub ha1: Option, +} + +/// Cached credential entry for a SIP user +#[derive(Clone, Debug)] +pub struct CachedAuth { + /// Pre-computed MD5(username:sipcord:password) + pub ha1: String, + /// Cached registration data + pub register_data: RegisterData, +} + +/// In-memory credential cache with TTL +pub struct AuthCache { + cache: Cache, + /// Consecutive auth failure count per username (TTL = cooldown period) + failures: Cache, + /// Number of failures before cooldown kicks in + max_failures: u32, +} + +impl AuthCache { + /// Create a new cache with the given TTL for entries + pub fn new(ttl: Duration, failure_cooldown: Duration, max_failures: u32) -> Self { + Self { + cache: Cache::builder() + .time_to_live(ttl) + .max_capacity(10_000) + .build(), + failures: Cache::builder() + .time_to_live(failure_cooldown) + .max_capacity(10_000) + .build(), + max_failures, + } + } + + /// Set this cache as the global instance + pub fn set_global(cache: Arc) { + let _ = AUTH_CACHE.set(cache); + } + + /// Get the global auth cache instance + pub fn global() -> Option<&'static Arc> { + AUTH_CACHE.get() + } + + /// Record a failed auth attempt, returns the new 
failure count + pub fn record_failure(&self, username: &str) -> u32 { + let count = self.failures.get(username).unwrap_or(0) + 1; + self.failures.insert(username.to_string(), count); + count + } + + /// Clear failure count on successful auth + pub fn clear_failures(&self, username: &str) { + self.failures.invalidate(username); + } + + /// Check if a username is in auth cooldown (too many failures) + pub fn is_in_cooldown(&self, username: &str) -> bool { + self.failures.get(username).unwrap_or(0) >= self.max_failures + } + + /// Try to verify digest auth locally using cached HA1. + /// Returns Some(cached_data) on success, None on miss or mismatch. + pub fn verify(&self, digest: &DigestAuthParams) -> Option { + let cached = self.cache.get(&digest.username)?; + + if verify_digest_with_ha1(&cached.ha1, digest) { + Some(cached) + } else { + // Mismatch - password may have changed, evict stale entry + self.cache.invalidate(&digest.username); + None + } + } + + /// Check digest auth against the cache, distinguishing miss from mismatch. 
+ pub fn check(&self, digest: &DigestAuthParams) -> VerifyResult { + match self.cache.get(&digest.username) { + Some(cached) => { + if verify_digest_with_ha1(&cached.ha1, digest) { + VerifyResult::Verified + } else { + self.cache.invalidate(&digest.username); + VerifyResult::Mismatch + } + } + None => VerifyResult::Miss, + } + } + + /// Store a successful auth result in the cache + pub fn insert(&self, username: &str, ha1: &str, register_data: RegisterData) { + self.cache.insert( + username.to_string(), + CachedAuth { + ha1: ha1.to_string(), + register_data, + }, + ); + } +} + +/// Compute MD5 hex digest of a string +fn md5_hex(input: &str) -> String { + let mut hasher = Md5::new(); + hasher.update(input.as_bytes()); + format!("{:x}", hasher.finalize()) +} + +/// Verify SIP digest auth using a pre-computed HA1 hash +fn verify_digest_with_ha1(ha1: &str, params: &DigestAuthParams) -> bool { + let ha2 = md5_hex(&format!("{}:{}", params.method, params.uri)); + + let expected = match (¶ms.qop, ¶ms.nc, ¶ms.cnonce) { + (Some(qop), Some(nc), Some(cnonce)) if qop == "auth" => md5_hex(&format!( + "{}:{}:{}:{}:{}:{}", + ha1, params.nonce, nc, cnonce, qop, ha2 + )), + _ => md5_hex(&format!("{}:{}:{}", ha1, params.nonce, ha2)), + }; + + params.response.eq_ignore_ascii_case(&expected) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::time::Duration; + + + #[test] + fn test_md5_hex_empty() { + assert_eq!(md5_hex(""), "d41d8cd98f00b204e9800998ecf8427e"); + } + + #[test] + fn test_md5_hex_hello() { + assert_eq!(md5_hex("hello"), "5d41402abc4b2a76b9719d911017c592"); + } + + + #[test] + fn test_verify_digest_without_qop() { + // Compute expected values manually + let ha1 = md5_hex("alice:sipcord:password123"); + let ha2 = md5_hex("REGISTER:sip:sipcord"); + let nonce = "dcd98b7102dd2f0e8b11d0f600bfb0c093"; + let response = md5_hex(&format!("{}:{}:{}", ha1, nonce, ha2)); + + let params = DigestAuthParams { + username: "alice".to_string(), + realm: "sipcord".to_string(), + 
nonce: nonce.to_string(), + uri: "sip:sipcord".to_string(), + response, + method: "REGISTER".to_string(), + qop: None, + nc: None, + cnonce: None, + }; + + assert!(verify_digest_with_ha1(&ha1, ¶ms)); + } + + #[test] + fn test_verify_digest_with_qop_auth() { + let ha1 = md5_hex("bob:sipcord:secret"); + let ha2 = md5_hex("REGISTER:sip:sipcord"); + let nonce = "abc123"; + let nc = "00000001"; + let cnonce = "0a4f113b"; + let response = md5_hex(&format!("{}:{}:{}:{}:auth:{}", ha1, nonce, nc, cnonce, ha2)); + + let params = DigestAuthParams { + username: "bob".to_string(), + realm: "sipcord".to_string(), + nonce: nonce.to_string(), + uri: "sip:sipcord".to_string(), + response, + method: "REGISTER".to_string(), + qop: Some("auth".to_string()), + nc: Some(nc.to_string()), + cnonce: Some(cnonce.to_string()), + }; + + assert!(verify_digest_with_ha1(&ha1, ¶ms)); + } + + #[test] + fn test_verify_digest_wrong_response() { + let ha1 = md5_hex("alice:sipcord:password123"); + let params = DigestAuthParams { + username: "alice".to_string(), + realm: "sipcord".to_string(), + nonce: "nonce".to_string(), + uri: "sip:sipcord".to_string(), + response: "0000000000000000000000000000dead".to_string(), + method: "REGISTER".to_string(), + qop: None, + nc: None, + cnonce: None, + }; + + assert!(!verify_digest_with_ha1(&ha1, ¶ms)); + } + + + #[test] + fn test_auth_cache_record_failure() { + let cache = AuthCache::new(Duration::from_secs(300), Duration::from_secs(60), 3); + assert_eq!(cache.record_failure("user1"), 1); + assert_eq!(cache.record_failure("user1"), 2); + assert_eq!(cache.record_failure("user1"), 3); + } + + #[test] + fn test_auth_cache_clear_failures() { + let cache = AuthCache::new(Duration::from_secs(300), Duration::from_secs(60), 3); + cache.record_failure("user1"); + cache.record_failure("user1"); + cache.clear_failures("user1"); + assert!(!cache.is_in_cooldown("user1")); + } + + #[test] + fn test_auth_cache_cooldown_threshold() { + let cache = 
AuthCache::new(Duration::from_secs(300), Duration::from_secs(60), 3); + assert!(!cache.is_in_cooldown("user1")); + cache.record_failure("user1"); + cache.record_failure("user1"); + assert!(!cache.is_in_cooldown("user1")); + cache.record_failure("user1"); + assert!(cache.is_in_cooldown("user1")); + } +} diff --git a/sipcord-bridge/src/services/ban.rs b/sipcord-bridge/src/services/ban.rs new file mode 100644 index 0000000..2e8ecaa --- /dev/null +++ b/sipcord-bridge/src/services/ban.rs @@ -0,0 +1,58 @@ +//! Ban system trait definition +//! +//! The trait is defined here so FFI callbacks in the library can call ban checks. +//! When no implementation is registered (e.g. standalone/static-router mode), +//! ban checks are simply skipped. + +use std::net::IpAddr; +use std::sync::{Arc, OnceLock}; + +/// Result of checking/recording a ban +#[derive(Debug, Clone, Copy)] +pub struct BanCheckResult { + /// Current offense level for this IP (progressive timeout key) + pub offense_level: u32, + /// Whether the IP is currently timed out or banned + pub is_banned: bool, + /// Whether this is a permanent ban (vs progressive timeout) + pub is_permanent: bool, + /// Timeout duration in seconds (0 if not timed out) + pub timeout_secs: u64, + /// Whether we should log this attempt + pub should_log: bool, +} + +/// Result of clearing all ban data +#[derive(Debug)] +pub struct ClearResult { + pub bans_cleared: u64, + pub registers_cleared: u64, +} + +/// Trait for ban checking — implemented by the adapter, consumed by FFI callbacks +pub trait BanCheck: Send + Sync { + fn is_enabled(&self) -> bool; + fn is_whitelisted(&self, ip: &IpAddr) -> bool; + fn check_banned(&self, ip: &IpAddr) -> BanCheckResult; + fn record_offense(&self, ip: IpAddr, reason: &str) -> BanCheckResult; + fn record_permanent_ban(&self, ip: IpAddr, reason: &str) -> BanCheckResult; + /// Record a REGISTER request from an IP. Returns true if rate limited. 
+ fn record_register(&self, ip: IpAddr) -> bool; + fn clear_all(&self) -> Result>; + /// Config accessors for extension-length checks in callbacks + fn suspicious_extension_min_length(&self) -> usize; + fn suspicious_extension_max_length(&self) -> usize; + fn permaban_extension_min_length(&self) -> usize; +} + +static GLOBAL_BAN_CHECK: OnceLock> = OnceLock::new(); + +/// Register a global ban checker (called by the adapter at init time) +pub fn set_global(checker: Arc) { + let _ = GLOBAL_BAN_CHECK.set(checker); +} + +/// Get the global ban checker (None if not registered) +pub fn global() -> Option<&'static Arc> { + GLOBAL_BAN_CHECK.get() +} diff --git a/sipcord-bridge/src/services/mod.rs b/sipcord-bridge/src/services/mod.rs new file mode 100644 index 0000000..2a55d28 --- /dev/null +++ b/sipcord-bridge/src/services/mod.rs @@ -0,0 +1,5 @@ +pub mod auth_cache; +pub mod ban; +pub mod registrar; +pub mod snowflake; +pub mod sound; diff --git a/sipcord-bridge/src/services/registrar.rs b/sipcord-bridge/src/services/registrar.rs new file mode 100644 index 0000000..ab1a3b9 --- /dev/null +++ b/sipcord-bridge/src/services/registrar.rs @@ -0,0 +1,321 @@ +//! SIP Registration Storage +//! +//! Tracks SIP REGISTER'ed users so we know which phones are online +//! and can route inbound calls (Discord -> SIP) to them. 
use dashmap::DashMap;
use std::net::SocketAddr;
use std::sync::{Arc, OnceLock};
use std::time::{Duration, Instant};
use tracing::debug;

/// Global registrar instance (set during initialization)
// Generics restored — the paste had stripped `OnceLock<Arc<Registrar>>`.
pub static GLOBAL_REGISTRAR: OnceLock<Arc<Registrar>> = OnceLock::new();

/// Transport protocol used for a SIP registration
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SipTransport {
    Udp,
    Tcp,
    Tls,
}

/// A single SIP registration (one phone/device)
#[derive(Debug, Clone)]
pub struct Registration {
    pub sip_username: String,
    /// None if user has allow_inbound_calls disabled
    pub discord_username: Option<String>,
    /// From Contact header (client-advertised URI)
    pub contact_uri: String,
    /// Actual transport source (for NAT traversal)
    pub source_addr: SocketAddr,
    /// Transport protocol used to register
    pub transport: SipTransport,
    /// When this registration expires
    pub expires_at: Instant,
    /// When this registration was created/refreshed
    pub registered_at: Instant,
}

/// Manages SIP registrations for all users
pub struct Registrar {
    /// SIP username -> list of registrations (multiple phones per user)
    registrations: DashMap<String, Vec<Registration>>,
    /// Discord username -> SIP username (reverse lookup for inbound calls)
    discord_to_sip: DashMap<String, String>,
}

impl Registrar {
    pub fn new() -> Self {
        Self {
            registrations: DashMap::new(),
            discord_to_sip: DashMap::new(),
        }
    }

    /// Add or update a registration.
    ///
    /// A registration is considered "the same device" when both the source
    /// address and the Contact URI match; in that case it is refreshed in
    /// place instead of duplicated.
    pub fn add_registration(&self, reg: Registration) {
        let sip_username = reg.sip_username.clone();
        let discord_username = reg.discord_username.clone();

        // Update or insert into registrations
        let mut regs = self.registrations.entry(sip_username.clone()).or_default();

        // Check if this source_addr already has a registration - update it
        if let Some(existing) = regs
            .iter_mut()
            .find(|r| r.source_addr == reg.source_addr && r.contact_uri == reg.contact_uri)
        {
            // If discord_username changed, remove the old reverse mapping
            if existing.discord_username != reg.discord_username {
                if let Some(ref old_du) = existing.discord_username {
                    self.discord_to_sip.remove(old_du);
                }
            }

            existing.expires_at = reg.expires_at;
            existing.registered_at = reg.registered_at;
            existing.contact_uri = reg.contact_uri.clone();
            existing.discord_username = reg.discord_username.clone();

            // Update reverse lookup if discord_username is set
            if let Some(ref du) = discord_username {
                self.discord_to_sip.insert(du.clone(), sip_username.clone());
            }

            return;
        }

        regs.push(reg);
        // Release the shard lock before touching the other map.
        drop(regs);

        // Update reverse lookup
        if let Some(ref du) = discord_username {
            self.discord_to_sip.insert(du.clone(), sip_username.clone());
        }
    }

    /// Remove expired registrations.
+ pub fn remove_expired(&self) { + let now = Instant::now(); + + let mut to_clean = Vec::new(); + for entry in self.registrations.iter() { + let sip_username = entry.key().clone(); + let has_expired = entry.value().iter().any(|r| r.expires_at <= now); + if has_expired { + to_clean.push(sip_username); + } + } + + for sip_username in to_clean { + if let Some(mut regs) = self.registrations.get_mut(&sip_username) { + let discord_username_before = regs.iter().find_map(|r| r.discord_username.clone()); + + regs.retain(|r| r.expires_at > now); + + if regs.is_empty() { + drop(regs); + self.registrations.remove(&sip_username); + + // Clean up reverse lookup + if let Some(du) = discord_username_before { + self.discord_to_sip.remove(&du); + } + } + } + } + } + + /// Get source addresses for a SIP user (for debug capture) + pub fn get_source_addrs_for_sip_user(&self, sip_username: &str) -> Vec { + let now = Instant::now(); + match self.registrations.get(sip_username) { + Some(regs) => regs + .iter() + .filter(|r| r.expires_at > now) + .map(|r| r.source_addr) + .collect(), + None => Vec::new(), + } + } + + /// Get contacts for a Discord user (for inbound calling) + pub fn get_contacts_for_discord_user( + &self, + discord_username: &str, + ) -> Vec<(String, SocketAddr, SipTransport)> { + let sip_username = match self.discord_to_sip.get(discord_username) { + Some(entry) => entry.value().clone(), + None => return Vec::new(), + }; + + let now = Instant::now(); + match self.registrations.get(&sip_username) { + Some(regs) => regs + .iter() + .filter(|r| r.expires_at > now) + .map(|r| (r.contact_uri.clone(), r.source_addr, r.transport)) + .collect(), + None => Vec::new(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::net::SocketAddr; + + fn make_reg( + sip_user: &str, + discord_user: Option<&str>, + addr: &str, + contact: &str, + expires_secs: u64, + ) -> Registration { + Registration { + sip_username: sip_user.to_string(), + discord_username: discord_user.map(|s| 
s.to_string()), + contact_uri: contact.to_string(), + source_addr: addr.parse::().unwrap(), + transport: SipTransport::Udp, + expires_at: Instant::now() + Duration::from_secs(expires_secs), + registered_at: Instant::now(), + } + } + + #[test] + fn test_add_and_lookup() { + let reg = Registrar::new(); + reg.add_registration(make_reg( + "alice", + None, + "1.2.3.4:5060", + "sip:alice@1.2.3.4", + 300, + )); + let addrs = reg.get_source_addrs_for_sip_user("alice"); + assert_eq!(addrs.len(), 1); + assert_eq!(addrs[0], "1.2.3.4:5060".parse::().unwrap()); + } + + #[test] + fn test_discord_reverse_lookup() { + let reg = Registrar::new(); + reg.add_registration(make_reg( + "bob", + Some("bob#1234"), + "5.6.7.8:5060", + "sip:bob@5.6.7.8", + 300, + )); + let contacts = reg.get_contacts_for_discord_user("bob#1234"); + assert_eq!(contacts.len(), 1); + assert_eq!(contacts[0].0, "sip:bob@5.6.7.8"); + } + + #[test] + fn test_update_existing_registration() { + let reg = Registrar::new(); + reg.add_registration(make_reg( + "alice", + None, + "1.2.3.4:5060", + "sip:alice@1.2.3.4", + 300, + )); + // Same source_addr + contact_uri -> update in place + reg.add_registration(make_reg( + "alice", + None, + "1.2.3.4:5060", + "sip:alice@1.2.3.4", + 600, + )); + let addrs = reg.get_source_addrs_for_sip_user("alice"); + assert_eq!(addrs.len(), 1); // Should not duplicate + } + + #[test] + fn test_multiple_registrations_per_user() { + let reg = Registrar::new(); + reg.add_registration(make_reg( + "alice", + None, + "1.2.3.4:5060", + "sip:alice@1.2.3.4", + 300, + )); + reg.add_registration(make_reg( + "alice", + None, + "5.6.7.8:5060", + "sip:alice@5.6.7.8", + 300, + )); + let addrs = reg.get_source_addrs_for_sip_user("alice"); + assert_eq!(addrs.len(), 2); + } + + #[test] + fn test_remove_expired() { + let reg = Registrar::new(); + // Add one that expires immediately + let mut expired_reg = make_reg("alice", None, "1.2.3.4:5060", "sip:alice@1.2.3.4", 0); + expired_reg.expires_at = 
Instant::now() - Duration::from_secs(1); + reg.add_registration(expired_reg); + // Add one that's still valid + reg.add_registration(make_reg( + "alice", + None, + "5.6.7.8:5060", + "sip:alice@5.6.7.8", + 300, + )); + + reg.remove_expired(); + let addrs = reg.get_source_addrs_for_sip_user("alice"); + assert_eq!(addrs.len(), 1); + assert_eq!(addrs[0], "5.6.7.8:5060".parse::().unwrap()); + } + + #[test] + fn test_get_contacts_for_discord_user_expired_filtered() { + let reg = Registrar::new(); + let mut expired_reg = make_reg( + "charlie", + Some("charlie#0001"), + "1.2.3.4:5060", + "sip:charlie@1.2.3.4", + 0, + ); + expired_reg.expires_at = Instant::now() - Duration::from_secs(1); + reg.add_registration(expired_reg); + + reg.add_registration(make_reg( + "charlie", + Some("charlie#0001"), + "5.6.7.8:5060", + "sip:charlie@5.6.7.8", + 300, + )); + + let contacts = reg.get_contacts_for_discord_user("charlie#0001"); + assert_eq!(contacts.len(), 1); + assert_eq!(contacts[0].0, "sip:charlie@5.6.7.8"); + } +} + +/// Start the periodic cleanup task +pub fn spawn_cleanup_task(registrar: Arc) { + tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_secs(30)); + loop { + interval.tick().await; + registrar.remove_expired(); + debug!("Registrar cleanup complete"); + } + }); +} diff --git a/sipcord-bridge/src/services/snowflake.rs b/sipcord-bridge/src/services/snowflake.rs new file mode 100644 index 0000000..c98cfc8 --- /dev/null +++ b/sipcord-bridge/src/services/snowflake.rs @@ -0,0 +1,181 @@ +//! Discord Snowflake ID — type-safe wrapper around u64. +//! +//! Snowflakes encode a millisecond timestamp (bits 22+) relative to the +//! Discord epoch (2015-01-01T00:00:00.000Z), plus worker/process/sequence +//! metadata in the lower 22 bits. + +use std::fmt; +use std::ops::Deref; + +/// Discord epoch: 2015-01-01T00:00:00.000Z in Unix millis +const DISCORD_EPOCH_MS: u64 = 1_420_070_400_000; + +/// Smallest plausible snowflake (~17 digits). 
+/// Corresponds to roughly mid-2015, shortly after Discord launched. +/// 21_154_535_154_122_752 = (1433289600000 - DISCORD_EPOCH_MS) << 22 (2015-06-03) +const MIN_SNOWFLAKE: u64 = 21_154_535_154_122_752; + +/// Largest plausible snowflake — year 2100 relative to Discord epoch. +/// (2_682_288_000_000 << 22) ≈ 11.2e18, still well within u64. +const MAX_SNOWFLAKE: u64 = 2_682_288_000_000 << 22; + +/// A Discord Snowflake ID. +#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Default)] +pub struct Snowflake(u64); + +impl Snowflake { + /// Wrap a raw u64 as a Snowflake (no validation). + pub const fn new(value: u64) -> Self { + Self(value) + } + + /// The raw u64 value. + pub const fn get(self) -> u64 { + self.0 + } + + /// Whether this is a plausible Discord snowflake. + /// + /// Checks that the value is at least 17 digits (all real Discord IDs are) + /// and that the embedded timestamp falls between Discord's launch (~mid 2015) + /// and the year 2100. + pub const fn is_valid(self) -> bool { + self.0 >= MIN_SNOWFLAKE && self.0 <= MAX_SNOWFLAKE + } + + /// Milliseconds since Unix epoch encoded in this snowflake. 
+ pub const fn timestamp_ms(self) -> u64 { + (self.0 >> 22) + DISCORD_EPOCH_MS + } +} + +// Transparent access as u64 + +impl Deref for Snowflake { + type Target = u64; + fn deref(&self) -> &u64 { + &self.0 + } +} + +impl From for Snowflake { + fn from(v: u64) -> Self { + Self(v) + } +} + +impl From for u64 { + fn from(s: Snowflake) -> u64 { + s.0 + } +} + +impl std::str::FromStr for Snowflake { + type Err = std::num::ParseIntError; + fn from_str(s: &str) -> Result { + s.parse::().map(Self) + } +} + +// Display / Debug + +impl fmt::Display for Snowflake { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl fmt::Debug for Snowflake { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Snowflake({})", self.0) + } +} + +// Serde — deserialise from integer OR string (Discord uses both) + +impl serde::Serialize for Snowflake { + fn serialize(&self, serializer: S) -> Result { + self.0.serialize(serializer) + } +} + +impl<'de> serde::Deserialize<'de> for Snowflake { + fn deserialize>(deserializer: D) -> Result { + struct Visitor; + + impl<'de> serde::de::Visitor<'de> for Visitor { + type Value = Snowflake; + + fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("a snowflake as integer or string") + } + + fn visit_u64(self, v: u64) -> Result { + Ok(Snowflake(v)) + } + + fn visit_str(self, v: &str) -> Result { + v.parse::().map(Snowflake).map_err(E::custom) + } + } + + deserializer.deserialize_any(Visitor) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn zero_is_invalid() { + assert!(!Snowflake::new(0).is_valid()); + } + + #[test] + fn too_short_is_invalid() { + // 9 digits — way too small to be a Discord snowflake + assert!(!Snowflake::new(123_456_789).is_valid()); + // 16 digits — still too short + assert!(!Snowflake::new(1_000_000_000_000_000).is_valid()); + } + + #[test] + fn real_snowflakes_are_valid() { + // Discord's own system messages channel + 
assert!(Snowflake::new(80_351_110_224_678_912).is_valid()); + // A typical modern channel ID + assert!(Snowflake::new(1_098_765_432_101_234_567).is_valid()); + } + + #[test] + fn timestamp_decodes() { + let s = Snowflake::new(80_351_110_224_678_912); + assert!(s.timestamp_ms() > DISCORD_EPOCH_MS); + // Should be sometime in 2015 + let year_2016 = 1_451_606_400_000u64; // 2016-01-01 Unix ms + assert!(s.timestamp_ms() < year_2016); + } + + #[test] + fn deref_to_u64() { + let s = Snowflake::new(80_351_110_224_678_912); + let v: u64 = *s; + assert_eq!(v, 80_351_110_224_678_912); + } + + #[test] + fn serde_roundtrip_integer() { + let s = Snowflake::new(80_351_110_224_678_912); + let json = serde_json::to_string(&s).unwrap(); + assert_eq!(json, "80351110224678912"); + let back: Snowflake = serde_json::from_str(&json).unwrap(); + assert_eq!(back, s); + } + + #[test] + fn serde_from_string() { + let back: Snowflake = serde_json::from_str("\"80351110224678912\"").unwrap(); + assert_eq!(back.get(), 80_351_110_224_678_912); + } +} diff --git a/sipcord-bridge/src/services/sound/mod.rs b/sipcord-bridge/src/services/sound/mod.rs new file mode 100644 index 0000000..59ec9c9 --- /dev/null +++ b/sipcord-bridge/src/services/sound/mod.rs @@ -0,0 +1,236 @@ +//! Sound management for SIP call audio +//! +//! Provides a SoundManager that loads sounds from config.toml with two modes: +//! - Preloaded: Loaded into memory at startup for fast playback (system sounds) +//! - Streaming: Loaded on-demand from disk for large files (easter eggs) +//! +//! All audio files must be pre-resampled to 16kHz mono - no runtime resampling. 
+ +mod streaming; + +use crate::audio::{flac, wav}; +use crate::config::{AppConfig, SoundEntry}; +use crate::transport::sip::CONF_SAMPLE_RATE; +use anyhow::{Context, Result}; +use std::collections::HashMap; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use tracing::{debug, info, warn}; + +pub use streaming::StreamingPlayer; + +/// A preloaded sound ready for immediate playback +#[derive(Debug, Clone)] +pub struct PreloadedSound { + /// PCM samples at 16kHz mono - NO RESAMPLING at runtime + pub samples: Arc>, + /// Duration in milliseconds + pub duration_ms: u64, +} + +/// Configuration for a streaming sound (loaded on-demand) +#[derive(Debug, Clone)] +pub struct StreamingConfig { + /// Full path to the audio file + pub path: PathBuf, +} + +/// Sound manager for loading and playing audio files +pub struct SoundManager { + /// Preloaded sounds (preload=true) - in memory, ready for playback + preloaded: HashMap, + /// Streaming configs (preload=false) - path only, loaded on demand + streaming: HashMap, + /// Extension -> sound name mapping for easter eggs + pub extension_map: HashMap, + /// Base directory for sound files + sounds_dir: PathBuf, +} + +impl SoundManager { + /// Create a new SoundManager and load sounds from config + pub fn new(sounds_dir: PathBuf) -> Result { + let config = AppConfig::global(); + let mut manager = Self { + preloaded: HashMap::new(), + streaming: HashMap::new(), + extension_map: HashMap::new(), + sounds_dir, + }; + + manager.load_sounds(&config.sounds.entries)?; + Ok(manager) + } + + /// Load all sounds from config entries + fn load_sounds(&mut self, entries: &HashMap) -> Result<()> { + let mut preloaded_count = 0; + let mut streaming_count = 0; + let mut virtual_count = 0; + + for (name, entry) in entries { + // Build extension map for easter eggs and test tones + if let Some(ext) = entry.extension { + self.extension_map.insert(ext, name.clone()); + debug!("Registered extension {} -> sound '{}'", ext, name); + } + + // Handle 
virtual sounds (no src file - generated dynamically) + let Some(ref src) = entry.src else { + virtual_count += 1; + info!("Registered virtual sound '{}' (no file, generated)", name); + continue; + }; + + let file_path = self.sounds_dir.join(src); + + if entry.preload { + // Load and store in memory + match self.load_preloaded_sound(&file_path, name) { + Ok(sound) => { + info!( + "Preloaded sound '{}': {} samples ({} ms) from {}", + name, + sound.samples.len(), + sound.duration_ms, + src + ); + self.preloaded.insert(name.clone(), sound); + preloaded_count += 1; + } + Err(e) => { + warn!("Failed to preload sound '{}' from {}: {}", name, src, e); + } + } + } else { + // Just store path for streaming + if file_path.exists() { + self.streaming.insert( + name.clone(), + StreamingConfig { + path: file_path.clone(), + }, + ); + streaming_count += 1; + info!("Registered streaming sound '{}' from {}", name, src); + } else { + warn!( + "Streaming sound '{}' file not found: {}", + name, + file_path.display() + ); + } + } + } + + info!( + "SoundManager loaded {} preloaded, {} streaming, {} virtual sounds, {} extensions", + preloaded_count, + streaming_count, + virtual_count, + self.extension_map.len() + ); + + Ok(()) + } + + /// Load a preloaded sound from a file + fn load_preloaded_sound(&self, path: &Path, name: &str) -> Result { + let data = std::fs::read(path) + .with_context(|| format!("Failed to read sound file: {}", path.display()))?; + + let samples = self.parse_audio(&data, name)?; + + let duration_ms = (samples.len() as u64 * 1000) / CONF_SAMPLE_RATE as u64; + + Ok(PreloadedSound { + samples: Arc::new(samples), + duration_ms, + }) + } + + /// Parse audio data (auto-detect WAV or FLAC format) + /// Expects 16kHz mono - panics if wrong sample rate + fn parse_audio(&self, data: &[u8], name: &str) -> Result> { + // Check for FLAC magic number: "fLaC" + if data.len() >= 4 && &data[0..4] == b"fLaC" { + debug!("Detected FLAC format for '{}'", name); + let (samples, rate) = 
flac::parse_flac(data) + .with_context(|| format!("Failed to parse FLAC for sound '{}'", name))?; + if rate != CONF_SAMPLE_RATE { + anyhow::bail!( + "Sound '{}' has wrong sample rate: {} Hz (expected {} Hz). Pre-resample the file.", + name, rate, CONF_SAMPLE_RATE + ); + } + return Ok(samples); + } + + // Check for WAV magic number: "RIFF" + if data.len() >= 4 && &data[0..4] == b"RIFF" { + debug!("Detected WAV format for '{}'", name); + let (samples, rate) = wav::parse_wav(data) + .with_context(|| format!("Failed to parse WAV for sound '{}'", name))?; + if rate != CONF_SAMPLE_RATE { + anyhow::bail!( + "Sound '{}' has wrong sample rate: {} Hz (expected {} Hz). Pre-resample the file.", + name, rate, CONF_SAMPLE_RATE + ); + } + return Ok(samples); + } + + anyhow::bail!( + "Unknown audio format for '{}': header bytes {:?}", + name, + &data[..4.min(data.len())] + ) + } + + /// Get a preloaded sound by name + pub fn get_preloaded(&self, name: &str) -> Option<&PreloadedSound> { + self.preloaded.get(name) + } + + /// Get a streaming config by name + pub fn get_streaming(&self, name: &str) -> Option<&StreamingConfig> { + self.streaming.get(name) + } + + /// Check if a sound is configured for streaming + pub fn is_streaming(&self, name: &str) -> bool { + self.streaming.contains_key(name) + } + + /// Check if a sound is a virtual sound (test tone) + pub fn is_test_tone(&self, name: &str) -> bool { + name == "test_tone" + } + + /// Get the sound name for an extension (if configured) + pub fn get_extension_sound(&self, extension: u32) -> Option<&str> { + self.extension_map.get(&extension).map(|s| s.as_str()) + } + + /// Get the connecting sound samples (used for early media loop) + pub fn get_connecting_samples(&self) -> Option>> { + self.preloaded.get("connecting").map(|s| s.samples.clone()) + } + + /// Get the discord_join sound samples + pub fn get_discord_join_samples(&self) -> Option>> { + self.preloaded + .get("discord_join") + .map(|s| s.samples.clone()) + } + + /// Get 
error sound samples by error type + pub fn get_error_samples(&self, error_type: &str) -> Option>> { + self.preloaded.get(error_type).map(|s| s.samples.clone()) + } +} + +/// Create an Arc-wrapped SoundManager for sharing across async tasks +pub fn create_sound_manager(sounds_dir: PathBuf) -> Result> { + Ok(Arc::new(SoundManager::new(sounds_dir)?)) +} diff --git a/sipcord-bridge/src/services/sound/streaming.rs b/sipcord-bridge/src/services/sound/streaming.rs new file mode 100644 index 0000000..326139a --- /dev/null +++ b/sipcord-bridge/src/services/sound/streaming.rs @@ -0,0 +1,273 @@ +//! Streaming audio player for large files +//! +//! Provides a file-backed streaming player that reads audio from disk +//! on-demand rather than loading the entire file into memory. +//! +//! Uses Symphonia for FLAC decoding (pure Rust). + +use crate::transport::sip::CONF_SAMPLE_RATE; +use anyhow::{Context, Result}; +use std::collections::VecDeque; +use std::fs::File; +use std::path::Path; +use symphonia::core::audio::{AudioBufferRef, Signal}; +use symphonia::core::codecs::{DecoderOptions, CODEC_TYPE_NULL}; +use symphonia::core::formats::FormatOptions; +use symphonia::core::io::MediaSourceStream; +use symphonia::core::meta::MetadataOptions; +use symphonia::core::probe::Hint; + +/// Streaming player for large audio files +/// +/// Reads FLAC frames on-demand to avoid loading entire file into memory. 
+pub struct StreamingPlayer { + /// Symphonia format reader + format: Box, + /// Symphonia decoder + decoder: Box, + /// Track ID we're decoding + track_id: u32, + /// Buffer of decoded samples ready for playback + samples_buffer: VecDeque, + /// Whether we've reached end of file + eof: bool, + /// Total samples read from file (for debugging) + total_samples_read: u64, + /// Total samples delivered via get_frame (for debugging) + total_samples_delivered: u64, +} + +impl StreamingPlayer { + /// Create a new streaming player for a FLAC file + pub fn new(path: &Path) -> Result { + let file = File::open(path) + .with_context(|| format!("Failed to open streaming file: {}", path.display()))?; + + let mss = MediaSourceStream::new(Box::new(file), Default::default()); + + let mut hint = Hint::new(); + if let Some(ext) = path.extension().and_then(|e| e.to_str()) { + hint.with_extension(ext); + } + + let probed = symphonia::default::get_probe() + .format( + &hint, + mss, + &FormatOptions::default(), + &MetadataOptions::default(), + ) + .with_context(|| format!("Failed to probe format: {}", path.display()))?; + + let format = probed.format; + + // Find the first audio track + let track = format + .tracks() + .iter() + .find(|t| t.codec_params.codec != CODEC_TYPE_NULL) + .ok_or_else(|| anyhow::anyhow!("No audio track found in {}", path.display()))?; + + let track_id = track.id; + + // Verify sample rate + let sample_rate = track + .codec_params + .sample_rate + .ok_or_else(|| anyhow::anyhow!("No sample rate in track"))?; + + if sample_rate != CONF_SAMPLE_RATE { + anyhow::bail!( + "Streaming file {} has wrong sample rate: {} Hz (expected {} Hz)", + path.display(), + sample_rate, + CONF_SAMPLE_RATE + ); + } + + let channels = track.codec_params.channels.map(|c| c.count()).unwrap_or(1); + + let n_frames = track.codec_params.n_frames; + + tracing::info!( + "Created Symphonia streaming player for {}: {}Hz, {} channels, n_frames={:?}", + path.display(), + sample_rate, + channels, + 
n_frames + ); + + let decoder = symphonia::default::get_codecs() + .make(&track.codec_params, &DecoderOptions::default()) + .with_context(|| "Failed to create decoder")?; + + Ok(Self { + format, + decoder, + track_id, + samples_buffer: VecDeque::with_capacity(4096), + eof: false, + total_samples_read: 0, + total_samples_delivered: 0, + }) + } + + /// Get the next frame of samples (320 samples for 20ms at 16kHz) + /// + /// Returns None when the file is finished. + pub fn get_frame(&mut self, frame_size: usize) -> Option> { + // Fill buffer if needed + while self.samples_buffer.len() < frame_size && !self.eof { + if !self.read_more_samples() { + self.eof = true; + } + } + + // Return None if no samples available + if self.samples_buffer.is_empty() { + return None; + } + + // Drain requested samples (or all remaining) + let count = frame_size.min(self.samples_buffer.len()); + let samples: Vec = self.samples_buffer.drain(..count).collect(); + self.total_samples_delivered += samples.len() as u64; + + // Pad with silence if we got fewer than requested + if samples.len() < frame_size { + let mut padded = samples; + padded.resize(frame_size, 0); + return Some(padded); + } + + Some(samples) + } + + /// Check if playback is complete + pub fn is_finished(&self) -> bool { + let finished = self.eof && self.samples_buffer.is_empty(); + if finished { + tracing::info!( + "StreamingPlayer finished: read {} samples, delivered {} samples", + self.total_samples_read, + self.total_samples_delivered, + ); + } + finished + } + + /// Read more samples from the file into the buffer + /// Returns false when EOF is reached + fn read_more_samples(&mut self) -> bool { + loop { + let packet = match self.format.next_packet() { + Ok(packet) => packet, + Err(symphonia::core::errors::Error::IoError(e)) + if e.kind() == std::io::ErrorKind::UnexpectedEof => + { + return false; + } + Err(e) => { + tracing::debug!("Error reading packet: {}", e); + return false; + } + }; + + // Skip packets from other 
tracks + if packet.track_id() != self.track_id { + continue; + } + + match self.decoder.decode(&packet) { + Ok(decoded) => { + // Convert to i16 samples + let samples_added = convert_audio_buffer(&decoded, &mut self.samples_buffer); + self.total_samples_read += samples_added as u64; + return true; + } + Err(symphonia::core::errors::Error::DecodeError(e)) => { + tracing::debug!("Decode error: {}", e); + continue; + } + Err(e) => { + tracing::debug!("Fatal decode error: {}", e); + return false; + } + } + } + } +} + +/// Convert Symphonia audio buffer to i16 samples and add to buffer +fn convert_audio_buffer(audio: &AudioBufferRef, samples_buffer: &mut VecDeque) -> usize { + let mut count = 0; + + match audio { + AudioBufferRef::S16(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + + for frame_idx in 0..frames { + if channels == 1 { + let sample = buf.chan(0)[frame_idx]; + samples_buffer.push_back(sample); + count += 1; + } else { + // Stereo to mono: average channels + let mut sum: i32 = 0; + for ch in 0..channels { + sum += buf.chan(ch)[frame_idx] as i32; + } + let mono = (sum / channels as i32) as i16; + samples_buffer.push_back(mono); + count += 1; + } + } + } + AudioBufferRef::S32(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + + for frame_idx in 0..frames { + if channels == 1 { + let sample = (buf.chan(0)[frame_idx] >> 16) as i16; + samples_buffer.push_back(sample); + count += 1; + } else { + let mut sum: i64 = 0; + for ch in 0..channels { + sum += buf.chan(ch)[frame_idx] as i64; + } + let mono = ((sum / channels as i64) >> 16) as i16; + samples_buffer.push_back(mono); + count += 1; + } + } + } + AudioBufferRef::F32(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + + for frame_idx in 0..frames { + if channels == 1 { + let sample = (buf.chan(0)[frame_idx] * 32767.0) as i16; + samples_buffer.push_back(sample); + count += 1; + } else { + let mut sum: f32 
= 0.0; + for ch in 0..channels { + sum += buf.chan(ch)[frame_idx]; + } + let mono = ((sum / channels as f32) * 32767.0) as i16; + samples_buffer.push_back(mono); + count += 1; + } + } + } + _ => { + tracing::warn!("Unsupported audio buffer format"); + } + } + + count +} diff --git a/sipcord-bridge/src/transport/discord/mod.rs b/sipcord-bridge/src/transport/discord/mod.rs new file mode 100644 index 0000000..1251978 --- /dev/null +++ b/sipcord-bridge/src/transport/discord/mod.rs @@ -0,0 +1,1264 @@ +mod voice; + +use crate::audio::simd; +use crate::services::snowflake::Snowflake; +use anyhow::Result; +use audioadapter::Adapter; +use audioadapter_buffers::direct::SequentialSliceOfVecs; +use crossbeam_channel::Sender; +use dashmap::DashMap; +use parking_lot::Mutex; +use rtrb::Producer; +use rubato::{ + Async, FixedAsync, Resampler, SincInterpolationParameters, SincInterpolationType, + WindowFunction, +}; +use serenity::all::{ChannelId, Client, Context, EventHandler, FullEvent, GatewayIntents, GuildId}; +use serenity::async_trait; +use serenity::secrets::Token; +use songbird::driver::DecodeMode; +use songbird::tracks::PlayMode; +use songbird::{ + Config, CoreEvent, Event, EventContext, EventHandler as VoiceEventHandler, Songbird, TrackEvent, +}; +use std::collections::HashMap; +use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering}; +use std::sync::Arc; +use std::sync::OnceLock; +use std::time::{SystemTime, UNIX_EPOCH}; +use tokio::sync::oneshot; +use tracing::{debug, error, info, trace, warn}; + +// Direct audio path: SIP audio thread → Discord +// Uses lock-free ring buffer for real-time audio streaming + +/// Global registry of channel_id → audio sender for direct SIP→Discord audio path. +/// This allows the pjsua audio thread to send directly to Discord without going through tokio. 
+static DISCORD_AUDIO_SENDERS: OnceLock> = OnceLock::new(); + +// Discord→SIP direct path: Discord VoiceTick → ring buffer → SIP audio thread +// Uses lock-free ring buffer to bypass tokio/crossbeam async round-trip + +/// Per-channel ring buffer producers for Discord→SIP audio. +/// VoiceReceiver writes resampled i16 mono @ 16kHz here. +/// channel_port_get_frame reads from the consumer side (in transport/sip/channel_audio.rs). +static DISCORD_TO_SIP_PRODUCERS: OnceLock>>> = + OnceLock::new(); + +fn get_discord_to_sip_producers() -> &'static DashMap>> { + DISCORD_TO_SIP_PRODUCERS.get_or_init(DashMap::new) +} + +/// Register a ring buffer producer for Discord→SIP audio on a channel. +pub fn register_discord_to_sip_producer(channel_id: Snowflake, producer: rtrb::Producer) { + debug!( + "Registering Discord→SIP ring buffer producer for channel {}", + channel_id + ); + get_discord_to_sip_producers().insert(channel_id, Mutex::new(producer)); +} + +/// Unregister the ring buffer producer for a channel. +pub fn unregister_discord_to_sip_producer(channel_id: Snowflake) { + debug!( + "Unregistering Discord→SIP ring buffer producer for channel {}", + channel_id + ); + get_discord_to_sip_producers().remove(&channel_id); +} + +/// Write resampled audio directly to the Discord→SIP ring buffer. +/// Called from VoiceReceiver on the Songbird event loop. +/// Returns true if audio was written, false if no producer registered or buffer full. 
+fn write_discord_to_sip(channel_id: Snowflake, samples_16k: &[i16]) -> bool { + let Some(producer_entry) = get_discord_to_sip_producers().get(&channel_id) else { + return false; + }; + let Some(mut producer) = producer_entry.try_lock() else { + return false; + }; + let slots = producer.slots(); + if slots >= samples_16k.len() { + if let Ok(mut chunk) = producer.write_chunk(samples_16k.len()) { + let (first, second) = chunk.as_mut_slices(); + let first_len = first.len(); + first.copy_from_slice(&samples_16k[..first_len]); + if !second.is_empty() { + second.copy_from_slice(&samples_16k[first_len..]); + } + chunk.commit_all(); + } + true + } else { + // Ring buffer full - drop this frame + trace!( + "Discord→SIP ring buffer full for channel {} (need {}, have {})", + channel_id, + samples_16k.len(), + slots + ); + false + } +} + +fn get_audio_senders() -> &'static DashMap { + DISCORD_AUDIO_SENDERS.get_or_init(DashMap::new) +} + +/// Combined resampler + ring buffer producer, locked together (always accessed together) +struct AudioPipeline { + resampler: ResamplerState, + producer: Producer, +} + +/// Cached VAD config values (read once at creation, never change at runtime) +struct CachedVadConfig { + silence_threshold: i16, + mute_threshold: i16, + silence_frames_before_stop: u32, +} + +/// Wrapper for the audio sender with resampler state and ring buffer producer +struct DirectAudioSender { + /// Resampler + ring buffer producer locked together (one lock instead of two per frame) + pipeline: Mutex, + /// Cached VAD config (avoids AppConfig::audio() call every 20ms frame) + vad_config: CachedVadConfig, + /// VAD: Counter for consecutive silent frames + silence_frame_count: AtomicU32, + /// VAD: Whether we're currently sending speech + is_speaking: AtomicBool, + /// Health tracking: consecutive overflow errors + consecutive_overflows: AtomicU64, +} + +/// Consolidated resampler state with pre-allocated buffers +struct ResamplerState { + resampler: Async, + /// 
Pre-allocated buffer for i16→f64 conversion (capacity: 320) + input_f64: Vec, + /// Pre-allocated buffer for mono→stereo f32 output (capacity: 1920) + stereo_f32: Vec, +} + +impl ResamplerState { + fn new() -> Self { + Self { + resampler: create_resampler(), + input_f64: Vec::with_capacity(320), + stereo_f32: Vec::with_capacity(1920), + } + } +} + +/// Create a high-quality sinc resampler for 16kHz → 48kHz +fn create_resampler() -> Async { + let params = SincInterpolationParameters { + sinc_len: 256, + f_cutoff: 0.95, + interpolation: SincInterpolationType::Linear, + oversampling_factor: 256, + window: WindowFunction::BlackmanHarris2, + }; + + // 16kHz to 48kHz = ratio of 3.0, mono input, 320 samples per chunk (20ms at 16kHz) + Async::new_sinc( + 48000.0 / 16000.0, // resample ratio (3.0x) + 1.1, // max relative ratio (allow slight variation) + ¶ms, + 320, // chunk size (samples per frame at 16kHz) + 1, // mono channel + FixedAsync::Input, // fixed input size + ) + .expect("Failed to create resampler") +} + +/// RAII guard for a registered Discord audio sender. +/// Automatically unregisters the sender when dropped. +pub struct RegisteredAudioSender { + channel_id: Snowflake, +} + +impl RegisteredAudioSender { + /// Register a Discord audio sender for direct SIP→Discord audio path. 
+ pub fn new(channel_id: Snowflake, producer: Producer) -> Self { + debug!("Registering direct audio sender for channel {}", channel_id); + let audio_cfg = crate::config::AppConfig::audio(); + get_audio_senders().insert( + channel_id, + DirectAudioSender { + pipeline: Mutex::new(AudioPipeline { + resampler: ResamplerState::new(), + producer, + }), + vad_config: CachedVadConfig { + silence_threshold: audio_cfg.vad_silence_threshold, + mute_threshold: audio_cfg.vad_mute_threshold, + silence_frames_before_stop: audio_cfg.vad_silence_frames_before_stop, + }, + silence_frame_count: AtomicU32::new(0), + is_speaking: AtomicBool::new(false), + consecutive_overflows: AtomicU64::new(0), + }, + ); + Self { channel_id } + } +} + +impl Drop for RegisteredAudioSender { + fn drop(&mut self) { + debug!( + "Unregistering direct audio sender for channel {}", + self.channel_id + ); + get_audio_senders().remove(&self.channel_id); + } +} + +/// Send audio directly from SIP to Discord, bypassing tokio. +/// This is called from the pjsua audio thread. +/// +/// samples: PCM i16 mono at sample_rate (typically 16kHz from pjsua) +/// Returns true if audio was sent, false if no sender registered for this channel. 
+pub fn send_audio_to_discord_direct( + channel_id: Snowflake, + samples: &[i16], + sample_rate: u32, +) -> bool { + use std::sync::atomic::AtomicU64; + static SEND_COUNT: AtomicU64 = AtomicU64::new(0); + let count = SEND_COUNT.fetch_add(1, Ordering::Relaxed); + + let Some(sender) = get_audio_senders().get(&channel_id) else { + return false; + }; + + // VAD constants from cached config (no per-frame AppConfig lookup) + let silence_threshold = sender.vad_config.silence_threshold; + let mute_threshold = sender.vad_config.mute_threshold; + let silence_frames_before_stop = sender.vad_config.silence_frames_before_stop; + + // SIMD-accelerated amplitude detection for VAD + let input_max_amp = simd::max_abs_i16(samples); + + // Check for muted audio + let is_muted = input_max_amp < mute_threshold; + let has_speech = input_max_amp > silence_threshold; + let was_speaking = sender.is_speaking.load(Ordering::Relaxed); + let prev_silence_count = sender.silence_frame_count.load(Ordering::Relaxed); + + // Update VAD state (for diagnostics) + if is_muted { + sender + .silence_frame_count + .store(silence_frames_before_stop, Ordering::Relaxed); + sender.is_speaking.store(false, Ordering::Relaxed); + } else if has_speech { + sender.silence_frame_count.store(0, Ordering::Relaxed); + sender.is_speaking.store(true, Ordering::Relaxed); + } else { + let new_count = prev_silence_count.saturating_add(1); + sender + .silence_frame_count + .store(new_count, Ordering::Relaxed); + if new_count >= silence_frames_before_stop || !was_speaking { + sender.is_speaking.store(false, Ordering::Relaxed); + } + } + + // Lock the audio pipeline once for both resampling and ring buffer push + // (previously two separate Mutex acquisitions per frame) + let mut pipeline = sender.pipeline.lock(); + // Destructure to allow simultaneous borrows of resampler and producer + let AudioPipeline { + ref mut resampler, + ref mut producer, + } = *pipeline; + let rs = resampler; + + let f32_samples_len; + + if 
sample_rate != DISCORD_SAMPLE_RATE { + // Convert i16 to f64 for rubato, reusing pre-allocated buffer + rs.input_f64.clear(); + rs.input_f64 + .extend(samples.iter().map(|&s| s as f64 / 32768.0)); + + let input_len = rs.input_f64.len(); + + // Process through sinc resampler (maintains state across calls) + // rubato 1.0 uses audioadapter traits - wrap our mono Vec in a sequential slice of vecs + let input_channels = vec![std::mem::take(&mut rs.input_f64)]; + let input_adapter = match SequentialSliceOfVecs::new(&input_channels, 1, input_len) { + Ok(a) => a, + Err(e) => { + warn!( + "Failed to create input adapter for channel {}: {:?}", + channel_id, e + ); + let resampled_i16 = resample_audio(samples, sample_rate, DISCORD_SAMPLE_RATE); + rs.stereo_f32.clear(); + for &s in &resampled_i16 { + let f = s as f32 / 32768.0; + rs.stereo_f32.push(f); + rs.stereo_f32.push(f); + } + f32_samples_len = rs.stereo_f32.len(); + let ring_slots = producer.slots(); + if ring_slots >= f32_samples_len { + if let Ok(mut chunk) = producer.write_chunk(f32_samples_len) { + let (first, second) = chunk.as_mut_slices(); + let first_len = first.len(); + first.copy_from_slice(&rs.stereo_f32[..first_len]); + if !second.is_empty() { + second.copy_from_slice(&rs.stereo_f32[first_len..]); + } + chunk.commit_all(); + } + sender.consecutive_overflows.store(0, Ordering::Relaxed); + } + return !rs.stereo_f32.is_empty(); + } + }; + match rs.resampler.process(&input_adapter, 0, None) { + Ok(output_buffer) => { + let out_frames = output_buffer.frames(); + let out_channels = output_buffer.channels(); + if out_frames == 0 { + // Resampler buffering - send silence to keep timing + if count.is_multiple_of(50) { + warn!( + "Resampler returned empty output (buffering?) 
input={}", + input_len + ); + } + rs.stereo_f32.clear(); + rs.stereo_f32.resize(1920, 0.0f32); // 20ms of stereo silence at 48kHz + } else { + // Extract the mono channel data from the interleaved buffer + let data = output_buffer.take_data(); + let output_mono_len; + // Convert mono f64 to stereo f32, reusing pre-allocated buffer + rs.stereo_f32.clear(); + if out_channels == 1 { + output_mono_len = data.len(); + for sample in &data { + let s = *sample as f32; + rs.stereo_f32.push(s); + rs.stereo_f32.push(s); + } + } else { + // Extract first channel from interleaved data + output_mono_len = data.len() / out_channels; + for sample in data.iter().step_by(out_channels) { + let s = *sample as f32; + rs.stereo_f32.push(s); + rs.stereo_f32.push(s); + } + } + // Log resampler input/output ratio + if count.is_multiple_of(50) { + debug!( + "Resampler: input={} samples, output={} samples (ratio={:.2}, expected=3.0)", + input_len, output_mono_len, output_mono_len as f64 / input_len as f64 + ); + debug!( + "SIP→Discord #{}: mono_out={}, stereo_out={} samples ({} bytes as f32)", + count, + output_mono_len, + rs.stereo_f32.len(), + rs.stereo_f32.len() * 4 + ); + } + } + } + Err(e) => { + warn!( + "Resampler error for channel {}: {:?} (falling back to linear)", + channel_id, e + ); + // Fallback to simple linear interpolation, reusing buffer + let resampled_i16 = resample_audio(samples, sample_rate, DISCORD_SAMPLE_RATE); + rs.stereo_f32.clear(); + for &s in &resampled_i16 { + let f = s as f32 / 32768.0; + rs.stereo_f32.push(f); + rs.stereo_f32.push(f); + } + } + } + } else { + // Already at 48kHz - just convert to stereo f32, reusing buffer + rs.stereo_f32.clear(); + for &sample in samples { + let s = sample as f32 / 32768.0; + rs.stereo_f32.push(s); + rs.stereo_f32.push(s); + } + } + + f32_samples_len = rs.stereo_f32.len(); + + // Push samples to the ring buffer (same lock, no second acquisition) + let ring_slots = producer.slots(); + let samples_to_push = f32_samples_len; + + 
// Log every 50 packets (1 second at 20ms/packet) + if count.is_multiple_of(50) { + let ring_total = voice::ring_buffer_samples(); + let buffer_fill = ring_total - ring_slots; + let fill_ms = buffer_fill as f64 / 48000.0 / 2.0 * 1000.0; + debug!( + "SIP→Discord direct #{}: channel={}, pushing {} samples, ring buffer: {}/{} ({:.0}ms), input_amp={}", + count, channel_id, samples_to_push, buffer_fill, ring_total, fill_ms, input_max_amp + ); + } + + if ring_slots >= samples_to_push { + // Enough space - push all samples + if let Ok(mut chunk) = producer.write_chunk(samples_to_push) { + let (first, second) = chunk.as_mut_slices(); + let first_len = first.len(); + first.copy_from_slice(&rs.stereo_f32[..first_len]); + if !second.is_empty() { + second.copy_from_slice(&rs.stereo_f32[first_len..]); + } + chunk.commit_all(); + } + sender.consecutive_overflows.store(0, Ordering::Relaxed); + } else { + // Ring buffer full - drop samples (overflow) + let consecutive = sender.consecutive_overflows.fetch_add(1, Ordering::Relaxed) + 1; + if consecutive <= 10 || consecutive % 50 == 0 { + warn!( + "Ring buffer overflow for channel {} (consecutive: {}, need {} slots, have {})", + channel_id, consecutive, samples_to_push, ring_slots + ); + } + } + + true +} + +fn silence_threshold() -> i16 { + use std::sync::OnceLock; + static CACHED: OnceLock = OnceLock::new(); + *CACHED.get_or_init(|| crate::config::AppConfig::audio().vad_silence_threshold) +} + +pub use voice::{resample_audio, resample_audio_into, StreamingAudioSource, DISCORD_SAMPLE_RATE}; + +/// Events emitted by the Discord module +#[derive(Debug, Clone)] +pub enum DiscordEvent { + /// Successfully connected to a voice channel + VoiceConnected { + bridge_id: String, + guild_id: Snowflake, + channel_id: Snowflake, + }, + /// Disconnected from voice channel + VoiceDisconnected { bridge_id: String }, +} + +/// Shared Discord client that maintains a single gateway connection. 
+/// +/// Instead of creating a new Serenity Client per SIP call (which opens a new +/// gateway WebSocket each time), we create ONE client at startup and reuse its +/// Songbird manager to join/leave voice channels. This reduces gateway connections +/// from N-per-call to exactly 1. +pub struct SharedDiscordClient { + songbird: Arc, + bot_user_id: AtomicU64, + _client_handle: tokio::task::JoinHandle<()>, +} + +impl SharedDiscordClient { + /// Create the shared Discord client. Call once at bridge startup. + /// + /// This opens a single gateway WebSocket connection that stays alive for + /// the bridge's lifetime. The returned Songbird manager is used by all + /// voice connections to join/leave channels. + pub async fn new(bot_token: &str) -> Result> { + info!("Creating shared Discord client (single gateway connection)"); + + let intents = GatewayIntents::GUILDS | GatewayIntents::GUILD_VOICE_STATES; + + let songbird_config = Config::default().decode_mode(DecodeMode::Decode(Default::default())); + let songbird = Songbird::serenity_from_config(songbird_config); + + let (ready_tx, ready_rx) = oneshot::channel::(); + let ready_tx = Arc::new(tokio::sync::Mutex::new(Some(ready_tx))); + + let token: Token = bot_token + .parse() + .map_err(|e| anyhow::anyhow!("Invalid bot token: {}", e))?; + + let mut client = Client::builder(token, intents) + .event_handler(Arc::new(SharedClientEventHandler { ready_tx })) + .voice_manager(songbird.clone()) + .await?; + + let client_handle = tokio::spawn(async move { + if let Err(e) = client.start().await { + error!("Shared Discord client error: {}", e); + } + }); + + // Wait for gateway Ready event to get the bot's user ID + let bot_user_id = + match tokio::time::timeout(std::time::Duration::from_secs(15), ready_rx).await { + Ok(Ok(id)) => { + info!("Shared Discord client ready, bot user ID: {}", id); + id + } + _ => { + error!( + "Failed to get bot user ID from shared client, feedback filtering may not work" + ); + 0 + } + }; + + // Let 
gateway stabilize + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + Ok(Arc::new(Self { + songbird, + bot_user_id: AtomicU64::new(bot_user_id), + _client_handle: client_handle, + })) + } + + /// Get the shared Songbird manager for joining/leaving voice channels. + pub fn songbird(&self) -> &Arc { + &self.songbird + } + + /// Get the bot's user ID (for filtering own audio in VoiceTick). + pub fn bot_user_id(&self) -> Snowflake { + Snowflake::new(self.bot_user_id.load(Ordering::Relaxed)) + } +} + +/// Serenity event handler for the shared client +struct SharedClientEventHandler { + ready_tx: Arc>>>, +} + +#[async_trait] +impl EventHandler for SharedClientEventHandler { + async fn dispatch(&self, _ctx: &Context, event: &FullEvent) { + if let FullEvent::Ready { data_about_bot, .. } = event { + info!( + "Shared Discord bot connected as {} (ID: {})", + data_about_bot.user.name, data_about_bot.user.id + ); + if let Some(tx) = self.ready_tx.lock().await.take() { + let _ = tx.send(data_about_bot.user.id.get()); + } + } + } +} + +/// Inner state for Discord voice connection +struct DiscordVoiceConnectionInner { + bridge_id: String, + guild_id: Snowflake, + channel_id: Snowflake, + songbird: Arc, + event_tx: Sender, + /// Health tracking: timestamp (ms since epoch) of last audio received from Discord (VoiceTick) + last_audio_received: Arc, + /// RAII guard: auto-unregisters the audio sender on drop + _audio_sender: RegisteredAudioSender, + /// Shared flag to deactivate VoiceReceiver handlers on disconnect + voice_receiver_active: Arc, + /// Set by VoiceReceiver when an unexpected DriverDisconnect event fires. + /// Checked by is_healthy() so the health check can react immediately. + driver_disconnected: Arc, +} + +/// A voice connection to a single Discord voice channel. +/// +/// Uses the shared Discord client's Songbird manager to join/leave channels +/// without creating new gateway connections. 
Each connection manages its own +/// audio pipeline (ring buffer, resampler, event handlers). +/// +/// This type is Clone-able (uses Arc internally) to allow sharing across async tasks. +#[derive(Clone)] +pub struct DiscordVoiceConnection { + inner: Arc, +} + +impl DiscordVoiceConnection { + /// Join a Discord voice channel using the shared client's Songbird manager. + /// + /// This does NOT create a new gateway connection — it reuses the single + /// shared client established at startup. Only the voice channel join/leave + /// is per-call. + pub async fn connect( + bridge_id: String, + shared_client: &Arc, + guild_id: Snowflake, + channel_id: Snowflake, + event_tx: Sender, + health_check_notify: Arc, + ) -> Result { + info!( + "Joining voice channel {} in guild {} for bridge {} (using shared client)", + channel_id, guild_id, bridge_id + ); + + let songbird = shared_client.songbird().clone(); + let bot_user_id = shared_client.bot_user_id(); + + // Join the voice channel with retry logic + let guild = GuildId::new(*guild_id); + let channel = ChannelId::new(*channel_id); + + let bridge_cfg = crate::config::AppConfig::bridge(); + let max_retries = bridge_cfg.voice_join_max_retries; + let retry_delay_secs = bridge_cfg.voice_join_retry_delay_secs; + + let mut last_error = None; + for attempt in 1..=max_retries { + if attempt > 1 { + info!( + "Retry attempt {} for joining voice channel {} (bridge {})", + attempt, channel_id, bridge_id + ); + } + + match songbird.join(guild, channel).await { + Ok(handler_lock) => { + info!( + "Joined voice channel {} in guild {} for bridge {}{}", + channel_id, + guild_id, + bridge_id, + if attempt > 1 { + format!(" (attempt {})", attempt) + } else { + String::new() + } + ); + + // Create the streaming audio source with ring buffer for sending audio to Discord + let (streaming_source, producer) = StreamingAudioSource::new(); + + // Register the ring buffer producer for direct SIP→Discord audio path + // This allows the pjsua audio thread 
to bypass tokio entirely + let audio_sender = RegisteredAudioSender::new(channel_id, producer); + + // Create shared timestamp for health tracking + let now_ms = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_millis() as u64; + let last_audio_received = Arc::new(AtomicU64::new(now_ms)); + + // Set up audio receiver for incoming Discord voice + // and start the streaming audio source for outgoing audio + let voice_receiver_active = Arc::new(AtomicBool::new(true)); + let driver_disconnected = Arc::new(AtomicBool::new(false)); + { + let mut handler = handler_lock.lock().await; + + // CRITICAL: Clear any stale event handlers from previous bridges + // that may have accumulated on this guild's Call handler. + // Without this, each connect() adds 5 more handlers that never + // get removed, causing N duplicate audio processing per VoiceTick. + handler.remove_all_global_events(); + + // Register for VoiceTick events (decoded audio every 20ms) + // Also register for SpeakingStateUpdate to track SSRC-to-user mappings + // And driver events to monitor connection health + let receiver = VoiceReceiver::new( + bridge_id.clone(), + channel_id, + bot_user_id, + last_audio_received.clone(), + voice_receiver_active.clone(), + driver_disconnected.clone(), + health_check_notify, + ); + handler.add_global_event( + Event::Core(CoreEvent::SpeakingStateUpdate), + receiver.clone(), + ); + handler + .add_global_event(Event::Core(CoreEvent::VoiceTick), receiver.clone()); + handler.add_global_event( + Event::Core(CoreEvent::DriverConnect), + receiver.clone(), + ); + handler.add_global_event( + Event::Core(CoreEvent::DriverDisconnect), + receiver.clone(), + ); + handler.add_global_event(Event::Core(CoreEvent::DriverReconnect), receiver); + + // Start playing the streaming audio source immediately + // Track stays playing so Songbird always reads from the queue, + // preventing overflow. VAD filters which frames we push to the queue. 
+ let input = streaming_source.into_input(); + let track_handle = handler.play_input(input); + + // Register track event handlers to monitor playback state + // This helps diagnose why Songbird might stop consuming audio + let track_handler = TrackEventHandler { + bridge_id: bridge_id.clone(), + }; + // Listen for track state changes (only End and Error are concerning) + track_handle + .add_event(Event::Track(TrackEvent::Play), track_handler.clone()) + .ok(); + track_handle + .add_event(Event::Track(TrackEvent::Pause), track_handler.clone()) + .ok(); + track_handle + .add_event(Event::Track(TrackEvent::End), track_handler.clone()) + .ok(); + track_handle + .add_event(Event::Track(TrackEvent::Error), track_handler) + .ok(); + + // Track stays playing - never pause it to avoid buffer underruns. + // Songbird needs to continuously read from the queue. + info!("Started streaming audio source for bridge {}", bridge_id); + + let _ = event_tx.send(DiscordEvent::VoiceConnected { + bridge_id: bridge_id.clone(), + guild_id, + channel_id, + }); + + // We don't need the track_handle anymore - track always plays + drop(track_handle); + + return Ok(Self { + inner: Arc::new(DiscordVoiceConnectionInner { + bridge_id, + guild_id, + channel_id, + songbird, + event_tx, + last_audio_received, + _audio_sender: audio_sender, + voice_receiver_active, + driver_disconnected, + }), + }); + } + } + Err(e) => { + error!( + "Failed to join voice channel (attempt {}/{}): {:?}", + attempt, max_retries, e + ); + last_error = Some(e); + + if attempt < max_retries { + info!( + "Waiting {} seconds before retry for bridge {}", + retry_delay_secs, bridge_id + ); + tokio::time::sleep(std::time::Duration::from_secs(retry_delay_secs)).await; + } + } + } + } + + // All retries failed + anyhow::bail!( + "Failed to join voice channel after {} attempts: {:?}", + max_retries, + last_error + ) + } + + /// Send audio to the Discord voice channel + /// + /// The samples should be PCM i16 at the given sample_rate 
    /// (mono), continued from the doc line above.
    //
    // NOTE(review): the doc paragraph above ("Send audio to the Discord voice
    // channel … handles resampling … VAD …") describes a send-audio entry
    // point that is not defined in this impl — audio now flows through the
    // direct ring-buffer path registered at connect time. The stale paragraph
    // should be deleted once confirmed against the callers.

    /// Check if the Discord connection is healthy.
    ///
    /// Returns `true` only when:
    /// - no unexpected Songbird driver disconnect has been flagged, and
    /// - a VoiceTick has been received within the last 5 seconds (i.e.
    ///   Songbird is still actively delivering decoded audio from Discord).
    pub fn is_healthy(&self) -> bool {
        // Immediate fail if the Songbird driver disconnected unexpectedly
        if self.inner.driver_disconnected.load(Ordering::Relaxed) {
            return false;
        }

        // unwrap: only fails if the system clock is before the Unix epoch
        let now_ms = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_millis() as u64;

        let last_recv = self.inner.last_audio_received.load(Ordering::Relaxed);
        // saturating_sub guards against the clock moving backwards between
        // the VoiceTick store and this read.
        let recv_age_ms = now_ms.saturating_sub(last_recv);

        // Consider unhealthy if no VoiceTick for 5 seconds
        recv_age_ms < 5000
    }

    /// Get the current audio ring buffer fill percentage (0-100).
    ///
    /// High values (>80%) indicate backpressure - Discord consumer is falling behind.
    /// Returns 0 when no audio sender is registered for this channel.
    pub fn queue_fill_percent(&self) -> u8 {
        // Read from the direct audio sender registry
        get_audio_senders()
            .get(&self.inner.channel_id)
            .map(|s| {
                let pipeline = s.pipeline.lock();
                // rtrb reports free slots; fill = capacity - free
                let slots_free = pipeline.producer.slots();
                let total = voice::ring_buffer_samples();
                let filled = total.saturating_sub(slots_free);
                ((filled * 100) / total).min(100) as u8
            })
            .unwrap_or(0)
    }

    /// Get the number of consecutive overflow errors.
    ///
    /// High values indicate the Discord audio consumer has stopped reading.
    /// Reset to 0 by the push path whenever a write succeeds.
    pub fn consecutive_overflows(&self) -> u64 {
        // Read from the direct audio sender registry
        get_audio_senders()
            .get(&self.inner.channel_id)
            .map(|s| s.consecutive_overflows.load(Ordering::Relaxed))
            .unwrap_or(0)
    }

    /// Get the bridge ID for this connection.
    pub fn bridge_id(&self) -> &str {
        &self.inner.bridge_id
    }

    /// Leave the voice channel and disconnect.
    ///
    /// This only leaves the voice channel — it does NOT shut down the shared
    /// Discord client, which stays alive for other connections.
    ///
    /// Teardown order matters here:
    /// 1. deactivate the VoiceReceiver flag (stale handlers become no-ops),
    /// 2. stop tracks / clear global events on the Call handler,
    /// 3. leave the channel,
    /// 4. emit `VoiceDisconnected` last, so listeners observe a fully torn-down state.
    pub async fn disconnect(self) {
        info!("Disconnecting bridge {} from Discord", self.inner.bridge_id);

        // Deactivate the VoiceReceiver to prevent stale event processing.
        // This is a safety net: even if remove_all_global_events misses something
        // (e.g. race with reconnect), the old handler becomes a no-op.
        self.inner
            .voice_receiver_active
            .store(false, Ordering::Relaxed);

        // Audio sender is auto-unregistered when DiscordVoiceConnectionInner is dropped

        let guild = GuildId::new(*self.inner.guild_id);

        // Stop all tracks and clear event handlers before leaving.
        // This ensures old StreamingAudioSource instances stop being polled
        // and no stale VoiceReceiver handlers survive on the Call handler.
        if let Some(handler_lock) = self.inner.songbird.get(guild) {
            let mut handler = handler_lock.lock().await;
            handler.remove_all_global_events();
            handler.stop();
        }

        // Errors ignored: leaving a channel we are no longer in is benign.
        let _ = self.inner.songbird.leave(guild).await;

        // Small delay to let Songbird fully release resources before any reconnection
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

        // Receiver may already be gone at shutdown; a send failure is fine.
        let _ = self.inner.event_tx.send(DiscordEvent::VoiceDisconnected {
            bridge_id: self.inner.bridge_id.clone(),
        });
    }
}

/// Track event handler to monitor audio playback state
/// This helps diagnose why Songbird might stop consuming audio
#[derive(Clone)]
struct TrackEventHandler {
    bridge_id: String,
}

#[async_trait]
impl VoiceEventHandler for TrackEventHandler {
    // NOTE(review): return type appears as bare `Option` in this extract; the
    // songbird trait signature is presumably `Option<Event>` — confirm upstream.
    async fn act(&self, ctx: &EventContext<'_>) -> Option {
        if let EventContext::Track(track_list) = ctx {
            for (state, _handle) in track_list.iter() {
                // Only log concerning states at warn/error level
                match state.playing {
                    PlayMode::Stop
=> { + error!( + "TRACK STOPPED for bridge {} - this will cause queue overflow!", + self.bridge_id + ); + } + PlayMode::End => { + error!( + "TRACK ENDED for bridge {} - this will cause queue overflow!", + self.bridge_id + ); + } + PlayMode::Play | PlayMode::Pause => { + // Normal state changes - log at trace level + trace!( + "Track event for bridge {}: mode={:?}, position={:?}", + self.bridge_id, + state.playing, + state.position, + ); + } + _ => { + trace!( + "Track event for bridge {}: mode={:?}", + self.bridge_id, + state.playing, + ); + } + } + } + } + None + } +} + +/// Pre-allocated buffers for audio mixing to avoid per-tick allocations +struct MixingBuffer { + /// Mixed audio in i32 for headroom (1920 samples = 20ms @ 48kHz stereo) + mixed: Vec, + /// Stereo output after clamping to i16 (1920 samples) + stereo_out: Vec, + /// Mono output for SIP (960 samples = 20ms @ 48kHz mono) + mono_out: Vec, + /// Pre-allocated buffer for 48kHz→16kHz resampled output (avoids per-tick Vec allocation) + resample_buf: Vec, +} + +impl MixingBuffer { + fn new() -> Self { + Self { + mixed: vec![0i32; 1920], + stereo_out: vec![0i16; 1920], + mono_out: vec![0i16; 960], + // 960 mono samples at 48kHz → ~320 at 16kHz (ratio 3:1) + resample_buf: Vec::with_capacity(960), + } + } +} + +/// Voice event receiver for capturing audio +#[derive(Clone)] +struct VoiceReceiver { + bridge_id: String, + /// Discord channel ID for direct ring buffer writes + channel_id: Snowflake, + /// The bot's own user ID - used to filter out our own audio from VoiceTick + bot_user_id: Snowflake, + /// Map from SSRC to user ID - populated from SpeakingStateUpdate events + ssrc_to_user: Arc>>, + /// Shared timestamp for health tracking - updated when audio is received + last_audio_received: Arc, + /// Pre-allocated mixing buffers to avoid allocations in hot path + mixing_buffer: Arc>, + /// Safety flag: set to false on disconnect to make stale handlers no-op. 
+ /// Prevents accumulated handlers from processing audio after their bridge disconnects. + active: Arc, + /// Set when an unexpected DriverDisconnect fires, so is_healthy() returns false immediately. + driver_disconnected: Arc, + /// Notify the health check loop to wake up immediately on driver disconnect. + health_check_notify: Arc, +} + +impl VoiceReceiver { + fn new( + bridge_id: String, + channel_id: Snowflake, + bot_user_id: Snowflake, + last_audio_received: Arc, + active: Arc, + driver_disconnected: Arc, + health_check_notify: Arc, + ) -> Self { + Self { + bridge_id, + channel_id, + bot_user_id, + ssrc_to_user: Arc::new(Mutex::new(HashMap::new())), + last_audio_received, + mixing_buffer: Arc::new(Mutex::new(MixingBuffer::new())), + active, + driver_disconnected, + health_check_notify, + } + } +} + +#[async_trait] +impl VoiceEventHandler for VoiceReceiver { + async fn act(&self, ctx: &EventContext<'_>) -> Option { + // Safety net: if this receiver has been deactivated (bridge disconnected), + // skip all processing to prevent stale handlers from corrupting audio. 
        // Deactivated receivers (bridge already disconnected) must not touch
        // any shared state — bail out before matching on the event.
        if !self.active.load(Ordering::Relaxed) {
            return None;
        }

        match ctx {
            EventContext::SpeakingStateUpdate(speaking) => {
                // Track SSRC-to-user mappings for filtering out bot's own audio
                // in the VoiceTick arm (prevents the SIP↔Discord feedback loop).
                if let Some(user_id) = speaking.user_id {
                    let user_id_snowflake = Snowflake::new(user_id.0);
                    let mut map = self.ssrc_to_user.lock();
                    map.insert(speaking.ssrc, user_id_snowflake);
                    if user_id_snowflake == self.bot_user_id {
                        debug!(
                            "Recorded bot's own SSRC {} for bridge {}",
                            speaking.ssrc, self.bridge_id
                        );
                    } else {
                        trace!(
                            "Recorded SSRC {} -> user {} for bridge {}",
                            speaking.ssrc,
                            user_id_snowflake,
                            self.bridge_id
                        );
                    }
                }
                debug!("Speaking state update: {:?}", speaking);
            }
            EventContext::DriverConnect(info) => {
                info!(
                    "Songbird DRIVER CONNECTED for bridge {}: channel={:?}, ssrc={:?}, session_id={:?}",
                    self.bridge_id, info.channel_id, info.ssrc, info.session_id
                );
            }
            EventContext::DriverDisconnect(info) => {
                // Check if this was a requested disconnect (normal) or unexpected.
                // NOTE(review): this inspects the Debug rendering of the reason
                // for the substring "Requested", which is brittle across
                // songbird versions — prefer matching the DisconnectReason
                // enum variant directly if the crate exposes it.
                let is_requested = info
                    .reason
                    .as_ref()
                    .map(|r| format!("{:?}", r).contains("Requested"))
                    .unwrap_or(false);
                if is_requested {
                    debug!(
                        "Songbird driver disconnected (requested) for bridge {}: channel={:?}",
                        self.bridge_id, info.channel_id
                    );
                } else {
                    // Unexpected disconnect - this is a problem!
                    error!(
                        "Songbird DRIVER DISCONNECTED unexpectedly for bridge {}: channel={:?}, reason={:?}",
                        self.bridge_id, info.channel_id, info.reason
                    );
                    // Signal unhealthy immediately so the health check can react
                    // within ~1s instead of waiting for the next 5s tick.
+ self.driver_disconnected.store(true, Ordering::Relaxed); + self.health_check_notify.notify_one(); + } + } + EventContext::DriverReconnect(info) => { + warn!( + "Songbird DRIVER RECONNECTING for bridge {}: channel={:?}, ssrc={:?}", + self.bridge_id, info.channel_id, info.ssrc + ); + } + EventContext::VoiceTick(tick) => { + static TICK_COUNT: AtomicU64 = AtomicU64::new(0); + let count = TICK_COUNT.fetch_add(1, Ordering::Relaxed); + + // Update health tracking timestamp - VoiceTick arriving means Discord is alive + let now_ms = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_millis() as u64; + self.last_audio_received.store(now_ms, Ordering::Relaxed); + + // Log every 250 ticks (5 seconds at 20ms per tick) + let should_log = count.is_multiple_of(250); + + // Use try_lock to avoid blocking on the event loop - skip tick if contended + let ssrc_map = self.ssrc_to_user.try_lock(); + + // Try to get mixing buffer - skip tick if contended (shouldn't happen normally) + let mut mixing_buf = match self.mixing_buffer.try_lock() { + Some(buf) => buf, + None => { + if should_log { + trace!("VoiceTick: Skipping tick due to mixing buffer contention"); + } + return None; + } + }; + + let speaker_count = tick.speaking.len(); + let silent_count = tick.silent.len(); + let mut skipped_self = false; + let mut has_audio = false; + let mut max_len: usize = 0; + + if should_log { + trace!( + "VoiceTick #{}: {} speaking, {} silent users", + count, + speaker_count, + silent_count + ); + } + + // Reset the mixing buffer for this tick + // Only clear as much as we'll use (optimization for fewer speakers) + let buffer_capacity = mixing_buf.mixed.len(); + + for (ssrc, voice_data) in tick.speaking.iter() { + // CRITICAL: Skip our own SSRC to prevent feedback loop + // When we send audio to Discord, it comes back in VoiceTick. + // If we don't filter it out, we get: SIP -> Discord -> SIP -> Discord -> ... 
+ if let Some(ref map) = ssrc_map { + if let Some(&user_id) = map.get(ssrc) { + if user_id == self.bot_user_id { + skipped_self = true; + if should_log { + trace!( + "VoiceTick: Skipping bot's own SSRC {} to prevent feedback", + ssrc + ); + } + continue; + } + } + } + + if let Some(ref decoded) = voice_data.decoded_voice { + if decoded.is_empty() { + if should_log { + trace!("VoiceTick: SSRC {} has empty decoded_voice", ssrc); + } + continue; + } + + if should_log || count < 10 { + trace!( + "VoiceTick: SSRC {} has {} decoded samples", + ssrc, + decoded.len() + ); + } + + let len = decoded.len().min(buffer_capacity); + + if !has_audio { + // First speaker - widen i16 to i32 using SIMD + simd::widen_i16_to_i32(&decoded[..len], &mut mixing_buf.mixed[..len]); + max_len = len; + has_audio = true; + } else { + // Mix in additional speakers using SIMD accumulate + let mix_len = len.min(max_len); + simd::accumulate_i16_to_i32( + &decoded[..mix_len], + &mut mixing_buf.mixed[..mix_len], + ); + // Handle case where this speaker has more samples + if len > max_len { + simd::widen_i16_to_i32( + &decoded[max_len..len], + &mut mixing_buf.mixed[max_len..len], + ); + max_len = len; + } + } + } else if should_log { + trace!( + "VoiceTick: SSRC {} has no decoded_voice (decode mode not enabled?)", + ssrc + ); + } + } + + // Log when we filtered out our own audio + if skipped_self && should_log { + trace!("VoiceTick: Filtered out bot's own audio to prevent feedback loop"); + } + + // Diagnostic: Log when there are speakers but no decoded audio + // This helps identify when Discord is sending data but decode isn't working + let other_speaker_count = if skipped_self { + speaker_count.saturating_sub(1) + } else { + speaker_count + }; + if !has_audio && other_speaker_count > 0 { + // Count speakers without decoded audio + static NO_DECODE_COUNT: AtomicU64 = AtomicU64::new(0); + let no_decode = NO_DECODE_COUNT.fetch_add(1, Ordering::Relaxed) + 1; + if no_decode <= 10 || 
no_decode.is_multiple_of(50) { + warn!( + "VoiceTick #{}: {} speakers but no decoded audio! (no_decode_count={})", + count, other_speaker_count, no_decode + ); + } + } + + // If we have audio, convert and send it using pre-allocated buffers + if has_audio && max_len > 0 { + // Destructure to allow simultaneous borrows of different fields + let MixingBuffer { + ref mixed, + ref mut stereo_out, + ref mut mono_out, + ref mut resample_buf, + } = *mixing_buf; + + // Convert i32 -> i16 with saturation using SIMD + let stereo_len = max_len.min(stereo_out.len()); + simd::clamp_i32_to_i16(&mixed[..stereo_len], &mut stereo_out[..stereo_len]); + + // Convert stereo to mono for SIP using SIMD + let mono_len = (stereo_len / 2).min(mono_out.len()); + simd::stereo_to_mono_i16(&stereo_out[..stereo_len], &mut mono_out[..mono_len]); + + // Check max amplitude for VAD using SIMD + let max_amp = simd::max_abs_i16(&mono_out[..mono_len]); + + // VAD: Only send audio if it's above the silence threshold + // This prevents feedback loops and reduces unnecessary traffic + // Use same threshold as SIP→Discord for consistency + if max_amp < silence_threshold() { + if should_log { + trace!( + "VoiceTick: VAD filtering silence (max_amp={} < threshold={})", + max_amp, + silence_threshold() + ); + } + } else if mono_len > 0 { + trace!( + "VoiceTick: {} speakers, {} mono samples, max amp: {}", + speaker_count, + mono_len, + max_amp + ); + + // Direct ring buffer path: resample 48kHz→16kHz and write to ring buffer + // This bypasses the entire tokio async round-trip through call/mod.rs + // Uses pre-allocated resample_buf to avoid per-tick Vec allocation + resample_audio_into( + &mono_out[..mono_len], + DISCORD_SAMPLE_RATE, + 16000, // CONF_SAMPLE_RATE + resample_buf, + ); + if !resample_buf.is_empty() { + write_discord_to_sip(self.channel_id, resample_buf); + } + } + } + } + _ => {} + } + None + } +} diff --git a/sipcord-bridge/src/transport/discord/voice.rs 
b/sipcord-bridge/src/transport/discord/voice.rs new file mode 100644 index 0000000..f1ed71f --- /dev/null +++ b/sipcord-bridge/src/transport/discord/voice.rs @@ -0,0 +1,239 @@ +//! Voice/audio utilities for Discord + +use parking_lot::Mutex; +use rtrb::Consumer; +use songbird::input::{Input, RawAdapter}; +use std::io::{Read, Seek, SeekFrom}; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; +use symphonia_core::io::MediaSource; + +/// Discord expects 48kHz stereo audio +pub const DISCORD_SAMPLE_RATE: u32 = 48000; +const DISCORD_CHANNELS: u16 = 2; + +/// Ring buffer capacity in samples (f32 stereo pairs) +pub fn ring_buffer_samples() -> usize { + use std::sync::OnceLock; + static CACHED: OnceLock = OnceLock::new(); + *CACHED.get_or_init(|| crate::config::AppConfig::audio().ring_buffer_samples) +} + +/// Pre-buffer threshold in samples before we start outputting +fn pre_buffer_samples() -> usize { + use std::sync::OnceLock; + static CACHED: OnceLock = OnceLock::new(); + *CACHED.get_or_init(|| crate::config::AppConfig::audio().pre_buffer_samples) +} + +/// Resample audio from one sample rate to another using linear interpolation. +/// This is a fallback - prefer using the rubato sinc resampler for quality. +pub fn resample_audio(samples: &[i16], from_rate: u32, to_rate: u32) -> Vec { + let mut output = Vec::new(); + resample_audio_into(samples, from_rate, to_rate, &mut output); + output +} + +/// Resample audio into a pre-allocated buffer (avoids per-call Vec allocation). +/// Clears `output` and fills it with resampled data. 
+pub fn resample_audio_into(samples: &[i16], from_rate: u32, to_rate: u32, output: &mut Vec) { + output.clear(); + + if from_rate == to_rate { + output.extend_from_slice(samples); + return; + } + + if samples.is_empty() { + return; + } + + let ratio = from_rate as f64 / to_rate as f64; + let output_len = ((samples.len() as f64) / ratio).ceil() as usize; + output.reserve(output_len.saturating_sub(output.capacity())); + + for i in 0..output_len { + let src_pos = i as f64 * ratio; + let src_idx = src_pos as usize; + let frac = src_pos - src_idx as f64; + + let sample = if src_idx + 1 < samples.len() { + let s0 = samples[src_idx] as f64; + let s1 = samples[src_idx + 1] as f64; + (s0 + (s1 - s0) * frac) as i16 + } else if src_idx < samples.len() { + samples[src_idx] + } else { + 0 + }; + + output.push(sample); + } +} + +/// Streaming audio source using a lock-free ring buffer. +/// +/// This implements Symphonia's MediaSource trait and provides raw f32 PCM data. +/// Uses rtrb for lock-free, wait-free audio streaming - no spinning or blocking. +/// +/// The Mutex around Consumer is required to satisfy MediaSource's Sync bound, +/// but since only one thread (Songbird's audio thread) ever accesses it, +/// there's never any contention - it's essentially just satisfying the type system. +pub struct StreamingAudioSource { + /// Ring buffer consumer for audio samples (f32) + /// Wrapped in Mutex to satisfy Sync bound (no actual contention) + consumer: Mutex>, + /// Read count for logging (atomic — single reader, no contention) + read_count: AtomicU64, + /// Whether we've pre-buffered enough to start output (atomic — single reader) + pre_buffered: AtomicBool, + /// Underrun count for diagnostics (atomic — single reader) + underrun_count: AtomicU64, +} + +impl StreamingAudioSource { + /// Create a new streaming audio source with ring buffer. + /// + /// Returns the source and the rtrb Producer to push audio samples. 
+ /// Samples should be f32 interleaved stereo at 48kHz, normalized to [-1.0, 1.0]. + pub fn new() -> (Self, rtrb::Producer) { + let (producer, consumer) = rtrb::RingBuffer::new(ring_buffer_samples()); + + ( + Self { + consumer: Mutex::new(consumer), + read_count: AtomicU64::new(0), + pre_buffered: AtomicBool::new(false), + underrun_count: AtomicU64::new(0), + }, + producer, + ) + } + + /// Create a Songbird Input from this streaming source + pub fn into_input(self) -> Input { + RawAdapter::new(self, DISCORD_SAMPLE_RATE, DISCORD_CHANNELS as u32).into() + } +} + +impl Read for StreamingAudioSource { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + let count = self.read_count.fetch_add(1, Ordering::Relaxed) + 1; + + // How many f32 samples can we fit in the output buffer? + let samples_requested = buf.len() / 4; // 4 bytes per f32 + + let mut consumer = self.consumer.lock(); + let samples_available = consumer.slots(); + + // Pre-buffering: wait until ring buffer has enough before starting + if !self.pre_buffered.load(Ordering::Relaxed) { + if samples_available >= pre_buffer_samples() { + self.pre_buffered.store(true, Ordering::Relaxed); + let ms_buffered = samples_available as f64 / 48000.0 / 2.0 * 1000.0; + tracing::info!( + "StreamingAudioSource: Pre-buffer complete ({} samples, {:.0}ms), starting output", + samples_available, ms_buffered + ); + } else { + // Still pre-buffering - return silence + if count.is_multiple_of(50) { + let ms_buffered = samples_available as f64 / 48000.0 / 2.0 * 1000.0; + let ms_target = pre_buffer_samples() as f64 / 48000.0 / 2.0 * 1000.0; + tracing::debug!( + "StreamingAudioSource: Pre-buffering {}/{} samples ({:.0}ms / {:.0}ms)", + samples_available, + pre_buffer_samples(), + ms_buffered, + ms_target + ); + } + buf.fill(0); + return Ok(buf.len()); + } + } + + // Log buffer status periodically + if count.is_multiple_of(50) { + let ms_buffered = samples_available as f64 / 48000.0 / 2.0 * 1000.0; + tracing::debug!( + 
"StreamingAudioSource #{}: ring buffer has {} samples ({:.1}ms), Songbird wants {} samples", + count, samples_available, ms_buffered, samples_requested + ); + } + + // Read as many samples as we can from ring buffer + let samples_to_read = samples_requested.min(samples_available); + + if samples_to_read == 0 { + // Buffer empty - underrun + let underruns = self.underrun_count.fetch_add(1, Ordering::Relaxed) + 1; + if underruns <= 5 || underruns.is_multiple_of(100) { + tracing::warn!( + "StreamingAudioSource: Ring buffer empty, filling with silence (underruns: {})", + underruns + ); + } + buf.fill(0); + return Ok(buf.len()); + } + + // Read samples from ring buffer directly into output buffer + let chunk = consumer.read_chunk(samples_to_read).unwrap(); + let (first, second) = chunk.as_slices(); + + // Bulk copy f32 samples as raw bytes (memcpy instead of per-sample loop) + let first_bytes = + unsafe { std::slice::from_raw_parts(first.as_ptr() as *const u8, first.len() * 4) }; + buf[..first_bytes.len()].copy_from_slice(first_bytes); + if !second.is_empty() { + let second_bytes = unsafe { + std::slice::from_raw_parts(second.as_ptr() as *const u8, second.len() * 4) + }; + buf[first_bytes.len()..first_bytes.len() + second_bytes.len()] + .copy_from_slice(second_bytes); + } + chunk.commit_all(); + + // Fill remainder with silence if we didn't have enough + let bytes_written = samples_to_read * 4; + if bytes_written < buf.len() { + buf[bytes_written..].fill(0); + if count.is_multiple_of(50) || count < 10 { + let silence_samples = (buf.len() - bytes_written) / 4; + tracing::debug!( + "StreamingAudioSource #{}: Partial read, filled {} samples with silence", + count, + silence_samples + ); + } + } + + Ok(buf.len()) + } +} + +impl Seek for StreamingAudioSource { + fn seek(&mut self, _pos: SeekFrom) -> std::io::Result { + // Live streams are not seekable + Err(std::io::Error::new( + std::io::ErrorKind::Unsupported, + "Live streams are not seekable", + )) + } +} + +impl 
MediaSource for StreamingAudioSource { + fn is_seekable(&self) -> bool { + false + } + + fn byte_len(&self) -> Option { + None // Unknown length for live streams + } +} + +impl Drop for StreamingAudioSource { + fn drop(&mut self) { + tracing::debug!("StreamingAudioSource dropped (call ending)"); + } +} diff --git a/sipcord-bridge/src/transport/mod.rs b/sipcord-bridge/src/transport/mod.rs new file mode 100644 index 0000000..c736288 --- /dev/null +++ b/sipcord-bridge/src/transport/mod.rs @@ -0,0 +1,2 @@ +pub mod discord; +pub mod sip; diff --git a/sipcord-bridge/src/transport/sip/audio_thread.rs b/sipcord-bridge/src/transport/sip/audio_thread.rs new file mode 100644 index 0000000..027e516 --- /dev/null +++ b/sipcord-bridge/src/transport/sip/audio_thread.rs @@ -0,0 +1,863 @@ +//! Audio processing thread and RTP activity tracking +//! +//! This module handles: +//! - Audio thread lifecycle (start/stop) +//! - Per-frame audio processing for SIP <-> Discord +//! - RTP inactivity timeout detection + +use super::channel_audio::{complete_pending_channel_registration, get_active_channels_into}; +use super::ffi::types::*; +use crate::audio::simd; +use crate::services::snowflake::Snowflake; +use crossbeam_channel::Sender; +use crossbeam_queue::SegQueue; +use parking_lot::Mutex; +use pjsua::*; +use std::mem::MaybeUninit; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::time::Instant; + +/// Frame counter for when we first see active channels (for debug logging) +/// This is reset when the audio thread starts to prevent subtraction overflow +static FIRST_ACTIVE_CHANNEL_FRAME: AtomicU64 = AtomicU64::new(0); + +fn drain_queue(queue: &SegQueue, name: &str) { + let mut count = 0; + while queue.pop().is_some() { + count += 1; + } + if count > 0 { + tracing::warn!("Drained {} stale {} from previous audio thread", count, name); + } +} + +/// Start the audio processing thread +/// +/// This thread periodically: +/// - Gets audio frames from the conference (SIP -> callback) +/// 
- Puts audio frames to the conference (from AUDIO_OUT_BUFFERS -> SIP) +pub fn start_audio_thread() { + if AUDIO_THREAD_RUNNING.swap(true, Ordering::SeqCst) { + tracing::warn!("Audio thread already running"); + return; + } + + // Reset the "ready" flag - we'll set it after processing the first frame + AUDIO_THREAD_READY.store(false, Ordering::SeqCst); + + // Reset the first-active-channel frame counter to prevent subtraction overflow + // when the audio thread restarts with a new frame_count + FIRST_ACTIVE_CHANNEL_FRAME.store(0, Ordering::SeqCst); + + let handle = std::thread::spawn(|| { + // Catch any panics in the audio thread + let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + tracing::info!( + "Audio processing thread started [thread: {:?}]", + std::thread::current().id() + ); + + // Drain stale ops from previous audio thread lifecycle + drain_queue(&PENDING_PJSUA_OPS, "PENDING_PJSUA_OPS"); + drain_queue(&PENDING_CONF_CONNECTIONS, "PENDING_CONF_CONNECTIONS"); + drain_queue(&PENDING_CHANNEL_COMPLETIONS, "PENDING_CHANNEL_COMPLETIONS"); + + // Register this thread with PJLIB so we can call PJSUA functions + // The thread descriptor must remain valid for the thread's lifetime + let mut thread_desc: pj_thread_desc = [0; 64]; + let mut thread_ptr: *mut pj_thread_t = std::ptr::null_mut(); + let thread_name = c"audio_thread"; + + unsafe { + let is_registered = pj_thread_is_registered(); + if is_registered == 0 { + let status = pj_thread_register( + thread_name.as_ptr(), + thread_desc.as_mut_ptr(), + &mut thread_ptr, + ); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::error!("Failed to register audio thread with PJLIB: {}", status); + return; + } + tracing::debug!("Audio thread registered with PJLIB successfully"); + } else { + tracing::debug!("Audio thread already registered with PJLIB"); + } + } + + // Allocate frame buffer (16-bit samples) + let frame_size_bytes = SAMPLES_PER_FRAME * 2; // 2 bytes per i16 sample + let mut 
frame_buffer: Vec = vec![0u8; frame_size_bytes]; + let mut timestamp: u64 = 0; + let mut frame_count: u64 = 0; + + let mut active_channels: Vec = Vec::with_capacity(32); + let mut drain_buf: Vec = vec![0i16; SAMPLES_PER_FRAME]; + let silence: Vec = vec![0i16; SAMPLES_PER_FRAME]; + + // Use deadline-based timing instead of duration-based timing. + // This prevents sleep overrun from accumulating frame after frame. + let frame_duration = std::time::Duration::from_millis(FRAME_PTIME_MS as u64); + let mut next_frame_deadline = Instant::now() + frame_duration; + + while AUDIO_THREAD_RUNNING.load(Ordering::SeqCst) { + let start = std::time::Instant::now(); + + // Process one frame + unsafe { + process_audio_frame( + &mut frame_buffer, + &mut timestamp, + &mut frame_count, + &mut active_channels, + &mut drain_buf, + &silence, + ); + } + + // After the first frame, mark audio thread as ready and process any pending + // channel registrations. This ensures the conference bridge is actively being + // clocked when we make connections via pjsua_conf_connect. 
+ if frame_count == 1 { + AUDIO_THREAD_READY.store(true, Ordering::SeqCst); + tracing::debug!("Audio thread ready after first frame, processing pending channel completions"); + process_pending_channel_completions(); + } + + // Process any pending conference connections (must be done in audio thread + // to avoid conflicts with pjmedia_port_get_frame) + process_pending_conf_connections(frame_count); + + // Process any pending PJSUA operations (answer, hangup, play) + // These must run in the audio thread to avoid deadlocks with conf_connect/disconnect + process_pending_pjsua_ops(); + + // Track frame processing time for latency diagnostics + let processing_elapsed = start.elapsed(); + let processing_ms = processing_elapsed.as_secs_f64() * 1000.0; + + // Warn if processing took longer than frame time (20ms) - this causes audio crunch + if processing_ms > FRAME_PTIME_MS as f64 { + tracing::warn!( + "AUDIO OVERRUN: Frame #{} processing took {:.2}ms (>{}ms), audio will crunch!", + frame_count, processing_ms, FRAME_PTIME_MS + ); + } else if processing_ms > (FRAME_PTIME_MS as f64 * 0.8) { + // Warn if approaching the limit (>80% of frame time) + tracing::debug!( + "Audio frame #{} processing took {:.2}ms (approaching {}ms limit)", + frame_count, + processing_ms, + FRAME_PTIME_MS + ); + } + + // Log every 5 seconds (250 frames at 20ms each) that we're still alive + if frame_count.is_multiple_of(250) { + let call_ids: Vec = COUNTED_CALL_IDS + .get() + .map(|ids| ids.lock().iter().copied().collect()) + .unwrap_or_default(); + + tracing::debug!( + "Audio thread: frame #{}, active_calls={}, call_ids={:?}", + frame_count, + call_ids.len(), + call_ids + ); + } + + // Deadline-based sleep: sleep until the next frame deadline, not for a duration. + // This compensates for any sleep overrun on the next frame. 
+ let now = Instant::now(); + if next_frame_deadline > now { + std::thread::sleep(next_frame_deadline - now); + } + // Advance deadline for next frame (even if we're behind, keep the cadence) + next_frame_deadline += frame_duration; + + // If we've fallen more than 5 frames behind (100ms), reset the deadline + // to avoid a burst of catch-up frames that would cause audio glitches + if next_frame_deadline + std::time::Duration::from_millis(100) < Instant::now() { + tracing::warn!( + "Audio thread fell behind by >100ms, resetting deadline (frame #{})", + frame_count + ); + next_frame_deadline = Instant::now() + frame_duration; + } + } + + tracing::debug!( + "Audio processing thread exiting - AUDIO_THREAD_RUNNING is false, frame_count={}", + frame_count + ); + })); + + if let Err(e) = result { + tracing::error!("AUDIO THREAD PANICKED: {:?}", e); + } + }); + + // Store the handle for joining later + let handle_storage = AUDIO_THREAD_HANDLE.get_or_init(|| Mutex::new(None)); + *handle_storage.lock() = Some(handle); +} + +/// Stop the audio processing thread +pub fn stop_audio_thread() { + let active_calls = COUNTED_CALL_IDS + .get() + .map(|ids| ids.lock().len()) + .unwrap_or(0); + tracing::debug!( + "Stopping audio thread (active_media_calls={}, was_running={})", + active_calls, + AUDIO_THREAD_RUNNING.load(Ordering::SeqCst) + ); + AUDIO_THREAD_RUNNING.store(false, Ordering::SeqCst); + AUDIO_THREAD_READY.store(false, Ordering::SeqCst); + + // Wait for the thread to stop with a bounded timeout. + // If the thread is blocked on a conference bridge lock, we don't want + // shutdown to hang indefinitely. The 2s force-exit timer in main.rs + // is a final backstop, but this avoids relying on a hard process exit. 
+ if let Some(handle_storage) = AUDIO_THREAD_HANDLE.get() { + if let Some(handle) = handle_storage.lock().take() { + tracing::debug!("Joining audio thread (2s timeout)..."); + let (done_tx, done_rx) = std::sync::mpsc::channel(); + let join_thread = std::thread::spawn(move || { + let result = handle.join(); + let _ = done_tx.send(result); + }); + match done_rx.recv_timeout(std::time::Duration::from_secs(2)) { + Ok(Ok(())) => { + tracing::debug!("Audio thread joined successfully"); + } + Ok(Err(e)) => { + tracing::error!("Audio thread panicked: {:?}", e); + } + Err(_) => { + tracing::warn!("Audio thread join timed out after 2s, detaching"); + // Detach the join thread — the audio thread will be + // cleaned up by process exit + drop(join_thread); + } + } + } + } +} + +/// Process any pending channel registration completions +/// Called from the audio thread after it has processed its first frame +fn process_pending_channel_completions() { + let mut count = 0; + while let Some((call_id, conf_port)) = PENDING_CHANNEL_COMPLETIONS.pop() { + tracing::debug!( + "Completing deferred channel registration: call {} -> conf_port {}", + call_id, + conf_port + ); + complete_pending_channel_registration(call_id, conf_port); + count += 1; + } + + if count > 0 { + tracing::debug!("Processed {} pending channel completions", count); + } else { + tracing::debug!("No pending channel completions to process"); + } +} + +/// Process any pending conference connections +/// Called from the audio thread every frame to handle newly registered calls +fn process_pending_conf_connections(_frame_count: u64) { + use super::channel_audio::complete_conf_connections; + + let mut count = 0; + while let Some((call_id, channel_id)) = PENDING_CONF_CONNECTIONS.pop() { + tracing::debug!( + "Audio thread making conference connections: call {} -> channel {}", + call_id, + channel_id + ); + complete_conf_connections(call_id, channel_id); + count += 1; + } + + if count > 0 { + tracing::debug!( + "Audio thread 
processed {} pending conference connections", + count + ); + } +} + +/// Process any pending PJSUA operations +/// Called from the audio thread every frame to handle queued operations +/// that would deadlock if called from other threads during audio processing +fn is_call_valid(call_id: CallId) -> bool { + unsafe { + let mut ci = MaybeUninit::::uninit(); + let status = pjsua_call_get_info(*call_id, ci.as_mut_ptr()); + if status != pj_constants__PJ_SUCCESS as i32 { + return false; + } + let ci = ci.assume_init(); + ci.state != pjsip_inv_state_PJSIP_INV_STATE_DISCONNECTED + } +} + +fn process_pending_pjsua_ops() { + use super::ffi::direct_player::play_audio_to_call_direct_internal; + use super::ffi::streaming_player::start_streaming_to_call; + + let mut count = 0; + while let Some(op) = PENDING_PJSUA_OPS.pop() { + // Validate that the call still exists before processing the op + let call_id = match &op { + PendingPjsuaOp::PlayDirect { call_id, .. } => Some(*call_id), + PendingPjsuaOp::StartLoop { call_id, .. } => Some(*call_id), + PendingPjsuaOp::StartStreaming { call_id, .. } => Some(*call_id), + PendingPjsuaOp::StartTestTone { call_id } => Some(*call_id), + PendingPjsuaOp::Hangup { call_id } => Some(*call_id), + PendingPjsuaOp::ConnectFaxPort { call_id, .. } => Some(*call_id), + }; + if let Some(cid) = call_id { + if !is_call_valid(cid) { + tracing::warn!("Skipping stale op for dead call {}: {:?}", cid, op); + // For ConnectFaxPort, signal failure so the caller doesn't hang + if let PendingPjsuaOp::ConnectFaxPort { done_tx, .. 
} = op { + let _ = done_tx.send(false); + } + continue; + } + } + count += 1; + match op { + PendingPjsuaOp::PlayDirect { call_id, samples } => { + tracing::debug!( + "Audio thread: executing PlayDirect for call {} ({} samples)", + call_id, + samples.len() + ); + // Stop any active looping player for this call first + // This ensures a seamless transition from connecting sound to join sound + super::ffi::looping_player::stop_loop(call_id); + + if let Err(e) = play_audio_to_call_direct_internal(call_id, &samples) { + tracing::warn!("Failed to play direct audio to call {}: {}", call_id, e); + } + } + PendingPjsuaOp::StartStreaming { + call_id, + path, + hangup_on_complete, + } => { + tracing::debug!( + "Audio thread: executing StartStreaming for call {} ({})", + call_id, + path.display() + ); + // Stop any active looping player for this call first + super::ffi::looping_player::stop_loop(call_id); + + if let Err(e) = start_streaming_to_call(call_id, &path, hangup_on_complete) { + tracing::warn!("Failed to start streaming for call {}: {}", call_id, e); + } + } + PendingPjsuaOp::StartTestTone { call_id } => { + tracing::debug!("Audio thread: executing StartTestTone for call {}", call_id); + // Stop any active looping player for this call first + super::ffi::looping_player::stop_loop(call_id); + + if let Err(e) = super::ffi::test_tone::start_test_tone_to_call(call_id) { + tracing::warn!("Failed to start test tone for call {}: {}", call_id, e); + } + } + PendingPjsuaOp::Hangup { call_id } => { + tracing::debug!("Audio thread: executing Hangup for call {}", call_id); + // Stop any active looping player for this call first + super::ffi::looping_player::stop_loop(call_id); + // Hangup the call + unsafe { + pjsua::pjsua_call_hangup(*call_id, 200, std::ptr::null(), std::ptr::null()); + } + } + PendingPjsuaOp::StartLoop { call_id, samples } => { + tracing::debug!("Audio thread: executing StartLoop for call {}", call_id); + if let Err(e) = 
super::ffi::looping_player::start_loop(call_id, samples) { + tracing::error!( + "Failed to start connecting loop for call {}: {}", + call_id, + e + ); + } + } + PendingPjsuaOp::ConnectFaxPort { + call_id, + fax_slot, + call_conf_port, + done_tx, + } => { + tracing::debug!( + "Audio thread: connecting fax port for call {} (fax_slot={}, call_port={})", + call_id, + fax_slot, + call_conf_port + ); + let success = unsafe { + let conf = super::ffi::frame_utils::get_conference_bridge(); + if let Some(conf) = conf { + let s1 = pjmedia_conf_connect_port( + conf, + *call_conf_port as u32, + *fax_slot as u32, + 0, + ); + let s2 = pjmedia_conf_connect_port( + conf, + *fax_slot as u32, + *call_conf_port as u32, + 0, + ); + if s1 != pj_constants__PJ_SUCCESS as i32 { + tracing::error!( + "Failed to connect call {} -> fax slot {}: {}", + call_id, + fax_slot, + s1 + ); + } + if s2 != pj_constants__PJ_SUCCESS as i32 { + tracing::error!( + "Failed to connect fax slot {} -> call {}: {}", + fax_slot, + call_id, + s2 + ); + } + s1 == pj_constants__PJ_SUCCESS as i32 + && s2 == pj_constants__PJ_SUCCESS as i32 + } else { + tracing::error!("Cannot get conference bridge for fax port connection"); + false + } + }; + let _ = done_tx.send(success); + } + } + } + + if count > 0 { + tracing::debug!("Audio thread processed {} pending PJSUA operations", count); + } +} + +/// Queue a channel registration completion for when the audio thread is ready +/// Returns true if queued, false if audio thread is ready (caller should complete immediately) +pub fn queue_pending_channel_completion(call_id: CallId, conf_port: ConfPort) -> bool { + if AUDIO_THREAD_READY.load(Ordering::SeqCst) { + // Audio thread is ready, caller should complete immediately + return false; + } + + // Queue for later processing + PENDING_CHANNEL_COMPLETIONS.push((call_id, conf_port)); + tracing::debug!( + "Queued pending channel completion: call {} -> conf_port {} (audio thread not ready yet)", + call_id, + conf_port + ); + true +} 
+ +/// Process one audio frame (called from audio thread) +/// +/// This function handles per-channel audio isolation using a SINGLE clock tick: +/// 1. Clock the conference ONCE via pjmedia_port_get_frame (runs all codecs, jitter buffers, etc.) +/// 2. During that tick, channel_port_put_frame callbacks receive audio from connected calls +/// 3. Drain the per-channel SIP->Discord buffers and send to Discord +/// +/// This architecture ensures the conference only advances once per 20ms frame, regardless of +/// how many channels are active. Previously, we clocked once PER CHANNEL which caused audio +/// to run at N*speed (stuttering, delays) when N channels were active. +unsafe fn process_audio_frame( + frame_buffer: &mut [u8], + timestamp: &mut u64, + frame_count: &mut u64, + active_channels: &mut Vec, + drain_buf: &mut [i16], + silence: &[i16], +) { + use super::channel_audio::drain_sip_to_discord_audio; + + *frame_count += 1; + + // Increment global frame counter for channel port caching + // This ensures channel_port_get_frame only drains buffers once per tick + AUDIO_FRAME_COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + + let port_guard = match CONF_MASTER_PORT.get() { + Some(guard) => guard, + None => { + if (*frame_count).is_multiple_of(500) { + tracing::warn!("Audio thread: No master port configured"); + } + return; + } + }; + + let master_port = port_guard.lock().0; + if master_port.is_null() { + if (*frame_count).is_multiple_of(500) { + tracing::warn!("Audio thread: Master port is null"); + } + return; + } + + // Log every 5 seconds (250 frames at 20ms each) + let should_log = (*frame_count).is_multiple_of(250); + + // Get snapshots of channel mappings (reuses allocation) + get_active_channels_into(active_channels); + + if should_log { + tracing::trace!("Audio thread: {} active channels", active_channels.len()); + } + + // Log when we first start processing active channels + let first_active = 
FIRST_ACTIVE_CHANNEL_FRAME.load(Ordering::Relaxed); + if first_active == 0 && !active_channels.is_empty() { + FIRST_ACTIVE_CHANNEL_FRAME.store(*frame_count, Ordering::Relaxed); + tracing::info!( + "Audio thread frame #{}: FIRST frame with active channels: {:?}", + *frame_count, + active_channels + ); + } + + // CRITICAL: Clock the conference EXACTLY ONCE per frame + // This runs ALL the internal processing: + // - Jitter buffers for all calls + // - Codec decode/encode for all calls + // - Mixing for all connected ports + // - Calls channel_port_get_frame for Discord->SIP (provides audio TO calls) + // - Calls channel_port_put_frame for SIP->Discord (receives audio FROM calls) + let mut clock_frame = pjmedia_frame { + type_: pjmedia_frame_type_PJMEDIA_FRAME_TYPE_AUDIO, + buf: frame_buffer.as_mut_ptr() as *mut _, + size: frame_buffer.len() as pj_size_t, + timestamp: pj_timestamp { u64_: *timestamp }, + bit_info: 0, + }; + pjmedia_port_get_frame(master_port, &mut clock_frame); + + // Now drain the SIP->Discord buffers that were filled by channel_port_put_frame callbacks + // during the conference tick above. + // Lock callbacks ONCE per frame (not per channel) to avoid N Mutex acquisitions. 
+ if !active_channels.is_empty() { + let callbacks_guard = CALLBACKS.get().map(|c| c.lock()); + let on_audio_frame = callbacks_guard + .as_ref() + .and_then(|g| g.as_ref()) + .map(|h| &h.on_audio_frame); + + for &channel_id in active_channels.iter() { + // Drain one frame's worth of audio into pre-allocated buffer + let n = drain_sip_to_discord_audio(channel_id, drain_buf); + + // ALWAYS send something to keep Discord stream alive (even if just silence) + let samples: &[i16] = if n > 0 { &drain_buf[..n] } else { silence }; + + // Log periodically + if should_log { + let max_sample = simd::max_abs_i16(samples); + tracing::trace!( + "SIP->Discord: {} samples from channel {}, max_amp={}", + samples.len(), + channel_id, + max_sample + ); + } + + // Emit audio for THIS channel specifically + if let Some(on_audio_frame) = on_audio_frame { + on_audio_frame(channel_id, samples, CONF_SAMPLE_RATE); + } + } + } + + // Increment timestamp + *timestamp += SAMPLES_PER_FRAME as u64; +} + +// RTP activity tracking + +/// Get the total RTP packets received for a call +/// Returns None if call doesn't exist or stats unavailable +fn get_call_rtp_rx_count(call_id: CallId) -> Option { + unsafe { + let mut stat = MaybeUninit::::uninit(); + let status = pjsua_call_get_stream_stat(*call_id, 0, stat.as_mut_ptr()); + if status != pj_constants__PJ_SUCCESS as i32 { + return None; + } + let stat = stat.assume_init(); + // rtcp.rx.pkt contains total RTP packets received + Some(stat.rtcp.rx.pkt as u64) + } +} + +/// Set the event sender for timeout events +pub fn set_timeout_event_sender(tx: Sender) { + let sender = TIMEOUT_EVENT_TX.get_or_init(|| Mutex::new(None)); + *sender.lock() = Some(tx); +} + +/// Initialize RTP activity tracking for a call +pub fn init_call_rtp_tracking(call_id: CallId) { + let activity_map = + CALL_RTP_ACTIVITY.get_or_init(|| Mutex::new(std::collections::HashMap::new())); + // Start with count 0 - the periodic check will update with actual values + 
activity_map.lock().insert(call_id, (0, Instant::now())); + tracing::debug!("Initialized RTP tracking for call {}", call_id); +} + +/// Remove RTP activity tracking for a call +pub fn remove_call_rtp_tracking(call_id: CallId) { + if let Some(activity_map) = CALL_RTP_ACTIVITY.get() { + activity_map.lock().remove(&call_id); + tracing::debug!("Removed RTP tracking for call {}", call_id); + } +} + +/// Check all tracked calls for RTP inactivity and emit timeout events +/// +/// This must be called from the PJSUA thread context, not from the audio thread, +/// because it calls pjsua_call_get_stream_stat() which requires PJSUA thread synchronization. +pub fn check_rtp_inactivity() { + let Some(activity_map) = CALL_RTP_ACTIVITY.get() else { + return; + }; + + // Collect all tracked calls first, then release the lock before calling PJSUA + let tracked_calls: Vec<(CallId, u64, Instant)> = { + let map = activity_map.lock(); + map.iter() + .map(|(&call_id, &(rx_count, last_activity))| (call_id, rx_count, last_activity)) + .collect() + }; + + let mut timed_out_calls: Vec<(CallId, u64)> = Vec::new(); + let mut updates = Vec::new(); + + // Now iterate without holding the lock + for (call_id, last_rx_count, last_activity) in tracked_calls { + let current_rx = match get_call_rtp_rx_count(call_id) { + Some(count) => count, + None => { + // Call stats unavailable - likely dead call + // Don't wait for on_call_state_cb which may never fire + tracing::warn!( + "Call {} RTP stats unavailable, treating as timed out", + call_id + ); + timed_out_calls.push((call_id, 0)); + continue; + } + }; + + if current_rx > last_rx_count { + // Activity detected - queue update + updates.push((call_id, current_rx)); + } else { + // No new packets — use a shorter timeout if we never received any audio + let timeout = if current_rx == 0 { + no_audio_timeout_secs() + } else { + rtp_inactivity_timeout_secs() + }; + let elapsed = last_activity.elapsed().as_secs(); + if elapsed > timeout { + tracing::warn!( 
+ "Call {} timed out: no RTP activity for {}s (rx_count={}, timeout={}s)", + call_id, + elapsed, + current_rx, + timeout + ); + timed_out_calls.push((call_id, current_rx)); + } + } + } + + // Apply updates + if !updates.is_empty() { + let mut map = activity_map.lock(); + for (call_id, rx_count) in updates { + map.insert(call_id, (rx_count, Instant::now())); + } + } + + // Emit timeout events for dead calls + if !timed_out_calls.is_empty() { + // Remove timed out calls from tracking + { + let mut map = activity_map.lock(); + for &(call_id, _) in &timed_out_calls { + map.remove(&call_id); + } + } + + if let Some(sender_lock) = TIMEOUT_EVENT_TX.get() { + if let Some(ref tx) = *sender_lock.lock() { + for (call_id, rx_count) in timed_out_calls { + let _ = tx.send(super::SipEvent::CallTimeout { call_id, rx_count }); + } + } + } + } +} + +/// Validate all entries in COUNTED_CALL_IDS are still valid PJSUA calls +/// Removes stale entries and returns the number removed. +/// This should be called periodically from the SIP event loop. 
+pub fn validate_counted_calls() -> usize { + let Some(counted_ids) = COUNTED_CALL_IDS.get() else { + return 0; + }; + + let call_ids: Vec = counted_ids.lock().iter().copied().collect(); + let mut removed = 0; + + // Get RTP tracking info for cross-reference + let rtp_tracked_calls: std::collections::HashSet = CALL_RTP_ACTIVITY + .get() + .map(|m| m.lock().keys().copied().collect()) + .unwrap_or_default(); + + for call_id in call_ids { + unsafe { + let mut ci = MaybeUninit::::uninit(); + let status = pjsua_call_get_info(*call_id, ci.as_mut_ptr()); + + let should_remove = if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!( + "Stale call {} in COUNTED_CALL_IDS: pjsua_call_get_info failed (status={})", + call_id, + status + ); + true + } else { + let ci = ci.assume_init(); + if ci.state == pjsip_inv_state_PJSIP_INV_STATE_DISCONNECTED { + tracing::warn!( + "Stale call {} in COUNTED_CALL_IDS: already DISCONNECTED", + call_id + ); + true + } else if !rtp_tracked_calls.contains(&call_id) { + // Call is in COUNTED but NOT being tracked for RTP activity. + // However, REMOTE_HOLD intentionally removes RTP tracking + // (phones send no RTP during hold), so don't treat those as stale. + if ci.media_status == pjsua_call_media_status_PJSUA_CALL_MEDIA_REMOTE_HOLD { + false + } else { + tracing::warn!( + "Stale call {} in COUNTED_CALL_IDS: not in RTP tracking (state={}, media={})", + call_id, + ci.state, + ci.media_status + ); + true + } + } else { + false + } + }; + + if should_remove { + counted_ids.lock().remove(&call_id); + remove_call_rtp_tracking(call_id); + removed += 1; + } + } + } + + if removed > 0 { + let remaining = counted_ids.lock().len(); + tracing::warn!( + "Removed {} stale calls from COUNTED_CALL_IDS, {} remaining", + removed, + remaining + ); + if remaining == 0 { + stop_audio_thread(); + } + } + + removed +} + +/// Scan all pjsua call slots and force-hangup zombie calls. 
+/// +/// Unlike `validate_counted_calls()` which only checks COUNTED_CALL_IDS (authenticated calls), +/// this scans the raw pjsua call array for slots that are stuck — e.g. calls rejected early +/// (banned IPs, 401 challenges, spam) where the SIP transaction never completed and the slot +/// was never freed. +/// +/// A call is considered a zombie if: +/// - It's been in a non-CONFIRMED state (NULL, CALLING, INCOMING, EARLY, CONNECTING) for +/// more than 2 minutes (SIP transaction timeout is 32s, so 2min is very generous) +/// - It's in DISCONNECTED state but the slot hasn't been freed (shouldn't happen, but safety net) +pub fn cleanup_zombie_pjsua_calls() -> usize { + let max_calls: u32 = 128; // Must match cfg_ptr.max_calls in init.rs + let mut cleaned = 0; + + unsafe { + for i in 0..max_calls { + let call_id = i as pjsua_call_id; + let mut ci = MaybeUninit::::uninit(); + let status = pjsua_call_get_info(call_id, ci.as_mut_ptr()); + + if status != pj_constants__PJ_SUCCESS as i32 { + // Slot is free (no inv), this is fine + continue; + } + + let ci = ci.assume_init(); + + // Skip calls that are actively connected (CONFIRMED state) — those are real calls + if ci.state == pjsip_inv_state_PJSIP_INV_STATE_CONFIRMED { + continue; + } + + // For non-CONFIRMED calls, check how long they've been alive. + // total_duration is time since call->start_time for non-CONFIRMED/DISCONNECTED calls. 
+ let age = ci.total_duration.sec as u64; + + // 2 minutes is very generous — SIP transaction timeout (Timer B) is 32 seconds, + // and even slow auth flows should complete within 30 seconds + if age > 120 { + let state_name = super::ffi::init::InvState::from(ci.state); + + tracing::warn!( + "Zombie pjsua call slot {}: state={}, age={}s — force hanging up", + call_id, + state_name, + age + ); + + pjsua_call_hangup(call_id, 500, std::ptr::null(), std::ptr::null()); + cleaned += 1; + } + } + } + + if cleaned > 0 { + tracing::warn!("Force-cleaned {} zombie pjsua call slots", cleaned); + } + + cleaned +} diff --git a/sipcord-bridge/src/transport/sip/callbacks.rs b/sipcord-bridge/src/transport/sip/callbacks.rs new file mode 100644 index 0000000..720061f --- /dev/null +++ b/sipcord-bridge/src/transport/sip/callbacks.rs @@ -0,0 +1,1509 @@ +//! PJSUA C callbacks for incoming calls, call state, media state, and DTMF +//! +//! This module handles the C callbacks that PJSUA invokes for SIP events. 
+ +use super::audio_thread::{ + init_call_rtp_tracking, queue_pending_channel_completion, remove_call_rtp_tracking, + start_audio_thread, stop_audio_thread, +}; +use std::fmt; + +/// Media direction (Rust wrapper for pjmedia_dir) +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum MediaDir { + None, + Encoding, + Decoding, + EncodingDecoding, + Unknown(u32), +} + +impl From for MediaDir { + fn from(dir: u32) -> Self { + match dir { + x if x == pjmedia_dir_PJMEDIA_DIR_NONE => MediaDir::None, + x if x == pjmedia_dir_PJMEDIA_DIR_ENCODING => MediaDir::Encoding, + x if x == pjmedia_dir_PJMEDIA_DIR_DECODING => MediaDir::Decoding, + x if x == pjmedia_dir_PJMEDIA_DIR_ENCODING_DECODING => MediaDir::EncodingDecoding, + x => MediaDir::Unknown(x), + } + } +} + +impl fmt::Display for MediaDir { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + MediaDir::None => write!(f, "NONE"), + MediaDir::Encoding => write!(f, "ENCODING"), + MediaDir::Decoding => write!(f, "DECODING"), + MediaDir::EncodingDecoding => write!(f, "ENCODING_DECODING"), + MediaDir::Unknown(x) => write!(f, "UNKNOWN({})", x), + } + } +} +use super::channel_audio::{ + complete_pending_channel_registration, disconnect_call_for_hold, get_channel_slot, +}; +use super::ffi::types::*; +use super::ffi::utils::{extract_sip_username, pj_str_to_string}; +use dashmap::DashMap; +use parking_lot::Mutex; +use pjsua::*; +use std::ffi::CString; +use std::mem::MaybeUninit; +use std::net::IpAddr; +use std::os::raw::{c_char, c_int}; +use std::ptr; + +/// Global sender for outbound call events (set during initialization) +static OUTBOUND_EVENT_TX: std::sync::OnceLock> = + std::sync::OnceLock::new(); + +/// Pre-bound UDPTL sockets from synchronous T.38 re-INVITE handling. +/// Keyed by raw pjsua_call_id (i32). The async handler takes the socket +/// from this map to create the tokio UdptlSocket. 
+pub static T38_PRESOCKETS: std::sync::LazyLock> = + std::sync::LazyLock::new(DashMap::new); + +/// Set the outbound event sender (called from main.rs) +pub fn set_outbound_event_sender(tx: tokio::sync::mpsc::Sender) { + let _ = OUTBOUND_EVENT_TX.set(tx); +} + +/// Extract source IP address from pjsip_rx_data +pub unsafe fn extract_source_ip(rdata: *const pjsip_rx_data) -> Option { + if rdata.is_null() { + return None; + } + + // pjsip stores source info in pkt_info.src_name as a C string (null-terminated char array) + let src_name = &(*rdata).pkt_info.src_name; + + // Find the null terminator + let len = src_name + .iter() + .position(|&c| c == 0) + .unwrap_or(src_name.len()); + + // Convert to Rust string + let ip_str = std::str::from_utf8(std::slice::from_raw_parts( + src_name.as_ptr() as *const u8, + len, + )) + .ok()?; + + // pjsip's src_name contains only the IP address (port is in src_port), + // so parse directly as IpAddr. This handles both IPv4 and IPv6. + ip_str.parse().ok() +} + +/// Extract User-Agent header from pjsip_rx_data +pub unsafe fn extract_user_agent(rdata: *const pjsip_rx_data) -> Option { + if rdata.is_null() { + return None; + } + + let msg = (*rdata).msg_info.msg; + if msg.is_null() { + return None; + } + + // Find User-Agent header by name + let hdr_name = CString::new("User-Agent").ok()?; + let name = pj_str(hdr_name.as_ptr() as *mut c_char); + + let hdr = pjsip_msg_find_hdr_by_name(msg, &name, ptr::null_mut()); + if hdr.is_null() { + return None; + } + + // Cast to generic string header + let str_hdr = hdr as *const pjsip_generic_string_hdr; + if str_hdr.is_null() { + return None; + } + + // Extract the header value + let value = pj_str_to_string(&(*str_hdr).hvalue); + if value.is_empty() { + None + } else { + Some(value) + } +} + +/// Check if User-Agent indicates a SIPVicious scanner or similar tool +pub fn is_sipvicious_scanner(user_agent: &str) -> bool { + let ua_lower = user_agent.to_lowercase(); + 
ua_lower.contains("friendly-scanner") + || ua_lower.contains("sipvicious") + || ua_lower.contains("scanner") +} + +/// Extract SIP Digest auth parameters from Authorization header +pub unsafe fn extract_digest_auth_from_rdata( + rdata: *mut pjsip_rx_data, +) -> Option { + if rdata.is_null() { + return None; + } + + let rdata = &*rdata; + let msg = rdata.msg_info.msg; + if msg.is_null() { + return None; + } + + // Find Authorization header by type (pjsip parses it into a structured format) + let hdr = pjsip_msg_find_hdr(msg, pjsip_hdr_e_PJSIP_H_AUTHORIZATION, ptr::null_mut()); + + if hdr.is_null() { + tracing::debug!("No Authorization header found"); + return None; + } + + // Cast to authorization header type + let auth_hdr = hdr as *const pjsip_authorization_hdr; + if auth_hdr.is_null() { + return None; + } + + // Check the scheme is Digest + let scheme = pj_str_to_string(&(*auth_hdr).scheme); + tracing::debug!("Authorization scheme: {}", scheme); + + if scheme.to_lowercase() != "digest" { + tracing::debug!( + "Authorization header is not Digest auth (scheme: {})", + scheme + ); + return None; + } + + // Extract digest credentials from the parsed structure + let digest = &(*auth_hdr).credential.digest; + + let params = DigestAuthParams { + username: pj_str_to_string(&digest.username), + realm: pj_str_to_string(&digest.realm), + nonce: pj_str_to_string(&digest.nonce), + uri: pj_str_to_string(&digest.uri), + response: pj_str_to_string(&digest.response), + method: String::new(), // Will be set by caller + qop: { + let qop = pj_str_to_string(&digest.qop); + if qop.is_empty() { + None + } else { + Some(qop) + } + }, + nc: { + let nc = pj_str_to_string(&digest.nc); + if nc.is_empty() { + None + } else { + Some(nc) + } + }, + cnonce: { + let cnonce = pj_str_to_string(&digest.cnonce); + if cnonce.is_empty() { + None + } else { + Some(cnonce) + } + }, + }; + + tracing::debug!( + "Extracted Digest auth: user={}, realm={}, nonce={}, uri={}, response={}", + params.username, + 
params.realm, + params.nonce, + params.uri, + params.response + ); + + // Validate we have the required fields + if params.username.is_empty() + || params.realm.is_empty() + || params.nonce.is_empty() + || params.uri.is_empty() + || params.response.is_empty() + { + tracing::warn!("Digest auth missing required fields: {:?}", params); + return None; + } + + Some(params) +} + +/// Send 401 Unauthorized response with WWW-Authenticate header +pub unsafe fn send_401_challenge(call_id: CallId, www_auth: &str) { + // Create the WWW-Authenticate header + let hdr_name = CString::new("WWW-Authenticate").unwrap(); + let hdr_value = CString::new(www_auth).unwrap(); + + // Create msg_data with the WWW-Authenticate header + let mut msg_data = MaybeUninit::::uninit(); + pjsua_msg_data_init(msg_data.as_mut_ptr()); + let msg_data_ptr = msg_data.assume_init_mut(); + + // Create a pool for the header + let pool = pjsua_pool_create(c"auth".as_ptr(), 512, 512); + if pool.is_null() { + tracing::error!("Failed to create pool for 401 challenge"); + pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null()); + return; + } + + // Create the header + let name = pj_str(hdr_name.as_ptr() as *mut c_char); + let value = pj_str(hdr_value.as_ptr() as *mut c_char); + + let hdr = pjsip_generic_string_hdr_create(pool, &name, &value); + + if !hdr.is_null() { + // Add header to the list using pj_list_insert_before (insert at end of list) + pj_list_insert_before( + &mut msg_data_ptr.hdr_list as *mut _ as *mut pj_list_type, + hdr as *mut pj_list_type, + ); + } + + // Send 401 response - this will cause pjsua to send the response and then + // the client should retry with Authorization header + let reason = CString::new("Unauthorized").unwrap(); + let reason_pj = pj_str(reason.as_ptr() as *mut c_char); + + let status = pjsua_call_answer(*call_id, 401, &reason_pj, msg_data_ptr); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!( + "Failed to send 401 challenge for call {}: {}", + call_id, + 
            status
        );
        // Hangup if we can't send challenge
        pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null());
    }

    // DO NOT release the pool here - PJSUA may still need the header data
    // after pjsua_call_answer returns. The pool will be cleaned up when
    // pjsua is destroyed. This leaks ~512 bytes per 401 challenge but
    // prevents use-after-free crashes.
    // TODO: Track pools per-call and release them in on_call_state when call ends
}

/// Send 302 Moved Temporarily response with Contact header pointing to another bridge
/// Used for multi-region channel conflict resolution - redirects caller to the active region
pub unsafe fn send_302_redirect(call_id: CallId, target_domain: &str, extension: &str) {
    // CRITICAL: Check if call is still valid and in a state that can receive responses
    // Race condition: caller may hang up during async API auth, causing the call to be
    // DISCONNECTED before we get here. Calling pjsua_call_answer on a disconnected call
    // can corrupt PJSUA internal state and deadlock the SIP worker thread.
+ let mut ci = MaybeUninit::::uninit(); + if pjsua_call_get_info(*call_id, ci.as_mut_ptr()) != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!("Call {} no longer valid, skipping 302 redirect", call_id); + return; + } + let ci = ci.assume_init(); + + // Only send redirect if call is in INCOMING or EARLY state + // (i.e., we haven't sent a final response yet and call hasn't been disconnected) + if ci.state == pjsip_inv_state_PJSIP_INV_STATE_DISCONNECTED { + tracing::warn!( + "Call {} already disconnected, skipping 302 redirect to {}", + call_id, + target_domain + ); + return; + } + if ci.state != pjsip_inv_state_PJSIP_INV_STATE_INCOMING + && ci.state != pjsip_inv_state_PJSIP_INV_STATE_EARLY + { + tracing::warn!( + "Call {} in unexpected state {} for 302 redirect, skipping", + call_id, + ci.state + ); + return; + } + + // Create the Contact header: sip:extension@target_domain + let contact_uri = format!("sip:{}@{}", extension, target_domain); + let hdr_name = CString::new("Contact").unwrap(); + let hdr_value = CString::new(contact_uri).unwrap(); + + // Create msg_data with the Contact header + let mut msg_data = MaybeUninit::::uninit(); + pjsua_msg_data_init(msg_data.as_mut_ptr()); + let msg_data_ptr = msg_data.assume_init_mut(); + + // Create a pool for the header + let pool = pjsua_pool_create(c"redirect".as_ptr(), 512, 512); + if pool.is_null() { + tracing::error!("Failed to create pool for 302 redirect"); + pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null()); + return; + } + + // Create the header + let name = pj_str(hdr_name.as_ptr() as *mut c_char); + let value = pj_str(hdr_value.as_ptr() as *mut c_char); + + let hdr = pjsip_generic_string_hdr_create(pool, &name, &value); + + if !hdr.is_null() { + // Add header to the list using pj_list_insert_before (insert at end of list) + pj_list_insert_before( + &mut msg_data_ptr.hdr_list as *mut _ as *mut pj_list_type, + hdr as *mut pj_list_type, + ); + } + + // Send 302 response + let reason = 
CString::new("Moved Temporarily").unwrap(); + let reason_pj = pj_str(reason.as_ptr() as *mut c_char); + + let status = pjsua_call_answer(*call_id, 302, &reason_pj, msg_data_ptr); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!( + "Failed to send 302 redirect for call {}: {}", + call_id, + status + ); + // Hangup if we can't send redirect + pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null()); + } else { + tracing::info!( + "Sent 302 redirect for call {} to {}", + call_id, + target_domain + ); + } + + // DO NOT release the pool here - PJSUA may still need the header data + // after pjsua_call_answer returns. Same issue as send_401_challenge. +} + +// PJSUA C callbacks + +pub unsafe extern "C" fn on_incoming_call_cb( + _acc_id: pjsua_acc_id, + raw_call_id: pjsua_call_id, + rdata: *mut pjsip_rx_data, +) { + let call_id = CallId::new(raw_call_id); + let mut ci = MaybeUninit::::uninit(); + if pjsua_call_get_info(*call_id, ci.as_mut_ptr()) != pj_constants__PJ_SUCCESS as i32 { + return; + } + let ci = ci.assume_init(); + + // Extract From and To URIs + let from_uri = pj_str_to_string(&ci.remote_info); + let to_uri = pj_str_to_string(&ci.local_info); + + // Extract username from From URI (caller's SIP username) + let sip_username = extract_sip_username(&from_uri); + + // Extract extension from To URI (the number they dialed) + let extension = extract_sip_username(&to_uri); + + // Extract source IP for ban checking + let source_ip = extract_source_ip(rdata); + + // Check if IP is banned or timed out - silently drop + if let Some(ip) = source_ip { + if let Some(ban_mgr) = crate::services::ban::global() { + if ban_mgr.is_enabled() && !ban_mgr.is_whitelisted(&ip) { + let result = ban_mgr.check_banned(&ip); + if result.is_banned { + if result.should_log { + let ban_type = if result.is_permanent { + "permanently banned" + } else { + "timed out" + }; + tracing::debug!( + "Blocked {} IP: {} (call {}, offense_level={})", + ban_type, + ip, + call_id, + 
                            result.offense_level
                        );
                    }
                    // Reject banned caller with 403; no further processing.
                    pjsua_call_hangup(*call_id, 403, ptr::null(), ptr::null());
                    return;
                }
            }
        }
    }

    // Check User-Agent for SIPVicious scanners - instant permaban
    if let Some(user_agent) = extract_user_agent(rdata) {
        if is_sipvicious_scanner(&user_agent) {
            if let Some(ip) = source_ip {
                if let Some(ban_mgr) = crate::services::ban::global() {
                    if ban_mgr.is_enabled() && !ban_mgr.is_whitelisted(&ip) {
                        let result = ban_mgr.record_permanent_ban(ip, "sipvicious_scanner");
                        if result.should_log {
                            tracing::warn!(
                                "PERMABAN IP {} - SIPVicious scanner detected: User-Agent='{}' (call {})",
                                ip, user_agent, call_id
                            );
                        }
                    }
                }
            } else {
                // No source IP available: still reject, but nothing to ban.
                tracing::warn!(
                    "SIPVicious scanner detected but no IP available: User-Agent='{}' (call {})",
                    user_agent,
                    call_id
                );
            }
            pjsua_call_hangup(*call_id, 403, ptr::null(), ptr::null());
            return;
        }
    }

    // Extension-length ban checks use config values
    if let Some(ban_mgr) = crate::services::ban::global() {
        let ext_len = extension.len();
        let is_numeric = extension.chars().all(|c: char| c.is_ascii_digit());

        // Check for very long extension (permaban, likely fraud)
        if ext_len >= ban_mgr.permaban_extension_min_length() && is_numeric {
            if let Some(ip) = source_ip {
                if ban_mgr.is_enabled() && !ban_mgr.is_whitelisted(&ip) {
                    let result = ban_mgr.record_permanent_ban(ip, "very_long_extension");
                    if result.should_log {
                        tracing::warn!(
                            "PERMABAN IP {} for very long extension: {} ({} digits, call {})",
                            ip,
                            extension,
                            ext_len,
                            call_id
                        );
                    }
                }
            } else {
                tracing::warn!(
                    "Rejecting very long extension: {} ({} digits, call {})",
                    extension,
                    ext_len,
                    call_id
                );
            }
            // 404: the dialed extension does not exist here.
            pjsua_call_hangup(*call_id, 404, ptr::null(), ptr::null());
            return;
        }

        // Check for mid-length suspicious extension (progressive timeout)
        if ext_len >= ban_mgr.suspicious_extension_min_length()
            && ext_len <= ban_mgr.suspicious_extension_max_length()
            && is_numeric
        {
            if let Some(ip) = source_ip {
                if ban_mgr.is_enabled() && !ban_mgr.is_whitelisted(&ip) {
                    let result = ban_mgr.record_offense(ip, "suspicious_extension");
                    if result.should_log {
                        tracing::warn!(
                            "Timed out IP {} for suspicious extension: {} (call {}, offense_level={}, timeout={}s)",
                            ip, extension, call_id, result.offense_level, result.timeout_secs
                        );
                    }
                }
            } else {
                tracing::warn!(
                    "Rejecting suspicious extension: {} ({} digits, call {})",
                    extension,
                    ext_len,
                    call_id
                );
            }
            pjsua_call_hangup(*call_id, 404, ptr::null(), ptr::null());
            return;
        }
    }

    // Try to extract Digest auth params from Authorization header
    let digest_params = extract_digest_auth_from_rdata(rdata);

    tracing::info!(
        "Incoming call {} from {} to extension {} (auth: {})",
        call_id,
        sip_username,
        extension,
        if digest_params.is_some() {
            "present"
        } else {
            "none"
        }
    );

    // Check if we have Authorization header with Digest auth
    if let Some(mut params) = digest_params {
        // We have Digest auth, fill in remaining fields
        params.method = "INVITE".to_string();

        tracing::info!(
            "Digest auth: user={}, realm={}, nonce={}, response={}",
            params.username,
            params.realm,
            params.nonce,
            params.response
        );

        // NOTE: We no longer answer with 200 OK here.
        // The bridge coordinator will:
        // 1. Send 183 Session Progress (early media) to start playing connecting sound
        // 2. Connect to Discord
        // 3. Send 200 OK once Discord is ready
        //
        // This allows the caller to hear "connecting..." while waiting for Discord.
+ + // Trigger callbacks with Digest auth params + // The bridge coordinator handles the call flow from here + if let Some(callbacks) = CALLBACKS.get() { + if let Some(ref handlers) = *callbacks.lock() { + (handlers.on_incoming_call)(call_id, sip_username, extension.clone(), source_ip); + (handlers.on_call_authenticated)(call_id, params, extension, source_ip); + } + } + } else { + // No Authorization header - send 401 challenge + tracing::info!("No auth header, sending 401 challenge for call {}", call_id); + + // Generate a cryptographically random nonce + let nonce = { + let bytes: [u8; 16] = rand::random(); + bytes + .iter() + .map(|b| format!("{:02x}", b)) + .collect::() + }; + + // Create WWW-Authenticate header value + // Format: Digest realm="sipcord", nonce="xxx", algorithm=MD5, qop="auth" + let www_auth = format!( + "Digest realm=\"{}\", nonce=\"{}\", algorithm=MD5, qop=\"auth\"", + SIP_REALM, nonce + ); + + // Send 401 Unauthorized with WWW-Authenticate header + send_401_challenge(call_id, &www_auth); + } +} + +pub unsafe extern "C" fn on_dtmf_digit_cb(raw_call_id: pjsua_call_id, digit: c_int) { + let call_id = CallId::new(raw_call_id); + let digit_char = char::from_u32(digit as u32).unwrap_or('?'); + + // Forward DTMF to callback handler (buffering done in mod.rs) + if let Some(callbacks) = CALLBACKS.get() { + if let Some(ref handlers) = *callbacks.lock() { + (handlers.on_dtmf)(call_id, digit_char); + } + } +} + +pub unsafe extern "C" fn on_call_state_cb(raw_call_id: pjsua_call_id, _e: *mut pjsip_event) { + let call_id = CallId::new(raw_call_id); + let mut ci = MaybeUninit::::uninit(); + if pjsua_call_get_info(*call_id, ci.as_mut_ptr()) != pj_constants__PJ_SUCCESS as i32 { + return; + } + let ci = ci.assume_init(); + + // Check for outbound call state changes + if let Some(tracking_id) = super::get_outbound_tracking_id(call_id) { + // This is an outbound call (Discord -> SIP) + if ci.state == pjsip_inv_state_PJSIP_INV_STATE_EARLY { + // Ringing (180 
Ringing or 183 Session Progress) + // Ringing is tracked via ws_client::report_call_status from the bridge coordinator + tracing::info!( + "Outbound call {} ringing (tracking_id={})", + call_id, + tracking_id + ); + } else if ci.state == pjsip_inv_state_PJSIP_INV_STATE_CONFIRMED { + tracing::info!( + "Outbound call {} answered (tracking_id={})", + call_id, + tracking_id + ); + // Emit answered event - the SIP event handler in bridge/mod.rs picks this up + if let Some(event_tx) = OUTBOUND_EVENT_TX.get() { + let _ = event_tx.try_send(super::SipEvent::OutboundCallAnswered { + tracking_id: tracking_id.clone(), + call_id, + }); + } + } else if ci.state == pjsip_inv_state_PJSIP_INV_STATE_DISCONNECTED { + let tracking_id = super::remove_outbound_tracking(call_id); + if let Some(tid) = tracking_id { + let last_status = ci.last_status; + let last_status_text = pj_str_to_string(&ci.last_status_text); + tracing::info!( + "Outbound call {} disconnected (tracking_id={}, status={} {})", + call_id, + tid, + last_status, + last_status_text + ); + if let Some(event_tx) = OUTBOUND_EVENT_TX.get() { + let _ = event_tx.try_send(super::SipEvent::OutboundCallFailed { + tracking_id: tid, + call_id: Some(call_id), + reason: format!("{} {}", last_status, last_status_text), + }); + } + } + // Fall through to normal disconnect handling below — + // outbound calls ARE tracked in sip_calls/bridges and need + // proper cleanup (on_call_ended → CallEnded event). 
+ } + // For non-disconnect states, return early - outbound calls don't use the normal flow + if ci.state != pjsip_inv_state_PJSIP_INV_STATE_DISCONNECTED { + return; + } + } + + // Check if call ended + if ci.state == pjsip_inv_state_PJSIP_INV_STATE_DISCONNECTED { + // Clean up audio buffer + if let Some(buffers) = AUDIO_OUT_BUFFERS.get() { + buffers.remove(&call_id); + } + + // Clean up RTP activity tracking + remove_call_rtp_tracking(call_id); + + let counted_ids = + COUNTED_CALL_IDS.get_or_init(|| Mutex::new(std::collections::HashSet::new())); + let (was_counted, new_count) = { + let mut ids = counted_ids.lock(); + let was_counted = ids.remove(&call_id); + (was_counted, ids.len()) + }; + + // An authenticated call needs cleanup if it was in COUNTED_CALL_IDS (normal + // case, or REMOTE_HOLD which now stays counted) OR if it has a CALL_CHANNELS + // entry (which persists through LOCAL_HOLD). Without this, calls that + // disconnect during LOCAL_HOLD would skip cleanup, leaving the bridge and + // Discord connection alive forever. 
+ let was_authenticated = was_counted + || CALL_CHANNELS + .get() + .map(|c| c.contains_key(&call_id)) + .unwrap_or(false); + + if was_authenticated { + tracing::info!("Call {} ended (active_media_calls={})", call_id, new_count); + + if let Some(callbacks) = CALLBACKS.get() { + if let Some(ref handlers) = *callbacks.lock() { + (handlers.on_call_ended)(call_id); + } + } + + if new_count == 0 { + tracing::debug!("Last call ended, stopping audio thread"); + stop_audio_thread(); + } + } + // Spam/unauthenticated calls - no logging, no callbacks + } +} + +pub unsafe extern "C" fn on_call_media_state_cb(raw_call_id: pjsua_call_id) { + let call_id = CallId::new(raw_call_id); + let mut ci = MaybeUninit::::uninit(); + if pjsua_call_get_info(*call_id, ci.as_mut_ptr()) != pj_constants__PJ_SUCCESS as i32 { + return; + } + let ci = ci.assume_init(); + + // Log media state changes (debug level for general changes, specific states logged at info) + let media_status_str = if ci.media_status == pjsua_call_media_status_PJSUA_CALL_MEDIA_NONE { + "NONE" + } else if ci.media_status == pjsua_call_media_status_PJSUA_CALL_MEDIA_ACTIVE { + "ACTIVE" + } else if ci.media_status == pjsua_call_media_status_PJSUA_CALL_MEDIA_LOCAL_HOLD { + "LOCAL_HOLD" + } else if ci.media_status == pjsua_call_media_status_PJSUA_CALL_MEDIA_REMOTE_HOLD { + "REMOTE_HOLD" + } else if ci.media_status == pjsua_call_media_status_PJSUA_CALL_MEDIA_ERROR { + "ERROR" + } else { + "UNKNOWN" + }; + + tracing::info!( + "Call {} media state changed to: {} (status={})", + call_id, + media_status_str, + ci.media_status + ); + + // Check if media is active + if ci.media_status == pjsua_call_media_status_PJSUA_CALL_MEDIA_ACTIVE { + // Get the conference port for this call + let conf_port = ConfPort::new(pjsua_call_get_conf_port(*call_id)); + + // Log media direction for diagnostics + let media_dir = if ci.media_cnt > 0 { ci.media[0].dir } else { 0 }; + let dir = MediaDir::from(media_dir); + + // Check if call is already 
registered with a channel + let pending_channel = CALL_CHANNELS + .get() + .and_then(|c| c.get(&call_id).map(|r| *r)); + + // Get codec info including ptime + let mut stream_info = MaybeUninit::::uninit(); + let codec_info = if pjsua_call_get_stream_info(*call_id, 0, stream_info.as_mut_ptr()) + == pj_constants__PJ_SUCCESS as i32 + { + let si = stream_info.assume_init(); + // si.info is a union, for audio it's pjmedia_stream_info + let audio_info = si.info.aud; + let codec_name = std::ffi::CStr::from_ptr( + audio_info.fmt.encoding_name.ptr as *const std::ffi::c_char, + ) + .to_string_lossy(); + let clock_rate = audio_info.fmt.clock_rate; + let channel_cnt = audio_info.fmt.channel_cnt; + // Get ptime from the param field (need to dereference pointer) + let param = &*audio_info.param; + let ptime = param.setting.frm_per_pkt as u32 * param.info.frm_ptime as u32; + format!( + "{} @ {}Hz {}ch, ptime={}ms, frm_per_pkt={}, frm_ptime={}", + codec_name, + clock_rate, + channel_cnt, + ptime, + param.setting.frm_per_pkt, + param.info.frm_ptime + ) + } else { + "unknown".to_string() + }; + + tracing::info!( + "Call {} MEDIA ACTIVE: conf_port={}, media_dir={}, media_cnt={}, call_state={}, pending_channel={:?}, codec={}", + call_id, conf_port, dir, ci.media_cnt, ci.state, pending_channel, codec_info + ); + + if conf_port.is_valid() { + tracing::info!( + "Call {} media active, storing conference port {} (NOT connecting to master yet)", + call_id, + conf_port + ); + + // Store the conf_port for this call - connections will be made when + // the channel is assigned via register_call_channel() + // This enables per-channel audio isolation: calls in different channels + // won't hear each other. + // + // If this call is already registered with a channel and the + // conf_port changed (due to re-INVITE/media renegotiation), we must + // reconnect it to maintain audio flow. 

            // Record the new conf_port, remembering any previous one so a
            // renegotiated stream can be rewired below.
            let old_conf_port = {
                let ports = CALL_CONF_PORTS.get_or_init(DashMap::new);
                let old = ports.get(&call_id).map(|r| *r);
                ports.insert(call_id, conf_port);
                old
            };

            // If conf_port changed and call is registered with a channel, reconnect it
            if let Some(old_port) = old_conf_port {
                if old_port != conf_port {
                    tracing::info!(
                        "Call {} conf_port changed from {} to {} (media renegotiation), reconnecting",
                        call_id, old_port, conf_port
                    );

                    // Get the channel this call is registered with
                    let channel_id = {
                        if let Some(channels) = CALL_CHANNELS.get() {
                            channels.get(&call_id).map(|r| *r)
                        } else {
                            None
                        }
                    };

                    if let Some(channel_id) = channel_id {
                        // Reconnect to channel port (bidirectional)
                        if let Some(channel_slot) = get_channel_slot(channel_id) {
                            // Disconnect old (both directions)
                            pjsua_conf_disconnect(*channel_slot, *old_port);
                            pjsua_conf_disconnect(*old_port, *channel_slot);
                            // Connect new (both directions)
                            pjsua_conf_connect(*channel_slot, *conf_port);
                            pjsua_conf_connect(*conf_port, *channel_slot);
                            tracing::info!(
                                "Reconnected channel {} port (slot {}) <-> call {} (new port {})",
                                channel_id,
                                channel_slot,
                                call_id,
                                conf_port
                            );
                        }

                        // Reconnect to other calls in the same channel
                        // (snapshot the peer list first so no lock is held
                        // while calling into pjsua below).
                        let other_calls: Vec<(CallId, ConfPort)> = {
                            let channel_calls = CHANNEL_CALLS.get();
                            let call_ports = CALL_CONF_PORTS.get();
                            if let (Some(cc), Some(cp)) = (channel_calls, call_ports) {
                                let cc_guard = cc.read();
                                if let Some(calls) = cc_guard.get(&channel_id) {
                                    calls
                                        .iter()
                                        .filter(|&&other_id| other_id != call_id)
                                        .filter_map(|&other_id| {
                                            cp.get(&other_id).map(|r| (other_id, *r))
                                        })
                                        .collect()
                                } else {
                                    vec![]
                                }
                            } else {
                                vec![]
                            }
                        };

                        for (other_id, other_port) in other_calls {
                            // Disconnect old bidirectional connections
                            pjsua_conf_disconnect(*old_port, *other_port);
                            pjsua_conf_disconnect(*other_port, *old_port);

                            // Connect new bidirectional connections
                            pjsua_conf_connect(*conf_port, *other_port);
                            pjsua_conf_connect(*other_port, *conf_port);

                            tracing::info!(
                                "Reconnected call {} (new port {}) <-> call {} (port {}) in channel {}",
                                call_id, conf_port, other_id, other_port, channel_id
                            );
                        }
                    }
                }
            }

            tracing::info!(
                "Call {} conf_port {} stored, awaiting channel registration",
                call_id,
                conf_port
            );

            // Initialize RTP activity tracking for this call
            init_call_rtp_tracking(call_id);

            // Track this call_id and start audio thread if this is the first active call
            // IMPORTANT: Start audio thread BEFORE completing pending channel registration!
            // The PJMEDIA conference bridge needs to be actively clocked when connections
            // are made, otherwise the connections may not work properly.
            let counted_ids =
                COUNTED_CALL_IDS.get_or_init(|| Mutex::new(std::collections::HashSet::new()));
            let (is_new, count) = {
                let mut ids = counted_ids.lock();
                let is_new = ids.insert(call_id);
                (is_new, ids.len())
            };

            // Only count this call if we haven't already (prevents double-counting on re-INVITE)
            if is_new {
                tracing::info!(
                    "Call {} media ACTIVE, active_media_calls={}",
                    call_id,
                    count
                );

                if count == 1 {
                    tracing::info!("First active call, starting audio thread");
                    start_audio_thread();
                }
            } else {
                tracing::warn!(
                    "Call {} media ACTIVE but already counted! Skipping.",
                    call_id
                );
            }

            // If returning from hold (is_new=true but call already in CHANNEL_CALLS),
            // remove from CHANNEL_CALLS so complete_pending_channel_registration does
            // a full fresh bidirectional reconnection. PJSUA may have changed the
            // underlying media stream during the hold/unhold re-INVITE cycle.
            // For first-time active calls, the call won't be in CHANNEL_CALLS yet,
            // so this is a no-op.

            if is_new {
                if let Some(channel_id) = CALL_CHANNELS
                    .get()
                    .and_then(|c| c.get(&call_id).map(|r| *r))
                {
                    let channel_calls = CHANNEL_CALLS
                        .get_or_init(|| parking_lot::RwLock::new(std::collections::HashMap::new()));
                    let mut map = channel_calls.write();
                    if let Some(calls) = map.get_mut(&channel_id) {
                        if calls.remove(&call_id) {
                            // Drop empty channel entries so the map doesn't grow forever.
                            if calls.is_empty() {
                                map.remove(&channel_id);
                            }
                            tracing::info!(
                                "Call {} returning from hold - removed from CHANNEL_CALLS for fresh reconnection",
                                call_id
                            );
                        }
                    }
                }
            }

            // If the call was already registered with a channel (Discord connected before
            // media was ready), complete the registration now. This must happen AFTER
            // the audio thread has actually started processing (not just spawned).
            // queue_pending_channel_completion returns true if queued (thread not ready),
            // false if we should complete immediately (thread is ready).
            if !queue_pending_channel_completion(call_id, conf_port) {
                tracing::info!(
                    "Audio thread already ready, completing channel registration immediately for call {}",
                    call_id
                );
                complete_pending_channel_registration(call_id, conf_port);
            }
        } else {
            tracing::warn!("Call {} has invalid conference port", call_id);
        }
    } else if ci.media_status == pjsua_call_media_status_PJSUA_CALL_MEDIA_NONE {
        // Media went to NONE - this could happen during call setup/teardown
        let active_calls = COUNTED_CALL_IDS
            .get()
            .map(|ids| ids.lock().len())
            .unwrap_or(0);
        tracing::warn!(
            "Call {} media went to NONE, active_media_calls={}",
            call_id,
            active_calls
        );
    } else if ci.media_status == pjsua_call_media_status_PJSUA_CALL_MEDIA_ERROR {
        // Media error - this is bad!
        let active_calls = COUNTED_CALL_IDS
            .get()
            .map(|ids| ids.lock().len())
            .unwrap_or(0);
        tracing::error!(
            "Call {} media ERROR! active_media_calls={}",
            call_id,
            active_calls
        );
    } else if ci.media_status == pjsua_call_media_status_PJSUA_CALL_MEDIA_LOCAL_HOLD {
        tracing::info!("Call {} put on LOCAL_HOLD - disconnecting audio", call_id);

        // Disconnect the call from its channel without full teardown.
        // CALL_CHANNELS and CALL_CONF_PORTS are preserved so the existing
        // ACTIVE code path can reconnect when the call comes off hold.
        disconnect_call_for_hold(call_id);

        // Remove from COUNTED_CALL_IDS and stop audio thread if no other active calls
        let counted_ids =
            COUNTED_CALL_IDS.get_or_init(|| Mutex::new(std::collections::HashSet::new()));
        let (was_counted, new_count) = {
            let mut ids = counted_ids.lock();
            let was_counted = ids.remove(&call_id);
            (was_counted, ids.len())
        };

        if was_counted {
            tracing::info!(
                "Call {} removed from active calls on hold (active_media_calls={})",
                call_id,
                new_count
            );
            if new_count == 0 {
                tracing::debug!("No active calls remaining after hold, stopping audio thread");
                stop_audio_thread();
            }
        }
    } else if ci.media_status == pjsua_call_media_status_PJSUA_CALL_MEDIA_REMOTE_HOLD {
        // Remote end put us on hold (e.g., Cisco hold button).
        // Keep conference connections AND audio thread running — many phones resume
        // RTP without sending a re-INVITE, so we never get an ACTIVE callback.
        // By keeping everything connected, audio naturally resumes when RTP flows again.
        //
        // Do NOT send re-INVITE or UPDATE — some phones (Cisco 7960G) reject UPDATE
        // with 405 and disconnect the call, and re-INVITE fails with 70013 because
        // the hold transaction is still active.
        //
        // Pause RTP inactivity tracking — phones send no RTP during hold.

        remove_call_rtp_tracking(call_id);

        tracing::info!(
            "Call {} put on REMOTE_HOLD - keeping audio connected (RTP tracking paused)",
            call_id
        );
    }
}

/// T.38 offer parameters extracted from SDP
#[derive(Debug)]
pub struct T38OfferParams {
    pub remote_ip: String,
    pub remote_port: u16,
    pub t38_version: u8,
    pub max_bit_rate: u32,
    pub rate_management: String,
    pub udp_ec: String,
}

/// Check if an SDP offer contains a T.38 media line (`m=image ... udptl t38`).
// NOTE(review): the return type reads `Option` here — generic parameters appear
// stripped in this copy; presumably `Option<T38OfferParams>` (the function
// returns `Some(T38OfferParams { ... })`). Confirm against the original file.
unsafe fn sdp_has_t38(offer: *const pjmedia_sdp_session) -> Option {
    if offer.is_null() {
        return None;
    }

    for i in 0..(*offer).media_count {
        let m = (*offer).media[i as usize];
        if m.is_null() {
            continue;
        }

        // Check media type == "image"
        let media_type = pj_str_to_string(&(*m).desc.media);
        if media_type != "image" {
            continue;
        }

        // Check transport contains "udptl"
        let transport = pj_str_to_string(&(*m).desc.transport);
        if !transport.to_lowercase().contains("udptl") {
            continue;
        }

        // This is a T.38 media line
        let remote_port = (*m).desc.port;
        if remote_port == 0 {
            continue; // Disabled media line
        }

        // Extract IP from connection line (media-level c= or session-level c=)
        let conn = if !(*m).conn.is_null() {
            (*m).conn
        } else if !(*offer).conn.is_null() {
            (*offer).conn
        } else {
            tracing::warn!("T.38 SDP offer has no connection line");
            continue;
        };
        let remote_ip = pj_str_to_string(&(*conn).addr);

        // Extract T.38 attributes with defaults
        // (defaults per T.38 practice: version 0, 14400 bps, transferredTCF,
        // UDP redundancy error correction)
        let mut t38_version: u8 = 0;
        let mut max_bit_rate: u32 = 14400;
        let mut rate_management = "transferredTCF".to_string();
        let mut udp_ec = "t38UDPRedundancy".to_string();

        for j in 0..(*m).attr_count {
            let attr = (*m).attr[j as usize];
            if attr.is_null() {
                continue;
            }
            let name = pj_str_to_string(&(*attr).name);
            let value = pj_str_to_string(&(*attr).value);

            match name.as_str() {
                "T38FaxVersion" => {
                    t38_version = value.parse().unwrap_or(0);
                }
                "T38MaxBitRate" => {
                    max_bit_rate = value.parse().unwrap_or(14400);
                }
                "T38FaxRateManagement" => {
                    rate_management = value;
                }
                "T38FaxUdpEC" => {
                    udp_ec = value;
                }
                _ => {}
            }
        }

        // First valid T.38 media line wins.
        return Some(T38OfferParams {
            remote_ip,
            remote_port,
            t38_version,
            max_bit_rate,
            rate_management,
            udp_ec,
        });
    }

    None
}

/// Callback for incoming re-INVITE with SDP offer.
///
/// When a phone sends a hold re-INVITE (with `a=sendonly`), pjsua would normally
/// respond with `a=recvonly` and enter REMOTE_HOLD, stopping RTP. Since we're a
/// bridge (not a PBX), we don't want hold semantics — we want audio to keep flowing.
///
/// Also detects T.38 re-INVITEs (`m=image udptl t38`) and emits a T38Offered event
/// to the bridge coordinator, which handles the mode switch.
///
/// Two-pronged fix for hold:
/// 1. Set PJSUA_CALL_SET_MEDIA_DIR flag to force def_dir to ENCODING_DECODING
/// 2. Strip hold direction attributes (sendonly/recvonly/inactive) from the SDP
///    negotiator's cloned remote offer. Without this, the negotiator rewrites our
///    answer to recvonly per RFC 3264, regardless of the flag.
pub unsafe extern "C" fn on_call_rx_reinvite_cb(
    raw_call_id: pjsua_call_id,
    offer: *const pjmedia_sdp_session,
    rdata: *mut pjsip_rx_data,
    _reserved: *mut std::os::raw::c_void,
    _async: *mut pj_bool_t,
    code: *mut pjsip_status_code,
    opt: *mut pjsua_call_setting,
) {
    let call_id = CallId::new(raw_call_id);

    // Check for T.38 offer BEFORE applying hold-stripping logic
    if let Some(t38_params) = sdp_has_t38(offer) {
        tracing::info!(
            "Call {} received T.38 re-INVITE: remote={}:{}, version={}, rate={}, ec={}",
            call_id,
            t38_params.remote_ip,
            t38_params.remote_port,
            t38_params.t38_version,
            t38_params.max_bit_rate,
            t38_params.udp_ec
        );

        // Handle T.38 re-INVITE by sending 200 OK at the dialog level,
        // completely bypassing pjsip's inv session and pjsua's media handling.
        //
        // Why dialog-level? Three layers of pjsip fight us:
        // 1. pjsua_media_channel_init() crashes on T.38 (not audio)
        // 2. pjsip_inv_answer() asserts inv->last_answer (not set yet)
        // 3. pjsip_inv_send_msg() triggers on_media_update → crash
        //
        // By using pjsip_dlg_send_response() directly, we send the 200 OK
        // without touching the inv session's media machinery. We then cancel
        // the SDP offer and set code=488 so pjsua skips all media processing.

        // 1. Bind a std::net::UdpSocket within the configured RTP port range
        // so firewall rules (which typically allow only the RTP range) also pass fax traffic.
        let env_config = crate::config::EnvConfig::global();
        let rtp_start = env_config.rtp_port_start;
        let rtp_end = env_config.rtp_port_end;
        let std_socket = {
            // Linear scan for the first free port in [rtp_start, rtp_end].
            let mut bound = None;
            for port in rtp_start..=rtp_end {
                match std::net::UdpSocket::bind(("0.0.0.0", port)) {
                    Ok(s) => {
                        bound = Some(s);
                        break;
                    }
                    Err(_) => continue,
                }
            }
            match bound {
                Some(s) => s,
                None => {
                    tracing::error!(
                        "Call {}: failed to bind UDPTL socket in RTP range {}-{}",
                        call_id,
                        rtp_start,
                        rtp_end
                    );
                    pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null());
                    return;
                }
            }
        };
        let local_port = match std_socket.local_addr() {
            Ok(addr) => addr.port(),
            Err(e) => {
                tracing::error!("Call {}: failed to get UDPTL local addr: {}", call_id, e);
                pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null());
                return;
            }
        };

        // 2. Navigate rdata → tsx → dlg → inv
        // Any missing link means we cannot answer at the dialog level; fail the call.
        if rdata.is_null() {
            tracing::error!("Call {}: rdata null for T.38 re-INVITE", call_id);
            pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null());
            return;
        }
        let tsx = pjsip_rdata_get_tsx(rdata);
        if tsx.is_null() {
            tracing::error!("Call {}: no transaction for T.38 re-INVITE", call_id);
            pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null());
            return;
        }
        let dlg = pjsip_tsx_get_dlg(tsx);
        if dlg.is_null() {
            tracing::error!("Call {}: no dialog for T.38 re-INVITE", call_id);
            pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null());
            return;
        }
        let inv = pjsip_dlg_get_inv_session(dlg);
        if inv.is_null() {
            tracing::error!("Call {}: no inv session for T.38 re-INVITE", call_id);
            pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null());
            return;
        }

        // 3. Build and parse T.38 SDP
        // Use RTP_PUBLIC_IP for the SDP c= line, matching what pjsua uses for audio SDP.
        // Many SIP devices (e.g. Cisco ATAs) cannot resolve hostnames in SDP and will
        // silently fall back to the audio endpoint, sending UDPTL to the wrong port.
+ let config = crate::config::EnvConfig::global(); + let local_ip = config + .rtp_public_ip + .clone() + .unwrap_or_else(|| config.sip_public_host_or_default().to_string()); + tracing::debug!("Using {} for T.38 SDP c= line", local_ip); + let sess_id = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + let sdp_str = format!( + "v=0\r\n\ + o=- {} {} IN IP4 {}\r\n\ + s=T.38 Fax\r\n\ + c=IN IP4 {}\r\n\ + t=0 0\r\n\ + m=image {} udptl t38\r\n\ + a=T38FaxVersion:0\r\n\ + a=T38MaxBitRate:14400\r\n\ + a=T38FaxRateManagement:transferredTCF\r\n\ + a=T38FaxMaxBuffer:260\r\n\ + a=T38FaxMaxDatagram:316\r\n\ + a=T38FaxUdpEC:t38UDPRedundancy\r\n", + sess_id, sess_id, local_ip, local_ip, local_port + ); + + let pool = pjsua_pool_create(c"t38sdp".as_ptr(), 1024, 256); + if pool.is_null() { + tracing::error!("Call {}: failed to create pool for T.38 SDP", call_id); + pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null()); + return; + } + + let sdp_bytes = sdp_str.as_bytes(); + let mut sdp: *mut pjmedia_sdp_session = ptr::null_mut(); + let status = pjmedia_sdp_parse( + pool, + sdp_bytes.as_ptr() as *mut std::os::raw::c_char, + sdp_bytes.len(), + &mut sdp, + ); + if status != pj_constants__PJ_SUCCESS as i32 || sdp.is_null() { + tracing::error!( + "Call {}: failed to parse T.38 SDP (status={})", + call_id, + status + ); + pj_pool_release(pool); + pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null()); + return; + } + + // 4. 
Create 200 OK at dialog level (bypasses inv session media handling) + let mut tdata: *mut pjsip_tx_data = ptr::null_mut(); + let status = pjsip_dlg_create_response(dlg, rdata, 200, ptr::null(), &mut tdata); + if status != pj_constants__PJ_SUCCESS as i32 || tdata.is_null() { + tracing::error!( + "Call {}: pjsip_dlg_create_response failed (status={})", + call_id, + status + ); + pj_pool_release(pool); + pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null()); + return; + } + + // Attach SDP body to the 200 OK + let mut body: *mut pjsip_msg_body = ptr::null_mut(); + let status = pjsip_create_sdp_body((*tdata).pool, sdp, &mut body); + if status != pj_constants__PJ_SUCCESS as i32 || body.is_null() { + tracing::error!( + "Call {}: pjsip_create_sdp_body failed (status={})", + call_id, + status + ); + pjsip_tx_data_dec_ref(tdata); + pj_pool_release(pool); + pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null()); + return; + } + (*(*tdata).msg).body = body; + + // 5. Send 200 OK directly through the dialog transaction + let status = pjsip_dlg_send_response(dlg, tsx, tdata); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::error!( + "Call {}: pjsip_dlg_send_response failed (status={})", + call_id, + status + ); + pj_pool_release(pool); + pjsua_call_hangup(*call_id, 500, ptr::null(), ptr::null()); + return; + } + + // 6. Cancel the SDP offer on the negotiator (REMOTE_OFFER → DONE). + // This prevents pjsip from trying to negotiate or reject later. + if !(*inv).neg.is_null() { + pjmedia_sdp_neg_cancel_offer((*inv).neg); + } + + // 7. Tell pjsua to skip ALL media processing for this re-INVITE. + // Setting code != 200 makes pjsua_call_on_rx_offer goto on_return + // immediately, avoiding apply_call_setting/pjsua_media_channel_init. + // + // After this, pjsip's inv session will try to send a 488 rejection + // via pjsip_dlg_send_response(dlg, tsx, tdata). 
But the transaction + // was already terminated by our 200 OK above (INVITE UAS tsx → + // TERMINATED after 2xx per sip_transaction.c:3172). The terminated + // tsx's state handler returns PJ_EIGNORED for TX_MSG events, so the + // 488 is never sent on the wire. + if !code.is_null() { + *code = 488; + } + + tracing::info!( + "Sent T.38 200 OK for call {} (local={}:{}) via dialog", + call_id, + local_ip, + local_port + ); + + // 8. Store pre-bound socket for async UDPTL handler + T38_PRESOCKETS.insert(raw_call_id, std_socket); + + // 9. Emit T38Offered event (with local_port so handler knows which port) + if let Some(event_tx) = OUTBOUND_EVENT_TX.get() { + let _ = event_tx.try_send(super::SipEvent::T38Offered { + call_id, + remote_ip: t38_params.remote_ip, + remote_port: t38_params.remote_port, + t38_version: t38_params.t38_version, + max_bit_rate: t38_params.max_bit_rate, + rate_management: t38_params.rate_management, + udp_ec: t38_params.udp_ec, + local_port, + }); + } + + return; + } + + // Normal re-INVITE (audio): apply hold-stripping logic + // Set MEDIA_DIR flag to force sendrecv as default direction + if !opt.is_null() { + (*opt).flag |= pjsua_call_flag_PJSUA_CALL_SET_MEDIA_DIR; + (*opt).media_dir[0] = pjmedia_dir_PJMEDIA_DIR_ENCODING_DECODING; + } + + // Strip hold direction from the SDP negotiator's cloned remote offer. + // The negotiator clones the offer before this callback, so we must modify + // the clone (via rdata → tsx → dlg → inv → neg → neg_remote_sdp). + // Without this, update_media_direction() in sdp_neg.c rewrites our answer + // from sendrecv to recvonly when the remote offer has sendonly. + let stripped = strip_hold_from_neg_remote(call_id, rdata); + + tracing::info!( + "Call {} received re-INVITE, forcing sendrecv (sdp_stripped={})", + call_id, + stripped + ); +} + +/// Strip hold direction attributes from the SDP negotiator's remote offer clone. +/// Returns true if any hold attributes were found and removed. 
unsafe fn strip_hold_from_neg_remote(call_id: CallId, rdata: *mut pjsip_rx_data) -> bool {
    // Without rdata we cannot reach the negotiator; leave the offer untouched.
    if rdata.is_null() {
        tracing::warn!("Call {}: rdata null, cannot strip hold from offer", call_id);
        return false;
    }

    // rdata → transaction → dialog → inv session → SDP negotiator
    let tsx = pjsip_rdata_get_tsx(rdata);
    if tsx.is_null() {
        tracing::warn!("Call {}: no transaction for re-INVITE", call_id);
        return false;
    }

    let dlg = pjsip_tsx_get_dlg(tsx);
    if dlg.is_null() {
        tracing::warn!("Call {}: no dialog for re-INVITE", call_id);
        return false;
    }

    let inv = pjsip_dlg_get_inv_session(dlg);
    if inv.is_null() {
        tracing::warn!("Call {}: no inv session for re-INVITE", call_id);
        return false;
    }

    let neg = (*inv).neg;
    if neg.is_null() {
        tracing::warn!("Call {}: no SDP negotiator", call_id);
        return false;
    }

    // Get the negotiator's cloned remote offer
    let mut remote: *const pjmedia_sdp_session = ptr::null();
    let status = pjmedia_sdp_neg_get_neg_remote(neg, &mut remote);
    if status != pj_constants__PJ_SUCCESS as i32 || remote.is_null() {
        tracing::warn!(
            "Call {}: failed to get remote SDP from negotiator (status={})",
            call_id,
            status
        );
        return false;
    }

    // Modify the clone in-place: strip hold direction attributes.
    // Cast away const — safe because neg_remote_sdp is a deep clone, not the original.
    // Removing these makes the SDP negotiator treat the offer as sendrecv (RFC 3264 default).
    let remote_mut = remote as *mut pjmedia_sdp_session;
    let mut stripped_any = false;

    // Walk every media line; any of the three hold markers triggers a full strip
    // of all three so the line falls back to the implicit sendrecv default.
    for i in 0..(*remote_mut).media_count {
        let m = (*remote_mut).media[i as usize];
        if m.is_null() {
            continue;
        }

        let sendonly = c"sendonly".as_ptr();
        let recvonly = c"recvonly".as_ptr();
        let inactive = c"inactive".as_ptr();

        // Probe before removing so the debug log can report what was present.
        let had_sendonly = !pjmedia_sdp_media_find_attr2(m, sendonly, ptr::null()).is_null();
        let had_recvonly = !pjmedia_sdp_media_find_attr2(m, recvonly, ptr::null()).is_null();
        let had_inactive = !pjmedia_sdp_media_find_attr2(m, inactive, ptr::null()).is_null();

        if had_sendonly || had_recvonly || had_inactive {
            pjmedia_sdp_media_remove_all_attr(m, sendonly);
            pjmedia_sdp_media_remove_all_attr(m, recvonly);
            pjmedia_sdp_media_remove_all_attr(m, inactive);
            stripped_any = true;

            tracing::debug!(
                "Call {} media {}: stripped hold direction (sendonly={}, recvonly={}, inactive={})",
                call_id,
                i,
                had_sendonly,
                had_recvonly,
                had_inactive
            );
        }
    }

    stripped_any
}
diff --git a/sipcord-bridge/src/transport/sip/channel_audio.rs b/sipcord-bridge/src/transport/sip/channel_audio.rs
new file mode 100644
index 0000000..cce3e91
--- /dev/null
+++ b/sipcord-bridge/src/transport/sip/channel_audio.rs
@@ -0,0 +1,1044 @@
//! Per-channel audio isolation for Discord <-> SIP audio routing
//!
//! This module handles:
//! - Custom buffer ports for per-channel Discord->SIP audio
//! - Channel registration and call mapping
- Audio buffer management + +use super::ffi::frame_utils::get_conference_bridge; +use super::ffi::types::*; +use crate::services::snowflake::Snowflake; +use dashmap::DashMap; +use parking_lot::{Mutex, RwLock}; +use pjsua::*; +use rtrb::Consumer; +use std::collections::{HashMap, VecDeque}; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::sync::OnceLock; +use std::time::{Duration, Instant}; + +// Discord→SIP ring buffer consumers (written by Discord, read by audio thread) + +/// Per-channel ring buffer consumers for the Discord→SIP audio path. +/// VoiceReceiver writes resampled i16 mono @ 16kHz directly to the producer side. +/// channel_port_get_frame reads from the consumer side here. +static DISCORD_TO_SIP_CONSUMERS: OnceLock>>> = + OnceLock::new(); + +fn get_discord_to_sip_consumers() -> &'static DashMap>> { + DISCORD_TO_SIP_CONSUMERS.get_or_init(DashMap::new) +} + +/// Register a ring buffer consumer for Discord→SIP audio on a channel. +pub fn register_discord_to_sip(channel_id: Snowflake, consumer: Consumer) { + tracing::debug!( + "Registering Discord→SIP ring buffer consumer for channel {}", + channel_id + ); + get_discord_to_sip_consumers().insert(channel_id, Mutex::new(consumer)); +} + +/// Unregister the ring buffer consumer for a channel. +pub fn unregister_discord_to_sip(channel_id: Snowflake) { + tracing::debug!( + "Unregistering Discord→SIP ring buffer consumer for channel {}", + channel_id + ); + get_discord_to_sip_consumers().remove(&channel_id); +} + +// Custom buffer port callbacks for per-channel Discord->SIP audio + +/// Custom get_frame callback for channel buffer ports +/// Called by PJSUA/conference bridge to pull audio for RTP transmission +/// +/// This is called by PJSUA from its own thread during RTP transmission. +/// With multiple callers in the same channel, PJSUA calls this multiple times +/// (once per call) within microseconds. 
Without caching, N callers would drain +/// N*320 samples per 20ms tick, emptying the buffer N times faster than it fills. +/// +/// Time-based caching ensures all callers in the same tick share the same audio frame. +pub unsafe extern "C" fn channel_port_get_frame( + this_port: *mut pjmedia_port, + frame: *mut pjmedia_frame, +) -> pj_status_t { + use std::sync::atomic::AtomicU64; + + static GET_FRAME_CALL_COUNT: AtomicU64 = AtomicU64::new(0); + static CACHE_HIT_COUNT: AtomicU64 = AtomicU64::new(0); + let call_count = GET_FRAME_CALL_COUNT.fetch_add(1, Ordering::Relaxed); + + // Log first 10 calls to confirm this callback is being invoked + if call_count < 10 { + tracing::trace!( + "channel_port_get_frame called (call #{}, port={:p})", + call_count, + this_port + ); + } else if call_count == 10 { + tracing::trace!("channel_port_get_frame: suppressing further per-call logs"); + } + + if this_port.is_null() || frame.is_null() { + return -1; // PJ_EINVAL + } + + let channel_id = Snowflake::new((*this_port).port_data.ldata as u64); + if *channel_id == 0 { + (*frame).type_ = pjmedia_frame_type_PJMEDIA_FRAME_TYPE_NONE; + (*frame).size = 0; + return pj_constants__PJ_SUCCESS as pj_status_t; + } + + // Time-based caching to prevent multi-caller drain + // If called within 15ms of last drain, return cached samples + let now = Instant::now(); + let cache_window = Duration::from_millis(15); // PJSUA sends RTP every 20ms + + let cache = CHANNEL_DRAIN_CACHE.get_or_init(DashMap::new); + + // Stack-allocated buffer for fresh samples (zero heap allocation on miss path) + let mut stack_buf = [0i16; SAMPLES_PER_FRAME]; + + // Check cache first - if valid, return cached samples (cheap Arc::clone) + let (samples_ptr, samples_len): (*const i16, usize) = if let Some(entry) = + cache.get(&channel_id) + { + let (last_time, cached, cached_len) = entry.value(); + if now.duration_since(*last_time) < cache_window { + // Cache hit - use cached Arc data directly (zero-copy) + let hits = 
CACHE_HIT_COUNT.fetch_add(1, Ordering::Relaxed) + 1; + if call_count.is_multiple_of(500) { + tracing::trace!( + "channel_port_get_frame #{}: CACHE HIT for channel={} ({}ms since last drain, {} total hits)", + call_count, channel_id, now.duration_since(*last_time).as_millis(), hits + ); + } + (cached.as_ptr(), *cached_len) + } else { + // Cache expired - need to drop the read ref before draining + drop(entry); + + // Drain fresh samples into stack buffer + let n = get_samples_from_buffer(channel_id, &mut stack_buf); + // Store in cache as Arc<[i16]> (single allocation for Arc+data) + let fresh_arc: Arc<[i16]> = Arc::from(&stack_buf[..n]); + cache.insert(channel_id, (now, fresh_arc, n)); + + if call_count.is_multiple_of(500) { + tracing::trace!( + "channel_port_get_frame #{}: channel={}, drained {} samples (cache expired)", + call_count, + channel_id, + n + ); + } + (stack_buf.as_ptr(), n) + } + } else { + // No cache entry - drain fresh samples into stack buffer + let n = get_samples_from_buffer(channel_id, &mut stack_buf); + let fresh_arc: Arc<[i16]> = Arc::from(&stack_buf[..n]); + cache.insert(channel_id, (now, fresh_arc, n)); + + if call_count.is_multiple_of(500) { + tracing::trace!( + "channel_port_get_frame #{}: channel={}, drained {} samples (no cache)", + call_count, + channel_id, + n + ); + } + (stack_buf.as_ptr(), n) + }; + + // Log cache statistics periodically (every 10 seconds at 50 calls/sec) + if call_count.is_multiple_of(500) { + let hits = CACHE_HIT_COUNT.load(Ordering::Relaxed); + let hit_rate = (hits * 100).checked_div(call_count).unwrap_or(0); + tracing::trace!( + "channel_port_get_frame stats: {} calls, {} cache hits ({}% hit rate)", + call_count, + hits, + hit_rate + ); + } + + if samples_len > 0 { + let samples = std::slice::from_raw_parts(samples_ptr, samples_len); + super::ffi::frame_utils::fill_audio_frame(frame, samples); + } else { + super::ffi::frame_utils::fill_silence_frame(frame); + } + + pj_constants__PJ_SUCCESS as pj_status_t +} + 
/// Get samples from the Discord→SIP ring buffer for a channel.
/// Fills the caller-provided buffer and returns the number of samples written.
/// `buf` must be at least SAMPLES_PER_FRAME in length.
fn get_samples_from_buffer(channel_id: Snowflake, buf: &mut [i16; SAMPLES_PER_FRAME]) -> usize {
    use std::sync::atomic::AtomicU64;
    static DRAIN_COUNT: AtomicU64 = AtomicU64::new(0);
    static UNDERRUN_COUNT: AtomicU64 = AtomicU64::new(0);

    if let Some(consumer_entry) = get_discord_to_sip_consumers().get(&channel_id) {
        // try_lock keeps this real-time path non-blocking: if the consumer is
        // contended we fall through and return 0 (silence) rather than stall.
        if let Some(mut consumer) = consumer_entry.try_lock() {
            let available = consumer.slots();
            if available >= SAMPLES_PER_FRAME {
                let count = DRAIN_COUNT.fetch_add(1, Ordering::Relaxed);
                if count.is_multiple_of(250) {
                    tracing::debug!(
                        "Discord->SIP drain: channel={}, available={}, draining {}",
                        channel_id,
                        available,
                        SAMPLES_PER_FRAME
                    );
                }
                if let Ok(chunk) = consumer.read_chunk(SAMPLES_PER_FRAME) {
                    // rtrb may hand the chunk back as two slices when it wraps
                    // around the ring; copy both halves contiguously into buf.
                    let (first, second) = chunk.as_slices();
                    buf[..first.len()].copy_from_slice(first);
                    if !second.is_empty() {
                        buf[first.len()..first.len() + second.len()].copy_from_slice(second);
                    }
                    chunk.commit_all();
                }
                return SAMPLES_PER_FRAME;
            } else if available > 0 {
                // Partial buffer - drain what we have, zero-fill the rest
                let underruns = UNDERRUN_COUNT.fetch_add(1, Ordering::Relaxed) + 1;
                if underruns <= 10 || underruns.is_multiple_of(100) {
                    tracing::warn!(
                        "BUFFER UNDERRUN (Discord->SIP): channel={}, only {} available (need {}), total: {}",
                        channel_id, available, SAMPLES_PER_FRAME, underruns
                    );
                }
                buf[available..].fill(0);
                if let Ok(chunk) = consumer.read_chunk(available) {
                    let (first, second) = chunk.as_slices();
                    buf[..first.len()].copy_from_slice(first);
                    if !second.is_empty() {
                        buf[first.len()..first.len() + second.len()].copy_from_slice(second);
                    }
                    chunk.commit_all();
                }
                return available;
            }
        }
    }

    0 // No audio available
}

/// Custom put_frame callback for channel buffer ports
/// Called by PJSUA/conference bridge when sending audio TO this port (SIP -> Discord)
/// This captures audio from calls connected to this channel's port
pub unsafe extern "C" fn channel_port_put_frame(
    this_port: *mut pjmedia_port,
    frame: *mut pjmedia_frame,
) -> pj_status_t {
    use std::sync::atomic::AtomicU64;

    static PUT_FRAME_CALL_COUNT: AtomicU64 = AtomicU64::new(0);
    let call_count = PUT_FRAME_CALL_COUNT.fetch_add(1, Ordering::Relaxed);

    // Null port/frame: report success so the bridge keeps running.
    if this_port.is_null() || frame.is_null() {
        return pj_constants__PJ_SUCCESS as pj_status_t;
    }

    // Only process audio frames with data
    if (*frame).type_ != pjmedia_frame_type_PJMEDIA_FRAME_TYPE_AUDIO || (*frame).size == 0 {
        return pj_constants__PJ_SUCCESS as pj_status_t;
    }

    // Channel id is stashed in the port's user data at creation time.
    let channel_id = Snowflake::new((*this_port).port_data.ldata as u64);
    if *channel_id == 0 {
        return pj_constants__PJ_SUCCESS as pj_status_t;
    }

    // Log first 10 calls to confirm this callback is being invoked
    if call_count < 10 {
        tracing::trace!(
            "channel_port_put_frame called (call #{}, port={:p}, channel={}, frame_size={})",
            call_count,
            this_port,
            channel_id,
            (*frame).size
        );
    } else if call_count == 10 {
        tracing::trace!("channel_port_put_frame: suppressing further per-call logs");
    }

    // View frame buffer as i16 slice (zero-copy)
    // NOTE(review): size/2 assumes 16-bit PCM frames from the bridge — confirm
    // the port was created with a 16-bit-per-sample format.
    let num_samples = (*frame).size / 2;
    let frame_buf = (*frame).buf as *const i16;
    let samples = std::slice::from_raw_parts(frame_buf, num_samples);

    // Store in the SIP->Discord buffer for this channel
    let buffers = CHANNEL_AUDIO_IN.get_or_init(DashMap::new);
    let mut buffer = buffers
        .entry(channel_id)
        .or_insert_with(|| VecDeque::with_capacity(max_channel_buffer_samples()));

    // Limit buffer size (same as Discord->SIP direction)
    let max_buffer = max_channel_buffer_samples();
    let buf_len = buffer.len();
    if buf_len + samples.len() > max_buffer {
        let to_drop = (buf_len + samples.len()).saturating_sub(max_buffer);
        if to_drop > 0 {
            // NOTE(review): the warning reports `to_drop` but at most
            // `drop_count` (= min(to_drop, buf_len)) samples are drained —
            // confirm whether the log should report drop_count instead.
            let drop_count = to_drop.min(buf_len);
            buffer.drain(..drop_count);
            if call_count.is_multiple_of(250) {
                tracing::warn!(
                    "SIP->Discord buffer overflow: channel {} dropping {} samples",
                    channel_id,
                    to_drop
                );
            }
        }
    }

    buffer.extend(samples.iter().copied());

    // Log periodically
    if call_count.is_multiple_of(500) {
        tracing::debug!(
            "channel_port_put_frame #{}: channel={}, added {} samples, buffer now {}",
            call_count,
            channel_id,
            samples.len(),
            buffer.len()
        );
    }

    pj_constants__PJ_SUCCESS as pj_status_t
}

/// Custom on_destroy callback for channel buffer ports
pub unsafe extern "C" fn channel_port_on_destroy(this_port: *mut pjmedia_port) -> pj_status_t {
    if !this_port.is_null() {
        // Remove from reverse mapping
        let port_key = this_port as usize;
        if let Some(mapping) = PORT_TO_CHANNEL.get() {
            mapping.lock().remove(&port_key);
        }
    }
    pj_constants__PJ_SUCCESS as pj_status_t
}

// Conference connection helpers (shared by connect/disconnect paths)

/// Connect a call bidirectionally to other calls in the channel + channel port.
///
/// Uses `pjmedia_conf_connect_port` directly to bypass PJSUA_LOCK.
/// `other_calls` should be (call_id, conf_port) pairs for existing calls in the channel.
unsafe fn connect_call_to_channel(
    conf: *mut pjmedia_conf,
    call_id: CallId,
    conf_port: ConfPort,
    channel_id: Snowflake,
    other_calls: &[(CallId, ConfPort)],
) {
    // Connect this call to other calls in the same channel (both directions,
    // so each caller hears the others).
    for &(other_call_id, other_conf_port) in other_calls {
        let status1 =
            pjmedia_conf_connect_port(conf, *conf_port as u32, *other_conf_port as u32, 0);
        let status2 =
            pjmedia_conf_connect_port(conf, *other_conf_port as u32, *conf_port as u32, 0);

        if status1 == pj_constants__PJ_SUCCESS as i32 && status2 == pj_constants__PJ_SUCCESS as i32
        {
            tracing::debug!(
                "Connected call {} (port {}) <-> call {} (port {}) in channel {}",
                call_id,
                conf_port,
                other_call_id,
                other_conf_port,
                channel_id
            );
        } else {
            // Partial failures are logged but not rolled back; the call simply
            // ends up one-way or unconnected to that peer.
            tracing::warn!(
                "Failed to connect calls {} and {} in channel {}: status1={}, status2={}",
                call_id,
                other_call_id,
                channel_id,
                status1,
                status2
            );
        }
    }

    // Connect call to channel's conference port bidirectionally
    if let Some(channel_slot) = get_or_create_channel_port(channel_id) {
        // Channel port -> call (Discord audio reaches this call)
        let status1 = pjmedia_conf_connect_port(conf, *channel_slot as u32, *conf_port as u32, 0);
        // Call -> channel port (SIP audio goes to channel for Discord)
        let status2 = pjmedia_conf_connect_port(conf, *conf_port as u32, *channel_slot as u32, 0);

        if status1 != pj_constants__PJ_SUCCESS as i32 {
            tracing::warn!(
                "Failed to connect channel {} slot {} -> call {}: {}",
                channel_id,
                channel_slot,
                call_id,
                status1
            );
        }
        if status2 != pj_constants__PJ_SUCCESS as i32 {
            tracing::warn!(
                "Failed to connect call {} -> channel {} slot {}: {}",
                call_id,
                channel_id,
                channel_slot,
                status2
            );
        }
        if status1 == pj_constants__PJ_SUCCESS as i32 && status2 == pj_constants__PJ_SUCCESS as i32
        {
            tracing::debug!(
                "Connected channel {} port (slot {}) <-> call {} (port {}) bidirectionally",
                channel_id,
                channel_slot,
                call_id,
                conf_port
            );
        }
    }
}

/// Disconnect a call from other calls in the channel + channel port.
///
/// Uses `pjmedia_conf_disconnect_port` directly to bypass PJSUA_LOCK.
/// `remaining_calls` should be call IDs still in the channel (excluding the departing call).
unsafe fn disconnect_call_from_channel(
    conf: *mut pjmedia_conf,
    call_id: CallId,
    conf_port: ConfPort,
    channel_id: Snowflake,
    remaining_calls: &[CallId],
) {
    let conf_ports = CALL_CONF_PORTS.get_or_init(DashMap::new);

    // Disconnect from other calls in the channel (both directions)
    for &other_call_id in remaining_calls {
        if let Some(other_conf_port) = conf_ports.get(&other_call_id).map(|r| *r) {
            pjmedia_conf_disconnect_port(conf, *conf_port as u32, *other_conf_port as u32);
            pjmedia_conf_disconnect_port(conf, *other_conf_port as u32, *conf_port as u32);
            tracing::debug!(
                "Disconnected call {} from call {} in channel {}",
                call_id,
                other_call_id,
                channel_id
            );
        }
    }

    // Disconnect from channel port (both directions)
    if let Some(channel_slot) = get_channel_slot(channel_id) {
        pjmedia_conf_disconnect_port(conf, *channel_slot as u32, *conf_port as u32);
        pjmedia_conf_disconnect_port(conf, *conf_port as u32, *channel_slot as u32);
        tracing::debug!(
            "Disconnected channel {} slot {} <-> call {} (port {}) bidirectionally",
            channel_id,
            channel_slot,
            call_id,
            conf_port
        );
    }
}

// Per-channel audio isolation functions

/// Register a call with its Discord channel for audio isolation
///
/// This function:
/// 1. Stores the call -> channel mapping (always, even if media not ready)
/// 2. Adds the call to the channel's call set
/// 3.
Queues the conference connections for the audio thread to process +/// (pjsua_conf_connect conflicts with pjmedia_port_get_frame if called from different threads) +pub fn register_call_channel(call_id: CallId, channel_id: Snowflake) { + // Always store the call -> channel mapping first, even if media isn't ready yet + // This allows complete_pending_channel_registration to finish the job when media becomes active + { + let channels = CALL_CHANNELS.get_or_init(DashMap::new); + channels.insert(call_id, channel_id); + tracing::debug!("Stored call {} -> channel {} mapping", call_id, channel_id); + } + + // Get the conf_port for this call + let conf_port = { + let ports = CALL_CONF_PORTS.get_or_init(DashMap::new); + ports.get(&call_id).map(|r| *r) + }; + + let Some(_conf_port) = conf_port else { + tracing::debug!( + "Call {} registered for channel {} but media not active yet - will connect when ready", + call_id, + channel_id + ); + return; + }; + + // Add call to channel's call set (this enables audio buffering for this channel) + { + let channel_calls = CHANNEL_CALLS.get_or_init(|| RwLock::new(HashMap::new())); + let mut map = channel_calls.write(); + let calls = map.entry(channel_id).or_default(); + calls.insert(call_id); + tracing::debug!( + "Added call {} to channel {} ({} calls in channel)", + call_id, + channel_id, + calls.len() + ); + } + + // Queue the conference connections to be made by the audio thread + // This is necessary because pjsua_conf_connect conflicts with the audio thread's + // pjmedia_port_get_frame calls if made from a different thread + PENDING_CONF_CONNECTIONS.push((call_id, channel_id)); + tracing::debug!( + "Queued conference connections for call {} -> channel {} (will be processed by audio thread)", + call_id, channel_id + ); +} + +/// Complete the conference connections for a call (called from audio thread) +/// +/// This makes the actual conference connections that were queued by register_call_channel. 
+/// Must be called from the audio thread to avoid conflicts with pjmedia_port_get_frame. +/// Uses pjmedia_conf_connect_port directly to bypass PJSUA_LOCK (avoiding deadlocks). +pub fn complete_conf_connections(call_id: CallId, channel_id: Snowflake) { + // Get the conf_port for this call + let conf_port = { + let ports = CALL_CONF_PORTS.get_or_init(DashMap::new); + ports.get(&call_id).map(|r| *r) + }; + + let Some(conf_port) = conf_port else { + tracing::warn!( + "complete_conf_connections: call {} has no conf_port - skipping", + call_id + ); + return; + }; + + // Get the conference bridge pointer (needed for pjmedia_conf_connect_port) + let conf = unsafe { get_conference_bridge() }; + let Some(conf) = conf else { + tracing::error!( + "complete_conf_connections: could not get conference bridge pointer for call {}", + call_id + ); + return; + }; + + // Get other calls in this channel to connect bidirectionally + let other_calls: Vec<(CallId, ConfPort)> = { + let channel_calls = CHANNEL_CALLS.get_or_init(|| RwLock::new(HashMap::new())); + let conf_ports = CALL_CONF_PORTS.get_or_init(DashMap::new); + let map = channel_calls.read(); + if let Some(calls) = map.get(&channel_id) { + calls + .iter() + .filter(|&&other_id| other_id != call_id) + .filter_map(|&other_id| conf_ports.get(&other_id).map(|r| (other_id, *r))) + .collect() + } else { + vec![] + } + }; + + unsafe { + connect_call_to_channel(conf, call_id, conf_port, channel_id, &other_calls); + } + + tracing::debug!( + "Completed conference connections for call {} (port {}) in channel {}", + call_id, + conf_port, + channel_id + ); +} + +/// Complete a pending channel registration when media becomes active +/// +/// Called from on_call_media_state_cb when a call's media becomes ACTIVE. +/// If the call was already registered with a channel (via register_call_channel) +/// but media wasn't ready at that time, this completes the audio connections. 
+pub fn complete_pending_channel_registration(call_id: CallId, conf_port: ConfPort) { + // Check if this call has a pending channel registration + let channel_id = { + let channels = CALL_CHANNELS.get_or_init(DashMap::new); + channels.get(&call_id).map(|r| *r) + }; + + let Some(channel_id) = channel_id else { + // No pending registration - call hasn't been assigned to a channel yet + tracing::debug!( + "complete_pending_channel_registration: call {} has no pending channel registration (will be registered later)", + call_id + ); + return; + }; + + // Check if already in CHANNEL_CALLS (already connected) + let already_connected = { + let channel_calls = CHANNEL_CALLS.get_or_init(|| RwLock::new(HashMap::new())); + let map = channel_calls.read(); + map.get(&channel_id) + .map(|calls| calls.contains(&call_id)) + .unwrap_or(false) + }; + + if already_connected { + tracing::debug!( + "Call {} already connected to channel {} - skipping", + call_id, + channel_id + ); + return; + } + + tracing::debug!( + "Completing pending channel registration: call {} -> channel {} (conf_port {})", + call_id, + channel_id, + conf_port + ); + + // Get existing calls in this channel and add our call + let existing_calls: Vec = { + let channel_calls = CHANNEL_CALLS.get_or_init(|| RwLock::new(HashMap::new())); + let mut map = channel_calls.write(); + let calls = map.entry(channel_id).or_default(); + let existing: Vec = calls.iter().copied().collect(); + calls.insert(call_id); + existing + }; + + // Get the conference bridge pointer (needed for pjmedia_conf_connect_port) + let conf = unsafe { get_conference_bridge() }; + let Some(conf) = conf else { + tracing::error!( + "complete_pending_channel_registration: could not get conference bridge pointer for call {}", + call_id + ); + return; + }; + + // Connect this call to other calls in the same channel + channel port + let conf_ports = CALL_CONF_PORTS.get_or_init(DashMap::new); + let other_calls: Vec<(CallId, ConfPort)> = existing_calls + .iter() 
+ .filter_map(|&other_id| conf_ports.get(&other_id).map(|r| (other_id, *r))) + .collect(); + + unsafe { + connect_call_to_channel(conf, call_id, conf_port, channel_id, &other_calls); + } + + tracing::debug!( + "Completed pending registration: call {} (port {}) for channel {} ({} total calls)", + call_id, + conf_port, + channel_id, + existing_calls.len() + 1 + ); +} + +/// Temporarily disconnect a held call from its channel without full teardown +/// +/// Unlike unregister_call_channel(), this keeps CALL_CHANNELS and CALL_CONF_PORTS +/// mappings intact so the call can be reconnected when it comes off hold. +/// The existing ACTIVE code path in on_call_media_state_cb handles reconnection +/// via complete_pending_channel_registration(). +/// +/// This function: +/// 1. Removes the call from CHANNEL_CALLS (stops audio buffering for this channel if empty) +/// 2. Disconnects conf_port from channel port (both directions) +/// 3. Disconnects conf_port from other calls in the channel (both directions) +/// 4. 
Clears audio buffers and drain cache if no other calls remain in the channel +pub fn disconnect_call_for_hold(call_id: CallId) { + // Look up channel_id from CALL_CHANNELS (keep the mapping for reconnection) + let channel_id = { + let channels = CALL_CHANNELS.get_or_init(DashMap::new); + channels.get(&call_id).map(|r| *r) + }; + + let Some(channel_id) = channel_id else { + tracing::debug!( + "disconnect_call_for_hold: call {} not registered with any channel", + call_id + ); + return; + }; + + // Look up conf_port from CALL_CONF_PORTS (keep the mapping for reconnection) + let conf_port = { + let ports = CALL_CONF_PORTS.get_or_init(DashMap::new); + ports.get(&call_id).map(|r| *r) + }; + + // Remove call from CHANNEL_CALLS and get remaining calls + let remaining_calls: Vec = { + let channel_calls = CHANNEL_CALLS.get_or_init(|| RwLock::new(HashMap::new())); + let mut map = channel_calls.write(); + if let Some(calls) = map.get_mut(&channel_id) { + calls.remove(&call_id); + let remaining: Vec = calls.iter().copied().collect(); + // If set becomes empty, remove the key so get_active_channels_into() excludes it + // and send_audio_to_channel() stops buffering + if calls.is_empty() { + map.remove(&channel_id); + } + remaining + } else { + Vec::new() + } + }; + + // Disconnect conference ports + if let Some(conf_port) = conf_port { + let conf = unsafe { get_conference_bridge() }; + + if let Some(conf) = conf { + unsafe { + disconnect_call_from_channel( + conf, + call_id, + conf_port, + channel_id, + &remaining_calls, + ); + } + } else { + tracing::warn!( + "disconnect_call_for_hold: could not get conference bridge pointer for call {}", + call_id + ); + } + } + + // If no other calls remain in the channel, clear stale buffers + if remaining_calls.is_empty() { + if let Some(audio_in) = CHANNEL_AUDIO_IN.get() { + audio_in.remove(&channel_id); + } + if let Some(drain_cache) = CHANNEL_DRAIN_CACHE.get() { + drain_cache.remove(&channel_id); + } + tracing::debug!( + "Hold: cleared 
audio buffers for channel {} (no remaining calls)", + channel_id + ); + } + + tracing::info!( + "Call {} put on hold - disconnected from channel {} ({} calls remaining)", + call_id, + channel_id, + remaining_calls.len() + ); +} + +/// Unregister a call from its Discord channel +/// +/// This function: +/// 1. Removes the call from channel mappings +/// 2. Disconnects this call from other calls in the same channel +/// 3. Disconnects from channel port +/// 4. Cleans up the conf_port mapping +/// +/// Does NOT clean up the channel port automatically. +/// The bridge code should call cleanup_channel_port() when the bridge is destroyed +/// to avoid race conditions with other calls joining the same channel. +pub fn unregister_call_channel(call_id: CallId) { + // Get and remove the channel_id for this call + let channel_id = { + let channels = CALL_CHANNELS.get_or_init(DashMap::new); + channels.remove(&call_id).map(|(_, v)| v) + }; + + // Get and remove the conf_port for this call + let conf_port = { + let ports = CALL_CONF_PORTS.get_or_init(DashMap::new); + ports.remove(&call_id).map(|(_, v)| v) + }; + + let Some(channel_id) = channel_id else { + // Call wasn't registered with a channel (e.g., hung up before auth) + tracing::debug!("Call {} was not registered with any channel", call_id); + return; + }; + + // Remove call from channel's call set and get remaining calls + let remaining_calls: Vec = { + let channel_calls = CHANNEL_CALLS.get_or_init(|| RwLock::new(HashMap::new())); + let mut map = channel_calls.write(); + if let Some(calls) = map.get_mut(&channel_id) { + calls.remove(&call_id); + let remaining: Vec = calls.iter().copied().collect(); + // Clean up empty channels + if calls.is_empty() { + map.remove(&channel_id); + // Also clean up the channel's audio input buffer + if let Some(audio_in) = CHANNEL_AUDIO_IN.get() { + audio_in.remove(&channel_id); + } + } + remaining + } else { + Vec::new() + } + }; + + // Disconnect this call from other calls in the channel 
and from channel/master ports + if let Some(conf_port) = conf_port { + let conf = unsafe { get_conference_bridge() }; + + if let Some(conf) = conf { + unsafe { + disconnect_call_from_channel( + conf, + call_id, + conf_port, + channel_id, + &remaining_calls, + ); + } + } else { + tracing::warn!( + "unregister_call_channel: could not get conference bridge pointer for call {}", + call_id + ); + } + } + + tracing::debug!( + "Unregistered call {} from channel {} ({} calls remaining)", + call_id, + channel_id, + remaining_calls.len() + ); +} + +/// Get or create the conference port for a channel +/// Returns the conf_slot for this channel's port +/// +/// Creates a CUSTOM BUFFER PORT (not a null port) that: +/// - Provides audio to the conference via get_frame (pulls from Discord→SIP ring buffer) +/// - Discards put_frame (we only provide audio, not receive it) +pub fn get_or_create_channel_port(channel_id: Snowflake) -> Option { + let ports = CHANNEL_CONF_PORTS.get_or_init(|| Mutex::new(HashMap::new())); + let mut ports = ports.lock(); + + if let Some(&(_, slot)) = ports.get(&channel_id) { + return Some(slot); + } + + // Create a new custom buffer port for this channel + unsafe { + // Get or create the memory pool + let pool = CHANNEL_PORT_POOL.get_or_init(|| { + let pool = pjsua_pool_create(c"channel_ports".as_ptr() as *const _, 4096, 4096); + Mutex::new(SendablePool(pool)) + }); + let pool_ptr = pool.lock().0; + + // Allocate pjmedia_port structure (zero-initialized) + let port_size = std::mem::size_of::(); + let port = pj_pool_alloc(pool_ptr, port_size) as *mut pjmedia_port; + if port.is_null() { + tracing::error!("Failed to allocate channel port for {}", channel_id); + return None; + } + // Zero-initialize the port structure + std::ptr::write_bytes(port as *mut u8, 0, port_size); + + // Create port name + let port_name = format!("ch{}", channel_id); + let port_name_cstr = std::ffi::CString::new(port_name).ok()?; + + // Initialize port info using 
pjmedia_port_info_init + // Signature: we use a custom one to identify our ports + let signature = 0x4348_414E; // "CHAN" in hex + pjmedia_port_info_init( + &mut (*port).info, + &pj_str(port_name_cstr.as_ptr() as *mut _), + signature, + CONF_SAMPLE_RATE, + CONF_CHANNELS, + 16, // bits per sample + SAMPLES_PER_FRAME as u32, + ); + + // Set our custom callbacks + (*port).get_frame = Some(channel_port_get_frame); + (*port).put_frame = Some(channel_port_put_frame); + (*port).on_destroy = Some(channel_port_on_destroy); + + // Add to conference + let mut slot: i32 = 0; + let status = pjsua_conf_add_port(pool_ptr, port, &mut slot); + + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::error!( + "Failed to add channel port to conference for {}: {}", + channel_id, + status + ); + return None; + } + + // Store channel_id in port_data.ldata for O(1) lookup in callbacks + // (avoids Mutex acquisition on every get_frame/put_frame call) + (*port).port_data.ldata = *channel_id as i64; + + // Also register in reverse mapping for on_destroy callback cleanup + let port_to_channel = PORT_TO_CHANNEL.get_or_init(|| Mutex::new(HashMap::new())); + port_to_channel.lock().insert(port as usize, channel_id); + + let conf_slot = ConfPort::new(slot); + tracing::debug!( + "Created custom buffer port for channel {} at slot {} (port_ptr={:p})", + channel_id, + conf_slot, + port + ); + ports.insert(channel_id, (SendablePort(port), conf_slot)); + Some(conf_slot) + } +} + +/// Get the conf_slot for a channel (if it exists) +pub fn get_channel_slot(channel_id: Snowflake) -> Option { + let ports = CHANNEL_CONF_PORTS.get()?; + let ports = ports.lock(); + ports.get(&channel_id).map(|&(_, slot)| slot) +} + +/// Clean up a channel's conference port +/// This should be called by the bridge code when it's certain no calls remain +/// (not automatically when CHANNEL_CALLS is empty, to avoid race conditions) +pub fn cleanup_channel_port(channel_id: Snowflake) { + let Some(ports) = 
CHANNEL_CONF_PORTS.get() else { + return; + }; + + let removed = { + let mut ports = ports.lock(); + ports.remove(&channel_id) + }; + + if let Some((port, slot)) = removed { + // Remove from reverse mapping first + if let Some(mapping) = PORT_TO_CHANNEL.get() { + mapping.lock().remove(&(port.0 as usize)); + } + + unsafe { + // Remove from conference bridge + let status = pjsua_conf_remove_port(*slot); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!( + "Failed to remove channel port {} from conference: {}", + slot, + status + ); + } + + // Destroy the port (calls on_destroy callback) + if !port.0.is_null() { + pjmedia_port_destroy(port.0); + } + } + tracing::debug!( + "Cleaned up channel port for channel {} (slot {})", + channel_id, + slot + ); + } +} + +/// Drain one frame of SIP->Discord audio for a channel into a provided buffer. +/// Returns the number of samples written (0 if no audio available). +/// `buf` must be at least SAMPLES_PER_FRAME in length. +pub fn drain_sip_to_discord_audio(channel_id: Snowflake, buf: &mut [i16]) -> usize { + use std::sync::atomic::AtomicU64; + static DRAIN_COUNT: AtomicU64 = AtomicU64::new(0); + + let Some(buffers) = CHANNEL_AUDIO_IN.get() else { + return 0; + }; + + let Some(mut buffer) = buffers.get_mut(&channel_id) else { + return 0; + }; + + if buffer.len() >= SAMPLES_PER_FRAME { + let count = DRAIN_COUNT.fetch_add(1, Ordering::Relaxed); + if count.is_multiple_of(250) { + tracing::debug!( + "SIP->Discord drain #{}: channel={}, buffer has {} samples, draining {}", + count, + channel_id, + buffer.len(), + SAMPLES_PER_FRAME + ); + } + // Drain directly into the provided buffer + let (front, back) = buffer.as_slices(); + if front.len() >= SAMPLES_PER_FRAME { + buf[..SAMPLES_PER_FRAME].copy_from_slice(&front[..SAMPLES_PER_FRAME]); + } else { + buf[..front.len()].copy_from_slice(front); + let remaining = SAMPLES_PER_FRAME - front.len(); + buf[front.len()..SAMPLES_PER_FRAME].copy_from_slice(&back[..remaining]); + } 
+ buffer.drain(..SAMPLES_PER_FRAME); + SAMPLES_PER_FRAME + } else if !buffer.is_empty() { + // Return what we have (partial frame) - better than nothing + let available = buffer.len(); + tracing::trace!( + "SIP->Discord partial drain: channel={}, only {} samples available", + channel_id, + available + ); + let (front, back) = buffer.as_slices(); + if front.len() >= available { + buf[..available].copy_from_slice(&front[..available]); + } else { + buf[..front.len()].copy_from_slice(front); + let remaining = available - front.len(); + buf[front.len()..available].copy_from_slice(&back[..remaining]); + } + buffer.drain(..available); + available + } else { + 0 + } +} + +/// Clear stale audio buffers and drain cache for a channel. +/// Called during reconnection teardown to ensure fresh audio state. +pub fn clear_channel_stale_audio(channel_id: Snowflake) { + if let Some(audio_in) = CHANNEL_AUDIO_IN.get() { + audio_in.remove(&channel_id); + } + if let Some(drain_cache) = CHANNEL_DRAIN_CACHE.get() { + drain_cache.remove(&channel_id); + } +} + +/// Fill a provided Vec with the active channel IDs (reuses allocation). +/// Uses RwLock::read() — non-exclusive, never blocks other readers (audio thread). +pub fn get_active_channels_into(out: &mut Vec) { + out.clear(); + let channel_calls = CHANNEL_CALLS.get_or_init(|| RwLock::new(HashMap::new())); + let map = channel_calls.read(); + out.extend(map.keys()); +} diff --git a/sipcord-bridge/src/transport/sip/ffi/direct_player.rs b/sipcord-bridge/src/transport/sip/ffi/direct_player.rs new file mode 100644 index 0000000..edeae4f --- /dev/null +++ b/sipcord-bridge/src/transport/sip/ffi/direct_player.rs @@ -0,0 +1,161 @@ +//! Direct player port for playing audio to a single call +//! +//! This module provides one-shot audio playback (e.g., join sounds) that +//! bypasses the channel buffer and plays directly to a specific call. 
+ +use super::types::*; +use anyhow::Result; +use parking_lot::Mutex; +use pjsua::*; +use std::collections::HashMap; + +/// Custom get_frame callback for direct player ports +/// Returns samples from the player's buffer, advancing position each call +pub unsafe extern "C" fn direct_player_get_frame( + this_port: *mut pjmedia_port, + frame: *mut pjmedia_frame, +) -> pj_status_t { + use std::sync::atomic::{AtomicU64, Ordering}; + + static GET_FRAME_CALL_COUNT: AtomicU64 = AtomicU64::new(0); + let call_count = GET_FRAME_CALL_COUNT.fetch_add(1, Ordering::Relaxed); + + // Log first 10 calls to confirm this callback is being invoked + if call_count < 10 { + tracing::trace!( + "direct_player_get_frame called (call #{}, port={:p})", + call_count, + this_port + ); + } else if call_count == 10 { + tracing::trace!("direct_player_get_frame: suppressing further per-call logs"); + } + + if this_port.is_null() || frame.is_null() { + return -1; // PJ_EINVAL + } + + let port_key = this_port as usize; + + // Get samples from the player's buffer and fill frame directly (no intermediate Vec) + { + let state = DIRECT_PLAYER_STATE.get_or_init(|| Mutex::new(HashMap::new())); + let mut state = state.lock(); + + if let Some((buffer, pos)) = state.get_mut(&port_key) { + if *pos < buffer.len() { + let end = (*pos + SAMPLES_PER_FRAME).min(buffer.len()); + super::frame_utils::fill_audio_frame(frame, &buffer[*pos..end]); + *pos = end; + } else { + super::frame_utils::fill_silence_frame(frame); // Playback complete + } + } else { + super::frame_utils::fill_silence_frame(frame); + } + } + + pj_constants__PJ_SUCCESS as pj_status_t +} + +/// Custom on_destroy callback for direct player ports +pub unsafe extern "C" fn direct_player_on_destroy(this_port: *mut pjmedia_port) -> pj_status_t { + if !this_port.is_null() { + let port_key = this_port as usize; + if let Some(state) = DIRECT_PLAYER_STATE.get() { + state.lock().remove(&port_key); + } + tracing::debug!("Direct player port destroyed: {:p}", 
this_port); + } + pj_constants__PJ_SUCCESS as pj_status_t +} + +/// Play audio directly to a specific call's conference port using a custom player port. +/// This bypasses the channel buffer - used for join sounds to avoid overflow. +/// +/// The player port connects directly to the call's conf_port, so only that caller +/// hears the audio. Other callers and Discord users don't hear it. +/// +/// This queues the operation to be executed by the audio thread to avoid +/// deadlocks with the audio thread's pjsua_conf_connect/disconnect calls. +pub fn play_audio_to_call_direct(call_id: CallId, samples: &[i16]) -> Result<()> { + use super::types::{queue_pjsua_op, PendingPjsuaOp}; + + tracing::debug!( + "Queueing PlayDirect for call {} ({} samples)", + call_id, + samples.len() + ); + queue_pjsua_op(PendingPjsuaOp::PlayDirect { + call_id, + samples: samples.to_vec(), + }); + Ok(()) +} + +/// Internal implementation of play_audio_to_call_direct +/// Called from the audio thread to actually create and connect the player +pub fn play_audio_to_call_direct_internal(call_id: CallId, samples: &[i16]) -> Result<()> { + use super::frame_utils::{create_and_connect_port, PortCallbacks}; + + // Get call's conference port + let call_conf_port = CALL_CONF_PORTS + .get() + .and_then(|p| p.get(&call_id).map(|r| *r)) + .ok_or_else(|| anyhow::anyhow!("No conf_port for call {}", call_id))?; + + // Store samples in the player state BEFORE creating port (get_frame needs them) + // We'll clean up if port creation fails + let guard = unsafe { + let callbacks = PortCallbacks { + get_frame: direct_player_get_frame, + put_frame: super::frame_utils::noop_put_frame, + on_destroy: Some(direct_player_on_destroy), + }; + + // Pre-store samples so get_frame can find them even during pjsua_conf_add_port + // We'll use a temporary key (0) and fix it after we get the actual port pointer + let guard = create_and_connect_port( + &DIRECT_PLAYER_POOL, + b"direct_players\0", + "dplay", + call_id, + 
0x4450_4C59, // "DPLY" + callbacks, + call_conf_port, + ); + + match guard { + Ok(guard) => { + // Now store samples with the actual port key + let state = DIRECT_PLAYER_STATE.get_or_init(|| Mutex::new(HashMap::new())); + state.lock().insert(guard.port_key, (samples.to_vec(), 0)); + + tracing::debug!( + "Playing {} samples directly to call {} (player_slot={}, call_port={})", + samples.len(), + call_id, + guard.slot, + call_conf_port + ); + + guard + } + Err(e) => return Err(e), + } + }; + + // Schedule cleanup after playback duration + // The ConfPortGuard handles pjsua_conf_remove_port when dropped + let sample_count = samples.len(); + let duration_ms = (sample_count as u64 * 1000) / CONF_SAMPLE_RATE as u64 + 100; + + std::thread::spawn(move || { + std::thread::sleep(std::time::Duration::from_millis(duration_ms)); + // Drop the guard to remove from conference + // on_destroy callback will clean up DIRECT_PLAYER_STATE + drop(guard); + }); + + Ok(()) +} diff --git a/sipcord-bridge/src/transport/sip/ffi/frame_utils.rs b/sipcord-bridge/src/transport/sip/ffi/frame_utils.rs new file mode 100644 index 0000000..625d0c9 --- /dev/null +++ b/sipcord-bridge/src/transport/sip/ffi/frame_utils.rs @@ -0,0 +1,184 @@ +//! Shared frame utilities for pjmedia ports +//! +//! Provides common helpers for filling audio frames and a shared no-op +//! put_frame callback used by ports that only produce audio. + +use super::types::{ + CallId, ConfPort, SendablePool, CONF_CHANNELS, CONF_MASTER_PORT, CONF_SAMPLE_RATE, + SAMPLES_PER_FRAME, +}; +use anyhow::Result; +use parking_lot::Mutex; +use pjsua::*; +use std::sync::OnceLock; + +/// Get the pjmedia_conf pointer from the master port +/// The conference bridge pointer is stored in master_port->port_data.pdata +/// Returns None if master port is not initialized +/// +/// This is public so other modules (direct_player, looping_player) can use it +/// to bypass PJSUA_LOCK when connecting/disconnecting ports. 
+pub unsafe fn get_conference_bridge() -> Option<*mut pjmedia_conf> { + let port_guard = CONF_MASTER_PORT.get()?; + let master_port = port_guard.lock().0; + if master_port.is_null() { + return None; + } + let conf = (*master_port).port_data.pdata as *mut pjmedia_conf; + if conf.is_null() { + return None; + } + Some(conf) +} + +/// Write audio samples into a pjmedia_frame, padding with silence if fewer +/// than SAMPLES_PER_FRAME samples are provided. +/// +/// # Safety +/// `frame` must be a valid, non-null pointer to a pjmedia_frame with a buffer +/// large enough for SAMPLES_PER_FRAME i16 samples. +pub unsafe fn fill_audio_frame(frame: *mut pjmedia_frame, samples: &[i16]) { + let frame_buf = (*frame).buf as *mut i16; + std::ptr::copy_nonoverlapping(samples.as_ptr(), frame_buf, samples.len()); + // Pad with silence if we got fewer samples than a full frame + if samples.len() < SAMPLES_PER_FRAME { + std::ptr::write_bytes( + frame_buf.add(samples.len()), + 0, + SAMPLES_PER_FRAME - samples.len(), + ); + } + (*frame).size = (SAMPLES_PER_FRAME * 2) as pj_size_t; + (*frame).type_ = pjmedia_frame_type_PJMEDIA_FRAME_TYPE_AUDIO; +} + +/// Fill a pjmedia_frame with silence. +/// +/// # Safety +/// `frame` must be a valid, non-null pointer to a pjmedia_frame with a buffer +/// large enough for SAMPLES_PER_FRAME i16 samples. +pub unsafe fn fill_silence_frame(frame: *mut pjmedia_frame) { + let frame_buf = (*frame).buf as *mut u8; + std::ptr::write_bytes(frame_buf, 0, SAMPLES_PER_FRAME * 2); + (*frame).size = (SAMPLES_PER_FRAME * 2) as pj_size_t; + (*frame).type_ = pjmedia_frame_type_PJMEDIA_FRAME_TYPE_AUDIO; +} + +/// No-op put_frame callback for ports that only produce audio. +/// +/// # Safety +/// Called by the pjmedia conference bridge. 
+pub unsafe extern "C" fn noop_put_frame( + _this_port: *mut pjmedia_port, + _frame: *mut pjmedia_frame, +) -> pj_status_t { + pj_constants__PJ_SUCCESS as pj_status_t +} + +// Conference port guard and creation helper + +/// Callbacks for a custom pjmedia port. +pub struct PortCallbacks { + pub get_frame: unsafe extern "C" fn(*mut pjmedia_port, *mut pjmedia_frame) -> pj_status_t, + pub put_frame: unsafe extern "C" fn(*mut pjmedia_port, *mut pjmedia_frame) -> pj_status_t, + pub on_destroy: Option pj_status_t>, +} + +/// RAII guard for a conference port. Removes port from conference on drop. +pub struct ConfPortGuard { + pub slot: ConfPort, + pub port_key: usize, +} + +impl Drop for ConfPortGuard { + fn drop(&mut self) { + unsafe { + pjsua_conf_remove_port(*self.slot); + } + tracing::debug!( + "ConfPortGuard: removed conf port slot={} (port={:p})", + self.slot, + self.port_key as *const () + ); + } +} + +/// Allocate a pjmedia port, init it, add to conference, and connect to a call's conf port. +/// Returns a `ConfPortGuard` that auto-cleans-up on drop. +/// +/// # Safety +/// Must be called from the audio thread or while holding appropriate locks. 
+pub unsafe fn create_and_connect_port( + pool: &OnceLock>, + pool_name: &[u8], + name_prefix: &str, + call_id: CallId, + signature: u32, + callbacks: PortCallbacks, + call_conf_port: ConfPort, +) -> Result { + // Get or create the memory pool + let pool = pool.get_or_init(|| { + let p = pjsua_pool_create(pool_name.as_ptr() as *const _, 4096, 4096); + Mutex::new(SendablePool(p)) + }); + let pool_ptr = pool.lock().0; + + // Allocate pjmedia_port structure + let port_size = std::mem::size_of::(); + let port = pj_pool_alloc(pool_ptr, port_size) as *mut pjmedia_port; + if port.is_null() { + anyhow::bail!( + "Failed to allocate {} port for call {}", + name_prefix, + call_id + ); + } + std::ptr::write_bytes(port as *mut u8, 0, port_size); + + // Create port name + let port_name = format!("{}{}", name_prefix, call_id); + let port_name_cstr = std::ffi::CString::new(port_name) + .map_err(|e| anyhow::anyhow!("Invalid port name: {}", e))?; + + // Initialize port info + pjmedia_port_info_init( + &mut (*port).info, + &pj_str(port_name_cstr.as_ptr() as *mut _), + signature, + CONF_SAMPLE_RATE, + CONF_CHANNELS, + 16, + SAMPLES_PER_FRAME as u32, + ); + + // Set callbacks + (*port).get_frame = Some(callbacks.get_frame); + (*port).put_frame = Some(callbacks.put_frame); + (*port).on_destroy = callbacks.on_destroy; + + // Add to conference + let mut player_slot: i32 = 0; + let status = pjsua_conf_add_port(pool_ptr, port, &mut player_slot); + if status != pj_constants__PJ_SUCCESS as i32 { + anyhow::bail!("Failed to add {} port to conf: {}", name_prefix, status); + } + + // Connect player port to the target call's port + let conf = get_conference_bridge(); + let Some(conf) = conf else { + pjsua_conf_remove_port(player_slot); + anyhow::bail!("Failed to get conference bridge for {} port", name_prefix); + }; + + let status = pjmedia_conf_connect_port(conf, player_slot as u32, *call_conf_port as u32, 0); + if status != pj_constants__PJ_SUCCESS as i32 { + pjsua_conf_remove_port(player_slot); 
+ anyhow::bail!("Failed to connect {} port to call: {}", name_prefix, status); + } + + Ok(ConfPortGuard { + slot: ConfPort::new(player_slot), + port_key: port as usize, + }) +} diff --git a/sipcord-bridge/src/transport/sip/ffi/init.rs b/sipcord-bridge/src/transport/sip/ffi/init.rs new file mode 100644 index 0000000..58f771f --- /dev/null +++ b/sipcord-bridge/src/transport/sip/ffi/init.rs @@ -0,0 +1,918 @@ +//! PJSUA initialization and core control functions +//! +//! This module handles: +//! - PJSUA initialization and configuration +//! - TLS transport creation and hot-reload +//! - Shutdown and thread registration + +use super::super::audio_thread::stop_audio_thread; +use std::fmt; + +/// SIP invite session state (Rust wrapper for pjsip_inv_state) +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum InvState { + Null, + Calling, + Incoming, + Early, + Connecting, + Confirmed, + Disconnected, + Unknown(u32), +} + +impl From for InvState { + fn from(state: u32) -> Self { + match state { + x if x == pjsip_inv_state_PJSIP_INV_STATE_NULL => InvState::Null, + x if x == pjsip_inv_state_PJSIP_INV_STATE_CALLING => InvState::Calling, + x if x == pjsip_inv_state_PJSIP_INV_STATE_INCOMING => InvState::Incoming, + x if x == pjsip_inv_state_PJSIP_INV_STATE_EARLY => InvState::Early, + x if x == pjsip_inv_state_PJSIP_INV_STATE_CONNECTING => InvState::Connecting, + x if x == pjsip_inv_state_PJSIP_INV_STATE_CONFIRMED => InvState::Confirmed, + x if x == pjsip_inv_state_PJSIP_INV_STATE_DISCONNECTED => InvState::Disconnected, + x => InvState::Unknown(x), + } + } +} + +impl fmt::Display for InvState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + InvState::Null => write!(f, "NULL"), + InvState::Calling => write!(f, "CALLING"), + InvState::Incoming => write!(f, "INCOMING"), + InvState::Early => write!(f, "EARLY"), + InvState::Connecting => write!(f, "CONNECTING"), + InvState::Confirmed => write!(f, "CONFIRMED"), + InvState::Disconnected => write!(f, 
/// Known PJSIP error conditions detected from log messages.
///
/// PJSIP's log callback only provides (level, string) — no structured error codes.
/// We pattern-match known messages to classify them into actionable variants.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PjsipEvent {
    /// All call slots exhausted — new INVITEs are rejected with 486 Busy Here
    TooManyCalls,
    /// SSL/TLS handshake failed with a remote peer
    SslHandshakeError,
    /// Failed to send a SIP response
    SendResponseFailed,
    /// ICE negotiation failed
    IceNegotiationFailed,
    /// Transport error (TCP/UDP)
    TransportError,
    /// No matching codec for call
    NoMatchingCodec,
    /// SIP SUBSCRIBE for an unsupported event package (e.g. presence, dialog)
    /// — pjsip responds 489 Bad Event, which is correct; just noisy at ERROR level
    BadEventSubscription,
    /// Unclassified message — logged at pjsip's original level
    Unclassified,
}

impl PjsipEvent {
    /// Try to classify a pjsip log message into a known event.
    /// Returns the event variant and optionally an upgraded log level
    /// (None = use pjsip's original level).
    ///
    /// Fix: the override's generic parameter was garbled in extraction;
    /// restored to `Option<u8>`, established by the `Some(0)`/`Some(1)`/`Some(4)`
    /// values below and `level_override.unwrap_or(level as u8)` at the call site.
    fn classify(msg: &str) -> (Self, Option<u8>) {
        // Level overrides: 0=error, 1=error, 2=warn, 3=info
        if msg.contains("too many calls") {
            (Self::TooManyCalls, Some(0))
        } else if msg.contains("SSL_ERROR_SSL") || msg.contains("SSL_ERROR_SYSCALL") {
            (Self::SslHandshakeError, None)
        } else if msg.contains("Unable to send") && msg.contains("response") {
            (Self::SendResponseFailed, Some(1))
        } else if msg.contains("ICE") && msg.contains("failed") {
            (Self::IceNegotiationFailed, None)
        } else if msg.contains("Transport") && msg.contains("error") {
            (Self::TransportError, Some(1))
        } else if msg.contains("No matching codec") {
            (Self::NoMatchingCodec, None)
        } else if msg.contains("Unable to create server subscription") {
            // SIP clients SUBSCRIBE to presence/dialog after REGISTER — expected and harmless
            (Self::BadEventSubscription, Some(4))
        } else {
            (Self::Unclassified, None)
        }
    }

    /// Short tag for structured logging
    fn as_str(self) -> &'static str {
        match self {
            Self::TooManyCalls => "TOO_MANY_CALLS",
            Self::SslHandshakeError => "SSL_HANDSHAKE_ERROR",
            Self::SendResponseFailed => "SEND_RESPONSE_FAILED",
            Self::IceNegotiationFailed => "ICE_NEGOTIATION_FAILED",
            Self::TransportError => "TRANSPORT_ERROR",
            Self::NoMatchingCodec => "NO_MATCHING_CODEC",
            Self::BadEventSubscription => "BAD_EVENT_SUBSCRIBE",
            Self::Unclassified => "UNCLASSIFIED",
        }
    }
}

/// Extract "IP:PORT" from a PJSIP SSL error message.
///
/// PJSIP ssl_sock logs include `peer: IP:PORT` at the end of the message.
/// Returns the "IP:PORT" substring, or None if not found.
fn extract_ssl_peer(msg: &str) -> Option<&str> {
    let idx = msg.find("peer: ")?;
    let rest = &msg[idx + 6..];
    let trimmed = rest.trim();
    if trimmed.is_empty() {
        None
    } else {
        Some(trimmed)
    }
}
+/// We map PJSIP log levels to tracing levels, with overrides for known critical messages +/// that pjsip under-reports (e.g. "too many calls" logged at level 2/warn → upgraded to error). +unsafe extern "C" fn pjsip_log_callback(level: c_int, data: *const c_char, _len: c_int) { + if data.is_null() { + return; + } + + let c_str = std::ffi::CStr::from_ptr(data); + let msg = c_str.to_string_lossy(); + let msg = msg.trim_end(); + + let (event, level_override) = PjsipEvent::classify(msg); + let effective_level = level_override.unwrap_or(level as u8); + + if event == PjsipEvent::SslHandshakeError { + // Extract peer IP for structured logging context + let peer = extract_ssl_peer(msg).unwrap_or("unknown"); + tracing::warn!(target: "pjsip", event = "SSL_HANDSHAKE_ERROR", peer = peer, "{}", msg); + return; + } + + if event != PjsipEvent::Unclassified { + let tag = event.as_str(); + match effective_level { + 0 | 1 => tracing::error!(target: "pjsip", event = tag, "{}", msg), + 2 => tracing::warn!(target: "pjsip", event = tag, "{}", msg), + 3 => tracing::info!(target: "pjsip", event = tag, "{}", msg), + 4 => tracing::debug!(target: "pjsip", event = tag, "{}", msg), + _ => tracing::trace!(target: "pjsip", event = tag, "{}", msg), + } + } else { + match effective_level { + 0 | 1 => tracing::error!(target: "pjsip", "{}", msg), + 2 => tracing::warn!(target: "pjsip", "{}", msg), + 3 => tracing::info!(target: "pjsip", "{}", msg), + 4 => tracing::debug!(target: "pjsip", "{}", msg), + _ => tracing::trace!(target: "pjsip", "{}", msg), + } + } +} + +/// Set the global callback handlers +pub fn set_callbacks(handlers: CallbackHandlers) { + let callbacks = CALLBACKS.get_or_init(|| Mutex::new(None)); + *callbacks.lock() = Some(handlers); +} + +/// Initialize pjsua with optional TLS support +pub fn init_pjsua(config: &SipConfig, tls_config: Option<&TlsConfig>) -> Result<()> { + // Initialize public host config for Contact header rewriting on outgoing responses. 
+ // pjsua derives Contact from the TCP connection's local address (private IP), but + // external clients need the public hostname to route BYE back to us. + PUBLIC_HOST_CONFIG.get_or_init(|| { + if !config.public_host.is_empty() { + tracing::info!( + "Public host Contact rewriting enabled: {}:{}", + config.public_host, + config.port + ); + Some((config.public_host.clone(), config.port)) + } else { + None + } + }); + + // Initialize local network config for Contact header and SDP rewriting + LOCAL_NET_CONFIG.get_or_init(|| { + config.local_net.as_ref().and_then(|ln| { + match ln.cidr.parse::() { + Ok(net) => { + tracing::info!( + "Local network rewriting enabled: {} -> {} for CIDR {}, RTP public IP: {:?}", + config.public_host, ln.host, ln.cidr, config.rtp_public_ip + ); + Some((ln.host.clone(), net, config.port, config.rtp_public_ip.clone())) + } + Err(e) => { + tracing::error!("Invalid SIP_LOCAL_CIDR '{}': {}", ln.cidr, e); + None + } + } + }) + }); + + unsafe { + // Create pjsua instance + let status = pjsua_create(); + if status != pj_constants__PJ_SUCCESS as i32 { + anyhow::bail!("Failed to create pjsua: {}", status); + } + + // Disable automatic UDP->TCP switch for large SIP messages. + // pjsip switches to TCP when a request exceeds 1300 bytes, but for + // outbound calls to NATted clients, the client's UDP NAT mapping + // won't accept TCP connections. We must respect the transport the + // client registered with. + { + extern "C" { + static mut pjsip_sip_cfg_var: pjsip_cfg_t; + } + pjsip_sip_cfg_var.endpt.disable_tcp_switch = pj_constants__PJ_TRUE as _; + tracing::info!("Disabled automatic UDP->TCP switch for large SIP messages"); + } + + // Configure pjsua + let mut cfg = MaybeUninit::::uninit(); + pjsua_config_default(cfg.as_mut_ptr()); + let cfg_ptr = cfg.assume_init_mut(); + + // Allow enough concurrent call slots for real calls + spam that's being rejected. + // Compile-time PJSUA_MAX_CALLS is set to 128 in config_site.h. 
+ cfg_ptr.max_calls = 128; + + // Set callbacks + cfg_ptr.cb.on_incoming_call = Some(on_incoming_call_cb); + cfg_ptr.cb.on_call_state = Some(on_call_state_cb); + cfg_ptr.cb.on_call_media_state = Some(on_call_media_state_cb); + cfg_ptr.cb.on_dtmf_digit = Some(on_dtmf_digit_cb); + cfg_ptr.cb.on_call_rx_reinvite = Some(on_call_rx_reinvite_cb); + + // Logging config - redirect PJSIP logs to Rust tracing + let mut log_cfg = MaybeUninit::::uninit(); + pjsua_logging_config_default(log_cfg.as_mut_ptr()); + let log_cfg_ptr = log_cfg.assume_init_mut(); + let configured_level = crate::config::AppConfig::bridge().pjsip_log_level; + tracing::info!("PJSIP log level from config: {}", configured_level); + log_cfg_ptr.level = configured_level as _; + log_cfg_ptr.console_level = configured_level as _; // Must match level — cb is gated by console_level + log_cfg_ptr.cb = Some(pjsip_log_callback); // Our callback replaces default console output + + // Media config + let mut media_cfg = MaybeUninit::::uninit(); + pjsua_media_config_default(media_cfg.as_mut_ptr()); + let media_cfg_ptr = media_cfg.assume_init_mut(); + + // Configure conference bridge for 16kHz mono + // This is the internal sample rate - pjsua will resample from codecs as needed + media_cfg_ptr.clock_rate = CONF_SAMPLE_RATE; + media_cfg_ptr.snd_clock_rate = CONF_SAMPLE_RATE; + media_cfg_ptr.channel_count = CONF_CHANNELS; + media_cfg_ptr.audio_frame_ptime = FRAME_PTIME_MS; + // Set default SDP ptime to match internal frame ptime + // If these differ, there can be timing mismatches + media_cfg_ptr.ptime = FRAME_PTIME_MS; + + // Log the media config + tracing::info!( + "Media config: clock_rate={}, snd_clock_rate={}, audio_frame_ptime={}, ptime={}", + media_cfg_ptr.clock_rate, + media_cfg_ptr.snd_clock_rate, + media_cfg_ptr.audio_frame_ptime, + media_cfg_ptr.ptime + ); + + // Initialize pjsua + let status = pjsua_init(cfg_ptr, log_cfg_ptr, media_cfg_ptr); + if status != pj_constants__PJ_SUCCESS as i32 { + 
anyhow::bail!("Failed to init pjsua: {}", status); + } + + // Create UDP transport + let mut t_cfg = MaybeUninit::::uninit(); + pjsua_transport_config_default(t_cfg.as_mut_ptr()); + let t_cfg_ptr = t_cfg.assume_init_mut(); + t_cfg_ptr.port = config.port as u32; + + // Set public address if specified - keep CString alive until transport is created + let public_host_cstring = if !config.public_host.is_empty() { + let host = CString::new(config.public_host.as_str()).context("Invalid public host")?; + t_cfg_ptr.public_addr = pj_str(host.as_ptr() as *mut c_char); + Some(host) + } else { + None + }; + + let mut transport_id: c_int = 0; + let status = pjsua_transport_create( + pjsip_transport_type_e_PJSIP_TRANSPORT_UDP, + t_cfg_ptr, + &mut transport_id, + ); + + // CString can be dropped now + drop(public_host_cstring); + + if status != pj_constants__PJ_SUCCESS as i32 { + anyhow::bail!("Failed to create UDP transport: {}", status); + } + + // Create TCP transport on the same port + let mut tcp_cfg = MaybeUninit::::uninit(); + pjsua_transport_config_default(tcp_cfg.as_mut_ptr()); + let tcp_cfg_ptr = tcp_cfg.assume_init_mut(); + tcp_cfg_ptr.port = config.port as u32; + + // Set public address for TCP - keep CString alive + let tcp_public_host_cstring = if !config.public_host.is_empty() { + let host = + CString::new(config.public_host.as_str()).context("Invalid public host for TCP")?; + tcp_cfg_ptr.public_addr = pj_str(host.as_ptr() as *mut c_char); + Some(host) + } else { + None + }; + + let mut tcp_transport_id: c_int = 0; + let status = pjsua_transport_create( + pjsip_transport_type_e_PJSIP_TRANSPORT_TCP, + tcp_cfg_ptr, + &mut tcp_transport_id, + ); + + drop(tcp_public_host_cstring); + + if status != pj_constants__PJ_SUCCESS as i32 { + anyhow::bail!("Failed to create TCP transport: {}", status); + } + + tracing::info!("TCP transport created on port {}", config.port); + + // Create TLS transport if configured (skip gracefully if certs missing) + if let Some(tls) = 
tls_config { + if !create_tls_transport(tls, &config.public_host)? { + tracing::warn!("TLS transport not created - running without TLS"); + } + } + + // Start pjsua + let status = pjsua_start(); + if status != pj_constants__PJ_SUCCESS as i32 { + anyhow::bail!("Failed to start pjsua: {}", status); + } + + // Configure codec priorities to keep INVITE SDP small. + // Without this, PJSUA offers every compiled codec (~16 entries) plus a + // T.140 text stream, producing an INVITE of ~1750 bytes. UDP packets + // over ~1300 bytes get IP-fragmented and are silently dropped by many + // NAT routers, which completely breaks outbound calls. + // + // Strategy: disable everything, then re-enable only what we need, + // ordered by quality (highest priority = preferred in SDP negotiation). + { + // Disable all audio codecs first + let all = CString::new("*").unwrap(); + pjsua_codec_set_priority(&pj_str(all.as_ptr() as *mut c_char), 0); + + // Re-enable desired codecs (highest priority = preferred in negotiation). + // NOTE: G722 is registered internally at 16000Hz in PJSIP despite the + // RFC 3551 SDP convention of advertising clock_rate=8000. 
+ let codecs: &[(&str, u8)] = &[ + ("opus/48000", 255), // Best quality: adaptive, wideband/fullband + ("G722/16000", 254), // Wideband 16kHz, widely supported + ("AMR/8000", 252), // Adaptive narrowband + ("PCMU/8000", 200), // G.711 mu-law, ubiquitous fallback + ("PCMA/8000", 199), // G.711 A-law, ubiquitous fallback + ("telephone-event", 200), // DTMF support (all sample rates) + ]; + + for (name, priority) in codecs { + let codec_id = CString::new(*name).unwrap(); + let status = + pjsua_codec_set_priority(&pj_str(codec_id.as_ptr() as *mut c_char), *priority); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!("Failed to set codec priority for {}: {}", name, status); + } + } + + tracing::info!( + "Codec priorities configured: {}", + codecs + .iter() + .map(|(n, p)| format!("{}={}", n, p)) + .collect::>() + .join(", ") + ); + } + + // Register custom module to handle REGISTER requests and Contact header rewriting + // pjsua's high-level API only handles call-related events, but SIP clients + // send REGISTER to register with the server. We intercept these at the PJSIP level. + // We also intercept outgoing responses to rewrite Contact headers for local clients. 
+ static mut REGISTER_MODULE: pjsip_module = pjsip_module { + prev: ptr::null_mut(), + next: ptr::null_mut(), + name: pj_str_t { + ptr: ptr::null_mut(), + slen: 0, + }, + id: -1, + priority: pjsip_module_priority_PJSIP_MOD_PRIORITY_APPLICATION as i32, + load: None, + start: None, + stop: None, + unload: None, + on_rx_request: Some(on_rx_request_cb), + on_rx_response: None, + on_tx_request: Some(on_tx_request_cb), + on_tx_response: Some(on_tx_response_cb), + on_tsx_state: None, + }; + + // Set module name (must be done at runtime since pj_str needs mutable ptr) + static MOD_NAME: &[u8] = b"mod-sipcord\0"; + REGISTER_MODULE.name = pj_str(MOD_NAME.as_ptr() as *mut c_char); + + // Get endpoint and register module + let endpt = pjsua_get_pjsip_endpt(); + if !endpt.is_null() { + let status = pjsip_endpt_register_module(endpt, &raw mut REGISTER_MODULE); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!("Failed to register REGISTER handler module: {}", status); + } else { + tracing::info!("Registered REGISTER handler module"); + // Store the module pointer so register_handler can create + // UAS transactions for deferred REGISTER responses. + super::super::register_handler::set_register_module_ptr(&raw mut REGISTER_MODULE); + } + } else { + tracing::warn!("Could not get PJSIP endpoint for module registration"); + } + + // Register NAT fixup module for far-end NAT traversal + // This rewrites private IPs in Contact headers and SDP bodies of incoming + // requests (INVITEs from NATted phones) and responses (for outbound calls) + // to the actual public source IP, fixing RTP delivery for phones behind NAT. + // + // Priority 15 = runs BEFORE TSX_LAYER(16). This is critical because the + // TSX layer's on_rx_response matches responses to transactions and then + // synchronously triggers the full dialog + invite session processing chain + // (updating remote target from Contact, SDP negotiation, ACK sending). 
+ // If NAT fixup ran after the TSX layer (as it did at priority 28), the + // dialog would see the original private IPs, causing ACK and RTP to be + // sent to unreachable private addresses. + static mut NAT_FIXUP_MODULE: pjsip_module = pjsip_module { + prev: ptr::null_mut(), + next: ptr::null_mut(), + name: pj_str_t { + ptr: ptr::null_mut(), + slen: 0, + }, + id: -1, + priority: 15, // Just before TSX_LAYER(16), after TRANSPORT_LAYER(8) + load: None, + start: None, + stop: None, + unload: None, + on_rx_request: Some(on_rx_request_nat_fixup_cb), + on_rx_response: Some(on_rx_response_nat_fixup_cb), + on_tx_request: None, + on_tx_response: None, + on_tsx_state: None, + }; + + static NAT_FIXUP_MOD_NAME: &[u8] = b"mod-nat-fixup\0"; + NAT_FIXUP_MODULE.name = pj_str(NAT_FIXUP_MOD_NAME.as_ptr() as *mut c_char); + + if !endpt.is_null() { + let status = pjsip_endpt_register_module(endpt, &raw mut NAT_FIXUP_MODULE); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!("Failed to register NAT fixup module: {}", status); + } else { + tracing::info!("Registered NAT fixup module (priority 15, before TSX layer)"); + } + } + + // Disable sound device and get the conference master port + // This allows us to manually control audio I/O + let master_port = pjsua_set_no_snd_dev(); + if master_port.is_null() { + anyhow::bail!("Failed to set null sound device"); + } + + // Verify the master port's actual sample rate + let master_port_info = &(*master_port).info; + let aud_fmt = &master_port_info.fmt.det.aud; + let actual_clock_rate = aud_fmt.clock_rate; + let actual_channel_count = aud_fmt.channel_count; + let actual_frame_time_usec = aud_fmt.frame_time_usec; + let actual_bits_per_sample = aud_fmt.bits_per_sample; + // Calculate samples per frame from frame time + let actual_samples_per_frame = (actual_clock_rate * actual_frame_time_usec) / 1_000_000; + + tracing::info!( + "Master port ACTUAL config: clock_rate={}, channels={}, frame_time={}us, bits={}, 
samples_per_frame={}", + actual_clock_rate, actual_channel_count, actual_frame_time_usec, actual_bits_per_sample, actual_samples_per_frame + ); + + // CRITICAL: Verify the conference bridge is actually at our configured rate + if actual_clock_rate != CONF_SAMPLE_RATE { + tracing::error!( + "SAMPLE RATE MISMATCH! Requested {}Hz but got {}Hz - audio will play at wrong speed!", + CONF_SAMPLE_RATE, actual_clock_rate + ); + } + + // Store the master port for audio thread access + let conf_port = CONF_MASTER_PORT.get_or_init(|| Mutex::new(SendablePort(ptr::null_mut()))); + conf_port.lock().0 = master_port; + + tracing::info!( + "Conference bridge configured: {}Hz, {} channel(s), {}ms frames ({} samples/frame)", + CONF_SAMPLE_RATE, + CONF_CHANNELS, + FRAME_PTIME_MS, + SAMPLES_PER_FRAME + ); + + // Create a local account for receiving calls + let mut acc_cfg = MaybeUninit::::uninit(); + pjsua_acc_config_default(acc_cfg.as_mut_ptr()); + let acc_cfg_ptr = acc_cfg.assume_init_mut(); + + // Local account ID - keep CString alive until account is added + let local_uri = CString::new(format!("sip:sipcord@{}", config.public_host)) + .context("Invalid local URI")?; + acc_cfg_ptr.id = pj_str(local_uri.as_ptr() as *mut c_char); + + // Enable incoming calls without registration + acc_cfg_ptr.register_on_acc_add = pj_constants__PJ_FALSE as i32; + + // Disable SIP session timers (RFC 4028). The bridge has its own RTP + // inactivity timeouts, and session timer UPDATEs break when the remote + // side is behind NAT (the UPDATE targets the Contact URI which may be + // unreachable, causing retransmit storms and eventual 408 disconnect). 
+ acc_cfg_ptr.use_timer = pjsua_sip_timer_use_PJSUA_SIP_TIMER_INACTIVE; + + // Configure RTP port range for media + // port is the starting port, port_range is how many consecutive ports to try + acc_cfg_ptr.rtp_cfg.port = config.rtp_port_start as u32; + acc_cfg_ptr.rtp_cfg.port_range = (config.rtp_port_end - config.rtp_port_start) as u32; + + // Set public IP for RTP if configured - this is advertised in SDP c= line + // Without this, pjsua uses the local interface IP which won't work for NAT + let rtp_public_ip_cstring = if let Some(ref public_ip) = config.rtp_public_ip { + let ip_cstr = CString::new(public_ip.as_str()).context("Invalid RTP public IP")?; + acc_cfg_ptr.rtp_cfg.public_addr = pj_str(ip_cstr.as_ptr() as *mut c_char); + tracing::info!( + "Account RTP config: port={}, port_range={} (ports {}-{}), public_addr={}", + acc_cfg_ptr.rtp_cfg.port, + acc_cfg_ptr.rtp_cfg.port_range, + config.rtp_port_start, + config.rtp_port_end, + public_ip + ); + Some(ip_cstr) + } else { + tracing::warn!( + "RTP_PUBLIC_IP not set - SDP will advertise local IP, external calls won't work!" 
+ ); + tracing::info!( + "Account RTP config: port={}, port_range={} (ports {}-{})", + acc_cfg_ptr.rtp_cfg.port, + acc_cfg_ptr.rtp_cfg.port_range, + config.rtp_port_start, + config.rtp_port_end + ); + None + }; + + let mut acc_id: pjsua_acc_id = 0; + let status = pjsua_acc_add(acc_cfg_ptr, pj_constants__PJ_TRUE as i32, &mut acc_id); + + // CStrings can be dropped now + drop(local_uri); + drop(rtp_public_ip_cstring); + + if status != pj_constants__PJ_SUCCESS as i32 { + anyhow::bail!("Failed to add account: {}", status); + } + + Ok(()) + } +} + +/// Create TLS transport for SIP-over-TLS +/// Returns Ok(true) if created, Ok(false) if skipped due to missing certs +fn create_tls_transport(tls_config: &TlsConfig, public_host: &str) -> Result { + // Check cert files exist before doing anything + let cert_path = tls_config.cert_path(); + let key_path = tls_config.key_path(); + + if !cert_path.exists() { + tracing::warn!( + "TLS certificate not found: {} - TLS disabled until cert is obtained", + cert_path.display() + ); + return Ok(false); + } + if !key_path.exists() { + tracing::warn!( + "TLS private key not found: {} - TLS disabled until cert is obtained", + key_path.display() + ); + return Ok(false); + } + + tracing::info!("TLS cert path: {}", cert_path.display()); + tracing::info!("TLS key path: {}", key_path.display()); + + unsafe { + let mut t_cfg = MaybeUninit::::uninit(); + pjsua_transport_config_default(t_cfg.as_mut_ptr()); + let t_cfg_ptr = t_cfg.assume_init_mut(); + + // Set TLS port + t_cfg_ptr.port = tls_config.port as u32; + + // Set public address + let public_host_cstring = CString::new(public_host).context("Invalid public host")?; + t_cfg_ptr.public_addr = pj_str(public_host_cstring.as_ptr() as *mut c_char); + + let cert_path_cstring = + CString::new(cert_path.to_str().unwrap()).context("Invalid cert path")?; + let key_path_cstring = + CString::new(key_path.to_str().unwrap()).context("Invalid key path")?; + + // Set certificate and key + 
t_cfg_ptr.tls_setting.cert_file = pj_str(cert_path_cstring.as_ptr() as *mut c_char); + t_cfg_ptr.tls_setting.privkey_file = pj_str(key_path_cstring.as_ptr() as *mut c_char); + + // Also set CA list to the cert file (contains the chain) so pjsip sends full chain + t_cfg_ptr.tls_setting.ca_list_file = pj_str(cert_path_cstring.as_ptr() as *mut c_char); + + // Create TLS transport + let mut transport_id: c_int = 0; + let status = pjsua_transport_create( + pjsip_transport_type_e_PJSIP_TRANSPORT_TLS, + t_cfg_ptr, + &mut transport_id, + ); + + // CStrings can be dropped now + drop(public_host_cstring); + drop(cert_path_cstring); + drop(key_path_cstring); + + if status != pj_constants__PJ_SUCCESS as i32 { + anyhow::bail!("Failed to create TLS transport: {}", status); + } + + // Store transport ID for potential reload + let tls_id = TLS_TRANSPORT_ID.get_or_init(|| Mutex::new(None)); + *tls_id.lock() = Some(transport_id); + + tracing::info!( + "TLS transport created on port {} (transport_id={})", + tls_config.port, + transport_id + ); + + Ok(true) + } +} + +/// Reload TLS transport with updated certificates, or create it if it didn't exist +/// +/// This should only be called when there are no active calls. +/// Returns Ok(true) if reload/create was successful, Ok(false) if skipped (certs missing or calls active). 
+pub fn reload_tls_transport(tls_config: &TlsConfig, public_host: &str) -> Result { + // Check active calls - don't reload if calls are active + let active_calls = COUNTED_CALL_IDS + .get() + .map(|ids| ids.lock().len()) + .unwrap_or(0); + if active_calls > 0 { + tracing::info!("Skipping TLS reload: {} active calls", active_calls); + return Ok(false); + } + + // Check if we have an existing TLS transport to close first + let tls_id_lock = TLS_TRANSPORT_ID.get_or_init(|| Mutex::new(None)); + let old_transport_id = { + let guard = tls_id_lock.lock(); + *guard + }; + + if let Some(old_id) = old_transport_id { + tracing::info!("Closing existing TLS transport (id={})", old_id); + + unsafe { + // Close old transport + let status = pjsua_transport_close(old_id, pj_constants__PJ_FALSE as i32); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!("Failed to close old TLS transport: {}", status); + // Continue anyway - we'll try to create a new one + } + } + + // Clear the stored transport ID + { + let mut guard = tls_id_lock.lock(); + *guard = None; + } + } else { + tracing::info!("No existing TLS transport - creating new one"); + } + + // Create new TLS transport (returns false if certs missing) + let created = create_tls_transport(tls_config, public_host)?; + + if created { + // Clear reload pending flag + TLS_RELOAD_PENDING.store(false, Ordering::SeqCst); + tracing::info!("TLS transport created/reloaded successfully"); + } + + Ok(created) +} + +/// Set TLS reload pending flag +pub fn set_tls_reload_pending(pending: bool) { + TLS_RELOAD_PENDING.store(pending, Ordering::SeqCst); +} + +/// Get the count of active media calls +pub fn active_media_call_count() -> usize { + COUNTED_CALL_IDS + .get() + .map(|ids| ids.lock().len()) + .unwrap_or(0) +} + +/// Process pjsua events (call from event loop) +pub fn process_pjsua_events(timeout_ms: u32) -> Result<()> { + unsafe { + pj_thread_sleep(timeout_ms); + } + Ok(()) +} + +/// Answer an incoming call with 200 OK +/// 
+/// This calls pjsua_call_answer directly. We previously queued this to the audio +/// thread to avoid deadlocks, but the actual deadlock was with pjsua_conf_connect +/// (now fixed by using pjmedia_conf_connect_port). Calling answer from the SIP +/// command thread is safe and avoids blocking the audio thread. +pub fn answer_call(call_id: CallId) { + unsafe { + // Get call info to check state before answering + let mut ci = MaybeUninit::::uninit(); + if pjsua_call_get_info(*call_id, ci.as_mut_ptr()) == pj_constants__PJ_SUCCESS as i32 { + let ci = ci.assume_init(); + let state = InvState::from(ci.state); + tracing::info!( + "Answering call {} with 200 OK (current_state={}, media_status={})", + call_id, + state, + ci.media_status + ); + } else { + tracing::info!( + "Answering call {} with 200 OK (couldn't get call info)", + call_id + ); + } + + // Call directly - this is safe now that we use pjmedia_conf_connect_port + // instead of pjsua_conf_connect in the audio thread + let status = pjsua_call_answer(*call_id, 200, ptr::null(), ptr::null()); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!("Failed to answer call {}: status={}", call_id, status); + } else { + tracing::info!("Call {} answered with 200 OK successfully", call_id); + } + } +} + +/// Send 183 Session Progress (establishes early media for connecting sound) +/// +/// This sends SDP to the caller, allowing them to hear audio before the call is +/// fully answered with 200 OK. Used to play the "connecting" sound while we +/// wait for Discord to connect. 
+pub fn send_183_session_progress(call_id: CallId) { + unsafe { + // Get call info to check state before sending 183 + let mut ci = MaybeUninit::::uninit(); + if pjsua_call_get_info(*call_id, ci.as_mut_ptr()) == pj_constants__PJ_SUCCESS as i32 { + let ci = ci.assume_init(); + let state = InvState::from(ci.state); + tracing::info!( + "Sending 183 Session Progress for call {} (current_state={}, media_status={})", + call_id, + state, + ci.media_status + ); + } else { + tracing::info!( + "Sending 183 Session Progress for call {} (couldn't get call info)", + call_id + ); + } + + // Create reason string + let reason = CString::new("Session Progress").unwrap(); + let reason_pj = pj_str(reason.as_ptr() as *mut c_char); + + let status = pjsua_call_answer(*call_id, 183, &reason_pj, ptr::null()); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!("Failed to send 183 for call {}: status={}", call_id, status); + } else { + tracing::info!("Call {} sent 183 Session Progress successfully", call_id); + } + } +} + +/// Hangup a call +pub fn hangup_call(call_id: CallId) { + unsafe { + pjsua_call_hangup(*call_id, 0, ptr::null(), ptr::null()); + } +} + +/// Shutdown pjsua and clean up resources +pub fn shutdown_pjsua() { + tracing::info!("Shutting down pjsua..."); + + // Stop and join audio thread first (must complete before pjsua_destroy) + stop_audio_thread(); + + unsafe { + // Destroy pjsua + tracing::info!("Calling pjsua_destroy..."); + pjsua_destroy(); + } + + tracing::info!("pjsua shutdown complete"); +} + +/// Register the current thread with PJLIB so it can safely call PJSUA functions. +/// +/// Must be called once per thread before any PJSUA calls (except from the main thread +/// that called pjsua_create, which is already registered). +/// +/// Returns true if registration succeeded (or thread was already registered). 
+pub fn register_thread_with_pjlib(thread_name: &str) -> bool { + unsafe { + // Check if already registered + if pj_thread_is_registered() == pj_constants__PJ_TRUE as i32 { + return true; + } + + // Thread descriptor must live for the lifetime of the thread. + // Using a thread-local static to ensure it stays alive. + thread_local! { + static THREAD_DESC: std::cell::UnsafeCell = + const { std::cell::UnsafeCell::new([0; 64]) }; + } + + THREAD_DESC.with(|desc| { + let name = CString::new(thread_name).unwrap_or_default(); + let mut thread_handle: *mut pj_thread_t = std::ptr::null_mut(); + + let status = pj_thread_register( + name.as_ptr() as *mut c_char, + (*desc.get()).as_mut_ptr(), + &mut thread_handle, + ); + + status == pj_constants__PJ_SUCCESS as i32 + }) + } +} diff --git a/sipcord-bridge/src/transport/sip/ffi/looping_player.rs b/sipcord-bridge/src/transport/sip/ffi/looping_player.rs new file mode 100644 index 0000000..65631a6 --- /dev/null +++ b/sipcord-bridge/src/transport/sip/ffi/looping_player.rs @@ -0,0 +1,227 @@ +//! Looping audio player for early media +//! +//! Provides a looping player that plays audio repeatedly until stopped. +//! Used for the "connecting" sound during call setup (183 Session Progress). 
+ +use super::types::*; +use anyhow::Result; +use parking_lot::Mutex; +use pjsua::*; +use std::collections::HashMap; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::OnceLock; + +/// Global state for looping players: call_id -> LoopingPlayerState +pub static LOOPING_PLAYERS: OnceLock>> = OnceLock::new(); + +/// Memory pool for looping player ports +pub static LOOPING_PLAYER_POOL: OnceLock> = OnceLock::new(); + +/// Port key -> (samples, position, is_active) mapping for get_frame callback +pub static LOOPING_PLAYER_DATA: OnceLock>> = + OnceLock::new(); + +/// Data needed by the get_frame callback +pub struct LoopingPlayerData { + pub samples: Vec, + pub position: usize, + pub is_active: AtomicBool, +} + +/// State for a looping player +pub struct LoopingPlayerState { + /// Conference slot for this player + pub conf_slot: ConfPort, + /// Port pointer (for cleanup) + pub port_key: usize, +} + +/// Custom get_frame callback for looping player ports +/// Returns samples from the player's buffer, looping back to start when reaching end +pub unsafe extern "C" fn looping_player_get_frame( + this_port: *mut pjmedia_port, + frame: *mut pjmedia_frame, +) -> pj_status_t { + use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; + + static GET_FRAME_CALL_COUNT: AtomicU64 = AtomicU64::new(0); + let call_count = GET_FRAME_CALL_COUNT.fetch_add(1, AtomicOrdering::Relaxed); + + // Log first 10 calls to confirm this callback is being invoked + if call_count < 10 { + tracing::trace!( + "looping_player_get_frame called (call #{}, port={:p})", + call_count, + this_port + ); + } else if call_count == 10 { + tracing::trace!("looping_player_get_frame: suppressing further per-call logs"); + } + + if this_port.is_null() || frame.is_null() { + return -1; + } + + let port_key = this_port as usize; + + // Get samples from the player's buffer and fill frame directly (no intermediate Vec) + { + let data = LOOPING_PLAYER_DATA.get_or_init(|| Mutex::new(HashMap::new())); + 
let mut data = data.lock(); + + if let Some(player_data) = data.get_mut(&port_key) { + if player_data.is_active.load(Ordering::SeqCst) && !player_data.samples.is_empty() { + let pos = player_data.position; + let end = (pos + SAMPLES_PER_FRAME).min(player_data.samples.len()); + super::frame_utils::fill_audio_frame(frame, &player_data.samples[pos..end]); + + // Advance position, loop back if at end + player_data.position = if end >= player_data.samples.len() { + 0 + } else { + end + }; + } else { + super::frame_utils::fill_silence_frame(frame); + } + } else { + super::frame_utils::fill_silence_frame(frame); + } + } + + pj_constants__PJ_SUCCESS as pj_status_t +} + +/// Custom on_destroy callback for looping player ports +pub unsafe extern "C" fn looping_player_on_destroy(this_port: *mut pjmedia_port) -> pj_status_t { + if !this_port.is_null() { + let port_key = this_port as usize; + if let Some(data) = LOOPING_PLAYER_DATA.get() { + data.lock().remove(&port_key); + } + tracing::debug!("Looping player port destroyed: {:p}", this_port); + } + pj_constants__PJ_SUCCESS as pj_status_t +} + +/// Start a looping player for a call +/// +/// Creates a pjmedia_port that loops the given samples and connects it to the call. +/// The loop continues until stop_loop is called. 
+pub fn start_loop(call_id: CallId, samples: Vec) -> Result<()> { + use super::frame_utils::{create_and_connect_port, PortCallbacks}; + + // Check if already looping for this call + { + let players = LOOPING_PLAYERS.get_or_init(|| Mutex::new(HashMap::new())); + if players.lock().contains_key(&call_id) { + tracing::warn!("Looping player already exists for call {}", call_id); + return Ok(()); + } + } + + // Get call's conference port + let call_conf_port = CALL_CONF_PORTS + .get() + .and_then(|p| p.get(&call_id).map(|r| *r)) + .ok_or_else(|| { + anyhow::anyhow!("No conf_port for call {} - media not ready yet", call_id) + })?; + + let guard = unsafe { + let callbacks = PortCallbacks { + get_frame: looping_player_get_frame, + put_frame: super::frame_utils::noop_put_frame, + on_destroy: Some(looping_player_on_destroy), + }; + + let guard = create_and_connect_port( + &LOOPING_PLAYER_POOL, + b"looping_players\0", + "loop", + call_id, + 0x4C4F_4F50, // "LOOP" + callbacks, + call_conf_port, + )?; + + // Store samples in the player data with the actual port key + { + let data = LOOPING_PLAYER_DATA.get_or_init(|| Mutex::new(HashMap::new())); + data.lock().insert( + guard.port_key, + LoopingPlayerData { + samples, + position: 0, + is_active: AtomicBool::new(true), + }, + ); + } + + tracing::debug!( + "Started looping player for call {} (player_slot={}, call_port={})", + call_id, + guard.slot, + call_conf_port + ); + + guard + }; + + // Store player state (we manually manage the guard via stop_loop) + let players = LOOPING_PLAYERS.get_or_init(|| Mutex::new(HashMap::new())); + players.lock().insert( + call_id, + LoopingPlayerState { + conf_slot: guard.slot, + port_key: guard.port_key, + }, + ); + + // Forget the guard - stop_loop will handle cleanup manually + // (looping player needs explicit stop, not drop-based cleanup) + std::mem::forget(guard); + + Ok(()) +} + +/// Stop and clean up looping player for a call +pub fn stop_loop(call_id: CallId) { + let state = { + let players 
= LOOPING_PLAYERS.get_or_init(|| Mutex::new(HashMap::new())); + players.lock().remove(&call_id) + }; + + if let Some(state) = state { + // Mark as inactive (get_frame will return silence) + if let Some(data) = LOOPING_PLAYER_DATA.get() { + if let Some(player_data) = data.lock().get(&state.port_key) { + player_data.is_active.store(false, Ordering::SeqCst); + } + } + + // Remove from conference + tracing::trace!( + "stop_loop: BEFORE pjsua_conf_remove_port({}) for call {} [thread: {:?}]", + state.conf_slot, + call_id, + std::thread::current().id() + ); + unsafe { + pjsua_conf_remove_port(*state.conf_slot); + } + tracing::trace!( + "stop_loop: AFTER pjsua_conf_remove_port({}) for call {}", + state.conf_slot, + call_id + ); + + tracing::debug!( + "Stopped looping player for call {} (slot={})", + call_id, + state.conf_slot + ); + } else { + tracing::debug!("No looping player to stop for call {}", call_id); + } +} diff --git a/sipcord-bridge/src/transport/sip/ffi/mod.rs b/sipcord-bridge/src/transport/sip/ffi/mod.rs new file mode 100644 index 0000000..27473bc --- /dev/null +++ b/sipcord-bridge/src/transport/sip/ffi/mod.rs @@ -0,0 +1,31 @@ +//! Low-level pjsua FFI wrapper +//! +//! This module provides safe(r) Rust wrappers around the pjsua C library. +//! Pure FFI code only — application-level logic lives in the parent `sip` module. +//! +//! ## Module Structure +//! +//! - `types` - Constants, statics, wrapper types, DigestAuthParams, CallbackHandlers +//! - `utils` - String conversion utilities +//! - `init` - PJSUA initialization, TLS transport, shutdown +//! - `direct_player` - Direct player port for join sounds +//! - `streaming_player` - Streaming player for large files +//! - `looping_player` - Looping player for early media +//! - `test_tone` - Test tone generator (440Hz sine wave) +//! 
- `frame_utils` - Shared frame helpers and conference port guard + +// pub(super) so parent sip/ modules can access internal submodules directly +pub(super) mod direct_player; +pub(crate) mod frame_utils; +pub(super) mod init; +pub(super) mod looping_player; +pub(super) mod streaming_player; +pub(super) mod test_tone; +pub mod types; +pub(super) mod utils; + +// Re-export public API for external consumers (crate::transport::sip::*) +pub use direct_player::*; +pub use init::*; +pub use looping_player::*; +pub use types::*; diff --git a/sipcord-bridge/src/transport/sip/ffi/streaming_player.rs b/sipcord-bridge/src/transport/sip/ffi/streaming_player.rs new file mode 100644 index 0000000..4eb8c09 --- /dev/null +++ b/sipcord-bridge/src/transport/sip/ffi/streaming_player.rs @@ -0,0 +1,261 @@ +//! Streaming audio player port for large files +//! +//! This module provides a PJSUA conference port that streams audio from a FLAC file +//! to a specific call. Unlike direct_player (which buffers all samples in memory), +//! this reads from disk on-demand for large files (e.g., easter egg audio). +//! +//! ## Design: Pull Model +//! +//! The streaming player uses a "pull" model where PJSUA's conference bridge calls +//! `streaming_get_frame` when it needs audio samples. This ensures precise timing +//! controlled by the audio thread's deadline-based scheduler, avoiding the timing +//! drift issues of tokio::sleep-based "push" models. +//! +//! ## Hangup Detection +//! +//! The `streaming_get_frame` callback checks if the call still exists in +//! `CALL_CONF_PORTS`. If the call has ended, it marks the player as finished +//! and returns silence. This handles mid-stream hangups cleanly. 
+ +use super::types::*; +use crate::services::sound::StreamingPlayer; +use anyhow::Result; +use parking_lot::Mutex; +use pjsua::*; +use std::collections::HashMap; +use std::path::Path; +use std::sync::OnceLock; + +/// Global state for streaming players: port_ptr -> StreamingPlayerState +pub static STREAMING_PLAYER_STATE: OnceLock>> = + OnceLock::new(); + +/// Memory pool for streaming player ports +pub static STREAMING_PLAYER_POOL: OnceLock> = OnceLock::new(); + +/// State for a streaming player port +pub struct StreamingPlayerState { + /// The file-backed streaming player + pub player: StreamingPlayer, + /// Call ID (for hangup detection) + pub call_id: CallId, + /// Whether playback is finished (EOF or call ended) + pub finished: bool, + /// Whether to hangup when playback completes + pub hangup_on_complete: bool, +} + +/// Custom get_frame callback for streaming player ports +/// +/// This is called by the PJSUA conference bridge when it needs audio samples. +/// The timing is controlled by the audio thread's deadline-based scheduler, +/// ensuring precise 20ms frame intervals. 
+pub unsafe extern "C" fn streaming_get_frame( + this_port: *mut pjmedia_port, + frame: *mut pjmedia_frame, +) -> pj_status_t { + if this_port.is_null() || frame.is_null() { + return -1; // PJ_EINVAL + } + + let port_key = this_port as usize; + + // Get samples from the streaming player + let samples = { + let state = STREAMING_PLAYER_STATE.get_or_init(|| Mutex::new(HashMap::new())); + let mut state = state.lock(); + + if let Some(player_state) = state.get_mut(&port_key) { + // Check if call still exists (hangup detection) + if !player_state.finished { + let call_exists = CALL_CONF_PORTS + .get() + .map(|p| p.contains_key(&player_state.call_id)) + .unwrap_or(false); + + if !call_exists { + tracing::debug!( + "Call {} ended, stopping streaming (port {:p})", + player_state.call_id, + this_port + ); + player_state.finished = true; + } + } + + if player_state.finished { + // Already finished - return silence + Vec::new() + } else { + // Try to get the next frame from the streaming player + match player_state.player.get_frame(SAMPLES_PER_FRAME) { + Some(samples) => { + // Check if this was the last frame + if player_state.player.is_finished() { + player_state.finished = true; + tracing::debug!( + "Streaming playback finished for call {} (EOF)", + player_state.call_id + ); + } + samples + } + None => { + // No more samples - mark finished + player_state.finished = true; + tracing::debug!( + "Streaming playback finished for call {} (no more samples)", + player_state.call_id + ); + Vec::new() + } + } + } + } else { + Vec::new() + } + }; + + // Fill frame buffer + if !samples.is_empty() { + super::frame_utils::fill_audio_frame(frame, &samples); + } else { + super::frame_utils::fill_silence_frame(frame); + } + + pj_constants__PJ_SUCCESS as pj_status_t +} + +/// Custom on_destroy callback for streaming player ports +pub unsafe extern "C" fn streaming_on_destroy(this_port: *mut pjmedia_port) -> pj_status_t { + if !this_port.is_null() { + let port_key = this_port as usize; + if 
let Some(state) = STREAMING_PLAYER_STATE.get() { + state.lock().remove(&port_key); + } + tracing::debug!("Streaming player port destroyed: {:p}", this_port); + } + pj_constants__PJ_SUCCESS as pj_status_t +} + +/// Start streaming audio from a file to a call +/// +/// This creates a PJSUA conference port backed by a StreamingPlayer and connects +/// it to the specified call. The audio thread's conference bridge will call +/// `streaming_get_frame` every 20ms to pull samples. +/// +/// # Arguments +/// * `call_id` - The call to stream audio to +/// * `path` - Path to the FLAC file +/// * `hangup_on_complete` - Whether to hangup the call when playback finishes +pub fn start_streaming_to_call( + call_id: CallId, + path: &Path, + hangup_on_complete: bool, +) -> Result<()> { + use super::frame_utils::{create_and_connect_port, PortCallbacks}; + + // Create the streaming player + let player = StreamingPlayer::new(path)?; + + // Get call's conference port + let call_conf_port = CALL_CONF_PORTS + .get() + .and_then(|p| p.get(&call_id).map(|r| *r)) + .ok_or_else(|| { + anyhow::anyhow!("No conf_port for call {} - media not ready yet", call_id) + })?; + + let guard = unsafe { + let callbacks = PortCallbacks { + get_frame: streaming_get_frame, + put_frame: super::frame_utils::noop_put_frame, + on_destroy: Some(streaming_on_destroy), + }; + + let guard = create_and_connect_port( + &STREAMING_PLAYER_POOL, + b"streaming_players\0", + "strm", + call_id, + 0x5354_524D, // "STRM" + callbacks, + call_conf_port, + )?; + + // Store player state with the actual port key + { + let state = STREAMING_PLAYER_STATE.get_or_init(|| Mutex::new(HashMap::new())); + state.lock().insert( + guard.port_key, + StreamingPlayerState { + player, + call_id, + finished: false, + hangup_on_complete, + }, + ); + } + + tracing::info!( + "Started streaming {} to call {} (player_slot={}, call_port={})", + path.display(), + call_id, + guard.slot, + call_conf_port + ); + + guard + }; + + let port_key = 
guard.port_key; + + // Spawn a cleanup thread that watches for completion + // The ConfPortGuard handles pjsua_conf_remove_port when dropped + std::thread::spawn(move || { + loop { + std::thread::sleep(std::time::Duration::from_millis(100)); + + let (finished, hangup, call_id) = { + let state = STREAMING_PLAYER_STATE.get_or_init(|| Mutex::new(HashMap::new())); + let state = state.lock(); + + if let Some(player_state) = state.get(&port_key) { + ( + player_state.finished, + player_state.hangup_on_complete, + player_state.call_id, + ) + } else { + // State already removed - we're done + break; + } + }; + + if finished { + // Small delay to ensure last frame is sent + std::thread::sleep(std::time::Duration::from_millis(50)); + + // Drop guard to remove from conference + // on_destroy callback will clean up STREAMING_PLAYER_STATE + drop(guard); + + tracing::debug!( + "Cleaned up streaming player (port={:p})", + port_key as *const () + ); + + // Hangup if requested + if hangup { + tracing::info!("Hanging up call {} after streaming playback", call_id); + use super::types::queue_pjsua_op; + queue_pjsua_op(PendingPjsuaOp::Hangup { call_id }); + } + + break; + } + } + }); + + Ok(()) +} diff --git a/sipcord-bridge/src/transport/sip/ffi/test_tone.rs b/sipcord-bridge/src/transport/sip/ffi/test_tone.rs new file mode 100644 index 0000000..3c2f398 --- /dev/null +++ b/sipcord-bridge/src/transport/sip/ffi/test_tone.rs @@ -0,0 +1,220 @@ +//! Test tone player for diagnostic audio +//! +//! Provides a 440Hz sine wave generator that plays to a specific call +//! until the caller hangs up. Used for audio pipeline testing. 
+ +use super::streaming_player::STREAMING_PLAYER_POOL; +use super::types::*; +use anyhow::Result; +use parking_lot::Mutex; +use pjsua::*; +use std::collections::HashMap; +use std::sync::OnceLock; + +/// Precomputed 440Hz tone lookup table (one exact period = 400 samples at 16kHz) +/// gcd(16000, 440) = 40, so period = 16000/40 = 400 samples +static TONE_LUT: OnceLock> = OnceLock::new(); + +fn tone_lut() -> &'static [i16] { + TONE_LUT.get_or_init(|| { + (0..400) + .map(|i| { + let t = i as f64 / CONF_SAMPLE_RATE as f64; + (f64::sin(2.0 * std::f64::consts::PI * 440.0 * t) * 16000.0) as i16 + }) + .collect() + }) +} + +/// Global state for test tone players: port_ptr -> TestToneState +pub static TEST_TONE_STATE: OnceLock>> = OnceLock::new(); + +/// State for a test tone player port +pub struct TestToneState { + /// Call ID (for hangup detection) + pub call_id: CallId, + /// Current phase of the sine wave (in samples) + pub phase: u64, + /// Whether playback is finished (call ended) + pub finished: bool, +} + +/// Custom get_frame callback for test tone player ports +/// +/// Generates a 440Hz sine wave until the call ends. 
+pub unsafe extern "C" fn test_tone_get_frame( + this_port: *mut pjmedia_port, + frame: *mut pjmedia_frame, +) -> pj_status_t { + if this_port.is_null() || frame.is_null() { + return -1; // PJ_EINVAL + } + + let port_key = this_port as usize; + + // Get samples from precomputed LUT and fill frame directly + { + let state = TEST_TONE_STATE.get_or_init(|| Mutex::new(HashMap::new())); + let mut state = state.lock(); + + if let Some(tone_state) = state.get_mut(&port_key) { + // Check if call still exists (hangup detection) + if !tone_state.finished { + let call_exists = CALL_CONF_PORTS + .get() + .map(|p| p.contains_key(&tone_state.call_id)) + .unwrap_or(false); + + if !call_exists { + tracing::debug!( + "Call {} ended, stopping test tone (port {:p})", + tone_state.call_id, + this_port + ); + tone_state.finished = true; + } + } + + if tone_state.finished { + super::frame_utils::fill_silence_frame(frame); + } else { + // Copy from precomputed LUT with wraparound (two memcpy calls max) + let lut = tone_lut(); + let lut_len = lut.len(); + let phase = (tone_state.phase as usize) % lut_len; + tone_state.phase += SAMPLES_PER_FRAME as u64; + + let first_chunk = (lut_len - phase).min(SAMPLES_PER_FRAME); + let frame_buf = (*frame).buf as *mut i16; + std::ptr::copy_nonoverlapping( + lut[phase..phase + first_chunk].as_ptr(), + frame_buf, + first_chunk, + ); + + if first_chunk < SAMPLES_PER_FRAME { + let remaining = SAMPLES_PER_FRAME - first_chunk; + std::ptr::copy_nonoverlapping( + lut.as_ptr(), + frame_buf.add(first_chunk), + remaining, + ); + } + + (*frame).size = (SAMPLES_PER_FRAME * 2) as pj_size_t; + (*frame).type_ = pjmedia_frame_type_PJMEDIA_FRAME_TYPE_AUDIO; + } + } else { + super::frame_utils::fill_silence_frame(frame); + } + } + + pj_constants__PJ_SUCCESS as pj_status_t +} + +/// Custom on_destroy callback for test tone player ports +pub unsafe extern "C" fn test_tone_on_destroy(this_port: *mut pjmedia_port) -> pj_status_t { + if !this_port.is_null() { + let port_key = 
this_port as usize; + if let Some(state) = TEST_TONE_STATE.get() { + state.lock().remove(&port_key); + } + tracing::debug!("Test tone player port destroyed: {:p}", this_port); + } + pj_constants__PJ_SUCCESS as pj_status_t +} + +/// Start playing a 440Hz test tone to a call +/// +/// The tone plays indefinitely until the caller hangs up. No automatic hangup. +pub fn start_test_tone_to_call(call_id: CallId) -> Result<()> { + use super::frame_utils::{create_and_connect_port, PortCallbacks}; + + // Get call's conference port + let call_conf_port = CALL_CONF_PORTS + .get() + .and_then(|p| p.get(&call_id).map(|r| *r)) + .ok_or_else(|| { + anyhow::anyhow!("No conf_port for call {} - media not ready yet", call_id) + })?; + + let guard = unsafe { + let callbacks = PortCallbacks { + get_frame: test_tone_get_frame, + put_frame: super::frame_utils::noop_put_frame, + on_destroy: Some(test_tone_on_destroy), + }; + + let guard = create_and_connect_port( + &STREAMING_PLAYER_POOL, + b"streaming_players\0", + "tone", + call_id, + 0x544F_4E45, // "TONE" + callbacks, + call_conf_port, + )?; + + // Store player state with the actual port key + { + let state = TEST_TONE_STATE.get_or_init(|| Mutex::new(HashMap::new())); + state.lock().insert( + guard.port_key, + TestToneState { + call_id, + phase: 0, + finished: false, + }, + ); + } + + tracing::info!( + "Started 440Hz test tone for call {} (player_slot={}, call_port={})", + call_id, + guard.slot, + call_conf_port + ); + + guard + }; + + let port_key = guard.port_key; + + // Spawn a cleanup thread that watches for when the call ends + // The ConfPortGuard handles pjsua_conf_remove_port when dropped + std::thread::spawn(move || { + loop { + std::thread::sleep(std::time::Duration::from_millis(100)); + + let finished = { + let state = TEST_TONE_STATE.get_or_init(|| Mutex::new(HashMap::new())); + let state = state.lock(); + + if let Some(tone_state) = state.get(&port_key) { + tone_state.finished + } else { + // State already removed - we're 
done + break; + } + }; + + if finished { + // Small delay to ensure last frame is sent + std::thread::sleep(std::time::Duration::from_millis(50)); + + // Drop guard to remove from conference + // on_destroy callback will clean up TEST_TONE_STATE + drop(guard); + + tracing::debug!( + "Cleaned up test tone player (port={:p})", + port_key as *const () + ); + + break; + } + } + }); + + Ok(()) +} diff --git a/sipcord-bridge/src/transport/sip/ffi/types.rs b/sipcord-bridge/src/transport/sip/ffi/types.rs new file mode 100644 index 0000000..fa279e1 --- /dev/null +++ b/sipcord-bridge/src/transport/sip/ffi/types.rs @@ -0,0 +1,477 @@ +//! Low-level pjsua wrapper types and constants +//! +//! This module provides safe(r) Rust wrappers around the pjsua C library. +//! +//! ## Audio Architecture +//! +//! When using `pjsua_set_no_snd_dev()`, we take control of audio I/O: +//! - pjsua's conference bridge handles codec negotiation and mixing +//! - We periodically call `get_frame`/`put_frame` to exchange audio with the conference +//! - The conference outputs 16kHz mono PCM regardless of call codec (G.711, Opus, etc.) +//! - We resample to/from Discord's 48kHz stereo + +use crate::services::snowflake::Snowflake; +use crossbeam_channel::Sender; +use crossbeam_queue::SegQueue; +use dashmap::DashMap; +use ipnet::Ipv4Net; +use parking_lot::{Mutex, RwLock}; +use pjsua::*; +use std::collections::{HashMap, HashSet, VecDeque}; +use std::net::IpAddr; +use std::ops::Deref; +use std::path::PathBuf; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; +use std::sync::OnceLock; +use std::time::Instant; + +// CallId newtype + +/// Type-safe wrapper around `pjsua_call_id` (i32). +/// +/// Prevents accidental confusion with conference port IDs, account IDs, +/// and other bare `i32` values in the pjsua API. +#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct CallId(i32); + +impl CallId { + /// Sentinel for "no call" / invalid call ID. 
+ pub const INVALID: CallId = CallId(-1); + + pub const fn new(value: i32) -> Self { + Self(value) + } + + pub const fn get(self) -> i32 { + self.0 + } + + pub const fn is_valid(self) -> bool { + self.0 >= 0 + } +} + +impl Deref for CallId { + type Target = i32; + fn deref(&self) -> &i32 { + &self.0 + } +} + +impl From for CallId { + fn from(v: i32) -> Self { + Self(v) + } +} + +impl From for i32 { + fn from(c: CallId) -> i32 { + c.0 + } +} + +impl std::fmt::Display for CallId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl std::fmt::Debug for CallId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "CallId({})", self.0) + } +} + +// ConfPort newtype + +/// Type-safe wrapper around conference port slot IDs (`i32`). +/// +/// Prevents accidental confusion with `CallId`, account IDs, +/// and other bare `i32` values in the pjsua API. +#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct ConfPort(i32); + +impl ConfPort { + /// Sentinel for "no port" / invalid conf port. 
+ pub const INVALID: ConfPort = ConfPort(-1); + + pub const fn new(value: i32) -> Self { + Self(value) + } + + pub const fn get(self) -> i32 { + self.0 + } + + pub const fn is_valid(self) -> bool { + self.0 >= 0 + } +} + +impl Deref for ConfPort { + type Target = i32; + fn deref(&self) -> &i32 { + &self.0 + } +} + +impl From for ConfPort { + fn from(v: i32) -> Self { + Self(v) + } +} + +impl From for i32 { + fn from(c: ConfPort) -> i32 { + c.0 + } +} + +impl std::fmt::Display for ConfPort { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl std::fmt::Debug for ConfPort { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "ConfPort({})", self.0) + } +} + +/// SIP Digest auth parameters extracted from Authorization header +#[derive(Debug, Clone, Default)] +pub struct DigestAuthParams { + pub username: String, + pub realm: String, + pub nonce: String, + pub uri: String, + pub response: String, + pub method: String, + pub qop: Option, + pub nc: Option, + pub cnonce: Option, +} + +/// Callback handlers for SIP events +pub struct CallbackHandlers { + pub on_incoming_call: Box) + Send + Sync>, + pub on_call_authenticated: + Box) + Send + Sync>, + pub on_dtmf: Box, + pub on_call_ended: Box, + /// Audio frame callback: (channel_id, samples, sample_rate) + /// channel_id is the Discord channel ID (Snowflake) for per-channel routing + pub on_audio_frame: AudioFrameCallback, +} + +/// Callback type for audio frame delivery: (channel_id, samples, sample_rate) +pub type AudioFrameCallback = Box; + +/// Realm for our SIP server +pub const SIP_REALM: &str = "sipcord"; + +/// Conference bridge sample rate (16kHz) +pub const CONF_SAMPLE_RATE: u32 = 16000; + +/// Conference bridge channels (mono) +pub const CONF_CHANNELS: u32 = 1; + +/// Audio frame duration in ms +pub const FRAME_PTIME_MS: u32 = 20; + +/// Samples per frame = sample_rate * ptime / 1000 +pub const SAMPLES_PER_FRAME: usize = 
(CONF_SAMPLE_RATE * FRAME_PTIME_MS / 1000) as usize; + +// Config accessors — cached on first call via OnceLock (config is immutable at runtime). + +pub fn rtp_inactivity_timeout_secs() -> u64 { + static CACHED: OnceLock = OnceLock::new(); + *CACHED.get_or_init(|| crate::config::AppConfig::bridge().rtp_inactivity_timeout_secs) +} + +/// Shorter timeout for calls that never receive any RTP at all +pub fn no_audio_timeout_secs() -> u64 { + static CACHED: OnceLock = OnceLock::new(); + *CACHED.get_or_init(|| crate::config::AppConfig::bridge().no_audio_timeout_secs) +} + +pub fn empty_bridge_grace_period_secs() -> u64 { + static CACHED: OnceLock = OnceLock::new(); + *CACHED.get_or_init(|| crate::config::AppConfig::bridge().empty_bridge_grace_period_secs) +} + +pub fn max_channel_buffer_samples() -> usize { + static CACHED: OnceLock = OnceLock::new(); + *CACHED.get_or_init(|| crate::config::AppConfig::bridge().max_channel_buffer_samples) +} + +/// Wrapper for pjmedia_port pointer that is Send +/// Safety: pjsua is single-threaded and we only access this from the audio thread +pub struct SendablePort(pub *mut pjmedia_port); +unsafe impl Send for SendablePort {} +unsafe impl Sync for SendablePort {} + +/// Wrapper for pj_pool_t pointer +pub struct SendablePool(pub *mut pj_pool_t); +unsafe impl Send for SendablePool {} +unsafe impl Sync for SendablePool {} + +/// Type alias for local network config: (local_host, parsed_cidr, port, rtp_public_ip) +pub type LocalNetConfig = (String, Ipv4Net, u16, Option); + +/// Type alias for drain cache entry: (last_drain_time, cached_samples, sample_count) +/// Using Arc<[i16]> for single allocation (no separate Vec header). +/// Cache hit becomes Arc::clone() (zero-copy). 
+pub type DrainCacheEntry = (Instant, Arc<[i16]>, usize); + +/// Type alias for direct player entry: (samples buffer, current read position) +pub type DirectPlayerEntry = (Vec, usize); + +// Global statics + +/// Global callback handlers (pjsua uses global callbacks) +pub static CALLBACKS: OnceLock>> = OnceLock::new(); + +/// Audio output buffers per call (Discord -> SIP) +/// Using DashMap for lock-free concurrent access on audio hot path +pub static AUDIO_OUT_BUFFERS: OnceLock>> = OnceLock::new(); + +/// Master conference port (returned by pjsua_set_no_snd_dev) +pub static CONF_MASTER_PORT: OnceLock> = OnceLock::new(); + +/// Local network config for Contact header and SDP rewriting +/// Stored as (local_host, parsed_cidr, port, rtp_public_ip) for efficient lookup in the callback +/// rtp_public_ip is the IP that pjsua advertises in SDP - we replace it with local_host for local clients +pub static LOCAL_NET_CONFIG: OnceLock> = OnceLock::new(); + +/// Public host config for rewriting private IPs in Contact headers sent to external clients. +/// pjsua derives Contact from the TCP connection's local address (e.g. 10.0.1.7), but external +/// clients need the public hostname to route in-dialog requests (BYE) back to us. +/// Stored as (public_host, sip_port). +pub static PUBLIC_HOST_CONFIG: OnceLock> = OnceLock::new(); + +/// Flag to indicate audio thread should stop +pub static AUDIO_THREAD_RUNNING: AtomicBool = AtomicBool::new(false); + +/// Audio thread handle for joining on shutdown +pub static AUDIO_THREAD_HANDLE: OnceLock>>> = + OnceLock::new(); + +/// Flag indicating the audio thread has processed at least one frame +/// This is used to defer channel registration completions until the conference +/// bridge is actively being clocked. 
+pub static AUDIO_THREAD_READY: AtomicBool = AtomicBool::new(false); + +/// Queue of pending channel registrations to complete once audio thread is ready +/// Stores (call_id, conf_port) pairs that need complete_pending_channel_registration called +/// Uses lock-free SegQueue for zero-contention push/pop on the 50Hz audio thread +pub static PENDING_CHANNEL_COMPLETIONS: SegQueue<(CallId, ConfPort)> = SegQueue::new(); + +/// Queue of pending conference connections to be made by the audio thread +/// Stores (call_id, channel_id) pairs that need their conference connections made +/// This is used because pjsua_conf_connect conflicts with the audio thread's +/// pjmedia_port_get_frame calls if made from a different thread +/// Uses lock-free SegQueue for zero-contention push/pop on the 50Hz audio thread +pub static PENDING_CONF_CONNECTIONS: SegQueue<(CallId, Snowflake)> = SegQueue::new(); + +/// Pending PJSUA operations that must be executed by the audio thread +/// These operations modify the conference bridge and must be synchronized with get_frame +#[derive(Debug)] +pub enum PendingPjsuaOp { + /// Play samples directly to a call (for join sounds) + /// Note: This also stops any active looping player for the call first + PlayDirect { call_id: CallId, samples: Vec }, + /// Start streaming audio from a file to a call (for large easter egg files) + /// Uses pull model for precise timing - audio thread pulls frames as needed + StartStreaming { + call_id: CallId, + path: PathBuf, + hangup_on_complete: bool, + }, + /// Start playing a 440Hz test tone to a call (plays until caller hangs up) + StartTestTone { call_id: CallId }, + /// Hangup a call (used internally for cleanup after streaming) + Hangup { call_id: CallId }, + /// Start a looping audio player for early media (connecting sound) + /// Must run on audio thread to avoid race with pjmedia_port_get_frame + StartLoop { call_id: CallId, samples: Vec }, + /// Connect a fax audio port bidirectionally in the conference 
bridge. + /// Must run on the audio thread to avoid racing with pjmedia_port_get_frame. + /// The oneshot sender signals completion back to the async caller. + ConnectFaxPort { + call_id: CallId, + fax_slot: ConfPort, + call_conf_port: ConfPort, + done_tx: tokio::sync::oneshot::Sender, + }, +} + +/// Queue of pending PJSUA operations to be executed by the audio thread +/// Uses lock-free SegQueue for zero-contention push/pop on the 50Hz audio thread +pub static PENDING_PJSUA_OPS: SegQueue = SegQueue::new(); + +/// Set of call_ids with active media (used to start/stop audio thread) +/// This prevents double-counting or decrementing calls that were never counted +pub static COUNTED_CALL_IDS: OnceLock>> = OnceLock::new(); + +/// TLS transport ID (for reload support) +pub static TLS_TRANSPORT_ID: OnceLock>> = OnceLock::new(); + +/// Flag indicating TLS reload is pending +pub static TLS_RELOAD_PENDING: AtomicBool = AtomicBool::new(false); + +/// Per-call RTP activity tracking: call_id -> (last_rx_packet_count, last_activity_time) +/// Used to detect dead calls when SIP BYE is not received +pub static CALL_RTP_ACTIVITY: OnceLock>> = OnceLock::new(); + +/// Event sender for timeout events (set during callback setup) +pub static TIMEOUT_EVENT_TX: OnceLock>>> = + OnceLock::new(); + +// Per-channel audio isolation statics + +/// call_id -> conf_port mapping (for connecting/disconnecting calls) +/// Using DashMap for lock-free concurrent access on audio hot path +pub static CALL_CONF_PORTS: OnceLock> = OnceLock::new(); + +/// call_id -> channel_id mapping (which Discord channel each call belongs to) +/// Using DashMap for lock-free concurrent access on audio hot path +pub static CALL_CHANNELS: OnceLock> = OnceLock::new(); + +/// channel_id -> set of call_ids (all calls in each channel) +/// Uses RwLock: audio thread takes .read() (non-exclusive, 50Hz), call lifecycle takes .write() +pub static CHANNEL_CALLS: OnceLock>>> = OnceLock::new(); + +/// channel_id -> audio INPUT 
buffer (SIP -> Discord, per-channel) +/// Filled by channel_port_put_frame callback, drained by audio thread for Discord +/// Using DashMap for lock-free concurrent access on audio hot path +pub static CHANNEL_AUDIO_IN: OnceLock>> = OnceLock::new(); + +/// Per-channel conference ports: channel_id -> (pjmedia_port*, conf_slot) +/// Each channel gets its own CUSTOM BUFFER port for isolated Discord->SIP audio routing +/// Unlike null ports, these actually provide audio to the conference via get_frame callback +pub static CHANNEL_CONF_PORTS: OnceLock>> = + OnceLock::new(); + +/// Reverse mapping: port_ptr -> channel_id (for get_frame callback to find the right buffer) +pub static PORT_TO_CHANNEL: OnceLock>> = OnceLock::new(); + +/// Memory pool for creating channel ports +pub static CHANNEL_PORT_POOL: OnceLock> = OnceLock::new(); + +/// Global audio frame counter (incremented once per audio thread tick) +/// Used to prevent channel ports from being drained multiple times per frame +pub static AUDIO_FRAME_COUNTER: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0); + +/// Per-channel time-based cache: channel_id -> (last_drain_time, cached_samples) +/// If get_frame is called multiple times within 15ms (same PJSUA tick), we return the cached samples. +/// This prevents N callers from draining N*320 samples when they should all share the same frame. 
+/// Using DashMap for lock-free concurrent access on audio hot path +pub static CHANNEL_DRAIN_CACHE: OnceLock> = OnceLock::new(); + +// Direct player statics + +/// Direct player state: port_ptr -> (samples buffer, current read position) +/// Used for playing audio directly to a single call without going through channel buffer +pub static DIRECT_PLAYER_STATE: OnceLock>> = + OnceLock::new(); + +/// Memory pool for direct player ports +pub static DIRECT_PLAYER_POOL: OnceLock> = OnceLock::new(); + +/// Queue a PJSUA operation to be executed by the audio thread +pub fn queue_pjsua_op(op: PendingPjsuaOp) { + PENDING_PJSUA_OPS.push(op); +} + +#[cfg(test)] +mod tests { + use super::*; + + + #[test] + fn test_call_id_invalid() { + assert_eq!(CallId::INVALID.get(), -1); + assert!(!CallId::INVALID.is_valid()); + } + + #[test] + fn test_call_id_valid() { + assert!(CallId::new(0).is_valid()); + assert!(CallId::new(5).is_valid()); + } + + #[test] + fn test_call_id_deref() { + let id = CallId::new(42); + let val: &i32 = &id; + assert_eq!(*val, 42); + } + + #[test] + fn test_call_id_from_into() { + let id: CallId = 7.into(); + assert_eq!(id.get(), 7); + let raw: i32 = id.into(); + assert_eq!(raw, 7); + } + + #[test] + fn test_call_id_display_debug() { + let id = CallId::new(3); + assert_eq!(format!("{}", id), "3"); + assert_eq!(format!("{:?}", id), "CallId(3)"); + } + + + #[test] + fn test_conf_port_invalid() { + assert_eq!(ConfPort::INVALID.get(), -1); + assert!(!ConfPort::INVALID.is_valid()); + } + + #[test] + fn test_conf_port_valid() { + assert!(ConfPort::new(0).is_valid()); + assert!(ConfPort::new(5).is_valid()); + } + + #[test] + fn test_conf_port_deref() { + let port = ConfPort::new(10); + let val: &i32 = &port; + assert_eq!(*val, 10); + } + + #[test] + fn test_conf_port_from_into() { + let port: ConfPort = 9.into(); + assert_eq!(port.get(), 9); + let raw: i32 = port.into(); + assert_eq!(raw, 9); + } + + #[test] + fn test_conf_port_display_debug() { + let port = 
ConfPort::new(4); + assert_eq!(format!("{}", port), "4"); + assert_eq!(format!("{:?}", port), "ConfPort(4)"); + } + + + #[test] + fn test_digest_auth_params_default() { + let params = DigestAuthParams::default(); + assert!(params.username.is_empty()); + assert!(params.realm.is_empty()); + assert!(params.nonce.is_empty()); + assert!(params.uri.is_empty()); + assert!(params.response.is_empty()); + assert!(params.method.is_empty()); + assert!(params.qop.is_none()); + assert!(params.nc.is_none()); + assert!(params.cnonce.is_none()); + } +} diff --git a/sipcord-bridge/src/transport/sip/ffi/utils.rs b/sipcord-bridge/src/transport/sip/ffi/utils.rs new file mode 100644 index 0000000..04a1c78 --- /dev/null +++ b/sipcord-bridge/src/transport/sip/ffi/utils.rs @@ -0,0 +1,67 @@ +//! Utility functions for pjsua wrapper + +use pjsua::pj_str_t; + +/// Convert a pj_str_t to a Rust String +/// +/// # Safety +/// The pj_str_t must point to valid memory for `slen` bytes. +pub unsafe fn pj_str_to_string(s: &pj_str_t) -> String { + if s.ptr.is_null() || s.slen <= 0 { + return String::new(); + } + + let slice = std::slice::from_raw_parts(s.ptr as *const u8, s.slen as usize); + String::from_utf8_lossy(slice).to_string() +} + +/// Extract username from SIP URI (e.g., "" -> "username") +pub fn extract_sip_username(uri: &str) -> String { + // Remove angle brackets if present + let uri = uri.trim_start_matches('<').trim_end_matches('>'); + + // Remove "sip:" prefix + let uri = uri.strip_prefix("sip:").unwrap_or(uri); + + // Take everything before @ as username + if let Some(at_pos) = uri.find('@') { + uri[..at_pos].to_string() + } else { + uri.to_string() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_extract_sip_username_full_uri() { + assert_eq!(extract_sip_username(""), "alice"); + } + + #[test] + fn test_extract_sip_username_no_brackets() { + assert_eq!(extract_sip_username("sip:bob@domain"), "bob"); + } + + #[test] + fn test_extract_sip_username_no_sip_prefix() 
{ + assert_eq!(extract_sip_username("charlie@host"), "charlie"); + } + + #[test] + fn test_extract_sip_username_no_at() { + assert_eq!(extract_sip_username("sip:dave"), "dave"); + } + + #[test] + fn test_extract_sip_username_with_port() { + assert_eq!(extract_sip_username(""), "eve"); + } + + #[test] + fn test_extract_sip_username_empty() { + assert_eq!(extract_sip_username(""), ""); + } +} diff --git a/sipcord-bridge/src/transport/sip/fork_group.rs b/sipcord-bridge/src/transport/sip/fork_group.rs new file mode 100644 index 0000000..8eae67f --- /dev/null +++ b/sipcord-bridge/src/transport/sip/fork_group.rs @@ -0,0 +1,278 @@ +//! Fork group tracking for multi-contact outbound call forking. +//! +//! When a user has multiple SIP phones registered, the bridge rings all of them +//! simultaneously. A "fork group" tracks the set of forked call legs for a single +//! logical call (identified by tracking_id). When one leg answers, the siblings +//! are cancelled. When all legs fail, the failure is reported. + +use super::CallId; +use dashmap::DashMap; +use std::collections::HashSet; +use std::sync::OnceLock; +use tracing::{debug, info}; + +/// Global fork group registry, keyed by tracking_id. +static FORK_GROUPS: OnceLock> = OnceLock::new(); + +fn groups() -> &'static DashMap { + FORK_GROUPS.get_or_init(DashMap::new) +} + +struct ForkGroup { + /// Call IDs that were successfully started (active legs) + sibling_call_ids: HashSet, + /// Call IDs that have failed (answered-then-disconnected, or never-answered) + failed_call_ids: HashSet, + /// Number of calls that failed to even start (MakeOutboundCall returned error) + initial_failures: usize, + /// The call_id that answered first (if any) + answered_call_id: Option, + /// Total number of fork attempts (successful starts + initial failures) + expected_total: usize, +} + +/// Register a successfully started call leg in a fork group. +/// +/// Called from `process_sip_command` after `make_outbound_call` succeeds. 
+pub fn add_member(tracking_id: &str, call_id: CallId, fork_total: usize) { + let mut entry = groups() + .entry(tracking_id.to_string()) + .or_insert_with(|| ForkGroup { + sibling_call_ids: HashSet::new(), + failed_call_ids: HashSet::new(), + initial_failures: 0, + answered_call_id: None, + expected_total: fork_total, + }); + entry.sibling_call_ids.insert(call_id); + debug!( + "Fork group {}: added call_id={}, members={}/{}", + tracking_id, + call_id, + entry.sibling_call_ids.len() + entry.initial_failures, + fork_total + ); +} + +/// Track a call that failed to start (make_outbound_call returned error). +/// +/// Called from `process_sip_command` when `make_outbound_call` fails. +pub fn add_initial_failure(tracking_id: &str, fork_total: usize) { + let mut entry = groups() + .entry(tracking_id.to_string()) + .or_insert_with(|| ForkGroup { + sibling_call_ids: HashSet::new(), + failed_call_ids: HashSet::new(), + initial_failures: 0, + answered_call_id: None, + expected_total: fork_total, + }); + entry.initial_failures += 1; + debug!( + "Fork group {}: initial failure, failures={}/{}", + tracking_id, + entry.initial_failures + entry.failed_call_ids.len(), + fork_total + ); +} + +/// Mark a fork leg as answered. Returns the sibling call_ids to cancel (if this +/// is the first answer), or `None` if another leg already answered. +/// +/// The fork group is removed after this call since the logical call is resolved. 
+pub fn mark_answered(tracking_id: &str, call_id: CallId) -> Option> { + // Use remove to get exclusive ownership - prevents races between two simultaneous answers + let (_, mut group) = groups().remove(tracking_id)?; + + if group.answered_call_id.is_some() { + // Another leg already answered - this shouldn't happen with DashMap remove, + // but handle it gracefully + info!( + "Fork group {}: call_id={} answered but already resolved", + tracking_id, call_id + ); + return None; + } + + group.answered_call_id = Some(call_id); + + // Collect siblings to cancel (all members except the one that answered, minus already-failed) + let siblings: Vec = group + .sibling_call_ids + .iter() + .filter(|&&id| id != call_id && !group.failed_call_ids.contains(&id)) + .copied() + .collect(); + + info!( + "Fork group {}: call_id={} answered, cancelling {} siblings", + tracking_id, + call_id, + siblings.len() + ); + + Some(siblings) +} + +/// Mark a fork leg as failed. Returns `true` if ALL legs have now failed +/// (meaning the logical call should be reported as failed to the DO). +/// +/// The fork group is removed when all legs have failed. +pub fn mark_failed(tracking_id: &str, call_id: CallId) -> bool { + let mut entry = match groups().get_mut(tracking_id) { + Some(e) => e, + None => { + // Group already removed (answered or all-failed) — this is a late failure + // from a leg that was being cancelled. Not an error. 
+            debug!(
+                "Fork group {}: call_id={} failed but group already resolved",
+                tracking_id, call_id
+            );
+            return false;
+        }
+    };
+
+    // If this group was already answered, don't count this failure
+    if entry.answered_call_id.is_some() {
+        return false;
+    }
+
+    entry.sibling_call_ids.remove(&call_id);
+    entry.failed_call_ids.insert(call_id);
+
+    // A leg is "resolved" once it has failed; legs that never started
+    // (initial_failures) count toward the same total.
+    let total_resolved = entry.failed_call_ids.len() + entry.initial_failures;
+    let all_failed = total_resolved >= entry.expected_total;
+
+    debug!(
+        "Fork group {}: call_id={} failed, resolved={}/{}, all_failed={}",
+        tracking_id, call_id, total_resolved, entry.expected_total, all_failed
+    );
+
+    if all_failed {
+        // Drop the mutable ref before removing — NOTE(review): removing a key
+        // while still holding its guard on the same DashMap can deadlock, so
+        // the drop must happen first.
+        drop(entry);
+        groups().remove(tracking_id);
+    }
+
+    all_failed
+}
+
+#[cfg(test)]
+/// Unit tests for fork-group bookkeeping. All tests share the one global
+/// DashMap, hence the unique tracking IDs per test.
+mod tests {
+    use super::*;
+
+    /// Generate a unique tracking ID per test to avoid interference with the global DashMap
+    fn unique_id(base: &str) -> String {
+        use std::sync::atomic::{AtomicU64, Ordering};
+        static COUNTER: AtomicU64 = AtomicU64::new(0);
+        format!("{}_{}", base, COUNTER.fetch_add(1, Ordering::Relaxed))
+    }
+
+    #[test]
+    fn test_add_members_and_answer() {
+        let tid = unique_id("answer");
+        let c1 = CallId::new(100);
+        let c2 = CallId::new(101);
+        let c3 = CallId::new(102);
+
+        add_member(&tid, c1, 3);
+        add_member(&tid, c2, 3);
+        add_member(&tid, c3, 3);
+
+        // c1 answers -> siblings c2, c3 returned for cancellation
+        let siblings = mark_answered(&tid, c1).unwrap();
+        assert_eq!(siblings.len(), 2);
+        assert!(siblings.contains(&c2));
+        assert!(siblings.contains(&c3));
+    }
+
+    #[test]
+    fn test_all_failed() {
+        let tid = unique_id("allfail");
+        let c1 = CallId::new(200);
+        let c2 = CallId::new(201);
+
+        add_member(&tid, c1, 2);
+        add_member(&tid, c2, 2);
+
+        assert!(!mark_failed(&tid, c1)); // 1/2 failed
+        assert!(mark_failed(&tid, c2)); // 2/2 failed -> all_failed = true
+    }
+
+    #[test]
+    fn test_answer_on_already_resolved() {
+        let tid = unique_id("double_answer");
+        let c1 = CallId::new(300);
+        let c2 = CallId::new(301);
+
+        add_member(&tid, c1, 2);
+        add_member(&tid, c2, 2);
+
+        // First answer removes the group
+        mark_answered(&tid, c1);
+
+        // Second answer -> group gone, returns None
+        assert!(mark_answered(&tid, c2).is_none());
+    }
+
+    #[test]
+    fn test_failed_on_already_resolved() {
+        let tid = unique_id("fail_after_answer");
+        let c1 = CallId::new(400);
+        let c2 = CallId::new(401);
+
+        add_member(&tid, c1, 2);
+        add_member(&tid, c2, 2);
+
+        mark_answered(&tid, c1);
+
+        // Late failure after answer -> returns false
+        assert!(!mark_failed(&tid, c2));
+    }
+
+    #[test]
+    fn test_initial_failures_plus_call_failures() {
+        let tid = unique_id("mixed_fail");
+        let c1 = CallId::new(500);
+
+        // 3 total forks: 1 started, 2 failed to start
+        add_member(&tid, c1, 3);
+        add_initial_failure(&tid, 3);
+        add_initial_failure(&tid, 3);
+
+        // The one started leg also fails -> all_failed
+        assert!(mark_failed(&tid, c1));
+    }
+
+    #[test]
+    fn test_single_member_fork_group() {
+        let tid = unique_id("single");
+        let c1 = CallId::new(600);
+
+        add_member(&tid, c1, 1);
+
+        // Single member answers -> empty siblings list
+        let siblings = mark_answered(&tid, c1).unwrap();
+        assert!(siblings.is_empty());
+    }
+
+    #[test]
+    fn test_some_fail_then_answer() {
+        let tid = unique_id("partial_fail_answer");
+        let c1 = CallId::new(700);
+        let c2 = CallId::new(701);
+        let c3 = CallId::new(702);
+
+        add_member(&tid, c1, 3);
+        add_member(&tid, c2, 3);
+        add_member(&tid, c3, 3);
+
+        // c1 fails first
+        assert!(!mark_failed(&tid, c1));
+
+        // c2 answers -> should only cancel c3 (c1 already failed)
+        let siblings = mark_answered(&tid, c2).unwrap();
+        assert_eq!(siblings.len(), 1);
+        assert!(siblings.contains(&c3));
+    }
+}
diff --git a/sipcord-bridge/src/transport/sip/mod.rs b/sipcord-bridge/src/transport/sip/mod.rs
new file mode 100644
index 0000000..0134f8d
--- /dev/null
+++ b/sipcord-bridge/src/transport/sip/mod.rs
@@ -0,0 +1,579 @@
+pub mod ffi;
+
+mod audio_thread;
+mod callbacks;
+mod channel_audio;
+pub mod fork_group;
+mod nat;
+mod register_handler;
+
+// Re-export everything from the pjsua FFI module
+pub use self::ffi::*;
+
+// Re-export from mixed/application-level modules
+pub use audio_thread::{
+    check_rtp_inactivity, cleanup_zombie_pjsua_calls, set_timeout_event_sender,
+    validate_counted_calls,
+};
+pub use callbacks::{set_outbound_event_sender, T38_PRESOCKETS};
+pub use channel_audio::{
+    cleanup_channel_port, clear_channel_stale_audio, register_call_channel,
+    register_discord_to_sip, unregister_call_channel, unregister_discord_to_sip,
+};
+pub use register_handler::{set_register_event_sender, set_sip_command_sender, PendingRegisterTsx};
+
+use crate::config::{SipConfig, TlsConfig};
+use crate::transport::discord::send_audio_to_discord_direct;
+use anyhow::Result;
+use crossbeam_channel::{bounded, Receiver, Sender};
+use dashmap::DashMap;
+use parking_lot::RwLock;
+use std::net::IpAddr;
+use std::path::PathBuf;
+use std::sync::Arc;
+use tracing::{debug, error, info, trace};
+
+// NOTE(review): generic type parameters in the declarations below (e.g.
+// `Box<...>`, `Option<...>`, `Sender<...>`, `Arc<DashMap<...>>`) appear to have
+// been stripped by the patch extraction — compare against the original file
+// before relying on these exact signatures.
+
+/// Events emitted by the SIP module
+#[derive(Debug, Clone)]
+pub enum SipEvent {
+    /// Incoming call received with SIP Digest auth params and extension
+    IncomingCall {
+        call_id: CallId,
+        /// SIP Digest auth parameters (boxed to reduce enum size)
+        digest_auth: Box,
+        /// Extension being called (from To header)
+        extension: String,
+        /// Source IP address of the caller
+        source_ip: Option,
+    },
+    /// Call ended
+    CallEnded { call_id: CallId },
+    /// Call timed out due to RTP inactivity (no audio received for extended period)
+    /// rx_count is the total RTP packets received before timeout (0 = never got any audio)
+    CallTimeout { call_id: CallId, rx_count: u64 },
+    /// Outbound call was answered
+    OutboundCallAnswered {
+        tracking_id: String,
+        call_id: CallId,
+    },
+    /// Outbound call failed (rejected, timeout, error)
+    OutboundCallFailed {
+        tracking_id: String,
+        call_id: Option,
+        reason: String,
+    },
+    /// Remote sent a T.38 re-INVITE (fax switching from G.711 to T.38 UDPTL)
+    /// The re-INVITE has already been answered synchronously with a 200 OK;
+    /// the pre-bound UDPTL socket is stored in T38_PRESOCKETS.
+    T38Offered {
+        call_id: CallId,
+        /// Remote IP for UDPTL packets
+        remote_ip: String,
+        /// Remote UDPTL port
+        remote_port: u16,
+        /// T.38 version from SDP (typically 0)
+        t38_version: u8,
+        /// Max bit rate from SDP (typically 14400)
+        max_bit_rate: u32,
+        /// Rate management method ("transferredTCF" or "localTCF")
+        rate_management: String,
+        /// UDP error correction ("t38UDPRedundancy" or "t38UDPFEC")
+        udp_ec: String,
+        /// Our local UDPTL port (pre-bound in callback)
+        local_port: u16,
+    },
+}
+
+/// Commands that can be sent to the SIP module
+#[derive(Debug)]
+pub enum SipCommand {
+    /// Play audio directly to a call (bypasses channel buffer)
+    /// Used for join sounds to avoid buffer overflow with Discord audio
+    PlayDirectToCall { call_id: CallId, samples: Vec },
+    /// Start a looping audio player for early media (183 Session Progress)
+    StartConnectingLoop { call_id: CallId, samples: Vec },
+    /// Hangup a call
+    Hangup { call_id: CallId },
+    /// Answer a call with 200 OK (after Discord connects successfully)
+    Answer { call_id: CallId },
+    /// Send 183 Session Progress (establishes early media for connecting sound)
+    Send183 { call_id: CallId },
+    /// Start streaming audio from a file to a call (for large files like easter eggs)
+    /// Uses pull model for precise timing - hangs up automatically when done
+    StartStreaming { call_id: CallId, path: PathBuf },
+    /// Start playing a 440Hz test tone to a call
+    /// Plays until the caller hangs up
+    StartTestTone { call_id: CallId },
+    /// Send 302 redirect to another bridge server
+    /// Must be processed in the PJSUA thread to avoid deadlocking with internal PJSIP state
+    Redirect {
+        call_id: CallId,
+        domain: String,
+        extension: String,
+    },
+    /// Make an outbound call to a SIP URI (for inbound Discord->SIP calls)
+    MakeOutboundCall {
+        tracking_id: String,
+        sip_uri: String,
+        caller_display_name: Option,
+        /// Total number of fork legs for this tracking_id (for multi-contact forking)
+        fork_total: usize,
+    },
+    /// Complete a deferred REGISTER response via a UAS transaction.
+    /// Sent by the async auth handler after API verification.
+    RespondRegister {
+        pending: PendingRegisterTsx,
+        auth_ok: bool,
+    },
+}
+
+/// Active call state (tracked by SIP module before authentication completes)
+#[derive(Debug)]
+pub struct CallState;
+
+/// SIP transport — owns the pjsua event loop and all SIP state.
+///
+/// Creates its own event/command channels internally. Use `events()` and `commands()`
+/// to get handles for communication with the bridge coordinator.
+pub struct SipTransport {
+    config: SipConfig,
+    tls_config: Option,
+    event_tx: Sender,
+    event_rx: Receiver,
+    command_tx: Sender,
+    command_rx: Receiver,
+    calls: Arc>,
+    pjsua_initialized: Arc>,
+}
+
+impl SipTransport {
+    pub fn new(config: SipConfig, tls_config: Option) -> Self {
+        // Bounded (1000-slot) channels give backpressure instead of unbounded
+        // memory growth if a consumer stalls.
+        let (event_tx, event_rx) = bounded(1000);
+        let (command_tx, command_rx) = bounded(1000);
+        Self {
+            config,
+            tls_config,
+            event_tx,
+            event_rx,
+            command_tx,
+            command_rx,
+            calls: Arc::new(DashMap::new()),
+            pjsua_initialized: Arc::new(RwLock::new(false)),
+        }
+    }
+
+    /// Get a receiver for SIP events (incoming calls, call ended, etc.)
+    pub fn events(&self) -> Receiver {
+        self.event_rx.clone()
+    }
+
+    /// Get a sender for SIP commands (hangup, answer, send audio, etc.)
+    pub fn commands(&self) -> Sender {
+        self.command_tx.clone()
+    }
+
+    /// Get a sender for SIP events (used to inject outbound call events)
+    pub fn event_sender(&self) -> Sender {
+        self.event_tx.clone()
+    }
+
+    /// Start the SIP transport
+    pub async fn run(&self) -> Result<()> {
+        info!(
+            "Starting SIP server on {}:{}",
+            self.config.public_host, self.config.port
+        );
+
+        if let Some(ref tls) = self.tls_config {
+            info!("TLS enabled on port {}", tls.port);
+        }
+
+        // Initialize pjsua in a blocking task since it's not async-safe
+        let config = self.config.clone();
+        let tls_config = self.tls_config.clone();
+        let calls = self.calls.clone();
+        let event_tx = self.event_tx.clone();
+        let initialized = self.pjsua_initialized.clone();
+        let command_rx = self.command_rx.clone();
+
+        // Spawn pjsua event loop in a blocking thread
+        // IMPORTANT: All PJSUA calls must be made from this thread to avoid deadlocks
+        let pjsua_handle = tokio::task::spawn_blocking(move || {
+            if let Err(e) =
+                run_pjsua_loop(config, tls_config, calls, event_tx, initialized, command_rx)
+            {
+                error!("pjsua loop error: {}", e);
+            }
+        });
+
+        // The loop below is effectively infinite, so this await only completes
+        // if the blocking task ends (error or panic).
+        pjsua_handle.await?;
+        Ok(())
+    }
+}
+
+/// Run the pjsua event loop (blocking)
+///
+/// IMPORTANT: All PJSUA calls (answer, hangup, etc.) must be made from this thread
+/// to avoid deadlocks with PJSIP's internal worker threads.
+fn run_pjsua_loop(
+    config: SipConfig,
+    tls_config: Option,
+    calls: Arc>,
+    event_tx: Sender,
+    initialized: Arc>,
+    command_rx: Receiver,
+) -> Result<()> {
+    // Initialize pjsua with optional TLS
+    init_pjsua(&config, tls_config.as_ref())?;
+    *initialized.write() = true;
+
+    // Register this thread with PJLIB so we can safely call PJSUA functions.
+    // This is required because tokio::task::spawn_blocking creates a new thread
+    // that isn't automatically registered with PJLIB.
+    if !register_thread_with_pjlib("pjsua_event_loop") {
+        tracing::warn!("Failed to register event loop thread with PJLIB");
+    }
+
+    // Note: Audio thread is started on-demand when first call becomes active
+    // (see on_call_media_state_cb in callbacks.rs)
+
+    info!("pjsua initialized, waiting for calls...");
+
+    // Set up timeout event sender for RTP inactivity detection
+    set_timeout_event_sender(event_tx.clone());
+
+    // Set up callbacks
+    set_callbacks(CallbackHandlers {
+        on_incoming_call: Box::new({
+            let calls = calls.clone();
+            move |call_id, sip_username, extension, source_ip| {
+                debug!(
+                    "Incoming call {} from {} to extension {} (IP: {:?})",
+                    call_id, sip_username, extension, source_ip
+                );
+
+                // Track call (actual state is managed via events after authentication)
+                calls.insert(call_id, CallState);
+            }
+        }),
+        on_call_authenticated: Box::new({
+            let event_tx = event_tx.clone();
+            move |call_id, digest_auth, extension, source_ip| {
+                info!(
+                    "Call {} authenticated: user={}",
+                    call_id, digest_auth.username
+                );
+
+                // Send errors are deliberately ignored: if the coordinator is
+                // gone there is nobody left to act on the event anyway.
+                let _ = event_tx.send(SipEvent::IncomingCall {
+                    call_id,
+                    digest_auth: Box::new(digest_auth),
+                    extension,
+                    source_ip,
+                });
+            }
+        }),
+        on_dtmf: Box::new({
+            move |call_id, digit| {
+                debug!(
+                    "DTMF {} on call {} (ignored - using dialed number)",
+                    digit, call_id
+                );
+            }
+        }),
+        on_call_ended: Box::new({
+            let calls = calls.clone();
+            let event_tx = event_tx.clone();
+            move |call_id| {
+                calls.remove(&call_id);
+                let _ = event_tx.send(SipEvent::CallEnded { call_id });
+            }
+        }),
+        on_audio_frame: Box::new({
+            move |channel_id, samples, sample_rate| {
+                // DIRECT PATH: Send audio directly to Discord, bypassing tokio entirely.
+                // This is called from the pjsua audio thread and sends directly to the
+                // crossbeam channel that feeds Songbird's StreamingAudioSource.
+                use std::sync::atomic::{AtomicU64, Ordering};
+                static DIRECT_AUDIO_COUNT: AtomicU64 = AtomicU64::new(0);
+                let count = DIRECT_AUDIO_COUNT.fetch_add(1, Ordering::Relaxed);
+
+                let sent = send_audio_to_discord_direct(channel_id, samples, sample_rate);
+
+                // Rate-limit the drop log to every 250th frame to avoid log spam.
+                if !sent && count.is_multiple_of(250) {
+                    // No sender registered for this channel - bridge might not be ready yet
+                    trace!(
+                        "No Discord sender for channel {} (direct audio dropped, count={})",
+                        channel_id,
+                        count
+                    );
+                }
+            }
+        }),
+    });
+
+    // Run pjsua event loop. This loop never breaks; it only exits via the `?`
+    // on process_pjsua_events below.
+    let mut loop_count: u64 = 0;
+    loop {
+        // Process any pending SIP commands (non-blocking)
+        // These must be processed in the PJSUA thread to avoid deadlocks
+        while let Ok(cmd) = command_rx.try_recv() {
+            process_sip_command(cmd, &calls);
+        }
+
+        // Sleep briefly to allow PJSIP worker threads to process events
+        // Note: PJSIP has its own internal worker threads that handle the ioqueue
+        process_pjsua_events(10)?;
+
+        loop_count += 1;
+
+        // Every ~5 seconds (500 iterations at 10ms each), check for RTP inactivity
+        // This must be done from the PJSUA thread, not the audio thread
+        if loop_count.is_multiple_of(500) {
+            check_rtp_inactivity();
+        }
+
+        // Every ~30 seconds (3000 iterations at 10ms each), validate COUNTED_CALL_IDS
+        // This catches stale calls that weren't properly cleaned up by on_call_state_cb
+        if loop_count.is_multiple_of(3000) {
+            validate_counted_calls();
+        }
+
+        // Every ~60 seconds (6000 iterations at 10ms each), scan ALL pjsua call slots
+        // for zombie calls that are stuck (rejected calls where the SIP transaction
+        // never completed, or calls where handle_incoming_call panicked/hung)
+        if loop_count.is_multiple_of(6000) {
+            cleanup_zombie_pjsua_calls();
+        }
+    }
+}
+
+/// Process a SIP command in the PJSUA thread
+///
+/// This must be called from the PJSUA event loop thread to avoid deadlocks.
+fn process_sip_command(cmd: SipCommand, calls: &Arc>) {
+    match cmd {
+        SipCommand::PlayDirectToCall { call_id, samples } => {
+            // Play audio directly to a call (bypasses channel buffer)
+            if let Err(e) = play_audio_to_call_direct(call_id, &samples) {
+                tracing::error!("Failed to play direct audio to call {}: {}", call_id, e);
+            }
+        }
+        SipCommand::StartConnectingLoop { call_id, samples } => {
+            // Queue to audio thread to avoid race with pjmedia_port_get_frame
+            queue_pjsua_op(PendingPjsuaOp::StartLoop { call_id, samples });
+        }
+        SipCommand::Hangup { call_id } => {
+            // Stop any looping audio first
+            stop_loop(call_id);
+            // Always try to hangup - PJSUA will handle if call doesn't exist
+            // Remove from our tracking if present
+            calls.remove(&call_id);
+            hangup_call(call_id);
+        }
+        SipCommand::Answer { call_id } => {
+            answer_call(call_id);
+        }
+        SipCommand::Send183 { call_id } => {
+            send_183_session_progress(call_id);
+        }
+        SipCommand::StartStreaming { call_id, path } => {
+            // Queue streaming to audio thread (handles timing and hangup detection)
+            queue_pjsua_op(PendingPjsuaOp::StartStreaming {
+                call_id,
+                path,
+                hangup_on_complete: true, // Easter egg calls hangup when done
+            });
+        }
+        SipCommand::StartTestTone { call_id } => {
+            // Queue test tone to audio thread
+            queue_pjsua_op(PendingPjsuaOp::StartTestTone { call_id });
+        }
+        SipCommand::Redirect {
+            call_id,
+            domain,
+            extension,
+        } => {
+            // Stop any connecting loop first
+            stop_loop(call_id);
+            // Send 302 from the PJSUA thread (safe - no deadlock with PJSIP internals)
+            unsafe {
+                callbacks::send_302_redirect(call_id, &domain, &extension);
+            }
+            calls.remove(&call_id);
+        }
+        SipCommand::MakeOutboundCall {
+            tracking_id,
+            sip_uri,
+            caller_display_name,
+            fork_total,
+        } => {
+            info!(
+                "Making outbound call: tracking_id={}, uri={}, caller={:?}, fork={}/{}",
+                tracking_id, sip_uri, caller_display_name, fork_total, fork_total
+            );
+            match make_outbound_call(&sip_uri, caller_display_name.as_deref()) {
+                Ok(call_id) => {
+                    // Store tracking_id -> call_id mapping
+                    let outbound_calls = OUTBOUND_CALL_TRACKING.get_or_init(DashMap::new);
+                    outbound_calls.insert(call_id, tracking_id.clone());
+                    // Register in fork group
+                    fork_group::add_member(&tracking_id, call_id, fork_total);
+                    info!(
+                        "Outbound call started: tracking_id={}, call_id={}",
+                        tracking_id, call_id
+                    );
+                    calls.insert(call_id, CallState);
+                }
+                Err(e) => {
+                    error!(
+                        "Failed to make outbound call (tracking_id={}): {}",
+                        tracking_id, e
+                    );
+                    // Track the initial failure in fork group
+                    fork_group::add_initial_failure(&tracking_id, fork_total);
+                }
+            }
+        }
+        SipCommand::RespondRegister { pending, auth_ok } => {
+            // Complete a deferred REGISTER response. Must run on the pjsua
+            // thread because pjsip_tsx_send_msg is not thread-safe.
+            //
+            // SAFETY: tsx/tdata were captured when the REGISTER was deferred and
+            // are only dereferenced here, on the pjsua thread, after the null
+            // checks below. NOTE(review): confirm the transaction cannot have
+            // been destroyed by PJSIP between deferral and this command.
+            unsafe {
+                use pjsua::*;
+                use std::os::raw::c_char;
+
+                let tsx = pending.tsx.0;
+                let tdata = pending.tdata.0;
+
+                if tsx.is_null() || tdata.is_null() {
+                    tracing::warn!("RespondRegister: null tsx or tdata");
+                    return;
+                }
+
+                if auth_ok {
+                    // Add Expires header to the pre-built 200 OK
+                    let expires_str = format!("{}", pending.expires);
+                    let hdr_name = std::ffi::CString::new("Expires").unwrap();
+                    let hdr_value = std::ffi::CString::new(expires_str).unwrap();
+
+                    let name = pj_str(hdr_name.as_ptr() as *mut c_char);
+                    let value = pj_str(hdr_value.as_ptr() as *mut c_char);
+                    // The header is pool-allocated, so it lives as long as tdata.
+                    let hdr = pjsip_generic_string_hdr_create((*tdata).pool, &name, &value);
+                    if !hdr.is_null() {
+                        pj_list_insert_before(
+                            &mut (*(*tdata).msg).hdr as *mut pjsip_hdr as *mut pj_list_type,
+                            hdr as *mut pj_list_type,
+                        );
+                    }
+                } else {
+                    // Rewrite the pre-built 200 to a 403 Forbidden
+                    (*(*tdata).msg).line.status.code = 403;
+                    let reason = b"Forbidden\0";
+                    let ptr = pj_pool_alloc((*tdata).pool, reason.len()) as *mut u8;
+                    std::ptr::copy_nonoverlapping(reason.as_ptr(), ptr, reason.len());
+                    (*(*tdata).msg).line.status.reason.ptr = ptr as *mut c_char;
+                    // slen excludes the trailing NUL byte of the literal above.
+                    (*(*tdata).msg).line.status.reason.slen =
+                        (reason.len() - 1) as std::os::raw::c_long;
+                }
+
+                let status = pjsip_tsx_send_msg(tsx, tdata);
+                if status != pj_constants__PJ_SUCCESS as i32 {
+                    tracing::warn!(
+                        "Failed to send deferred REGISTER response ({}): {}",
+                        if auth_ok { 200 } else { 403 },
+                        status
+                    );
+                }
+            }
+        }
+    }
+}
+
+/// Tracking map for outbound calls: pjsua call_id -> tracking_id
+static OUTBOUND_CALL_TRACKING: std::sync::OnceLock> =
+    std::sync::OnceLock::new();
+
+/// Get the tracking ID for an outbound call (if any)
+pub fn get_outbound_tracking_id(call_id: CallId) -> Option {
+    OUTBOUND_CALL_TRACKING
+        .get()
+        .and_then(|m| m.get(&call_id).map(|v| v.clone()))
+}
+
+/// Remove and return the tracking ID for an outbound call
+pub fn remove_outbound_tracking(call_id: CallId) -> Option {
+    OUTBOUND_CALL_TRACKING
+        .get()
+        .and_then(|m| m.remove(&call_id).map(|(_, v)| v))
+}
+
+/// Make an outbound SIP call using pjsua
+///
+/// If `caller_display_name` is provided, it sets the From header display name
+/// to show who initiated the call from Discord (e.g., "Discord: username").
+fn make_outbound_call(sip_uri: &str, caller_display_name: Option<&str>) -> Result {
+    unsafe {
+        let uri = std::ffi::CString::new(sip_uri).map_err(|e| e.to_string())?;
+        let mut call_id: ::pjsua::pjsua_call_id = -1;
+
+        // Explicit call settings: audio only, no video, no T.140 text.
+        // The default txt_cnt=1 adds an m=text stream to the SDP, bloating
+        // the INVITE beyond the ~1300-byte UDP fragmentation threshold.
+        let mut opt = std::mem::MaybeUninit::<::pjsua::pjsua_call_setting>::uninit();
+        ::pjsua::pjsua_call_setting_default(opt.as_mut_ptr());
+        // The *_default call above fully initializes the struct, so assume_init is sound.
+        let opt_ptr = opt.assume_init_mut();
+        opt_ptr.aud_cnt = 1;
+        opt_ptr.vid_cnt = 0;
+        opt_ptr.txt_cnt = 0;
+
+        // Set up msg_data with custom From header if caller display name provided
+        let mut msg_data = std::mem::MaybeUninit::<::pjsua::pjsua_msg_data>::uninit();
+        ::pjsua::pjsua_msg_data_init(msg_data.as_mut_ptr());
+        let msg_data_ptr = msg_data.assume_init_mut();
+
+        // Build the From URI with display name: "name" <account-uri>
+        // The local_uri field overrides the From header in the outgoing INVITE.
+        // NOTE(review): `from_uri_cstring` is declared in this outer scope so the
+        // pointer handed to pj_str below stays valid until pjsua_call_make_call runs.
+        let from_uri_cstring;
+        if let Some(name) = caller_display_name {
+            // Get the account's SIP URI to use as the address part
+            let mut acc_info = std::mem::MaybeUninit::<::pjsua::pjsua_acc_info>::uninit();
+            let acc_uri = if ::pjsua::pjsua_acc_get_info(0, acc_info.as_mut_ptr())
+                == ::pjsua::pj_constants__PJ_SUCCESS as i32
+            {
+                let ai = acc_info.assume_init();
+                let uri_str = std::ffi::CStr::from_ptr(ai.acc_uri.ptr)
+                    .to_string_lossy()
+                    .into_owned();
+                uri_str
+            } else {
+                // Fallback when account info is unavailable
+                "sip:sipcord@localhost".to_string()
+            };
+
+            // Sanitize display name: whitelist printable ASCII, strip control chars
+            // and characters that could break SIP header parsing or enable injection
+            let sanitized: String = name
+                .chars()
+                .filter(|c| *c >= ' ' && *c != '"' && *c != '<' && *c != '>' && *c != '\\')
+                .take(64)
+                .collect();
+            let from_uri = format!("\"{}\" <{}>", sanitized, acc_uri);
+            from_uri_cstring = std::ffi::CString::new(from_uri).map_err(|e| e.to_string())?;
+            msg_data_ptr.local_uri =
+                ::pjsua::pj_str(from_uri_cstring.as_ptr() as *mut std::os::raw::c_char);
+        }
+
+        let status = ::pjsua::pjsua_call_make_call(
+            0, // default account
+            &::pjsua::pj_str(uri.as_ptr() as *mut std::os::raw::c_char),
+            opt_ptr, // call settings (no text stream)
+            std::ptr::null_mut(), // user data
+            msg_data_ptr, // msg_data with custom From header
+            &mut call_id,
+        );
+
+        if status != ::pjsua::pj_constants__PJ_SUCCESS as i32 {
+            return Err(format!("pjsua_call_make_call failed: {}", status));
+        }
+
+        Ok(CallId::new(call_id))
+    }
+}
diff --git a/sipcord-bridge/src/transport/sip/nat.rs b/sipcord-bridge/src/transport/sip/nat.rs
new file mode 100644
index 0000000..8be097e
--- /dev/null
+++ b/sipcord-bridge/src/transport/sip/nat.rs
@@ -0,0 +1,909 @@
+//! NAT-related SIP rewriting for Contact headers and SDP bodies
+//!
+//! This module consolidates all NAT rewriting logic:
+//! - Local-network rewriting (tx path): rewrites Contact + SDP in outgoing
+//!   requests/responses so that local-network clients use the local IP
+//! - Far-end NAT fixup (rx path): rewrites private IPs in incoming responses
+//!   to the actual public source IP
+
+use super::ffi::types::*;
+use super::ffi::utils::pj_str_to_string;
+use pjsua::*;
+use std::ffi::CString;
+use std::net::Ipv4Addr;
+use std::os::raw::c_char;
+use std::ptr;
+
+// Private helpers
+
+/// Remove dynamic payload types (96+) from `m=` lines when they lack a corresponding
+/// `a=rtpmap:` attribute in that media section. This prevents PJSIP's SDP validator
+/// from rejecting the SDP with PJMEDIA_SDP_EMISSINGRTPMAP.
+///
+/// Returns `Some(sanitized)` if any orphan dynamic PTs were stripped, `None` if no changes.
+fn sanitize_sdp_missing_rtpmap(sdp: &str) -> Option {
+    // Split SDP into lines, grouping by media sections.
+    // Session-level lines come before the first m= line.
+    // Each m= line starts a new media section that includes all following a=/b=/c= lines
+    // until the next m= line or end of SDP.
+
+    let lines: Vec<&str> = sdp.lines().collect();
+    let mut result_lines: Vec = Vec::with_capacity(lines.len());
+    let mut changed = false;
+
+    // Find media section boundaries (indices of m= lines)
+    let mut section_starts: Vec = Vec::new();
+    for (i, line) in lines.iter().enumerate() {
+        if line.starts_with("m=") {
+            section_starts.push(i);
+        }
+    }
+
+    // Session-level lines (before first m= line) pass through unchanged
+    let first_m = section_starts.first().copied().unwrap_or(lines.len());
+    for line in &lines[..first_m] {
+        result_lines.push(line.to_string());
+    }
+
+    // Process each media section
+    for (sec_idx, &start) in section_starts.iter().enumerate() {
+        // A section ends at the next m= line, or at end of SDP for the last one.
+        let end = section_starts
+            .get(sec_idx + 1)
+            .copied()
+            .unwrap_or(lines.len());
+
+        let m_line = lines[start];
+        let section_lines = &lines[start + 1..end];
+
+        // Parse m= line: m=<media> <port> <transport> <payload types...>
+        let parts: Vec<&str> = m_line.split_whitespace().collect();
+        if parts.len() < 4 {
+            // Malformed m= line, pass through
+            for line in &lines[start..end] {
+                result_lines.push(line.to_string());
+            }
+            continue;
+        }
+
+        let transport = parts[2];
+
+        // Only sanitize RTP-based transports (not UDPTL for T.38, etc.)
+        if !transport.starts_with("RTP/") {
+            for line in &lines[start..end] {
+                result_lines.push(line.to_string());
+            }
+            continue;
+        }
+
+        // Collect rtpmap PTs declared in this section
+        let mut rtpmap_pts: std::collections::HashSet = std::collections::HashSet::new();
+        for line in section_lines {
+            // a=rtpmap:96 opus/48000/2
+            if let Some(rest) = line.strip_prefix("a=rtpmap:") {
+                if let Some(pt_str) = rest.split_whitespace().next() {
+                    if let Ok(pt) = pt_str.parse::() {
+                        rtpmap_pts.insert(pt);
+                    }
+                }
+            }
+        }
+
+        // Check which PTs in the m= line need stripping
+        let payload_types = &parts[3..];
+        let mut kept_pts: Vec<&str> = Vec::new();
+        let mut stripped_pts: Vec = Vec::new();
+
+        for pt_str in payload_types {
+            if let Ok(pt) = pt_str.parse::() {
+                // Dynamic PT range starts at 96; static PTs need no rtpmap.
+                if pt >= 96 && !rtpmap_pts.contains(&pt) {
+                    stripped_pts.push(pt);
+                    continue;
+                }
+            }
+            kept_pts.push(pt_str);
+        }
+
+        if stripped_pts.is_empty() {
+            // No changes needed for this section
+            for line in &lines[start..end] {
+                result_lines.push(line.to_string());
+            }
+            continue;
+        }
+
+        // If stripping all PTs would leave none, leave the m= line unchanged
+        if kept_pts.is_empty() {
+            for line in &lines[start..end] {
+                result_lines.push(line.to_string());
+            }
+            continue;
+        }
+
+        changed = true;
+
+        // Rebuild m= line with kept PTs only
+        let new_m_line = format!(
+            "{} {} {} {}",
+            parts[0],
+            parts[1],
+            parts[2],
+            kept_pts.join(" ")
+        );
+        result_lines.push(new_m_line);
+
+        // Copy section attribute lines, stripping a=fmtp: for removed PTs
+        let stripped_set: std::collections::HashSet = stripped_pts.into_iter().collect();
+        for line in section_lines {
+            if let Some(rest) = line.strip_prefix("a=fmtp:") {
+                if let Some(pt_str) = rest.split_whitespace().next() {
+                    if let Ok(pt) = pt_str.parse::() {
+                        if stripped_set.contains(&pt) {
+                            continue; // skip fmtp for stripped PT
+                        }
+                    }
+                }
+            }
+            result_lines.push(line.to_string());
+        }
+    }
+
+    if changed {
+        // SDP lines are CRLF-terminated, including the final line.
+        Some(result_lines.join("\r\n") + "\r\n")
+    } else {
+        None
+    }
+}
+
+/// Check if an IPv4 address is in RFC 1918 private space
+/// (10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16).
+fn is_rfc1918(ip: Ipv4Addr) -> bool {
+    let o = ip.octets();
+    (o[0] == 10) || (o[0] == 172 && (16..=31).contains(&o[1])) || (o[0] == 192 && o[1] == 168)
+}
+
+/// Extract the destination IPv4 address from `pjsip_tx_data` transport info.
+///
+/// Returns `None` if transport info is invalid or the address is not IPv4.
+unsafe fn extract_dst_ipv4(tdata: *const pjsip_tx_data) -> Option {
+    if (*tdata).tp_info.transport.is_null() || (*tdata).tp_info.dst_addr_len <= 0 {
+        return None;
+    }
+
+    let dst_addr = &(*tdata).tp_info.dst_addr;
+    // PJ_AF_INET is typically 2 (same as AF_INET on most systems)
+    // NOTE(review): hard-coded 2 — confirm against pj_AF_INET() on less common platforms.
+    if dst_addr.addr.sa_family == 2 {
+        let addr_in = &dst_addr.ipv4;
+        // s_addr is in network byte order; to_ne_bytes preserves the on-wire
+        // byte sequence, so indexing 0..3 yields the dotted-quad octets.
+        let ip_bytes = addr_in.sin_addr.s_addr.to_ne_bytes();
+        Some(Ipv4Addr::new(
+            ip_bytes[0],
+            ip_bytes[1],
+            ip_bytes[2],
+            ip_bytes[3],
+        ))
+    } else {
+        None
+    }
+}
+
+/// Rewrite the Contact header's host and port via pool allocation.
+///
+/// Uses vtable-based URI unwrapping (`p_get_uri`) to safely handle both
+/// bare `pjsip_sip_uri` and `pjsip_name_addr`-wrapped URIs.
+/// Returns `true` if the rewrite succeeded.
+unsafe fn rewrite_contact_host(
+    pool: *mut pj_pool_t,
+    msg: *mut pjsip_msg,
+    new_host: &str,
+    new_port: u16,
+) -> bool {
+    let contact_hdr = pjsip_msg_find_hdr(msg, pjsip_hdr_e_PJSIP_H_CONTACT, ptr::null_mut())
+        as *mut pjsip_contact_hdr;
+    if contact_hdr.is_null() {
+        return false;
+    }
+
+    let uri = (*contact_hdr).uri;
+    if uri.is_null() {
+        return false;
+    }
+
+    // Unwrap via vtable to handle pjsip_name_addr wrapping
+    let uri_vptr = (*(uri as *const pjsip_uri)).vptr;
+    if uri_vptr.is_null() {
+        return false;
+    }
+    let get_uri_fn = match (*uri_vptr).p_get_uri {
+        Some(f) => f,
+        None => return false,
+    };
+    let sip_uri_raw = get_uri_fn(uri as *mut std::os::raw::c_void);
+    if sip_uri_raw.is_null() {
+        return false;
+    }
+    let sip_uri = sip_uri_raw as *mut pjsip_sip_uri;
+    if (*sip_uri).host.ptr.is_null() || (*sip_uri).host.slen <= 0 {
+        return false;
+    }
+
+    // Hosts with interior NULs cannot be represented as a CString; bail out.
+    let Ok(host_cstr) = CString::new(new_host) else {
+        return false;
+    };
+    let host_len = new_host.len();
+    // Pool-allocate so the new host string lives as long as the message.
+    let pool_str = pj_pool_alloc(pool, host_len + 1) as *mut c_char;
+    if pool_str.is_null() {
+        return false;
+    }
+
+    // +1 copies the trailing NUL as well.
+    ptr::copy_nonoverlapping(host_cstr.as_ptr(), pool_str, host_len + 1);
+    (*sip_uri).host.ptr = pool_str;
+    // NOTE(review): slen assigned via `as i64` — pj_ssize_t width is
+    // platform-dependent; confirm this compiles/behaves on 32-bit targets.
+    (*sip_uri).host.slen = host_len as i64;
+    (*sip_uri).port = new_port as i32;
+    true
+}
+
+/// Replace `old_ip` with `new_ip` inside the SDP body of `msg`, allocating
+/// the replacement string from `pool`. Only rewrites `c=` (connection) and
+/// `o=` (origin) lines to avoid corrupting attribute values that may
+/// coincidentally contain the same IP string. Returns `true` if a
+/// substitution was made.
+unsafe fn rewrite_sdp_body(
+    pool: *mut pj_pool_t,
+    msg: *mut pjsip_msg,
+    old_ip: &str,
+    new_ip: &str,
+) -> bool {
+    let body = (*msg).body;
+    if body.is_null() || (*body).len == 0 || (*body).data.is_null() {
+        return false;
+    }
+
+    let body_slice = std::slice::from_raw_parts((*body).data as *const u8, (*body).len as usize);
+    // Non-UTF-8 bodies are left untouched.
+    let Ok(sdp_str) = std::str::from_utf8(body_slice) else {
+        return false;
+    };
+
+    // Line-by-line replacement: only rewrite c= and o= lines
+    let mut changed = false;
+    let new_sdp: String = sdp_str
+        .lines()
+        .map(|line| {
+            if (line.starts_with("c=") || line.starts_with("o=")) && line.contains(old_ip) {
+                changed = true;
+                line.replace(old_ip, new_ip)
+            } else {
+                line.to_string()
+            }
+        })
+        .collect::>()
+        .join("\r\n");
+
+    // Preserve trailing CRLF
+    let new_sdp = new_sdp + "\r\n";
+
+    if !changed {
+        return false;
+    }
+
+    let new_len = new_sdp.len();
+    // Pool-allocate the rewritten body so it shares the message's lifetime.
+    let new_body_ptr = pj_pool_alloc(pool, new_len) as *mut u8;
+    if new_body_ptr.is_null() {
+        return false;
+    }
+
+    ptr::copy_nonoverlapping(new_sdp.as_ptr(), new_body_ptr, new_len);
+    (*body).data = new_body_ptr as *mut _;
+    (*body).len = new_len as u32;
+    true
+}
+
+/// Unified local-network rewriting for outgoing tx data.
+///
+/// Checks `LOCAL_NET_CONFIG`, verifies the destination is in the configured
+/// CIDR, and rewrites the Contact header + SDP body.
+///
+/// `direction` is used only for log messages ("request" or "response").
+unsafe fn rewrite_local_network_tdata(tdata: *mut pjsip_tx_data, direction: &str) -> bool {
+    // Double Option: outer = OnceLock not yet set, inner = feature disabled.
+    // NOTE(review): presumed meaning of the nesting — confirm at the config site.
+    let Some(Some((local_host, local_cidr, port, rtp_public_ip))) = LOCAL_NET_CONFIG.get() else {
+        return false;
+    };
+
+    if tdata.is_null() {
+        return false;
+    }
+
+    let Some(dst_ip) = extract_dst_ipv4(tdata) else {
+        return false;
+    };
+
+    // Only rewrite traffic destined for the configured local network.
+    if !local_cidr.contains(&dst_ip) {
+        return false;
+    }
+
+    let msg = (*tdata).msg;
+    if msg.is_null() {
+        return false;
+    }
+
+    let mut changed = false;
+
+    // Rewrite Contact header
+    if rewrite_contact_host((*tdata).pool, msg, local_host, *port) {
+        tracing::debug!(
+            "Rewrote {} Contact header for local client {}: host -> {}:{}",
+            direction,
+            dst_ip,
+            local_host,
+            port
+        );
+        changed = true;
+    }
+
+    // Rewrite SDP body if we have an RTP public IP to replace
+    if let Some(public_ip) = rtp_public_ip {
+        if rewrite_sdp_body((*tdata).pool, msg, public_ip, local_host) {
+            tracing::debug!(
+                "Rewrote {} SDP for local client {}: {} -> {}",
+                direction,
+                dst_ip,
+                public_ip,
+                local_host
+            );
+            changed = true;
+        }
+    }
+
+    changed
+}
+
+/// Rewrite private IPs in Contact headers for external (non-local) clients.
+///
+/// pjsua derives the Contact URI from the TCP/UDP connection's local address,
+/// which is the bridge's private IP (e.g. 10.0.1.7) when running behind NAT.
+/// External clients need the public hostname (e.g. bridge-usw1.sipcord.net) so
+/// they can route in-dialog requests like BYE back to us. Without this fix,
+/// phones that try to send BYE to the private IP will silently fail.
+unsafe fn rewrite_private_contact_for_external(tdata: *mut pjsip_tx_data, direction: &str) -> bool {
+    let Some(Some((public_host, port))) = PUBLIC_HOST_CONFIG.get() else {
+        return false;
+    };
+
+    if tdata.is_null() {
+        return false;
+    }
+
+    let msg = (*tdata).msg;
+    if msg.is_null() {
+        return false;
+    }
+
+    // Find Contact header
+    let contact_hdr = pjsip_msg_find_hdr(msg, pjsip_hdr_e_PJSIP_H_CONTACT, ptr::null_mut())
+        as *mut pjsip_contact_hdr;
+    if contact_hdr.is_null() {
+        return false;
+    }
+
+    let uri = (*contact_hdr).uri;
+    if uri.is_null() {
+        return false;
+    }
+
+    // Unwrap via vtable to handle pjsip_name_addr wrapping
+    let uri_vptr = (*(uri as *const pjsip_uri)).vptr;
+    if uri_vptr.is_null() {
+        return false;
+    }
+    let get_uri_fn = match (*uri_vptr).p_get_uri {
+        Some(f) => f,
+        None => return false,
+    };
+    let sip_uri_raw = get_uri_fn(uri as *mut std::os::raw::c_void);
+    if sip_uri_raw.is_null() {
+        return false;
+    }
+    let sip_uri = sip_uri_raw as *mut pjsip_sip_uri;
+    if (*sip_uri).host.ptr.is_null() || (*sip_uri).host.slen <= 0 {
+        return false;
+    }
+
+    let host = pj_str_to_string(&(*sip_uri).host);
+
+    // Only rewrite if Contact host is a private (RFC 1918) IP
+    let contact_ip: Ipv4Addr = match host.parse() {
+        Ok(ip) => ip,
+        Err(_) => return false, // Already a hostname, no rewrite needed
+    };
+
+    if !is_rfc1918(contact_ip) {
+        return false; // Public IP, no rewrite needed
+    }
+
+    // Skip if destination is also private (local-network rewrite handles that)
+    if let Some(dst_ip) = extract_dst_ipv4(tdata) {
+        if is_rfc1918(dst_ip) {
+            return false;
+        }
+    }
+
+    // Rewrite Contact to public host
+    if rewrite_contact_host((*tdata).pool, msg, public_host, *port) {
+        tracing::debug!(
+            "Rewrote {} Contact for external client: {} -> {}:{}",
+            direction,
+            host,
+            public_host,
+            port
+        );
+        return true;
+    }
+
+    false
+}
+
+// Public callbacks
+
+/// Callback to rewrite Contact header and SDP body in outgoing responses.
+/// +/// Two rewrites are applied in order: +/// 1. Local-network rewrite: for clients on the local CIDR, use the local IP +/// 2. Public-host rewrite: for external clients, replace private Contact IPs +/// with the public hostname so they can route BYE back to us +pub unsafe extern "C" fn on_tx_response_cb(tdata: *mut pjsip_tx_data) -> pj_status_t { + let local_rewrite = rewrite_local_network_tdata(tdata, "response"); + let public_rewrite = rewrite_private_contact_for_external(tdata, "response"); + + // If we modified headers, the buffer was already serialized by mod-msg-print + // (priority 8, before our module at priority 32). Invalidate and re-encode + // so the changes actually reach the wire. + if local_rewrite || public_rewrite { + pjsip_tx_data_invalidate_msg(tdata); + pjsip_tx_data_encode(tdata); + } + + pj_constants__PJ_SUCCESS as pj_status_t +} + +/// Callback to rewrite Contact header and SDP body in outgoing requests. +/// Same dual-rewrite logic as the response path. +pub unsafe extern "C" fn on_tx_request_cb(tdata: *mut pjsip_tx_data) -> pj_status_t { + let local_rewrite = rewrite_local_network_tdata(tdata, "request"); + let public_rewrite = rewrite_private_contact_for_external(tdata, "request"); + + // If we modified headers, the buffer was already serialized by mod-msg-print + // (priority 8, before our module at priority 32). Invalidate and re-encode + // so the changes actually reach the wire. + if local_rewrite || public_rewrite { + pjsip_tx_data_invalidate_msg(tdata); + pjsip_tx_data_encode(tdata); + } + + pj_constants__PJ_SUCCESS as pj_status_t +} + +/// Callback to fix far-end NAT traversal in incoming SIP requests (INVITEs). 
+/// +/// When a phone behind NAT sends an INVITE, its SDP body contains private IPs: +/// - SDP `c=IN IP4 192.168.x.x` -> We'd send RTP to an unreachable private IP +/// +/// This callback detects the NAT condition (private SDP IP != packet source IP) +/// and rewrites the SDP before PJSIP's invite/dialog layer processes it, +/// so the media session uses the correct public address. +pub unsafe extern "C" fn on_rx_request_nat_fixup_cb(rdata: *mut pjsip_rx_data) -> pj_bool_t { + if rdata.is_null() { + return pj_constants__PJ_FALSE as pj_bool_t; + } + + let msg = (*rdata).msg_info.msg; + if msg.is_null() { + return pj_constants__PJ_FALSE as pj_bool_t; + } + + // Only process requests (safety check) + if (*msg).type_ != pjsip_msg_type_e_PJSIP_REQUEST_MSG { + return pj_constants__PJ_FALSE as pj_bool_t; + } + + // Only process INVITE and re-INVITE (they carry SDP with media addresses) + let method_id = (*msg).line.req.method.id; + if method_id != pjsip_method_e_PJSIP_INVITE_METHOD { + return pj_constants__PJ_FALSE as pj_bool_t; + } + + // Check if there's a body (SDP) + let body = (*msg).body; + if body.is_null() || (*body).len == 0 || (*body).data.is_null() { + return pj_constants__PJ_FALSE as pj_bool_t; + } + + // Extract source IP from packet info + let src_name = &(*rdata).pkt_info.src_name; + let name_len = src_name + .iter() + .position(|&c| c == 0) + .unwrap_or(src_name.len()); + let src_ip_str = match std::str::from_utf8(std::slice::from_raw_parts( + src_name.as_ptr() as *const u8, + name_len, + )) { + Ok(s) => s, + Err(_) => return pj_constants__PJ_FALSE as pj_bool_t, + }; + let src_ip: Ipv4Addr = match src_ip_str.parse() { + Ok(ip) => ip, + Err(_) => return pj_constants__PJ_FALSE as pj_bool_t, + }; + + // Parse SDP to find c= line IP and check if it's a private address + let body_slice = std::slice::from_raw_parts((*body).data as *const u8, (*body).len as usize); + let sdp_str = match std::str::from_utf8(body_slice) { + Ok(s) => s, + Err(_) => return 
pj_constants__PJ_FALSE as pj_bool_t, + }; + + // Find any connection address in the SDP that needs NAT fixup. + // Check ALL c= lines (session-level and per-media) — if any contain a + // private IP different from the packet source, rewrite the SDP. + let mut needs_rewrite = false; + let mut private_ip_str: Option<&str> = None; + for line in sdp_str.lines() { + if let Some(addr_str) = line.strip_prefix("c=IN IP4 ") { + let addr_str = addr_str.trim(); + if let Ok(sdp_ip) = addr_str.parse::() { + if is_rfc1918(sdp_ip) && sdp_ip != src_ip { + needs_rewrite = true; + private_ip_str = Some(addr_str); + break; + } + } + } + } + + if needs_rewrite { + if let Some(private_ip) = private_ip_str { + let pool = (*rdata).tp_info.pool; + if !pool.is_null() { + if rewrite_sdp_body(pool, msg, private_ip, src_ip_str) { + tracing::debug!( + "NAT fixup (INVITE): SDP rewritten {} -> {} (from {}:{})", + private_ip, + src_ip_str, + src_ip_str, + (*rdata).pkt_info.src_port + ); + } + } + } + } + + // Also rewrite Contact header if present and has private IP + let contact_hdr = pjsip_msg_find_hdr(msg, pjsip_hdr_e_PJSIP_H_CONTACT, ptr::null_mut()) + as *mut pjsip_contact_hdr; + if !contact_hdr.is_null() { + let uri = (*contact_hdr).uri; + if !uri.is_null() { + let uri_vptr = (*(uri as *const pjsip_uri)).vptr; + if !uri_vptr.is_null() { + if let Some(get_uri_fn) = (*uri_vptr).p_get_uri { + let sip_uri_raw = get_uri_fn(uri as *mut std::os::raw::c_void); + if !sip_uri_raw.is_null() { + let sip_uri = sip_uri_raw as *mut pjsip_sip_uri; + let contact_host = pj_str_to_string(&(*sip_uri).host); + if let Ok(contact_ip) = contact_host.parse::() { + if is_rfc1918(contact_ip) && contact_ip != src_ip { + let src_port = (*rdata).pkt_info.src_port as u16; + let pool = (*rdata).tp_info.pool; + if !pool.is_null() { + if let Ok(new_host_cstr) = CString::new(src_ip_str) { + let host_len = src_ip_str.len(); + let pool_str = + pj_pool_alloc(pool, host_len + 1) as *mut c_char; + if !pool_str.is_null() { + 
ptr::copy_nonoverlapping( + new_host_cstr.as_ptr(), + pool_str, + host_len + 1, + ); + (*sip_uri).host.ptr = pool_str; + (*sip_uri).host.slen = host_len as i64; + (*sip_uri).port = src_port as i32; + tracing::debug!( + "NAT fixup (INVITE): Contact rewritten {} -> {}:{}", + contact_host, + src_ip_str, + src_port + ); + } + } + } + } + } + } + } + } + } + } + + // Sanitize SDP: strip dynamic payload types (96+) that lack a=rtpmap attributes. + // Without this, PJSIP's SDP validator rejects these INVITEs with EMISSINGRTPMAP. + let body = (*msg).body; + if !body.is_null() && (*body).len > 0 && !(*body).data.is_null() { + let body_slice = + std::slice::from_raw_parts((*body).data as *const u8, (*body).len as usize); + if let Ok(sdp_str) = std::str::from_utf8(body_slice) { + if let Some(sanitized) = sanitize_sdp_missing_rtpmap(sdp_str) { + let pool = (*rdata).tp_info.pool; + if !pool.is_null() { + let new_len = sanitized.len(); + let new_body_ptr = pj_pool_alloc(pool, new_len) as *mut u8; + if !new_body_ptr.is_null() { + ptr::copy_nonoverlapping(sanitized.as_ptr(), new_body_ptr, new_len); + (*body).data = new_body_ptr as *mut _; + (*body).len = new_len as u32; + tracing::debug!( + "SDP sanitized: stripped orphan dynamic payload types (from {}:{})", + src_ip_str, + (*rdata).pkt_info.src_port + ); + } + } + } + } + } + + pj_constants__PJ_FALSE as pj_bool_t +} + +/// Callback to fix far-end NAT traversal in incoming SIP responses. +/// +/// When the remote party (phone) is behind NAT, their responses contain +/// private IPs in the Contact header and SDP body: +/// - Contact: `` -> PRACK/ACK routed to unreachable private IP +/// - SDP `c=IN IP4 192.168.x.x` -> RTP sent to unreachable private IP +/// +/// This callback detects NAT (private Contact IP != packet source IP) and +/// rewrites both to the actual public source IP before PJSIP processes the +/// response, so the dialog target and media address are correct. 
+/// +/// Registered at priority 28 (before dialog/invite layer at 32) to ensure +/// the dialog's remote target uses the corrected address. +pub unsafe extern "C" fn on_rx_response_nat_fixup_cb(rdata: *mut pjsip_rx_data) -> pj_bool_t { + if rdata.is_null() { + return pj_constants__PJ_FALSE as pj_bool_t; + } + + let msg = (*rdata).msg_info.msg; + if msg.is_null() { + return pj_constants__PJ_FALSE as pj_bool_t; + } + + // Only process 1xx/2xx responses (provisional and success) + let status_code = (*msg).line.status.code; + if !(100..300).contains(&status_code) { + return pj_constants__PJ_FALSE as pj_bool_t; + } + + // Extract source IP from pkt_info.src_name (null-terminated char array) + let src_name = &(*rdata).pkt_info.src_name; + let name_len = src_name + .iter() + .position(|&c| c == 0) + .unwrap_or(src_name.len()); + let src_ip_str = match std::str::from_utf8(std::slice::from_raw_parts( + src_name.as_ptr() as *const u8, + name_len, + )) { + Ok(s) => s, + Err(_) => return pj_constants__PJ_FALSE as pj_bool_t, + }; + let src_ip: Ipv4Addr = match src_ip_str.parse() { + Ok(ip) => ip, + Err(_) => return pj_constants__PJ_FALSE as pj_bool_t, // IPv6 or invalid + }; + let src_port = (*rdata).pkt_info.src_port as u16; + + // Find Contact header in the response + let contact_hdr = pjsip_msg_find_hdr(msg, pjsip_hdr_e_PJSIP_H_CONTACT, ptr::null_mut()) + as *mut pjsip_contact_hdr; + if contact_hdr.is_null() { + return pj_constants__PJ_FALSE as pj_bool_t; + } + + // Get the SIP URI from the Contact (unwrap name_addr via vtable). + // The rx path requires vtable-based URI unwrapping (p_get_uri) because + // the Contact URI may be wrapped in a pjsip_name_addr, unlike the tx + // path where we can cast directly. 
+ let uri = (*contact_hdr).uri; + if uri.is_null() { + return pj_constants__PJ_FALSE as pj_bool_t; + } + let uri_vptr = (*(uri as *const pjsip_uri)).vptr; + if uri_vptr.is_null() { + return pj_constants__PJ_FALSE as pj_bool_t; + } + let get_uri_fn = match (*uri_vptr).p_get_uri { + Some(f) => f, + None => return pj_constants__PJ_FALSE as pj_bool_t, + }; + let sip_uri_raw = get_uri_fn(uri as *mut std::os::raw::c_void); + if sip_uri_raw.is_null() { + return pj_constants__PJ_FALSE as pj_bool_t; + } + let sip_uri = sip_uri_raw as *mut pjsip_sip_uri; + + // Parse Contact host as IPv4 + let contact_host = pj_str_to_string(&(*sip_uri).host); + let contact_ip: Ipv4Addr = match contact_host.parse() { + Ok(ip) => ip, + Err(_) => return pj_constants__PJ_FALSE as pj_bool_t, // Hostname, skip + }; + + // Only rewrite if Contact has a private IP that differs from the source + if !is_rfc1918(contact_ip) || contact_ip == src_ip { + return pj_constants__PJ_FALSE as pj_bool_t; + } + + // NAT detected: Contact has private IP, packet came from different (public) IP + tracing::debug!( + "NAT fixup: rewriting Contact {} -> {}:{} (response {} from {}:{})", + contact_host, + src_ip, + src_port, + status_code, + src_ip, + src_port + ); + + // Rewrite Contact URI host to the public source IP + let pool = (*rdata).tp_info.pool; + if !pool.is_null() { + if let Ok(new_host_cstr) = CString::new(src_ip_str) { + let host_len = src_ip_str.len(); + let pool_str = pj_pool_alloc(pool, host_len + 1) as *mut c_char; + if !pool_str.is_null() { + ptr::copy_nonoverlapping(new_host_cstr.as_ptr(), pool_str, host_len + 1); + (*sip_uri).host.ptr = pool_str; + (*sip_uri).host.slen = host_len as i64; + (*sip_uri).port = src_port as i32; + } + } + } + + // Rewrite SDP body: replace private IP with public source IP. + // Parse the SDP c= line directly to get the actual media IP — it may differ + // from the Contact header IP (e.g., dual-homed phone or double NAT). 
+ let body = (*msg).body; + if !body.is_null() && (*body).len > 0 && !(*body).data.is_null() { + let body_slice = + std::slice::from_raw_parts((*body).data as *const u8, (*body).len as usize); + if let Ok(sdp_str) = std::str::from_utf8(body_slice) { + for line in sdp_str.lines() { + if let Some(addr_str) = line.strip_prefix("c=IN IP4 ") { + let addr_str = addr_str.trim(); + if let Ok(sdp_ip) = addr_str.parse::() { + if is_rfc1918(sdp_ip) && sdp_ip != src_ip { + if rewrite_sdp_body(pool, msg, addr_str, src_ip_str) { + tracing::debug!( + "NAT fixup: SDP rewritten {} -> {}", + addr_str, + src_ip_str + ); + } + break; + } + } + } + } + } + } + + // Return FALSE to let other modules also process this response + pj_constants__PJ_FALSE as pj_bool_t +} + +#[cfg(test)] +mod tests { + use super::*; + + + #[test] + fn test_is_rfc1918_10_network() { + assert!(is_rfc1918(Ipv4Addr::new(10, 0, 0, 1))); + assert!(is_rfc1918(Ipv4Addr::new(10, 255, 255, 255))); + } + + #[test] + fn test_is_rfc1918_172_16_network() { + assert!(is_rfc1918(Ipv4Addr::new(172, 16, 0, 1))); + assert!(is_rfc1918(Ipv4Addr::new(172, 31, 255, 255))); + } + + #[test] + fn test_is_rfc1918_192_168_network() { + assert!(is_rfc1918(Ipv4Addr::new(192, 168, 1, 1))); + assert!(is_rfc1918(Ipv4Addr::new(192, 168, 0, 0))); + } + + #[test] + fn test_is_rfc1918_public_addresses() { + assert!(!is_rfc1918(Ipv4Addr::new(8, 8, 8, 8))); + assert!(!is_rfc1918(Ipv4Addr::new(172, 15, 0, 1))); + assert!(!is_rfc1918(Ipv4Addr::new(172, 32, 0, 1))); + assert!(!is_rfc1918(Ipv4Addr::new(192, 167, 1, 1))); + assert!(!is_rfc1918(Ipv4Addr::new(1, 1, 1, 1))); + } + + + #[test] + fn test_sanitize_sdp_orphan_dynamic_pt_stripped() { + let sdp = "v=0\r\n\ + o=- 0 0 IN IP4 0.0.0.0\r\n\ + s=-\r\n\ + c=IN IP4 10.0.0.1\r\n\ + m=audio 5000 RTP/AVP 0 8 96\r\n\ + a=rtpmap:0 PCMU/8000\r\n\ + a=rtpmap:8 PCMA/8000\r\n"; + // PT 96 has no rtpmap -> should be stripped + let result = sanitize_sdp_missing_rtpmap(sdp).unwrap(); + 
assert!(result.contains("m=audio 5000 RTP/AVP 0 8\r\n")); + assert!(!result.contains("96")); + } + + #[test] + fn test_sanitize_sdp_all_valid_pts_unchanged() { + let sdp = "v=0\r\n\ + o=- 0 0 IN IP4 0.0.0.0\r\n\ + s=-\r\n\ + m=audio 5000 RTP/AVP 0 96\r\n\ + a=rtpmap:0 PCMU/8000\r\n\ + a=rtpmap:96 opus/48000/2\r\n"; + assert!(sanitize_sdp_missing_rtpmap(sdp).is_none()); + } + + #[test] + fn test_sanitize_sdp_non_rtp_transport_skipped() { + let sdp = "v=0\r\n\ + o=- 0 0 IN IP4 0.0.0.0\r\n\ + s=-\r\n\ + m=image 5000 udptl t38\r\n\ + a=T38FaxVersion:0\r\n"; + assert!(sanitize_sdp_missing_rtpmap(sdp).is_none()); + } + + #[test] + fn test_sanitize_sdp_all_pts_orphaned_unchanged() { + // If stripping all dynamic PTs would leave none, m= line stays unchanged + let sdp = "v=0\r\n\ + o=- 0 0 IN IP4 0.0.0.0\r\n\ + s=-\r\n\ + m=audio 5000 RTP/AVP 96 97\r\n"; + // Both 96 and 97 are dynamic with no rtpmap, but stripping both would leave no PTs + assert!(sanitize_sdp_missing_rtpmap(sdp).is_none()); + } + + #[test] + fn test_sanitize_sdp_mixed_valid_and_orphan() { + let sdp = "v=0\r\n\ + o=- 0 0 IN IP4 0.0.0.0\r\n\ + s=-\r\n\ + m=audio 5000 RTP/AVP 0 96 97\r\n\ + a=rtpmap:0 PCMU/8000\r\n\ + a=rtpmap:96 opus/48000/2\r\n\ + a=fmtp:97 mode=20\r\n"; + // PT 97 has no rtpmap -> stripped, its fmtp line also removed + let result = sanitize_sdp_missing_rtpmap(sdp).unwrap(); + assert!(result.contains("m=audio 5000 RTP/AVP 0 96\r\n")); + assert!(!result.contains("97")); + assert!(!result.contains("fmtp:97")); + // fmtp for 96 would be kept if it existed; rtpmap:96 should still be there + assert!(result.contains("a=rtpmap:96 opus/48000/2")); + } + + #[test] + fn test_sanitize_sdp_malformed_m_line() { + let sdp = "v=0\r\n\ + o=- 0 0 IN IP4 0.0.0.0\r\n\ + s=-\r\n\ + m=audio 5000\r\n"; + // Malformed m= line (< 4 parts) -> passes through unchanged + assert!(sanitize_sdp_missing_rtpmap(sdp).is_none()); + } +} diff --git a/sipcord-bridge/src/transport/sip/register_handler.rs 
b/sipcord-bridge/src/transport/sip/register_handler.rs new file mode 100644 index 0000000..a0a7777 --- /dev/null +++ b/sipcord-bridge/src/transport/sip/register_handler.rs @@ -0,0 +1,585 @@ +//! PJSIP module for REGISTER request handling +//! +//! This module handles: +//! - REGISTER requests with 401 challenge / Digest auth verification +//! - Storing registrations in the Registrar for inbound call routing + +use super::callbacks::{ + extract_digest_auth_from_rdata, extract_source_ip, extract_user_agent, is_sipvicious_scanner, +}; +use super::ffi::types::*; +use super::ffi::utils::pj_str_to_string; +use pjsua::*; +use std::ffi::CString; +use std::net::SocketAddr; +use std::os::raw::c_char; +use std::ptr; +use std::sync::atomic::{AtomicPtr, Ordering}; + +// Sendable pointer wrappers for pjsip types (used to move tsx/tdata across +// threads via the SipCommand channel). These MUST only be dereferenced from +// the pjsua event-loop thread. + +pub struct SendableTsx(pub *mut pjsip_transaction); +unsafe impl Send for SendableTsx {} + +pub struct SendableTdata(pub *mut pjsip_tx_data); +unsafe impl Send for SendableTdata {} + +/// A REGISTER transaction awaiting async auth verification. +/// Created in the pjsip callback, consumed in `process_sip_command`. +pub struct PendingRegisterTsx { + pub tsx: SendableTsx, + pub tdata: SendableTdata, + pub expires: u32, +} + +impl std::fmt::Debug for PendingRegisterTsx { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("PendingRegisterTsx") + .field("expires", &self.expires) + .finish() + } +} + +// Globals + +/// Channel for sending register events to the async verification task. +static REGISTER_EVENT_TX: std::sync::OnceLock> = + std::sync::OnceLock::new(); + +/// Sender half of the SIP command channel (for deferred REGISTER responses). 
+static SIP_COMMAND_TX: std::sync::OnceLock> = + std::sync::OnceLock::new(); + +/// Pointer to the registered pjsip_module, needed for `pjsip_tsx_create_uas2`. +static REGISTER_MODULE_PTR: AtomicPtr = AtomicPtr::new(ptr::null_mut()); + +pub fn set_register_event_sender(tx: tokio::sync::mpsc::Sender) { + let _ = REGISTER_EVENT_TX.set(tx); +} + +pub fn set_sip_command_sender(tx: crossbeam_channel::Sender) { + let _ = SIP_COMMAND_TX.set(tx); +} + +pub fn set_register_module_ptr(ptr: *mut pjsip_module) { + REGISTER_MODULE_PTR.store(ptr, Ordering::Release); +} + +// Helpers + +/// Initialize a pjsip_hdr as a list head (equivalent to pj_list_init C macro). +#[inline] +unsafe fn pj_list_init_hdr(hdr: *mut pjsip_hdr) { + (*hdr).next = hdr as *mut _; + (*hdr).prev = hdr as *mut _; +} + +/// Send a simple stateless SIP response (no custom headers). +unsafe fn send_simple_response(rdata: *mut pjsip_rx_data, status_code: u16, reason: &str) { + let endpt = pjsua_get_pjsip_endpt(); + if !endpt.is_null() { + let reason_cstr = CString::new(reason).unwrap(); + let reason_pj = pj_str(reason_cstr.as_ptr() as *mut c_char); + pjsip_endpt_respond_stateless( + endpt, + rdata, + status_code.into(), + &reason_pj, + ptr::null(), + ptr::null(), + ); + } +} + +/// Send a stateless 200 OK with an Expires header. 
+unsafe fn send_register_ok(rdata: *mut pjsip_rx_data, expires: u32) { + let endpt = pjsua_get_pjsip_endpt(); + if endpt.is_null() { + return; + } + + let expires_str = format!("{}", expires); + let hdr_name = CString::new("Expires").unwrap(); + let hdr_value = CString::new(expires_str).unwrap(); + + let pool = pjsua_pool_create(c"register_ok".as_ptr(), 512, 512); + if !pool.is_null() { + let name = pj_str(hdr_name.as_ptr() as *mut c_char); + let value = pj_str(hdr_value.as_ptr() as *mut c_char); + let hdr = pjsip_generic_string_hdr_create(pool, &name, &value); + + if !hdr.is_null() { + let hdr_list = pj_pool_alloc(pool, std::mem::size_of::()) as *mut pjsip_hdr; + if !hdr_list.is_null() { + pj_list_init_hdr(hdr_list); + pj_list_insert_before(hdr_list as *mut pj_list_type, hdr as *mut pj_list_type); + + let status = pjsip_endpt_respond_stateless( + endpt, + rdata, + 200, + ptr::null(), + hdr_list, + ptr::null(), + ); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!("Failed to respond 200 OK to REGISTER: {}", status); + } + // Release pool — pjsip_endpt_respond_stateless clones what it + // needs into rdata's pool, so our header pool can be freed now. + pj_pool_release(pool); + return; + } + } + // Header creation failed — release the pool before falling through + pj_pool_release(pool); + } + + // Fallback: respond without Expires header + let status = + pjsip_endpt_respond_stateless(endpt, rdata, 200, ptr::null(), ptr::null(), ptr::null()); + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!("Failed to respond 200 OK to REGISTER: {}", status); + } +} + +/// Detect transport type (UDP/TCP/TLS) from the incoming request. 
+unsafe fn detect_transport(rdata: *mut pjsip_rx_data) -> crate::services::registrar::SipTransport { + if !(*rdata).tp_info.transport.is_null() { + let tp_type = (*(*rdata).tp_info.transport).key.type_ as u32; + if tp_type == pjsip_transport_type_e_PJSIP_TRANSPORT_TLS { + crate::services::registrar::SipTransport::Tls + } else if tp_type == pjsip_transport_type_e_PJSIP_TRANSPORT_TCP { + crate::services::registrar::SipTransport::Tcp + } else { + crate::services::registrar::SipTransport::Udp + } + } else { + crate::services::registrar::SipTransport::Udp + } +} + +/// Create a UAS transaction + pre-built response tdata for deferred REGISTER +/// responses. Returns `None` if transaction creation fails (caller should fall +/// back to stateless response). +unsafe fn create_register_tsx( + rdata: *mut pjsip_rx_data, + expires: u32, +) -> Option { + let endpt = pjsua_get_pjsip_endpt(); + let module_ptr = REGISTER_MODULE_PTR.load(Ordering::Acquire); + + if endpt.is_null() || module_ptr.is_null() { + return None; + } + + // Create UAS transaction + let mut tsx: *mut pjsip_transaction = ptr::null_mut(); + let status = pjsip_tsx_create_uas2(module_ptr, rdata, ptr::null_mut(), &mut tsx); + if status != pj_constants__PJ_SUCCESS as i32 || tsx.is_null() { + return None; + } + + // Feed the request to the transaction (starts Timer F, stores headers) + pjsip_tsx_recv_msg(tsx, rdata); + + // Pre-build a 200 OK response while rdata is still valid. + // The status code / reason will be modified before sending if auth fails. 
+ let mut tdata: *mut pjsip_tx_data = ptr::null_mut(); + let status = pjsip_endpt_create_response(endpt, rdata, 200, ptr::null(), &mut tdata); + if status != pj_constants__PJ_SUCCESS as i32 || tdata.is_null() { + pjsip_tsx_terminate(tsx, 500); + return None; + } + + Some(PendingRegisterTsx { + tsx: SendableTsx(tsx), + tdata: SendableTdata(tdata), + expires, + }) +} + +// Main callback + +/// Callback to handle incoming SIP requests (for REGISTER support) +/// +/// SIP clients send REGISTER requests to register with the server. pjsua's high-level +/// API doesn't handle REGISTER since it's designed as a client library. We intercept +/// REGISTER requests here. +/// +/// Flow: +/// 1. REGISTER without Authorization header -> 401 with WWW-Authenticate challenge +/// 2. REGISTER with Authorization header: +/// a. Cache hit + verified -> immediate 200 OK (stateless) +/// b. Cache hit + mismatch -> immediate 403 Forbidden (stateless) +/// c. Cache miss -> defer via UAS transaction, verify via API, respond later +pub unsafe extern "C" fn on_rx_request_cb(rdata: *mut pjsip_rx_data) -> pj_bool_t { + if rdata.is_null() { + return pj_constants__PJ_FALSE as pj_bool_t; + } + + let msg = (*rdata).msg_info.msg; + if msg.is_null() { + return pj_constants__PJ_FALSE as pj_bool_t; + } + + // Check if this is a REGISTER request + let method_id = (*msg).line.req.method.id; + if method_id != pjsip_method_e_PJSIP_REGISTER_METHOD { + // Not REGISTER, let other modules handle it + return pj_constants__PJ_FALSE as pj_bool_t; + } + + // Extract source IP for logging and ban checking + let source_ip = extract_source_ip(rdata); + let ip_str = source_ip + .map(|ip| ip.to_string()) + .unwrap_or_else(|| "unknown".to_string()); + + // Extract source port + let source_port = (*rdata).pkt_info.src_port as u16; + + // Ban checks: skip if banning disabled or IP is whitelisted + if let Some(ip) = source_ip { + if let Some(ban_mgr) = crate::services::ban::global() { + if ban_mgr.is_enabled() && 
!ban_mgr.is_whitelisted(&ip) { + // Check if IP is banned + let result = ban_mgr.check_banned(&ip); + if result.is_banned { + tracing::debug!("Rejecting REGISTER from banned IP {}", ip); + send_simple_response(rdata, 403, "Forbidden"); + return pj_constants__PJ_TRUE as pj_bool_t; + } + } + } + } + + // Check User-Agent for SIPVicious scanners - instant permaban + if let Some(user_agent) = extract_user_agent(rdata) { + if is_sipvicious_scanner(&user_agent) { + if let Some(ip) = source_ip { + if let Some(ban_mgr) = crate::services::ban::global() { + if ban_mgr.is_enabled() && !ban_mgr.is_whitelisted(&ip) { + let result = + ban_mgr.record_permanent_ban(ip, "sipvicious_scanner_register"); + if result.should_log { + tracing::warn!( + "PERMABAN IP {} - SIPVicious scanner detected in REGISTER: User-Agent='{}'", + ip, user_agent + ); + } + } + } + } else { + tracing::warn!( + "SIPVicious scanner detected in REGISTER but no IP available: User-Agent='{}'", + user_agent + ); + } + send_simple_response(rdata, 403, "Forbidden"); + return pj_constants__PJ_TRUE as pj_bool_t; + } + } + + // Rate limit REGISTER requests + if let Some(ip) = source_ip { + if let Some(ban_mgr) = crate::services::ban::global() { + if ban_mgr.is_enabled() && !ban_mgr.is_whitelisted(&ip) && ban_mgr.record_register(ip) { + tracing::debug!("Rejecting REGISTER from {} - rate limit exceeded", ip); + send_simple_response(rdata, 429, "Too Many Requests"); + return pj_constants__PJ_TRUE as pj_bool_t; + } + } + } + + // Try to extract Digest auth params from Authorization header + let digest_params = extract_digest_auth_from_rdata(rdata); + + if let Some(mut params) = digest_params { + // Has auth - fill in REGISTER method + params.method = "REGISTER".to_string(); + + // Check auth failure cooldown before processing + if let Some(cache) = crate::services::auth_cache::AuthCache::global() { + if cache.is_in_cooldown(¶ms.username) { + tracing::debug!( + "Rejecting REGISTER from {} (user={}) - auth cooldown active", 
+ ip_str, + params.username + ); + send_simple_response(rdata, 429, "Too Many Requests"); + return pj_constants__PJ_TRUE as pj_bool_t; + } + } + + // Extract fields needed for all code paths + let contact_uri = extract_contact_uri(rdata); + let expires = extract_expires(rdata); + let source_addr = source_ip.map(|ip| SocketAddr::new(ip, source_port)); + let transport = detect_transport(rdata); + + // Auth cache verification + if let Some(cache) = crate::services::auth_cache::AuthCache::global() { + use crate::services::auth_cache::VerifyResult; + match cache.check(¶ms) { + VerifyResult::Verified => { + // Cache hit, auth OK — fast-path 200 OK + tracing::debug!( + "REGISTER auth OK (cached): user={} from {}", + params.username, + ip_str + ); + send_register_ok(rdata, expires); + // Send to async handler for registrar update + if let Some(tx) = REGISTER_EVENT_TX.get() { + let _ = tx.try_send(RegisterRequest { + digest_auth: params, + contact_uri: contact_uri.unwrap_or_default(), + source_addr, + transport, + expires, + pending_tsx: None, + }); + } + return pj_constants__PJ_TRUE as pj_bool_t; + } + VerifyResult::Mismatch => { + // Wrong password (cached HA1 didn't match) — 403 + tracing::debug!( + "REGISTER auth mismatch (cached): user={} from {}", + params.username, + ip_str + ); + send_simple_response(rdata, 403, "Forbidden"); + // Send to async so API can re-verify (cache may be stale + // after a password change) and update failure counts + if let Some(tx) = REGISTER_EVENT_TX.get() { + let _ = tx.try_send(RegisterRequest { + digest_auth: params, + contact_uri: contact_uri.unwrap_or_default(), + source_addr, + transport, + expires, + pending_tsx: None, + }); + } + return pj_constants__PJ_TRUE as pj_bool_t; + } + VerifyResult::Miss => { + // No cached HA1 — need API round-trip. + // Create a UAS transaction so we can respond after the + // async handler completes, without blocking pjsip. 
+ tracing::debug!( + "REGISTER cache miss: user={} from {}, deferring to API", + params.username, + ip_str + ); + if let Some(pending) = create_register_tsx(rdata, expires) { + if let Some(tx) = REGISTER_EVENT_TX.get() { + let _ = tx.try_send(RegisterRequest { + digest_auth: params, + contact_uri: contact_uri.unwrap_or_default(), + source_addr, + transport, + expires, + pending_tsx: Some(pending), + }); + } + return pj_constants__PJ_TRUE as pj_bool_t; + } + // Transaction creation failed — fall through to stateless + // 200 OK below (same behaviour as before this change). + tracing::warn!( + "Failed to create tsx for deferred REGISTER, falling back to stateless 200" + ); + } + } + } + + // Default path: stateless 200 OK + async verification + // (non-sipcord builds, auth cache unavailable, or tsx creation failed) + tracing::debug!( + "REGISTER with auth from {} (user={}), responding 200 OK (stateless)", + ip_str, + params.username + ); + if let Some(tx) = REGISTER_EVENT_TX.get() { + let _ = tx.try_send(RegisterRequest { + digest_auth: params, + contact_uri: contact_uri.unwrap_or_default(), + source_addr, + transport, + expires, + pending_tsx: None, + }); + } + send_register_ok(rdata, expires); + } else { + // No Authorization header - send 401 challenge + tracing::debug!( + "REGISTER without auth from {}, sending 401 challenge", + ip_str + ); + + let endpt = pjsua_get_pjsip_endpt(); + if endpt.is_null() { + tracing::error!("Failed to get PJSIP endpoint for REGISTER 401 response"); + return pj_constants__PJ_TRUE as pj_bool_t; + } + + // Generate a cryptographically random nonce + let nonce = { + let bytes: [u8; 16] = rand::random(); + bytes + .iter() + .map(|b| format!("{:02x}", b)) + .collect::() + }; + + let www_auth = format!( + "Digest realm=\"{}\", nonce=\"{}\", algorithm=MD5, qop=\"auth\"", + SIP_REALM, nonce + ); + + // Create WWW-Authenticate header + let hdr_name = CString::new("WWW-Authenticate").unwrap(); + let hdr_value = CString::new(www_auth).unwrap(); 
+ + let pool = pjsua_pool_create(c"register_401".as_ptr(), 512, 512); + if pool.is_null() { + tracing::error!("Failed to create pool for REGISTER 401 response"); + return pj_constants__PJ_TRUE as pj_bool_t; + } + + let name = pj_str(hdr_name.as_ptr() as *mut c_char); + let value = pj_str(hdr_value.as_ptr() as *mut c_char); + let hdr = pjsip_generic_string_hdr_create(pool, &name, &value); + + if !hdr.is_null() { + let hdr_list = pj_pool_alloc(pool, std::mem::size_of::()) as *mut pjsip_hdr; + if !hdr_list.is_null() { + pj_list_init_hdr(hdr_list); + pj_list_insert_before(hdr_list as *mut pj_list_type, hdr as *mut pj_list_type); + + let status = pjsip_endpt_respond_stateless( + endpt, + rdata, + 401, + ptr::null(), + hdr_list, + ptr::null(), + ); + + if status != pj_constants__PJ_SUCCESS as i32 { + tracing::warn!("Failed to respond 401 to REGISTER: {}", status); + } + } + } + // Release pool — pjsip_endpt_respond_stateless clones headers internally + pj_pool_release(pool); + } + + // Return TRUE to indicate we handled this request + pj_constants__PJ_TRUE as pj_bool_t +} + +// Extraction helpers + +/// Extract Contact URI from REGISTER request +unsafe fn extract_contact_uri(rdata: *mut pjsip_rx_data) -> Option { + if rdata.is_null() { + return None; + } + + let msg = (*rdata).msg_info.msg; + if msg.is_null() { + return None; + } + + let contact_hdr = pjsip_msg_find_hdr(msg, pjsip_hdr_e_PJSIP_H_CONTACT, ptr::null_mut()) + as *const pjsip_contact_hdr; + + if contact_hdr.is_null() { + return None; + } + + let uri = (*contact_hdr).uri; + if uri.is_null() { + return None; + } + + // The Contact header URI is typically a pjsip_name_addr wrapping a pjsip_sip_uri. + // We must unwrap it via the vtable's p_get_uri (equivalent to pjsip_uri_get_uri() + // which is an inline C function not available through FFI). 
+ let uri_vptr = (*(uri as *const pjsip_uri)).vptr; + if uri_vptr.is_null() { + return None; + } + let get_uri_fn = (*uri_vptr).p_get_uri?; + let sip_uri_raw = get_uri_fn(uri as *mut std::os::raw::c_void); + if sip_uri_raw.is_null() { + return None; + } + let sip_uri = sip_uri_raw as *const pjsip_sip_uri; + if (*sip_uri).host.ptr.is_null() || (*sip_uri).host.slen <= 0 { + return None; + } + + let host = pj_str_to_string(&(*sip_uri).host); + let port = (*sip_uri).port; + let user = if !(*sip_uri).user.ptr.is_null() && (*sip_uri).user.slen > 0 { + Some(pj_str_to_string(&(*sip_uri).user)) + } else { + None + }; + + let uri_str = match (user, port) { + (Some(u), p) if p > 0 => format!("sip:{}@{}:{}", u, host, p), + (Some(u), _) => format!("sip:{}@{}", u, host), + (None, p) if p > 0 => format!("sip:{}:{}", host, p), + (None, _) => format!("sip:{}", host), + }; + + Some(uri_str) +} + +/// Extract Expires value from REGISTER request (header or Contact param) +unsafe fn extract_expires(rdata: *mut pjsip_rx_data) -> u32 { + if rdata.is_null() { + return 3600; + } + + let msg = (*rdata).msg_info.msg; + if msg.is_null() { + return 3600; + } + + // Try Expires header first + let expires_hdr = pjsip_msg_find_hdr(msg, pjsip_hdr_e_PJSIP_H_EXPIRES, ptr::null_mut()) + as *const pjsip_expires_hdr; + + if !expires_hdr.is_null() { + return (*expires_hdr).ivalue as u32; + } + + // Default + 3600 +} + +// Types + +/// Data passed to the async register verification task +#[derive(Debug)] +pub struct RegisterRequest { + pub digest_auth: DigestAuthParams, + pub contact_uri: String, + pub source_addr: Option, + pub transport: crate::services::registrar::SipTransport, + pub expires: u32, + /// When set, the async handler must send the auth result back via + /// `SipCommand::RespondRegister` so the pjsip thread can complete + /// the UAS transaction. 
+ pub pending_tsx: Option, +} diff --git a/wav/JoonaKouvolalainen.flac b/wav/JoonaKouvolalainen.flac new file mode 100644 index 0000000..bdb0788 Binary files /dev/null and b/wav/JoonaKouvolalainen.flac differ diff --git a/wav/connecting.wav b/wav/connecting.wav new file mode 100644 index 0000000..b3cc41b Binary files /dev/null and b/wav/connecting.wav differ diff --git a/wav/discord_join.wav b/wav/discord_join.wav new file mode 100644 index 0000000..2b9cdb7 Binary files /dev/null and b/wav/discord_join.wav differ diff --git a/wav/hold.flac b/wav/hold.flac new file mode 100644 index 0000000..b60625d Binary files /dev/null and b/wav/hold.flac differ diff --git a/wav/no_channel_map.mp3 b/wav/no_channel_map.mp3 new file mode 100644 index 0000000..c4a0f05 Binary files /dev/null and b/wav/no_channel_map.mp3 differ diff --git a/wav/no_channel_mapping.wav b/wav/no_channel_mapping.wav new file mode 100644 index 0000000..1c191ed Binary files /dev/null and b/wav/no_channel_mapping.wav differ diff --git a/wav/no_permissions.wav b/wav/no_permissions.wav new file mode 100644 index 0000000..d7af2a7 Binary files /dev/null and b/wav/no_permissions.wav differ diff --git a/wav/no_perms.mp3 b/wav/no_perms.mp3 new file mode 100644 index 0000000..a359956 Binary files /dev/null and b/wav/no_perms.mp3 differ diff --git a/wav/nokia.flac b/wav/nokia.flac new file mode 100644 index 0000000..0bca5ae Binary files /dev/null and b/wav/nokia.flac differ diff --git a/wav/serverisbusy.wav b/wav/serverisbusy.wav new file mode 100644 index 0000000..182d168 Binary files /dev/null and b/wav/serverisbusy.wav differ diff --git a/wav/unknown.wav b/wav/unknown.wav new file mode 100644 index 0000000..7b584a1 Binary files /dev/null and b/wav/unknown.wav differ diff --git a/wav/unknown_error.mp3 b/wav/unknown_error.mp3 new file mode 100644 index 0000000..81cd54b Binary files /dev/null and b/wav/unknown_error.mp3 differ