mirror of
https://github.com/markqvist/LXST.git
synced 2026-04-27 22:25:41 +00:00
Sync upstream
This commit is contained in:
parent
a0098cf74f
commit
571e9add63
11 changed files with 899 additions and 204 deletions
|
|
@ -44,18 +44,20 @@ class Mixer(LocalSource, LocalSink):
|
|||
def stop(self):
    # Request shutdown: clears the run flag that the mixer's worker loop
    # (not visible in this chunk) is expected to poll.
    self.should_run = False
|
||||
def set_source_max_frames(self, source, max_frames):
    # Create or resize the per-source frame queue to hold at most max_frames
    # entries. On resize, existing frames are carried over (deque keeps the
    # newest entries when shrinking). Guarded by insert_lock because other
    # methods mutate the same incoming_frames dict.
    with self.insert_lock:
        if not source in self.incoming_frames: self.incoming_frames[source] = deque(maxlen=max_frames)
        else: self.incoming_frames[source] = deque(self.incoming_frames[source], maxlen=max_frames)
|
||||
def can_receive(self, from_source):
    """Return True when a frame from *from_source* can be accepted, i.e.
    when the source has no queue yet or its queue holds fewer than
    MAX_FRAMES entries.

    Note: the original span contained a second, identical branch chain after
    the first one; since every branch of the first chain returns, the copy
    was unreachable dead code and has been removed.
    """
    if not from_source in self.incoming_frames: return True
    elif len(self.incoming_frames[from_source]) < self.MAX_FRAMES: return True
    else: return False
|
||||
def handle_frame(self, frame, source, decoded=False):
|
||||
with self.insert_lock:
|
||||
if not source in self.incoming_frames:
|
||||
self.incoming_frames[source] = deque(maxlen=self.MAX_FRAMES)
|
||||
self.incoming_frames[source] = deque(maxlen=self.MAX_FRAMES)
|
||||
|
||||
if not self.channels:
|
||||
self.channels = source.channels
|
||||
|
|
|
|||
1
LXST/Platforms/__init__.py
Normal file
1
LXST/Platforms/__init__.py
Normal file
|
|
@ -0,0 +1 @@
|
|||
from . import android
|
||||
0
LXST/Platforms/android/__init__.py
Normal file
0
LXST/Platforms/android/__init__.py
Normal file
707
LXST/Platforms/android/soundcard.py
Normal file
707
LXST/Platforms/android/soundcard.py
Normal file
|
|
@ -0,0 +1,707 @@
|
|||
import atexit
|
||||
import collections.abc
|
||||
import time
|
||||
import re
|
||||
import threading
|
||||
import numpy
|
||||
import RNS
|
||||
|
||||
# jnius provides JNI access to the Android media APIs; nothing below can work
# without it, so a failed import is logged and re-raised.
# Fix: log message previously read "Could load module" (missing "not").
if RNS.vendor.platformutils.get_platform() == "android":
    try: from jnius import autoclass, cast
    except Exception as e:
        RNS.log(f"Could not load module for native Java interface access on Android: {e}")
        raise e
|
||||
class _AndroidAudio:
|
||||
COMMUNICATION_MODE_TYPES = ["Internal Earpiece",
|
||||
"Bluetooth SCO",
|
||||
"BLE Headset",
|
||||
"Hearing Aid",
|
||||
"Wired Headphones",
|
||||
"Wired Headset"]
|
||||
|
||||
IGNORED_DEVICE_TYPES = ["Telephony",
|
||||
"Remote Submix"]
|
||||
|
||||
ADD_VIRT_RINGER_TYPES = ["Internal Speaker"]
|
||||
|
||||
DEFAULT_SINK = "Internal Speaker"
|
||||
FALLBACK_SINKS = ["Internal Speaker",
|
||||
"Ringer Speaker",
|
||||
"Hearing Aid",
|
||||
"Wired Headset",
|
||||
"USB Headset",
|
||||
"BLE Headset",
|
||||
"Bluetooth SCO",
|
||||
"BLE Speaker",
|
||||
"Bluetooth A2DP",
|
||||
"Wired Headphones",
|
||||
"Analog Line",
|
||||
"Digital Line",
|
||||
"USB Device",
|
||||
"USB Accessory",
|
||||
"HDMI"]
|
||||
|
||||
DEFAULT_SOURCE = "Internal Microphone"
|
||||
FALLBACK_SOURCES = ["Internal Microphone",
|
||||
"Hearing Aid",
|
||||
"Wired Headset",
|
||||
"USB Headset",
|
||||
"BLE Headset",
|
||||
"Bluetooth SCO",
|
||||
"Analog Line",
|
||||
"Digital Line",
|
||||
"USB Device",
|
||||
"USB Accessory"]
|
||||
|
||||
VIRTUAL_DEVICE_OFFSET = 0xFFFF
|
||||
|
||||
def __init__(self):
|
||||
self._client_name = None
|
||||
self.available_devices = []
|
||||
self.android_api_version = None
|
||||
try:
|
||||
self.android_api_version = autoclass('android.os.Build$VERSION').SDK_INT
|
||||
Context = autoclass('android.content.Context')
|
||||
activity = autoclass('org.kivy.android.PythonActivity').mActivity
|
||||
|
||||
if activity == None:
|
||||
RNS.log(f"Could not obtain application context, instance may be running in a service context.", RNS.LOG_DEBUG)
|
||||
android_service = autoclass('org.kivy.android.PythonService').mService
|
||||
activity = android_service.getApplication().getApplicationContext()
|
||||
if activity != None: RNS.log(f"Successfully obtained application context from service", RNS.LOG_DEBUG)
|
||||
|
||||
if activity == None:
|
||||
RNS.log(f"Failed to obtain application context for audio stream acquisition", RNS.LOG_ERROR)
|
||||
raise ValueError("No application context available for audio stream acquisition")
|
||||
|
||||
self.AudioManager = activity.getSystemService(autoclass("android.media.AudioManager"))
|
||||
self.AudioDeviceInfo = autoclass("android.media.AudioDeviceInfo")
|
||||
adi = self.AudioDeviceInfo
|
||||
|
||||
# Populate device type descriptions from JNI
|
||||
self.device_type_descriptions = {
|
||||
adi.TYPE_AUX_LINE: "Aux Line", # 0x13 - API level 23
|
||||
adi.TYPE_BLUETOOTH_A2DP: "Bluetooth A2DP", # 0x08 - API level 23
|
||||
adi.TYPE_BLUETOOTH_SCO: "Bluetooth SCO", # 0x07 - API level 23
|
||||
adi.TYPE_BUILTIN_EARPIECE: "Internal Earpiece", # 0x01 - API level 23
|
||||
adi.TYPE_BUILTIN_MIC: "Internal Microphone", # 0x0f - API level 23
|
||||
adi.TYPE_BUILTIN_SPEAKER: "Internal Speaker", # 0x02 - API level 23
|
||||
adi.TYPE_DOCK: "Dock", # 0x0d - API level 23
|
||||
adi.TYPE_FM: "FM", # 0x0e - API level 23
|
||||
adi.TYPE_FM_TUNER: "FM Tuner", # 0x10 - API level 23
|
||||
adi.TYPE_HDMI: "HDMI", # 0x09 - API level 23
|
||||
adi.TYPE_HDMI_ARC: "HDMI ARC", # 0x0a - API level 23
|
||||
adi.TYPE_IP: "IP", # 0x14 - API level 23
|
||||
adi.TYPE_LINE_ANALOG: "Analog Line", # 0x05 - API level 23
|
||||
adi.TYPE_LINE_DIGITAL: "Digital Line", # 0x06 - API level 23
|
||||
adi.TYPE_TELEPHONY: "Telephony", # 0x12 - API level 23
|
||||
adi.TYPE_TV_TUNER: "TV Tuner", # 0x11 - API level 23
|
||||
adi.TYPE_UNKNOWN: "Unknown", # 0x00 - API level 23
|
||||
adi.TYPE_USB_ACCESSORY: "USB Accessory", # 0x0c - API level 23
|
||||
adi.TYPE_USB_DEVICE: "USB Device", # 0x0b - API level 23
|
||||
adi.TYPE_WIRED_HEADPHONES: "Wired Headphones", # 0x04 - API level 23
|
||||
adi.TYPE_WIRED_HEADSET: "Wired Headset", # 0x03 - API level 23
|
||||
adi.TYPE_BUS: "Bus", # 0x15 - API level 24
|
||||
}
|
||||
|
||||
if self.android_api_version >= 26:
|
||||
self.device_type_descriptions[adi.TYPE_USB_HEADSET] = "USB Headset" # 0x16 - API level 26
|
||||
|
||||
if self.android_api_version >= 28:
|
||||
self.device_type_descriptions[adi.TYPE_HEARING_AID] = "Hearing Aid" # 0x17 - API level 28
|
||||
|
||||
if self.android_api_version >= 30:
|
||||
self.device_type_descriptions[adi.TYPE_BUILTIN_SPEAKER_SAFE] = "Ringer Speaker" # 0x18 - API level 30
|
||||
|
||||
if self.android_api_version >= 31:
|
||||
self.device_type_descriptions[adi.TYPE_BLE_HEADSET] = "BLE Headset" # 0x1a - API level 31
|
||||
self.device_type_descriptions[adi.TYPE_BLE_SPEAKER] = "BLE Speaker" # 0x1b - API level 31
|
||||
self.device_type_descriptions[adi.TYPE_HDMI_EARC] = "HDMI EARC" # 0x1d - API level 31
|
||||
self.device_type_descriptions[adi.TYPE_REMOTE_SUBMIX] = "Remote Submix" # 0x19 - API level 31
|
||||
|
||||
if self.android_api_version >= 33:
|
||||
self.device_type_descriptions[adi.TYPE_BLE_BROADCAST] = "BLE Broadcast" # 0x1e - API level 33
|
||||
|
||||
if self.android_api_version >= 34:
|
||||
self.device_type_descriptions[adi.TYPE_DOCK_ANALOG] = "Analog Dock" # 0x1f - API level 34
|
||||
|
||||
if self.android_api_version >= 36:
|
||||
self.device_type_descriptions[adi.TYPE_MULTICHANNEL_GROUP] = "Multichannel Group" # 0x20 - API level 36
|
||||
|
||||
added_ids = []
|
||||
available_devices = self.AudioManager.getAvailableCommunicationDevices()
|
||||
for device in available_devices:
|
||||
try:
|
||||
device_id = device.getId(); device_type = device.getType(); channel_counts = device.getChannelCounts()
|
||||
if len(channel_counts) == 0: channel_counts = [1, 2]
|
||||
if not device_id in added_ids:
|
||||
if 1 in channel_counts or 2 in channel_counts:
|
||||
type_description = self.device_type_descriptions[device_type] if device_type in self.device_type_descriptions else "Unrecognized"
|
||||
if not type_description in self.IGNORED_DEVICE_TYPES:
|
||||
d = {"id": device_id, "name": device.getProductName(), "type": device_type, "type_description": type_description, "channel_counts": channel_counts,
|
||||
"is_source": device.isSource(), "is_sink": device.isSink(), "is_comms": True, "is_virtual": False}
|
||||
added_ids.append(device_id)
|
||||
self.available_devices.append(d)
|
||||
|
||||
if type_description in self.ADD_VIRT_RINGER_TYPES:
|
||||
d = {"id": device_id+self.VIRTUAL_DEVICE_OFFSET, "name": device.getProductName(), "type": device_type, "type_description": "Ringer Speaker",
|
||||
"channel_counts": channel_counts, "is_source": device.isSource(), "is_sink": device.isSink(), "is_comms": False, "is_virtual": True}
|
||||
self.available_devices.append(d)
|
||||
|
||||
except Exception as e:
|
||||
RNS.log(f"An error occurred while mapping available communications devices: {e}", RNS.LOG_ERROR)
|
||||
RNS.trace_exception(e)
|
||||
|
||||
available_devices = self.AudioManager.getDevices(self.AudioManager.GET_DEVICES_ALL)
|
||||
for device in available_devices:
|
||||
try:
|
||||
device_id = device.getId(); device_type = device.getType(); channel_counts = device.getChannelCounts()
|
||||
if len(channel_counts) == 0: channel_counts = [1, 2]
|
||||
if not device_id in added_ids:
|
||||
if 1 in channel_counts or 2 in channel_counts:
|
||||
type_description = self.device_type_descriptions[device_type] if device_type in self.device_type_descriptions else "Unrecognized"
|
||||
if not type_description in self.IGNORED_DEVICE_TYPES:
|
||||
d = {"id": device_id, "name": device.getProductName(), "type": device_type, "type_description": type_description, "channel_counts": channel_counts,
|
||||
"is_source": device.isSource(), "is_sink": device.isSink(), "is_comms": False, "is_virtual": False}
|
||||
added_ids.append(device_id)
|
||||
self.available_devices.append(d)
|
||||
|
||||
except Exception as e:
|
||||
RNS.log(f"An error occurred while mapping available audio devices: {e}", RNS.LOG_ERROR)
|
||||
RNS.trace_exception(e)
|
||||
|
||||
# TODO: Remove debug
|
||||
# RNS.log(f"Discovered audio devices:", RNS.LOG_DEBUG)
|
||||
# for d in self.available_devices:
|
||||
# RNS.log(f" {d}", RNS.LOG_DEBUG)
|
||||
|
||||
except Exception as e:
|
||||
RNS.log(f"Error while initializing Android audio backend: {e}", RNS.LOG_ERROR)
|
||||
RNS.trace_exception(e)
|
||||
|
||||
def _shutdown(self): pass
|
||||
|
||||
@property
|
||||
def name(self): return self._client_name
|
||||
|
||||
@name.setter
|
||||
def name(self, name): self._client_name = name
|
||||
|
||||
@property
|
||||
def source_list(self):
|
||||
device_list = []
|
||||
for d in self.available_devices:
|
||||
if d["is_source"]:
|
||||
type_description = d["type_description"]; name = d["name"]; did = d["id"]
|
||||
device_list.append({"name": f"{type_description} {name}", "id": did})
|
||||
|
||||
return device_list
|
||||
|
||||
def source_info(self, source_id):
|
||||
for d in self.available_devices:
|
||||
if d["id"] == source_id:
|
||||
type_description = d["type_description"]; name = d["name"]; did = d["id"]
|
||||
if 2 in d["channel_counts"]: channels = 2
|
||||
elif 1 in d["channel_counts"]: channels = 1
|
||||
else: raise ValueError(f"Unsupported channel count on source {type_description} {name} ({source_id})")
|
||||
return {"latency": 0, "configured_latency": 0, "channels": channels, "name": f"{type_description} {name}", "device.class": "sound", "device.api": "JNI", "device.bus": "unknown"}
|
||||
|
||||
return None
|
||||
|
||||
@property
|
||||
def sink_list(self):
|
||||
device_list = []
|
||||
for d in self.available_devices:
|
||||
if d["is_sink"]:
|
||||
type_description = d["type_description"]; name = d["name"]; did = d["id"]
|
||||
device_list.append({"name": f"{type_description} {name}", "id": did})
|
||||
|
||||
return device_list
|
||||
|
||||
def sink_info(self, sink_id):
|
||||
for d in self.available_devices:
|
||||
if d["id"] == sink_id:
|
||||
type_description = d["type_description"]; name = d["name"]; did = d["id"]
|
||||
if 2 in d["channel_counts"]: channels = 2
|
||||
elif 1 in d["channel_counts"]: channels = 1
|
||||
else: raise ValueError(f"Unsupported channel count on source {type_description} {name} ({sink_id})")
|
||||
return {"latency": 0, "configured_latency": 0, "channels": channels, "name": f"{type_description} {name}", "device.class": "sound", "device.api": "JNI", "device.bus": "unknown"}
|
||||
|
||||
return None
|
||||
|
||||
@property
|
||||
def server_info(self):
|
||||
default_source_id = None
|
||||
default_sink_id = None
|
||||
|
||||
for d in self.available_devices:
|
||||
if d["type_description"] == self.DEFAULT_SOURCE:
|
||||
default_source_id = d["id"]
|
||||
break
|
||||
|
||||
if not default_source_id:
|
||||
RNS.log(f"Default sink not found, searching for fallback...", RNS.LOG_DEBUG)
|
||||
for fallback_source in self.FALLBACK_SOURCES:
|
||||
if default_source_id != None: break
|
||||
for d in self.available_devices:
|
||||
if d["is_source"] == True and d["type_description"] == fallback_source:
|
||||
RNS.log(f"Found fallback source: {fallback_source}", RNS.LOG_DEBUG)
|
||||
default_source_id = d["id"]
|
||||
break
|
||||
|
||||
for d in self.available_devices:
|
||||
if d["type_description"] == self.DEFAULT_SINK:
|
||||
default_sink_id = d["id"]
|
||||
break
|
||||
|
||||
if not default_sink_id:
|
||||
RNS.log(f"Default sink not found, searching for fallback...", RNS.LOG_DEBUG)
|
||||
for fallback_sink in self.FALLBACK_SINKS:
|
||||
if default_sink_id != None: break
|
||||
for d in self.available_devices:
|
||||
if d["is_sink"] == True and d["type_description"] == fallback_sink:
|
||||
RNS.log(f"Found fallback sink: {fallback_sink}", RNS.LOG_DEBUG)
|
||||
default_sink_id = d["id"]
|
||||
break
|
||||
|
||||
if not default_sink_id or not default_source_id: RNS.log(f"Failed to find default devices. Available devices on this system are: {self.available_devices}", RNS.LOG_ERROR)
|
||||
if not default_source_id: raise OSError("Could not determine default audio input device, no suitable device available")
|
||||
if not default_sink_id: raise OSError("Could not determine default audio output device, no suitable device available")
|
||||
info = {"server version": "1.0.0", "server name": "Android Audio", "default sink id": default_sink_id, "default source id": default_source_id}
|
||||
return info
|
||||
|
||||
# Module-level singleton backend; its shutdown hook is registered so any
# held audio resources are released at interpreter exit.
_audio = _AndroidAudio()
atexit.register(_audio._shutdown)
|
||||
|
||||
def all_speakers():
    """Return a _Speaker wrapper for every available sink device."""
    speakers = []
    for sink in _audio.sink_list:
        speakers.append(_Speaker(id=sink['id']))
    return speakers
|
||||
|
||||
def default_speaker():
    """Return a _Speaker for the backend's default output device."""
    sink_id = _audio.server_info["default sink id"]
    return get_speaker(sink_id)
|
||||
|
||||
def get_speaker(id, low_latency=False):
    """Resolve *id* against the sink list (exact, substring or fuzzy match)
    and return the corresponding _Speaker."""
    matched = _match_soundcard(id, _audio.sink_list)
    return _Speaker(id=matched['id'], low_latency=low_latency)
|
||||
|
||||
def all_microphones(include_loopback=False, exclude_monitors=True):
    """Return _Microphone wrappers for all sources.

    Passing exclude_monitors=False forces monitor/loopback devices to be
    included as well."""
    if not exclude_monitors:
        include_loopback = True
    mics = [_Microphone(id=entry['id']) for entry in _audio.source_list]
    if include_loopback:
        return mics
    return [m for m in mics if m._get_info()['device.class'] != 'monitor']
|
||||
|
||||
def default_microphone():
    """Return a _Microphone for the backend's default input device."""
    source_id = _audio.server_info['default source id']
    return get_microphone(source_id, include_loopback=True)
|
||||
|
||||
def get_microphone(id, include_loopback=False, exclude_monitors=True):
    """Resolve *id* against the source list (exact, substring or fuzzy match)
    and return the corresponding _Microphone."""
    if not exclude_monitors:
        include_loopback = True
    matched = _match_soundcard(id, _audio.source_list, include_loopback)
    return _Microphone(id=matched['id'])
|
||||
|
||||
def _match_soundcard(id, soundcards, include_loopback=False):
|
||||
soundcards_by_id = {soundcard['id']: soundcard for soundcard in soundcards}
|
||||
soundcards_by_name = {soundcard['name']: soundcard for soundcard in soundcards}
|
||||
|
||||
if id in soundcards_by_id: return soundcards_by_id[id]
|
||||
|
||||
for name, soundcard in soundcards_by_name.items():
|
||||
if id in name: return soundcard
|
||||
|
||||
pattern = ".*".join(id)
|
||||
for name, soundcard in soundcards_by_name.items():
|
||||
if re.match(pattern, name): return soundcard
|
||||
raise IndexError(f"no soundcard with id {id}")
|
||||
|
||||
# Read the backend client name (set via set_name).
def get_name(): return _audio.name
|
||||
|
||||
# Set the backend client name on the module singleton.
def set_name(name): _audio.name = name
|
||||
|
||||
|
||||
class _SoundCard:
|
||||
def __init__(self, *, id, low_latency=False):
|
||||
self._id = id
|
||||
self._low_latency = low_latency
|
||||
|
||||
@property
|
||||
def channels(self): return self._get_info()['channels']
|
||||
|
||||
@property
|
||||
def id(self): return self._id
|
||||
|
||||
@property
|
||||
def name(self): return self._get_info()['name']
|
||||
|
||||
def _get_info(self): return _audio.source_info(self._id)
|
||||
|
||||
|
||||
class _Speaker(_SoundCard):
    # Playback-side device wrapper; _get_info() resolves via the sink table.

    def __repr__(self):
        return '<Speaker {} ({} channels)>'.format(self.name, self.channels)

    def player(self, samplerate, channels=None, blocksize=None, low_latency=None):
        # Return an un-opened _Player for this sink; the caller is expected
        # to use it as a context manager (see play below).
        if channels is None: channels = self.channels
        return _Player(self._id, samplerate, channels, blocksize, low_latency)

    def play(self, data, samplerate, channels=None, blocksize=None):
        # One-shot convenience: open a player, write data, then close it.
        if channels is None: channels = self.channels
        with _Player(self._id, samplerate, channels, blocksize) as s: s.play(data)

    def _get_info(self): return _audio.sink_info(self._id)
|
||||
|
||||
|
||||
class _Microphone(_SoundCard):
    # Capture-side device wrapper; inherits the source-table _get_info().

    def __repr__(self):
        if self.isloopback: return '<Loopback {} ({} channels)>'.format(self.name, self.channels)
        else: return '<Microphone {} ({} channels)>'.format(self.name, self.channels)

    @property
    def isloopback(self):
        # Android backend exposes no loopback devices, so this is always False.
        return False

    def recorder(self, samplerate, channels=None, blocksize=None):
        # Return an un-opened _Recorder for this source; the caller is
        # expected to use it as a context manager (see record below).
        if channels is None: channels = self.channels
        return _Recorder(self._id, samplerate, channels, blocksize)

    def record(self, numframes, samplerate, channels=None, blocksize=None):
        # One-shot convenience: open a recorder, capture numframes, close it.
        if channels is None: channels = self.channels
        with _Recorder(self._id, samplerate, channels, blocksize) as r: return r.record(numframes)
|
||||
|
||||
|
||||
class _Stream:
|
||||
TYPE_MAP_FACTOR = numpy.iinfo("int16").max
|
||||
|
||||
def __init__(self, id, samplerate, channels, blocksize=None, name="outputstream", low_latency=None):
|
||||
self._id = id
|
||||
self._samplerate = samplerate
|
||||
self._name = name
|
||||
self._blocksize = blocksize
|
||||
self.channels = channels
|
||||
self.bit_depth = 16
|
||||
self.audio_track = None
|
||||
self.audio_record = None
|
||||
self.audio_mode = "normal"
|
||||
self.enabled_comms = False
|
||||
self.low_latency_allowed = low_latency
|
||||
|
||||
try:
|
||||
Context = autoclass('android.content.Context')
|
||||
activity = autoclass('org.kivy.android.PythonActivity').mActivity
|
||||
|
||||
if activity == None:
|
||||
RNS.log(f"Could not obtain application context, instance may be running in a service context.", RNS.LOG_DEBUG)
|
||||
android_service = autoclass('org.kivy.android.PythonService').mService
|
||||
activity = android_service.getApplication().getApplicationContext()
|
||||
if activity != None: RNS.log(f"Successfully obtained application context from service", RNS.LOG_DEBUG)
|
||||
|
||||
if activity == None:
|
||||
RNS.log(f"Failed to obtain application context for audio stream acquisition", RNS.LOG_ERROR)
|
||||
raise ValueError("No application context available for audio stream acquisition")
|
||||
|
||||
self.AudioManager = activity.getSystemService(autoclass("android.media.AudioManager"))
|
||||
self.AudioTrack = autoclass("android.media.AudioTrack")
|
||||
self.AudioFormat = autoclass("android.media.AudioFormat")
|
||||
self.AudioDeviceInfo = autoclass("android.media.AudioDeviceInfo")
|
||||
|
||||
self.audio_encoding = self.AudioFormat.ENCODING_PCM_16BIT
|
||||
self.audio_track_mode = self.AudioTrack.MODE_STREAM
|
||||
|
||||
target_device_info = None
|
||||
for d in _audio.available_devices:
|
||||
if d["id"] == self._id:
|
||||
target_device_info = d
|
||||
break
|
||||
|
||||
if not target_device_info:
|
||||
RNS.log(f"Could not acquire target audio device with ID {self._id}, using fallback", RNS.LOG_WARNING)
|
||||
self.audio_track_profile = self.AudioManager.STREAM_VOICE_CALL
|
||||
|
||||
else:
|
||||
self.audio_track_profile = self.AudioManager.STREAM_VOICE_CALL
|
||||
|
||||
# We can only select by sink for now, as Android insists on auto-
|
||||
# selecting matching sources in the setCommunicationDevice API
|
||||
if target_device_info["is_sink"]:
|
||||
target_device_id = target_device_info["id"]
|
||||
if target_device_info["is_virtual"]: target_device_id -= _audio.VIRTUAL_DEVICE_OFFSET
|
||||
available_devices = self.AudioManager.getAvailableCommunicationDevices()
|
||||
for device in available_devices:
|
||||
device_id = device.getId(); device_type = device.getType()
|
||||
if target_device_id == device_id:
|
||||
if _audio.android_api_version >= 34:
|
||||
RNS.log(f"Running on API level {_audio.android_api_version}, setting via setCommunicationDevice", RNS.LOG_DEBUG)
|
||||
if device_type in _audio.device_type_descriptions and _audio.device_type_descriptions[device_type] in _audio.COMMUNICATION_MODE_TYPES:
|
||||
self.AudioManager.setMode(self.AudioManager.MODE_IN_COMMUNICATION)
|
||||
self.audio_mode = "communication"
|
||||
self.enabled_comms = True
|
||||
RNS.log("Enabled communications audio mode", RNS.LOG_DEBUG)
|
||||
|
||||
elif target_device_info["type_description"] == "Ringer Speaker":
|
||||
self.AudioManager.setMode(self.AudioManager.MODE_NORMAL)
|
||||
self.audio_mode = "ringer"
|
||||
RNS.log("Enabled ringer audio mode", RNS.LOG_DEBUG)
|
||||
|
||||
else:
|
||||
self.AudioManager.setMode(self.AudioManager.MODE_NORMAL)
|
||||
self.audio_mode = "normal"
|
||||
RNS.log("Enabled nomal audio mode", RNS.LOG_DEBUG)
|
||||
|
||||
if self.AudioManager.setCommunicationDevice(device):
|
||||
RNS.log(f"Successfully configured communication device to: {device} / {device.getType()}", RNS.LOG_DEBUG)
|
||||
break
|
||||
|
||||
else:
|
||||
RNS.log(f"Running on API level {_audio.android_api_version}, setting via setSpeakerphoneOn", RNS.LOG_DEBUG)
|
||||
if device_type in _audio.device_type_descriptions and _audio.device_type_descriptions[device_type] in _audio.COMMUNICATION_MODE_TYPES:
|
||||
self.AudioManager.setMode(self.AudioManager.MODE_IN_COMMUNICATION)
|
||||
self.AudioManager.setSpeakerphoneOn(False)
|
||||
RNS.log("Enabled communications audio mode", RNS.LOG_DEBUG)
|
||||
else:
|
||||
# API levels < 34, we'll apparently have to set communications mode
|
||||
# no matter what, since otherwise the microphone will be muted.
|
||||
self.AudioManager.setMode(self.AudioManager.MODE_IN_COMMUNICATION)
|
||||
self.AudioManager.setSpeakerphoneOn(True)
|
||||
RNS.log("Enabled communications audio mode", RNS.LOG_DEBUG)
|
||||
|
||||
if self.channels == 1:
|
||||
self.audio_format_out = self.AudioFormat.CHANNEL_OUT_MONO
|
||||
self.audio_format_in = self.AudioFormat.CHANNEL_IN_MONO
|
||||
|
||||
elif self.channels == 2:
|
||||
self.audio_format_out = self.AudioFormat.CHANNEL_OUT_STEREO
|
||||
self.audio_format_in = self.AudioFormat.CHANNEL_IN_STEREO
|
||||
|
||||
else: raise ValueError(f"Unsupported channel count {channels} on Android audio backend")
|
||||
|
||||
self.min_buffer_playback = self.AudioTrack.getMinBufferSize(self._samplerate, self.audio_format_out, self.audio_encoding);
|
||||
self.min_buffer_recording = self.AudioTrack.getMinBufferSize(self._samplerate, self.audio_format_in, self.audio_encoding);
|
||||
self.bytes_per_sample = (self.bit_depth//8)*self.channels
|
||||
|
||||
self._samplerate = int(self.AudioManager.getProperty(self.AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE))
|
||||
self.optimal_frames_per_buffer = int(self.AudioManager.getProperty(self.AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER))
|
||||
|
||||
except Exception as e:
|
||||
RNS.log(f"Could not initialize Android audio context for {self}: {e}", RNS.LOG_ERROR)
|
||||
RNS.trace_exception(e)
|
||||
|
||||
def __enter__(self):
|
||||
if isinstance(self.channels, collections.abc.Iterable): channel_count = len(self.channels)
|
||||
elif isinstance(self.channels, int): channel_count = self.channels
|
||||
else: raise TypeError('channels must be iterable or integer')
|
||||
|
||||
numchannels = self.channels if isinstance(self.channels, int) else len(self.channels)
|
||||
self._connect_stream()
|
||||
if not self.audio_track and not self.audio_record:
|
||||
RNS.log(f"Failed to acquire audio stream for {self}", RNS.LOG_ERROR)
|
||||
return None
|
||||
|
||||
self.channels = numchannels
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
if self.audio_track:
|
||||
self.audio_track.stop()
|
||||
self.audio_track.release()
|
||||
|
||||
if self.audio_record:
|
||||
self.audio_record.stop()
|
||||
self.audio_record.release()
|
||||
|
||||
if self.enabled_comms:
|
||||
RNS.log(f"{self} clearing communication device", RNS.LOG_DEBUG)
|
||||
self.AudioManager.clearCommunicationDevice()
|
||||
|
||||
@property
|
||||
def latency(self):
|
||||
# TODO: Get actual stream latency via JNI here
|
||||
return 0.001
|
||||
|
||||
class _Player(_Stream):
    # Playback stream: builds an android.media.AudioTrack and writes
    # float frames converted to 16-bit PCM.

    def _connect_stream(self):
        # Build the AudioTrack via the builder APIs, choosing usage/content
        # attributes and performance mode from self.audio_mode (set in
        # _Stream.__init__).
        try:
            AudioAttributes = autoclass("android.media.AudioAttributes")
            AudioAttributesBuilder = autoclass("android.media.AudioAttributes$Builder")
            AudioFormat = autoclass("android.media.AudioFormat")
            AudioFormatBuilder = autoclass("android.media.AudioFormat$Builder")
            AudioTrack = autoclass("android.media.AudioTrack")
            AudioTrackBuilder = autoclass("android.media.AudioTrack$Builder")

            # Playback/pacing state used by play() and the low-latency path
            self._target_buffer_samples = None
            self._play_engaged = False
            self._low_latency = False
            self._low_latency_activated = False
            self._successful_buffer_frames = None
            self._last_underruns = 0
            self._write_mode = AudioTrack.WRITE_BLOCKING
            self._sample_time = 1.0/self._samplerate
            self._target_buffer_ms = 125
            self._overrun_wait = (self._target_buffer_ms*0.1)/1000
            self._overrun_lock = 0

            aa_builder = AudioAttributesBuilder()
            if self.audio_mode == "normal":
                RNS.log(f"Enabling stream properties for normal mode", RNS.LOG_DEBUG)
                aa_builder.setUsage(AudioAttributes.USAGE_MEDIA)
                aa_builder.setContentType(AudioAttributes.CONTENT_TYPE_UNKNOWN)
                self.performance_mode = AudioTrack.PERFORMANCE_MODE_LOW_LATENCY
                if self.low_latency_allowed: self._low_latency = True

            elif self.audio_mode == "communication":
                RNS.log(f"Enabling stream properties for communication mode", RNS.LOG_DEBUG)
                aa_builder.setUsage(AudioAttributes.USAGE_VOICE_COMMUNICATION)
                aa_builder.setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
                self.performance_mode = AudioTrack.PERFORMANCE_MODE_LOW_LATENCY
                if self.low_latency_allowed: self._low_latency = True

            elif self.audio_mode == "ringer":
                RNS.log(f"Enabling stream properties for ringer mode", RNS.LOG_DEBUG)
                aa_builder.setUsage(AudioAttributes.USAGE_NOTIFICATION_RINGTONE)
                aa_builder.setContentType(AudioAttributes.CONTENT_TYPE_UNKNOWN)
                self.performance_mode = AudioTrack.PERFORMANCE_MODE_NONE

            else:
                RNS.log(f"Enabling stream properties for non-specific mode", RNS.LOG_DEBUG)
                aa_builder.setUsage(AudioAttributes.USAGE_MEDIA)
                aa_builder.setContentType(AudioAttributes.CONTENT_TYPE_UNKNOWN)
                self.performance_mode = AudioTrack.PERFORMANCE_MODE_NONE

            # Keep other apps from capturing this output
            aa_builder.setAllowedCapturePolicy(AudioAttributes.ALLOW_CAPTURE_BY_NONE)
            self.audio_attributes = aa_builder.build()

            af_builder = AudioFormatBuilder()
            af_builder.setSampleRate(int(self._samplerate))
            af_builder.setEncoding(self.audio_encoding)
            af_builder.setChannelMask(self.audio_format_out)
            self.audio_format = af_builder.build()

            at_builder = AudioTrackBuilder()
            at_builder.setAudioAttributes(self.audio_attributes)
            at_builder.setAudioFormat(self.audio_format)
            at_builder.setBufferSizeInBytes(self.min_buffer_playback)
            at_builder.setPerformanceMode(self.performance_mode)
            self.audio_track = at_builder.build()

            if self._low_latency: self._low_latency_setup()

        except Exception as e:
            RNS.log(f"Error while connecting output audio stream via JNI: {e}", RNS.LOG_ERROR)
            RNS.trace_exception(e)

    def enable_low_latency(self):
        # Switch an already-connected track to the small-buffer, non-blocking
        # write regime. Safe to call after playback has started.
        self.low_latency_allowed = True
        self._low_latency = True
        self._low_latency_setup()
        if self.audio_track and self._play_engaged:
            self.audio_track.setBufferSizeInFrames(self._target_buffer_samples)

    def _low_latency_setup(self):
        # Shrink the track buffer to ~_target_buffer_ms and use non-blocking
        # writes, pacing manually in play().
        AudioTrack = autoclass("android.media.AudioTrack")
        self._write_mode = AudioTrack.WRITE_NON_BLOCKING
        self._target_buffer_samples = int((self._target_buffer_ms/1000.0)/(1.0/self._samplerate))
        self.audio_track.setBufferSizeInFrames(self._target_buffer_samples)
        self._low_latency_activated = True

    def play(self, frame):
        # Convert a float frame in [-1, 1] to int16 PCM and write it to the
        # AudioTrack, looping until the whole frame is consumed.
        if not self.audio_track: return

        input_samples = frame*self.TYPE_MAP_FACTOR
        data = input_samples.astype(numpy.int16)

        if data.ndim == 1: data = data[:, None] # Force 2D array
        if data.ndim != 2: raise TypeError(f"data must be 1d or 2d, not {data.ndim}d")
        # Mono input is duplicated across all output channels
        if data.shape[1] == 1 and self.channels != 1: data = numpy.tile(data, [1, self.channels])
        if data.shape[1] != self.channels: raise TypeError(f"second dimension of data must be equal to the number of channels, not {data.shape[1]}")

        while data.nbytes > 0:
            samples_bytes = data.ravel().tobytes()
            written_bytes = self.audio_track.write(samples_bytes, 0, len(samples_bytes), self._write_mode)
            written_samples = written_bytes//self.bytes_per_sample

            if self._low_latency_activated:
                # Pace non-blocking writes: sleep a fraction of the written
                # duration, and hold an overrun lockout window.
                if written_samples > 0:
                    written_time = written_samples*self._sample_time
                    min_wait = written_time*0.25
                    self._overrun_lock = time.time()+(written_time*1.0)
                    time.sleep(min_wait)

                # Nothing accepted past the lockout window: drop the rest of
                # the frame rather than stalling the caller.
                if written_bytes == 0:
                    if time.time() > self._overrun_lock:
                        remaining_frame_samples = len(data)
                        written_samples = remaining_frame_samples
                        # TODO: Remove debug
                        # RNS.log(f"Buffer overrun. Target buffer samples {self._target_buffer_samples}. Needed to write {remaining_frame_samples} samples / {len(samples_bytes)} bytes. Discarding {written_samples} input samples.")

            data = data[written_samples:]

            # Start playback after the first write so the buffer is primed
            if not self._play_engaged:
                self.audio_track.play()
                self._play_engaged = True
                if self._target_buffer_samples: self.audio_track.setBufferSizeInFrames(self._target_buffer_samples)

            underruns = self.audio_track.getUnderrunCount()
            if underruns > self._last_underruns:
                delta = underruns-self._last_underruns
                self._last_underruns = underruns
                # TODO: Remove debug
                # RNS.log(f"{delta} underruns on {self}")
|
||||
|
||||
class _Recorder(_Stream):
    """Capture-side stream backed by an android.media.AudioRecord instance.

    Reads raw 16-bit PCM from the JNI audio layer non-blockingly,
    converts it to float32 samples and hands it out in caller-sized
    frames via record(). Surplus samples are retained in
    self._pending_chunk between calls.
    """

    def __init__(self, *args, **kwargs):
        super(_Recorder, self).__init__(*args, **kwargs)
        self.AudioRecord = autoclass("android.media.AudioRecord")
        # Samples already captured from the device but not yet consumed
        # by record(); interleaved float32, flat (not per-channel) shape.
        self._pending_chunk = numpy.zeros((0, ), dtype='float32')

    def _connect_stream(self):
        """Create and start the underlying AudioRecord capture stream."""
        try:
            AudioSource = autoclass("android.media.MediaRecorder$AudioSource")

            self.audio_record = self.AudioRecord(AudioSource.VOICE_COMMUNICATION, self._samplerate, self.audio_format_in, self.audio_encoding, self.min_buffer_recording)
            self.audio_record.startRecording()

        except Exception as e:
            RNS.log(f"Error while connecting input audio stream via JNI: {e}", RNS.LOG_ERROR)
            RNS.trace_exception(e)

    def _record_chunk(self):
        """Read one non-blocking chunk from the device.

        Returns a flat float32 numpy array of captured samples (possibly
        empty when no data was available), or None if a JNI error
        occurred while reading.
        """
        try:
            audio_data = bytearray(self.min_buffer_recording)
            bytes_read = self.audio_record.read(audio_data, 0, self.min_buffer_recording, self.audio_record.READ_NON_BLOCKING)
            # Back off briefly when no data was available, so polling
            # callers don't spin at full speed.
            if bytes_read == 0: time.sleep(0.005)

            if bytes_read == self.audio_record.ERROR_INVALID_OPERATION:
                RNS.log(f"Invalid operation error from JNI on {self}", RNS.LOG_ERROR)
                # Explicitly signal the failed read (previously fell through
                # and returned None implicitly)
                return None
            elif bytes_read == self.audio_record.ERROR_BAD_VALUE:
                RNS.log(f"Bad value error from JNI on {self}", RNS.LOG_ERROR)
                return None
            else:
                # Convert 16-bit PCM to float32; TYPE_MAP_FACTOR scales
                # int16 full-scale to the float range used internally.
                recorded_samples = numpy.frombuffer(audio_data[:bytes_read], dtype="int16")/self.TYPE_MAP_FACTOR
                return recorded_samples.astype("float32")

        except Exception as e:
            RNS.log(f"Error while reading audio chunk: {e}", RNS.LOG_ERROR)
            RNS.trace_exception(e)
            return None

    def record(self, numframes=None):
        """Return captured audio as a [frames, channels] float32 array.

        With numframes=None, returns whatever is pending plus one freshly
        captured chunk. Otherwise polls until at least numframes frames
        are available and returns exactly that many, keeping any surplus
        in self._pending_chunk for the next call.
        """
        if numframes is None:
            chunk = self._record_chunk()
            # _record_chunk() returns None on read errors; substitute an
            # empty chunk so callers always receive a well-formed array
            # instead of a TypeError from numpy.concatenate.
            if chunk is None: chunk = numpy.zeros((0, ), dtype="float32")
            return numpy.reshape(numpy.concatenate([self.flush().ravel(), chunk]), [-1, self.channels])
        else:
            captured_data = [self._pending_chunk]
            captured_frames = self._pending_chunk.shape[0] / self.channels
            if captured_frames >= numframes:
                # Enough pending samples already; split off exactly the
                # requested amount and keep the remainder pending.
                keep, self._pending_chunk = numpy.split(self._pending_chunk, [int(numframes * self.channels)])
                return numpy.reshape(keep, [-1, self.channels])

            else:
                while captured_frames < numframes:
                    chunk = self._record_chunk()
                    # Skip failed reads instead of crashing on a None
                    # chunk in len()/concatenate below.
                    if chunk is None: continue
                    captured_data.append(chunk)
                    captured_frames += len(chunk)/self.channels

                # Trim the surplus off the final chunk and retain it.
                to_split = int(len(chunk) - (captured_frames - numframes) * self.channels)
                captured_data[-1], self._pending_chunk = numpy.split(captured_data[-1], [to_split])
                return numpy.reshape(numpy.concatenate(captured_data), [-1, self.channels])

    def flush(self):
        """Return and clear all pending samples as a [frames, channels] array."""
        last_chunk = numpy.reshape(self._pending_chunk, [-1, self.channels])
        self._pending_chunk = numpy.zeros((0, ), dtype="float32")
        return last_chunk
|
||||
|
|
@ -77,6 +77,7 @@ class Telephone(SignallingReceiver):
|
|||
self.speaker_device = None
|
||||
self.microphone_device = None
|
||||
self.ringer_device = None
|
||||
self.low_latency_output = False
|
||||
|
||||
threading.Thread(target=self.__jobs, daemon=True).start()
|
||||
RNS.log(f"{self} listening on {RNS.prettyhexrep(self.destination.hash)}", RNS.LOG_DEBUG)
|
||||
|
|
@ -134,6 +135,14 @@ class Telephone(SignallingReceiver):
|
|||
self.ringtone_gain = gain
|
||||
RNS.log(f"{self} ringtone set to {self.ringtone_path}", RNS.LOG_DEBUG)
|
||||
|
||||
def set_low_latency_output(self, enabled):
    """Enable or disable low-latency audio output for subsequent calls."""
    self.low_latency_output = True if enabled else False
    state = "enabled" if self.low_latency_output else "disabled"
    RNS.log(f"{self} low-latency output {state}", RNS.LOG_DEBUG)
|
||||
|
||||
def __jobs(self):
|
||||
while self.destination != None:
|
||||
time.sleep(self.JOB_INTERVAL)
|
||||
|
|
@ -251,6 +260,7 @@ class Telephone(SignallingReceiver):
|
|||
self.__start_pipelines()
|
||||
RNS.log(f"Call setup complete for {RNS.prettyhexrep(identity.hash)}", RNS.LOG_DEBUG)
|
||||
if callable(self.__established_callback): self.__established_callback(self.active_call.get_remote_identity())
|
||||
if self.low_latency_output: self.audio_output.enable_low_latency()
|
||||
return True
|
||||
|
||||
def hangup(self):
|
||||
|
|
@ -393,12 +403,12 @@ class Telephone(SignallingReceiver):
|
|||
self.__prepare_dialling_pipelines()
|
||||
self.transmit_mixer = Mixer(target_frame_ms=self.target_frame_time_ms)
|
||||
self.audio_input = LineSource(preferred_device=self.microphone_device, target_frame_ms=self.target_frame_time_ms, codec=Raw(), sink=self.transmit_mixer)
|
||||
# self.audio_input = OpusFileSource("/home/markqvist/Information/Source/LXST/docs/425.opus", loop=True, target_frame_ms=self.target_frame_time_ms, codec=Raw(), sink=self.transmit_mixer, timed=True)
|
||||
self.transmit_pipeline = Pipeline(source=self.transmit_mixer,
|
||||
codec=self.transmit_codec,
|
||||
sink=Packetizer(self.active_call, failure_callback=self.__packetizer_failure))
|
||||
|
||||
self.active_call.audio_source = LinkSource(link=self.active_call, signalling_receiver=self, sink=self.receive_mixer)
|
||||
self.receive_mixer.set_source_max_frames(self.active_call.audio_source, 2)
|
||||
|
||||
self.signal(Signalling.STATUS_ESTABLISHED, self.active_call)
|
||||
|
||||
|
|
@ -496,6 +506,7 @@ class Telephone(SignallingReceiver):
|
|||
RNS.log(f"Call setup complete for {RNS.prettyhexrep(self.active_call.get_remote_identity().hash)}", RNS.LOG_DEBUG)
|
||||
self.call_status = signal
|
||||
if callable(self.__established_callback): self.__established_callback(self.active_call.get_remote_identity())
|
||||
if self.low_latency_output: self.audio_output.enable_low_latency()
|
||||
|
||||
def __str__(self):
|
||||
return f"<lxst.telephony/{RNS.hexrep(self.identity.hash, delimit=False)}>"
|
||||
|
|
@ -17,14 +17,33 @@ class LinuxBackend():
|
|||
else: self.device = soundcard.default_speaker()
|
||||
RNS.log(f"Using output device {self.device}", RNS.LOG_DEBUG)
|
||||
|
||||
def flush(self):
|
||||
self.recorder.flush()
|
||||
def flush(self): self.recorder.flush()
|
||||
|
||||
def get_player(self, samples_per_frame=None):
|
||||
def get_player(self, samples_per_frame=None, low_latency=None):
|
||||
return self.device.player(samplerate=self.samplerate, blocksize=samples_per_frame)
|
||||
|
||||
def release_player(self): pass
|
||||
|
||||
class AndroidBackend():
    """Audio output backend for Android, using the bundled soundcard shim."""

    SAMPLERATE = 48000

    def __init__(self, preferred_device=None, samplerate=SAMPLERATE):
        # Imported lazily so non-Android platforms never load the JNI shim.
        from .Platforms.android import soundcard
        self.samplerate = samplerate
        self.soundcard = soundcard
        if preferred_device:
            # Narrowed from a bare except: a bare clause would also swallow
            # KeyboardInterrupt/SystemExit. Fall back to the default speaker
            # when the preferred device cannot be resolved.
            try: self.device = self.soundcard.get_speaker(preferred_device)
            except Exception: self.device = soundcard.default_speaker()
        else: self.device = soundcard.default_speaker()
        RNS.log(f"Using output device {self.device}", RNS.LOG_DEBUG)

    # NOTE(review): self.recorder is never assigned on this output backend
    # (the same pattern exists on the other sink backends) — calling flush()
    # would raise AttributeError; confirm whether it is ever invoked.
    def flush(self): self.recorder.flush()

    def get_player(self, samples_per_frame=None, low_latency=None):
        """Return a player context for this device at the backend samplerate."""
        return self.device.player(samplerate=self.samplerate, blocksize=samples_per_frame, low_latency=low_latency)

    def release_player(self): pass
|
||||
|
||||
class DarwinBackend():
|
||||
SAMPLERATE = 48000
|
||||
|
||||
|
|
@ -38,10 +57,9 @@ class DarwinBackend():
|
|||
else: self.device = soundcard.default_speaker()
|
||||
RNS.log(f"Using output device {self.device}", RNS.LOG_DEBUG)
|
||||
|
||||
def flush(self):
|
||||
self.recorder.flush()
|
||||
def flush(self): self.recorder.flush()
|
||||
|
||||
def get_player(self, samples_per_frame=None):
|
||||
def get_player(self, samples_per_frame=None, low_latency=None):
|
||||
return self.device.player(samplerate=self.samplerate, blocksize=samples_per_frame)
|
||||
|
||||
def release_player(self): pass
|
||||
|
|
@ -62,46 +80,36 @@ class WindowsBackend():
|
|||
else: self.device = soundcard.default_speaker()
|
||||
RNS.log(f"Using output device {self.device}", RNS.LOG_DEBUG)
|
||||
|
||||
def flush(self):
|
||||
self.recorder.flush()
|
||||
def flush(self): self.recorder.flush()
|
||||
|
||||
def get_player(self, samples_per_frame=None):
|
||||
def get_player(self, samples_per_frame=None, low_latency=None):
|
||||
self.com_init(0)
|
||||
return self.device.player(samplerate=self.samplerate, blocksize=samples_per_frame)
|
||||
|
||||
def release_player(self): self.com_release()
|
||||
|
||||
def get_backend():
|
||||
if RNS.vendor.platformutils.is_linux():
|
||||
return LinuxBackend
|
||||
elif RNS.vendor.platformutils.is_windows():
|
||||
return WindowsBackend
|
||||
elif RNS.vendor.platformutils.is_darwin():
|
||||
return DarwinBackend
|
||||
else:
|
||||
return None
|
||||
if RNS.vendor.platformutils.is_linux(): return LinuxBackend
|
||||
elif RNS.vendor.platformutils.is_windows(): return WindowsBackend
|
||||
elif RNS.vendor.platformutils.is_darwin(): return DarwinBackend
|
||||
elif RNS.vendor.platformutils.is_android(): return AndroidBackend
|
||||
else: return None
|
||||
|
||||
Backend = get_backend()
|
||||
|
||||
class Sink():
|
||||
def handle_frame(self, frame, source):
|
||||
pass
|
||||
def handle_frame(self, frame, source): pass
|
||||
def can_receive(self, from_source=None): return True
|
||||
|
||||
def can_receive(self, from_source=None):
|
||||
return True
|
||||
|
||||
class RemoteSink(Sink):
|
||||
pass
|
||||
|
||||
class LocalSink(Sink):
|
||||
pass
|
||||
class RemoteSink(Sink): pass
|
||||
class LocalSink(Sink): pass
|
||||
|
||||
class LineSink(LocalSink):
|
||||
MAX_FRAMES = 6
|
||||
AUTOSTART_MIN = 1
|
||||
FRAME_TIMEOUT = 8
|
||||
|
||||
def __init__(self, preferred_device=None, autodigest=True):
|
||||
def __init__(self, preferred_device=None, autodigest=True, low_latency=False):
|
||||
self.preferred_device = preferred_device
|
||||
self.frame_deque = deque(maxlen=self.MAX_FRAMES)
|
||||
self.should_run = False
|
||||
|
|
@ -114,6 +122,7 @@ class LineSink(LocalSink):
|
|||
self.autodigest = autodigest
|
||||
self.autostart_min = self.AUTOSTART_MIN
|
||||
self.buffer_max_height = self.MAX_FRAMES-3
|
||||
self.low_latency = low_latency
|
||||
|
||||
self.preferred_samplerate = Backend.SAMPLERATE
|
||||
self.backend = Backend(preferred_device=self.preferred_device, samplerate=self.preferred_samplerate)
|
||||
|
|
@ -124,13 +133,13 @@ class LineSink(LocalSink):
|
|||
self.frame_time = None
|
||||
self.output_latency = 0
|
||||
self.max_latency = 0
|
||||
|
||||
self.__wants_low_latency = False
|
||||
|
||||
def can_receive(self, from_source=None):
|
||||
with self.insert_lock:
|
||||
if len(self.frame_deque) < self.buffer_max_height:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
if len(self.frame_deque) < self.buffer_max_height: return True
|
||||
else: return False
|
||||
|
||||
def handle_frame(self, frame, source=None):
|
||||
with self.insert_lock:
|
||||
|
|
@ -142,8 +151,7 @@ class LineSink(LocalSink):
|
|||
RNS.log(f"{self} starting at {self.samples_per_frame} samples per frame, {self.channels} channels", RNS.LOG_DEBUG)
|
||||
|
||||
if self.autodigest and not self.should_run:
|
||||
if len(self.frame_deque) >= self.autostart_min:
|
||||
self.start()
|
||||
if len(self.frame_deque) >= self.autostart_min: self.start()
|
||||
|
||||
def start(self):
|
||||
if not self.should_run:
|
||||
|
|
@ -154,18 +162,21 @@ class LineSink(LocalSink):
|
|||
def stop(self):
|
||||
self.should_run = False
|
||||
|
||||
def enable_low_latency(self):
|
||||
self.__wants_low_latency = True
|
||||
|
||||
def __digest_job(self):
|
||||
with self.digest_lock:
|
||||
if not RNS.vendor.platformutils.is_darwin(): backend_samples_per_frame = self.samples_per_frame
|
||||
else: backend_samples_per_frame = None
|
||||
|
||||
with self.backend.get_player(samples_per_frame=backend_samples_per_frame) as player:
|
||||
with self.backend.get_player(samples_per_frame=backend_samples_per_frame, low_latency=self.low_latency) as player:
|
||||
while self.should_run:
|
||||
frames_ready = len(self.frame_deque)
|
||||
if frames_ready:
|
||||
self.output_latency = len(self.frame_deque)*self.frame_time
|
||||
self.max_latency = self.buffer_max_height*self.frame_time
|
||||
self.underrun_at = None
|
||||
self.underrun_at = None
|
||||
|
||||
with self.insert_lock: frame = self.frame_deque.popleft()
|
||||
if frame.shape[1] > self.channels: frame = frame[:, 0:self.channels]
|
||||
|
|
@ -184,10 +195,16 @@ class LineSink(LocalSink):
|
|||
if time.time() > self.underrun_at+(self.frame_time*self.frame_timeout):
|
||||
RNS.log(f"No frames available on {self}, stopping playback", RNS.LOG_DEBUG)
|
||||
self.should_run = False
|
||||
else:
|
||||
time.sleep(self.frame_time*0.1)
|
||||
else: time.sleep(self.frame_time*0.1)
|
||||
|
||||
if self.__wants_low_latency:
|
||||
self.__wants_low_latency = False
|
||||
if hasattr(player, "enable_low_latency") and callable(player.enable_low_latency):
|
||||
RNS.log(f"Run-time enabling low-latency mode on {self}", RNS.LOG_DEBUG)
|
||||
player.enable_low_latency()
|
||||
else:
|
||||
RNS.log(f"Could not run-time enable low latency mode on {self}, the operation is not supported by the backend", RNS.LOG_DEBUG)
|
||||
|
||||
self.backend.release_player()
|
||||
|
||||
class PacketSink(RemoteSink):
|
||||
pass
|
||||
class PacketSink(RemoteSink): pass
|
||||
110
LXST/Sources.py
110
LXST/Sources.py
|
|
@ -24,8 +24,29 @@ class LinuxBackend():
|
|||
self.bitdepth = 32
|
||||
RNS.log(f"Using input device {self.device}", RNS.LOG_DEBUG)
|
||||
|
||||
def flush(self):
|
||||
self.recorder.flush()
|
||||
def flush(self): self.recorder.flush()
|
||||
|
||||
def get_recorder(self, samples_per_frame):
|
||||
return self.device.recorder(samplerate=self.SAMPLERATE, blocksize=samples_per_frame)
|
||||
|
||||
def release_recorder(self): pass
|
||||
|
||||
class AndroidBackend():
    """Audio input backend for Android, using the bundled soundcard shim."""

    SAMPLERATE = 48000

    def __init__(self, preferred_device=None, samplerate=SAMPLERATE):
        # Imported lazily so non-Android platforms never load the JNI shim.
        from .Platforms.android import soundcard
        self.samplerate = samplerate
        self.soundcard = soundcard
        if preferred_device:
            # Narrowed from a bare except: a bare clause would also swallow
            # KeyboardInterrupt/SystemExit. Fall back to the default
            # microphone when the preferred device cannot be resolved.
            try: self.device = self.soundcard.get_microphone(preferred_device)
            except Exception: self.device = self.soundcard.default_microphone()
        else: self.device = self.soundcard.default_microphone()
        self.channels = self.device.channels
        self.bitdepth = 32
        RNS.log(f"Using input device {self.device}", RNS.LOG_DEBUG)

    def flush(self): self.recorder.flush()

    def get_recorder(self, samples_per_frame):
        """Return a recorder context for this device.

        Uses self.samplerate (the constructor argument) rather than the
        class constant, so a non-default samplerate passed to __init__ is
        honoured; the default is identical to SAMPLERATE.
        """
        return self.device.recorder(samplerate=self.samplerate, blocksize=samples_per_frame)
|
||||
|
|
@ -47,8 +68,7 @@ class DarwinBackend():
|
|||
self.bitdepth = 32
|
||||
RNS.log(f"Using input device {self.device}", RNS.LOG_DEBUG)
|
||||
|
||||
def flush(self):
|
||||
self.recorder.flush()
|
||||
def flush(self): self.recorder.flush()
|
||||
|
||||
def get_recorder(self, samples_per_frame):
|
||||
return self.device.recorder(samplerate=self.SAMPLERATE, blocksize=samples_per_frame)
|
||||
|
|
@ -73,8 +93,7 @@ class WindowsBackend():
|
|||
self.bitdepth = 32
|
||||
RNS.log(f"Using input device {self.device}", RNS.LOG_DEBUG)
|
||||
|
||||
def flush(self):
|
||||
self.recorder.flush()
|
||||
def flush(self): self.recorder.flush()
|
||||
|
||||
def get_recorder(self, samples_per_frame):
|
||||
self.com_init(0)
|
||||
|
|
@ -83,25 +102,17 @@ class WindowsBackend():
|
|||
def release_recorder(self): self.com_release()
|
||||
|
||||
def get_backend():
|
||||
if RNS.vendor.platformutils.is_linux():
|
||||
return LinuxBackend
|
||||
elif RNS.vendor.platformutils.is_windows():
|
||||
return WindowsBackend
|
||||
elif RNS.vendor.platformutils.is_darwin():
|
||||
return DarwinBackend
|
||||
else:
|
||||
return None
|
||||
if RNS.vendor.platformutils.is_linux(): return LinuxBackend
|
||||
elif RNS.vendor.platformutils.is_windows(): return WindowsBackend
|
||||
elif RNS.vendor.platformutils.is_darwin(): return DarwinBackend
|
||||
elif RNS.vendor.platformutils.is_android(): return AndroidBackend
|
||||
else: return None
|
||||
|
||||
Backend = get_backend()
|
||||
|
||||
class Source():
|
||||
pass
|
||||
|
||||
class LocalSource(Source):
|
||||
pass
|
||||
|
||||
class RemoteSource(Source):
|
||||
pass
|
||||
class Source(): pass
|
||||
class LocalSource(Source): pass
|
||||
class RemoteSource(Source): pass
|
||||
|
||||
class Loopback(LocalSource, LocalSink):
|
||||
MAX_FRAMES = 128
|
||||
|
|
@ -120,14 +131,11 @@ class Loopback(LocalSource, LocalSink):
|
|||
RNS.log(f"{self} starting", RNS.LOG_DEBUG)
|
||||
self.should_run = True
|
||||
|
||||
def stop(self):
|
||||
self.should_run = False
|
||||
def stop(self): self.should_run = False
|
||||
|
||||
def can_receive(self, from_source=None):
|
||||
if self._sink:
|
||||
return self._sink.can_receive(from_source)
|
||||
else:
|
||||
return True
|
||||
if self._sink: return self._sink.can_receive(from_source)
|
||||
else: return True
|
||||
|
||||
def handle_frame(self, frame, source):
|
||||
with self.loopback_lock:
|
||||
|
|
@ -135,12 +143,10 @@ class Loopback(LocalSource, LocalSink):
|
|||
self.sink.handle_frame(self.codec.decode(frame), self)
|
||||
|
||||
@property
|
||||
def source(self):
|
||||
return self._source
|
||||
def source(self): return self._source
|
||||
|
||||
@source.setter
|
||||
def source(self, source):
|
||||
self._source = source
|
||||
def source(self, source): self._source = source
|
||||
|
||||
class LineSource(LocalSource):
|
||||
MAX_FRAMES = 128
|
||||
|
|
@ -161,22 +167,17 @@ class LineSource(LocalSource):
|
|||
self.sink = sink
|
||||
|
||||
@property
|
||||
def codec(self):
|
||||
return self._codec
|
||||
def codec(self): return self._codec
|
||||
|
||||
@codec.setter
|
||||
def codec(self, codec):
|
||||
if codec == None:
|
||||
self._codec = None
|
||||
elif not issubclass(type(codec), Codec):
|
||||
raise CodecError(f"Invalid codec specified for {self}")
|
||||
if codec == None: self._codec = None
|
||||
elif not issubclass(type(codec), Codec): raise CodecError(f"Invalid codec specified for {self}")
|
||||
else:
|
||||
self._codec = codec
|
||||
|
||||
if self.codec.preferred_samplerate:
|
||||
self.preferred_samplerate = self.codec.preferred_samplerate
|
||||
else:
|
||||
self.preferred_samplerate = Backend.SAMPLERATE
|
||||
if self.codec.preferred_samplerate: self.preferred_samplerate = self.codec.preferred_samplerate
|
||||
else: self.preferred_samplerate = Backend.SAMPLERATE
|
||||
|
||||
if self.codec.frame_quanta_ms:
|
||||
if self.target_frame_ms%self.codec.frame_quanta_ms != 0:
|
||||
|
|
@ -206,8 +207,7 @@ class LineSource(LocalSource):
|
|||
self.ingest_thread = threading.Thread(target=self.__ingest_job, daemon=True)
|
||||
self.ingest_thread.start()
|
||||
|
||||
def stop(self):
|
||||
self.should_run = False
|
||||
def stop(self): self.should_run = False
|
||||
|
||||
def __ingest_job(self):
|
||||
with self.recording_lock:
|
||||
|
|
@ -238,8 +238,7 @@ class OpusFileSource(LocalSource):
|
|||
self.next_frame = None
|
||||
self._codec = None
|
||||
|
||||
if file_path == None:
|
||||
raise TypeError(f"{self} initialised with invalid file path: {file_path}")
|
||||
if file_path == None: raise TypeError(f"{self} initialised with invalid file path: {file_path}")
|
||||
elif os.path.isfile(file_path):
|
||||
self.file = OpusFile(file_path)
|
||||
self.samplerate = self.file.frequency
|
||||
|
|
@ -250,22 +249,19 @@ class OpusFileSource(LocalSource):
|
|||
self.length_ms = (self.sample_count/self.samplerate)*1000
|
||||
RNS.log(f"{self} loaded {RNS.prettytime(self.length_ms/1000)} of audio from {file_path}", RNS.LOG_DEBUG)
|
||||
RNS.log(f"{self} samplerate is {RNS.prettyfrequency(self.samplerate)}, {self.channels} channels, {self.sample_count} samples in total", RNS.LOG_DEBUG)
|
||||
else:
|
||||
raise OSError(f"{self} file {file_path} not found")
|
||||
|
||||
else: raise OSError(f"{self} file {file_path} not found")
|
||||
|
||||
self.codec = codec
|
||||
self.sink = sink
|
||||
|
||||
@property
|
||||
def codec(self):
|
||||
return self._codec
|
||||
def codec(self): return self._codec
|
||||
|
||||
@codec.setter
|
||||
def codec(self, codec):
|
||||
if codec == None:
|
||||
self._codec = None
|
||||
elif not issubclass(type(codec), Codec):
|
||||
raise CodecError(f"Invalid codec specified for {self}")
|
||||
if codec == None: self._codec = None
|
||||
elif not issubclass(type(codec), Codec): raise CodecError(f"Invalid codec specified for {self}")
|
||||
else:
|
||||
self._codec = codec
|
||||
|
||||
|
|
@ -295,8 +291,7 @@ class OpusFileSource(LocalSource):
|
|||
self.ingest_thread = threading.Thread(target=self.__ingest_job, daemon=True)
|
||||
self.ingest_thread.start()
|
||||
|
||||
def stop(self):
|
||||
self.should_run = False
|
||||
def stop(self): self.should_run = False
|
||||
|
||||
def __ingest_job(self):
|
||||
with self.read_lock:
|
||||
|
|
@ -323,5 +318,4 @@ class OpusFileSource(LocalSource):
|
|||
else:
|
||||
time.sleep(self.frame_time*0.1)
|
||||
|
||||
class PacketSource(RemoteSource):
|
||||
pass
|
||||
class PacketSource(RemoteSource): pass
|
||||
|
|
@ -25,7 +25,6 @@ class ReticulumTelephone():
|
|||
KPD_NUMBERS = ["0","1","2","3","4","5","6","7","8","9"]
|
||||
KPD_HEX_ALPHA = ["A","B","C","D","E","F"]
|
||||
KPD_SYMBOLS = ["*","#"]
|
||||
KPD_COMMANDS = ["P","R","M","N","K","-","+"]
|
||||
|
||||
RING_TIME = 30
|
||||
WAIT_TIME = 60
|
||||
|
|
@ -226,15 +225,10 @@ class ReticulumTelephone():
|
|||
|
||||
def enable_keypad(self, driver):
|
||||
if self.service: RNS.log(f"Starting keypad: {driver}", RNS.LOG_DEBUG)
|
||||
self.keypad_driver = driver
|
||||
if driver == "gpio_4x4":
|
||||
from LXST.Primitives.hardware.keypad_gpio_4x4 import Keypad
|
||||
self.keypad = Keypad(callback=self._keypad_event)
|
||||
self.keypad.start()
|
||||
elif driver == "gpio_5x5":
|
||||
from LXST.Primitives.hardware.keypad_gpio_5x5 import Keypad
|
||||
self.keypad = Keypad(callback=self._keypad_event)
|
||||
self.keypad.start()
|
||||
else: raise OSError("Unknown keypad driver specified")
|
||||
|
||||
def enable_hook(self, pin=None):
|
||||
|
|
@ -600,101 +594,52 @@ class ReticulumTelephone():
|
|||
self.became_available()
|
||||
|
||||
if self.is_ringing:
|
||||
if self.keypad_driver == "gpio_4x4":
|
||||
answer_events = event[0] == "D" and event[1] == self.keypad.ec.DOWN
|
||||
answer_events |= event[0] == "hook" and event[1] == self.keypad.ec.UP
|
||||
if answer_events:
|
||||
print(f"Answering call from {RNS.prettyhexrep(self.caller.hash)}")
|
||||
if not self.telephone.answer(self.caller):
|
||||
print(f"Could not answer call from {RNS.prettyhexrep(self.caller.hash)}")
|
||||
elif event[0] == "C" and event[1] == self.keypad.ec.DOWN:
|
||||
print(f"Rejecting call from {RNS.prettyhexrep(self.caller.hash)}")
|
||||
self.telephone.hangup()
|
||||
|
||||
elif self.keypad_driver == "gpio_5x5":
|
||||
answer_events = event[0] == "N" and event[1] == self.keypad.ec.DOWN
|
||||
answer_events |= event[0] == "hook" and event[1] == self.keypad.ec.UP
|
||||
if answer_events:
|
||||
print(f"Answering call from {RNS.prettyhexrep(self.caller.hash)}")
|
||||
if not self.telephone.answer(self.caller):
|
||||
print(f"Could not answer call from {RNS.prettyhexrep(self.caller.hash)}")
|
||||
elif event[0] == "K" and event[1] == self.keypad.ec.DOWN:
|
||||
print(f"Rejecting call from {RNS.prettyhexrep(self.caller.hash)}")
|
||||
self.telephone.hangup()
|
||||
answer_events = event[0] == "D" and event[1] == self.keypad.ec.DOWN
|
||||
answer_events |= event[0] == "hook" and event[1] == self.keypad.ec.UP
|
||||
if answer_events:
|
||||
print(f"Answering call from {RNS.prettyhexrep(self.caller.hash)}")
|
||||
if not self.telephone.answer(self.caller):
|
||||
print(f"Could not answer call from {RNS.prettyhexrep(self.caller.hash)}")
|
||||
elif event[0] == "C" and event[1] == self.keypad.ec.DOWN:
|
||||
print(f"Rejecting call from {RNS.prettyhexrep(self.caller.hash)}")
|
||||
self.telephone.hangup()
|
||||
|
||||
elif self.is_in_call or self.call_is_connecting:
|
||||
if self.keypad_driver == "gpio_4x4":
|
||||
hangup_events = event[0] == "D" and event[1] == self.keypad.ec.DOWN
|
||||
hangup_events |= event[0] == "hook" and event[1] == self.keypad.ec.DOWN
|
||||
if hangup_events:
|
||||
print(f"Hanging up call with {RNS.prettyhexrep(self.caller.hash)}")
|
||||
self.telephone.hangup()
|
||||
|
||||
elif self.keypad_driver == "gpio_5x5":
|
||||
hangup_events = event[0] == "N" and event[1] == self.keypad.ec.DOWN
|
||||
hangup_events |= event[0] == "hook" and event[1] == self.keypad.ec.DOWN
|
||||
if hangup_events:
|
||||
print(f"Hanging up call with {RNS.prettyhexrep(self.caller.hash)}")
|
||||
self.telephone.hangup()
|
||||
hangup_events = event[0] == "D" and event[1] == self.keypad.ec.DOWN
|
||||
hangup_events |= event[0] == "hook" and event[1] == self.keypad.ec.DOWN
|
||||
if hangup_events:
|
||||
print(f"Hanging up call with {RNS.prettyhexrep(self.caller.hash)}")
|
||||
self.telephone.hangup()
|
||||
|
||||
elif self.is_available and self.hw_is_idle:
|
||||
if self.keypad_driver == "gpio_4x4":
|
||||
if event[0] == "A" and event[1] == self.keypad.ec.DOWN:
|
||||
self.hw_input = ""; self.hw_state = self.HW_STATE_DIAL
|
||||
self._update_display()
|
||||
if event[0] in self.KPD_NUMBERS and event[1] == self.keypad.ec.DOWN:
|
||||
self.hw_input += event[0]; self.hw_state = self.HW_STATE_DIAL
|
||||
self._update_display()
|
||||
if event[0] == "A" and event[1] == self.keypad.ec.DOWN:
|
||||
self.hw_input = ""; self.hw_state = self.HW_STATE_DIAL
|
||||
self._update_display()
|
||||
|
||||
if self.keypad_driver == "gpio_5x5":
|
||||
if event[0] == "N" and event[1] == self.keypad.ec.DOWN:
|
||||
self.hw_input = ""; self.hw_state = self.HW_STATE_DIAL
|
||||
self._update_display()
|
||||
if event[0] in self.KPD_NUMBERS and event[1] == self.keypad.ec.DOWN:
|
||||
self.hw_input += event[0]; self.hw_state = self.HW_STATE_DIAL
|
||||
self._update_display()
|
||||
if event[0] in self.KPD_NUMBERS and event[1] == self.keypad.ec.DOWN:
|
||||
self.hw_input += event[0]; self.hw_state = self.HW_STATE_DIAL
|
||||
self._update_display()
|
||||
|
||||
elif self.is_available and self.hw_is_dialing:
|
||||
dial_event = False
|
||||
if self.keypad_driver == "gpio_4x4":
|
||||
if event[1] == self.keypad.ec.DOWN:
|
||||
if event[0] in self.KPD_NUMBERS: self.hw_input += event[0]
|
||||
if event[0] == "A": self.became_available()
|
||||
if event[0] == "B": self.hw_input = self.hw_input[:-1]
|
||||
if event[0] == "C": self.hw_input = ""
|
||||
if event[0] == "D": dial_event = True
|
||||
if event[1] == self.keypad.ec.DOWN:
|
||||
if event[0] in self.KPD_NUMBERS: self.hw_input += event[0]
|
||||
if event[0] == "A": self.became_available()
|
||||
if event[0] == "B": self.hw_input = self.hw_input[:-1]
|
||||
if event[0] == "C": self.hw_input = ""
|
||||
if event[0] == "D": dial_event = True
|
||||
|
||||
if event[0] == "hook" and event[1] == self.keypad.ec.UP: dial_event = True
|
||||
if event[0] == "hook" and event[1] == self.keypad.ec.UP: dial_event = True
|
||||
|
||||
if dial_event:
|
||||
for identity_hash in self.aliases:
|
||||
alias = self.aliases[identity_hash]
|
||||
if self.hw_input == alias:
|
||||
self.hw_input = ""
|
||||
self.hw_state = self.HW_STATE_IDLE
|
||||
self.dial(identity_hash)
|
||||
if dial_event:
|
||||
for identity_hash in self.aliases:
|
||||
alias = self.aliases[identity_hash]
|
||||
if self.hw_input == alias:
|
||||
self.hw_input = ""
|
||||
self.hw_state = self.HW_STATE_IDLE
|
||||
self.dial(identity_hash)
|
||||
|
||||
self._update_display()
|
||||
|
||||
if self.keypad_driver == "gpio_5x5":
|
||||
if event[1] == self.keypad.ec.DOWN:
|
||||
if event[0] in self.KPD_NUMBERS: self.hw_input += event[0]
|
||||
if event[0] == "A": self.became_available()
|
||||
if event[0] == "C": self.hw_input = ""
|
||||
if event[0] == "K": self.hw_input = self.hw_input[:-1]
|
||||
if event[0] == "N": dial_event = True
|
||||
|
||||
if event[0] == "hook" and event[1] == self.keypad.ec.UP: dial_event = True
|
||||
|
||||
if dial_event:
|
||||
for identity_hash in self.aliases:
|
||||
alias = self.aliases[identity_hash]
|
||||
if self.hw_input == alias:
|
||||
self.hw_input = ""
|
||||
self.hw_state = self.HW_STATE_IDLE
|
||||
self.dial(identity_hash)
|
||||
|
||||
self._update_display()
|
||||
self._update_display()
|
||||
|
||||
def sigint_handler(self, signal, frame):
|
||||
self.cleanup()
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
__version__ = "0.3.0"
|
||||
__version__ = "0.4.0"
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue