Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: audio converting time [WPB-9705] #3127

Merged
merged 5 commits into from
Jun 25, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ import com.wire.android.ui.home.messagecomposer.model.MessageBundle
import com.wire.android.ui.home.messagecomposer.model.Ping
import com.wire.android.ui.navArgs
import com.wire.android.ui.sharing.SendMessagesSnackbarMessages
import com.wire.android.util.AUDIO_MIME_TYPE
import com.wire.android.util.SUPPORTED_AUDIO_MIME_TYPE
import com.wire.android.util.ImageUtil
import com.wire.android.util.dispatchers.DispatcherProvider
import com.wire.android.util.getAudioLengthInMs
Expand Down Expand Up @@ -209,7 +209,7 @@ class SendMessageViewModel @Inject constructor(
handleAssetMessageBundle(
attachmentUri = messageBundle.attachmentUri,
conversationId = messageBundle.conversationId,
specifiedMimeType = AUDIO_MIME_TYPE,
specifiedMimeType = SUPPORTED_AUDIO_MIME_TYPE,
)
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,15 @@
*/
package com.wire.android.ui.home.messagecomposer.recordaudio

import android.content.Context
import android.annotation.SuppressLint
import android.media.AudioFormat
import android.media.AudioRecord
import android.media.MediaRecorder
import android.os.Build
import com.wire.android.appLogger
import com.wire.android.util.fileDateTime
import com.wire.android.util.dispatchers.DispatcherProvider
import com.wire.android.util.fileDateTime
import com.wire.kalium.logic.data.asset.KaliumFileSystem
import com.wire.kalium.logic.feature.asset.GetAssetSizeLimitUseCaseImpl.Companion.ASSET_SIZE_DEFAULT_LIMIT_BYTES
import com.wire.kalium.util.DateTimeUtil
import dagger.hilt.android.scopes.ViewModelScoped
import kotlinx.coroutines.CoroutineScope
Expand All @@ -32,13 +34,15 @@ import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.MutableSharedFlow
import kotlinx.coroutines.flow.asSharedFlow
import kotlinx.coroutines.launch
import java.io.File
import okio.Path
import okio.buffer
import java.io.IOException
import java.nio.ByteBuffer
import java.nio.ByteOrder
import javax.inject.Inject

@ViewModelScoped
class AudioMediaRecorder @Inject constructor(
private val context: Context,
private val kaliumFileSystem: KaliumFileSystem,
private val dispatcherProvider: DispatcherProvider
) {
Expand All @@ -47,88 +51,187 @@ class AudioMediaRecorder @Inject constructor(
CoroutineScope(SupervisorJob() + dispatcherProvider.io())
}

private var mediaRecorder: MediaRecorder? = null
private var audioRecorder: AudioRecord? = null
private var recordingThread: Thread? = null
private var isRecording = false
private var assetLimitInMB: Long = ASSET_SIZE_DEFAULT_LIMIT_BYTES

var originalOutputFile: File? = null
var effectsOutputFile: File? = null
var originalOutputPath: Path? = null
var effectsOutputPath: Path? = null

private val _maxFileSizeReached = MutableSharedFlow<RecordAudioDialogState>()
fun getMaxFileSizeReached(): Flow<RecordAudioDialogState> =
_maxFileSizeReached.asSharedFlow()

@SuppressLint("MissingPermission")
fun setUp(assetLimitInMegabyte: Long) {
if (mediaRecorder == null) {
mediaRecorder = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
MediaRecorder(context)
} else {
MediaRecorder()
}

originalOutputFile = kaliumFileSystem
assetLimitInMB = assetLimitInMegabyte
if (audioRecorder == null) {
val bufferSize = AudioRecord.getMinBufferSize(
SAMPLING_RATE,
AUDIO_CHANNELS,
AUDIO_ENCODING
)

audioRecorder = AudioRecord(
MediaRecorder.AudioSource.MIC,
SAMPLING_RATE,
AUDIO_CHANNELS,
AUDIO_ENCODING,
bufferSize
)

originalOutputPath = kaliumFileSystem
.tempFilePath(getRecordingAudioFileName())
.toFile()

effectsOutputFile = kaliumFileSystem
effectsOutputPath = kaliumFileSystem
.tempFilePath(getRecordingAudioEffectsFileName())
.toFile()

mediaRecorder?.setAudioSource(MediaRecorder.AudioSource.MIC)
mediaRecorder?.setAudioSamplingRate(SAMPLING_RATE)
mediaRecorder?.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4)
mediaRecorder?.setAudioEncoder(MediaRecorder.AudioEncoder.AAC)
mediaRecorder?.setAudioChannels(AUDIO_CHANNELS)
mediaRecorder?.setAudioEncodingBitRate(AUDIO_ENCONDING_BIT_RATE)
mediaRecorder?.setMaxFileSize(assetLimitInMegabyte)
mediaRecorder?.setOutputFile(originalOutputFile)

observeAudioFileSize(assetLimitInMegabyte)
}
}

fun startRecording(): Boolean = try {
mediaRecorder?.prepare()
mediaRecorder?.start()
true
} catch (e: IllegalStateException) {
e.printStackTrace()
appLogger.e("[RecordAudio] startRecording: IllegalStateException - ${e.message}")
false
} catch (e: IOException) {
e.printStackTrace()
appLogger.e("[RecordAudio] startRecording: IOException - ${e.message}")
false
}
audioRecorder?.startRecording()
isRecording = true
recordingThread = Thread { writeAudioDataToFile() }
recordingThread?.start()
true
} catch (e: IllegalStateException) {
e.printStackTrace()
appLogger.e("[RecordAudio] startRecording: IllegalStateException - ${e.message}")
false
} catch (e: IOException) {
e.printStackTrace()
appLogger.e("[RecordAudio] startRecording: IOException - ${e.message}")
false
}

/**
 * Writes the canonical 44-byte WAV (RIFF / PCM) header to [bufferedSink].
 *
 * The two size fields — the overall RIFF chunk size and the "data" sub-chunk size —
 * are written as [PLACEHOLDER_SIZE] because the final recording length is unknown
 * while audio is still streaming; they are patched later by `updateWavHeader`.
 * All multi-byte numeric fields are little-endian, as required by the WAV format
 * (hence writeIntLe / writeShortLe).
 *
 * @param bufferedSink destination sink positioned at the start of the file.
 * @param sampleRate samples per second (e.g. 44100).
 * @param channels number of audio channels (1 = mono).
 * @param bitsPerSample bits per PCM sample (e.g. 16).
 */
private fun writeWavHeader(bufferedSink: okio.BufferedSink, sampleRate: Int, channels: Int, bitsPerSample: Int) {
// Derived per the WAV spec: bytes per second of audio, and bytes per sample frame.
val byteRate = sampleRate * channels * (bitsPerSample / BITS_PER_BYTE)
val blockAlign = channels * (bitsPerSample / BITS_PER_BYTE)

// A buffered sink is used so the 4-char ASCII tags are written verbatim.
bufferedSink.writeUtf8(CHUNK_ID_RIFF) // Chunk ID ("RIFF")
bufferedSink.writeIntLe(PLACEHOLDER_SIZE) // Placeholder for Chunk Size (patched after recording ends)
bufferedSink.writeUtf8(FORMAT_WAVE) // Format ("WAVE")
bufferedSink.writeUtf8(SUBCHUNK1_ID_FMT) // Subchunk1 ID ("fmt ")
bufferedSink.writeIntLe(SUBCHUNK1_SIZE_PCM) // Subchunk1 Size (16 for PCM)
bufferedSink.writeShortLe(AUDIO_FORMAT_PCM) // Audio Format (1 = uncompressed PCM)
bufferedSink.writeShortLe(channels) // Number of Channels
bufferedSink.writeIntLe(sampleRate) // Sample Rate
bufferedSink.writeIntLe(byteRate) // Byte Rate
bufferedSink.writeShortLe(blockAlign) // Block Align
bufferedSink.writeShortLe(bitsPerSample) // Bits Per Sample
bufferedSink.writeUtf8(SUBCHUNK2_ID_DATA) // Subchunk2 ID ("data")
bufferedSink.writeIntLe(PLACEHOLDER_SIZE) // Placeholder for Subchunk2 Size (patched after recording ends)
}

/**
 * Patches the two placeholder size fields of an already-written WAV file
 * (see `writeWavHeader`) once the final file size is known.
 *
 * - RIFF chunk size at [CHUNK_SIZE_OFFSET] = file size minus the 8-byte
 *   "RIFF" id + size preamble ([CHUNK_ID_SIZE]).
 * - "data" sub-chunk size at [SUBCHUNK2_SIZE_OFFSET] = file size minus the
 *   44-byte header ([HEADER_SIZE]).
 *
 * Both values are written little-endian, as the WAV format requires.
 *
 * NOTE(review): `file.length().toInt()` overflows for files >= 2 GiB; acceptable
 * here since recordings are capped well below that by the asset size limit.
 *
 * @param filePath path of the finished WAV recording to patch in place.
 */
private fun updateWavHeader(filePath: Path) {
    val file = filePath.toFile()
    val fileSize = file.length().toInt()
    val dataSize = fileSize - HEADER_SIZE

    val chunkSizeBuffer = ByteBuffer.allocate(INT_SIZE)
    chunkSizeBuffer.order(ByteOrder.LITTLE_ENDIAN)
    chunkSizeBuffer.putInt(fileSize - CHUNK_ID_SIZE)

    val dataSizeBuffer = ByteBuffer.allocate(INT_SIZE)
    dataSizeBuffer.order(ByteOrder.LITTLE_ENDIAN)
    dataSizeBuffer.putInt(dataSize)

    // use {} guarantees the file handle is closed even if seek/write throws;
    // the previous code leaked the RandomAccessFile on any I/O error.
    java.io.RandomAccessFile(file, "rw").use { raf ->
        // Update Chunk Size
        raf.seek(CHUNK_SIZE_OFFSET.toLong())
        raf.write(chunkSizeBuffer.array())

        // Update Subchunk2 Size
        raf.seek(SUBCHUNK2_SIZE_OFFSET.toLong())
        raf.write(dataSizeBuffer.array())
    }

    appLogger.i("Updated WAV Header: Chunk Size = ${fileSize - CHUNK_ID_SIZE}, Data Size = $dataSize")
}

fun stop() {
mediaRecorder?.stop()
isRecording = false
audioRecorder?.stop()
recordingThread?.join()
}

fun release() {
mediaRecorder?.release()
audioRecorder?.release()
audioRecorder = null
}

private fun observeAudioFileSize(assetLimitInMegabyte: Long) {
mediaRecorder?.setOnInfoListener { _, what, _ ->
if (what == MediaRecorder.MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED) {
scope.launch {
_maxFileSizeReached.emit(
RecordAudioDialogState.MaxFileSizeReached(
maxSize = assetLimitInMegabyte.div(SIZE_OF_1MB)
private fun writeAudioDataToFile() {
val data = ByteArray(BUFFER_SIZE)
var sink: okio.Sink? = null

try {
sink = kaliumFileSystem.sink(originalOutputPath!!)
val bufferedSink = sink.buffer()

// Write WAV header
writeWavHeader(bufferedSink, SAMPLING_RATE, AUDIO_CHANNELS, BITS_PER_SAMPLE)

while (isRecording) {
val read = audioRecorder?.read(data, 0, BUFFER_SIZE) ?: 0
if (read > 0) {
bufferedSink.write(data, 0, read)
}

// Check if the file size exceeds the limit
val currentSize = originalOutputPath!!.toFile().length()
if (currentSize > (assetLimitInMB * SIZE_OF_1MB)) {
isRecording = false
scope.launch {
_maxFileSizeReached.emit(
RecordAudioDialogState.MaxFileSizeReached(
maxSize = assetLimitInMB / SIZE_OF_1MB
)
)
)
}
break
}
}

// Close buffer to ensure all data is written
bufferedSink.close()

// Update WAV header with final file size
updateWavHeader(originalOutputPath!!)
} catch (e: IOException) {
e.printStackTrace()
appLogger.e("[RecordAudio] writeAudioDataToFile: IOException - ${e.message}")
} finally {
sink?.close()
}
}

private companion object {
fun getRecordingAudioFileName(): String =
"wire-audio-${DateTimeUtil.currentInstant().fileDateTime()}.m4a"
fun getRecordingAudioEffectsFileName(): String =
"wire-audio-${DateTimeUtil.currentInstant().fileDateTime()}-filter.m4a"
companion object {
fun getRecordingAudioFileName(): String = "wire-audio-${DateTimeUtil.currentInstant().fileDateTime()}.wav"
fun getRecordingAudioEffectsFileName(): String = "wire-audio-${DateTimeUtil.currentInstant().fileDateTime()}-filter.wav"

const val SIZE_OF_1MB = 1024 * 1024
const val AUDIO_CHANNELS = 1
const val AUDIO_CHANNELS = 1 // Mono
const val SAMPLING_RATE = 44100
const val AUDIO_ENCONDING_BIT_RATE = 96000
const val BUFFER_SIZE = 1024
const val AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT
const val BITS_PER_SAMPLE = 16
const val BITS_PER_BYTE = 8
const val HEADER_SIZE = 44
const val CHUNK_ID_SIZE = 8
const val INT_SIZE = 4
const val PLACEHOLDER_SIZE = 0
const val CHUNK_SIZE_OFFSET = 4
const val SUBCHUNK2_SIZE_OFFSET = 40
const val AUDIO_FORMAT_PCM = 1
const val SUBCHUNK1_SIZE_PCM = 16

const val CHUNK_ID_RIFF = "RIFF"
const val FORMAT_WAVE = "WAVE"
const val SUBCHUNK1_ID_FMT = "fmt "
const val SUBCHUNK2_ID_DATA = "data"
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -20,29 +20,38 @@ package com.wire.android.ui.home.messagecomposer.recordaudio
import android.content.Context
import com.waz.audioeffect.AudioEffect
import com.wire.android.appLogger
import javax.inject.Singleton
import com.wire.android.util.dispatchers.DispatcherProvider
import kotlinx.coroutines.withContext
import javax.inject.Inject
import javax.inject.Singleton

@Singleton
class GenerateAudioFileWithEffectsUseCase @Inject constructor() {
class GenerateAudioFileWithEffectsUseCase @Inject constructor(
private val dispatchers: DispatcherProvider,
) {
/**
* Note: This UseCase can't be tested as we cannot mock `AudioEffect` from AVS.
* Generates audio file with effects on received path from the original file path.
*
* @return Unit, as the content of audio with effects will be saved directly to received file path.
*/
operator fun invoke(
suspend operator fun invoke(
context: Context,
originalFilePath: String,
effectsFilePath: String
) {
val audioEffectsResult = AudioEffect(context)
.applyEffectM4A(
originalFilePath,
effectsFilePath,
AudioEffect.AVS_AUDIO_EFFECT_VOCODER_MED,
true
)
effectsFilePath: String,
) = withContext(dispatchers.io()) {
appLogger.i("[$TAG] -> Start generating audio file with effects")

val audioEffect = AudioEffect(context)
val effectType = AudioEffect.AVS_AUDIO_EFFECT_VOCODER_MED
val reduceNoise = true

val audioEffectsResult = audioEffect.applyEffectWav(
originalFilePath,
effectsFilePath,
effectType,
reduceNoise
)

if (audioEffectsResult > -1) {
appLogger.i("[$TAG] -> Audio file with effects generated successfully.")
Expand Down
Loading
Loading