Compare commits
40 Commits

| SHA1 |
|---|
| 455a802a16 |
| 4162dd89bc |
| 60ab03d85c |
| 3f6ebf5d86 |
| 34ab105d10 |
| 84703fe6af |
| 4f0f0e9412 |
| 5648d9a7ee |
| b0f4857ca1 |
| e1146666dc |
| 2109d15e6c |
| f91f5ad637 |
| 4b710b5307 |
| 09c3ef52b7 |
| a388caafe9 |
| 82d92948d5 |
| 0d72399bc1 |
| 24ae14cf46 |
| 269bffdbf9 |
| ed1de493f9 |
| 00c8d9e289 |
| 39dd5da9cd |
| 4294573225 |
| ea32ed6444 |
| 802fdf3e0b |
| e565d31c59 |
| 1de01c1c0e |
| 35c032267c |
| c7324e16c7 |
| 0e84fe12d6 |
| 66fe1fa003 |
| 0058b9e191 |
| a5384efd9e |
| 696d61e6d9 |
| 9a10a1dc06 |
| 12de66cf25 |
| 92b4ee21fe |
| c8749db90b |
| 57b6110e72 |
| f479beef83 |
```diff
@@ -31,7 +31,6 @@ src = [
    'src/version.c',
    'src/video_buffer.c',
    'src/util/acksync.c',
    'src/util/average.c',
    'src/util/bytebuf.c',
    'src/util/file.c',
    'src/util/intmap.c',
```
```diff
@@ -4,27 +4,11 @@

#include "util/log.h"

#define SC_AUDIO_PLAYER_NDEBUG // comment to debug

/** Downcast frame_sink to sc_audio_player */
/** Downcast frame_sink to sc_v4l2_sink */
#define DOWNCAST(SINK) container_of(SINK, struct sc_audio_player, frame_sink)

#define SC_AV_SAMPLE_FMT AV_SAMPLE_FMT_FLT
#define SC_SDL_SAMPLE_FMT AUDIO_F32

#define SC_AUDIO_OUTPUT_BUFFER_SAMPLES 480 // 10ms at 48000Hz

// The target number of buffered samples between the producer and the consumer.
// This value is directly use for compensation.
#define SC_TARGET_BUFFERED_SAMPLES (3 * SC_AUDIO_OUTPUT_BUFFER_SAMPLES)

// If the consumer is too late, skip samples to keep at most this value
#define SC_BUFFERED_SAMPLES_THRESHOLD 2400 // 50ms at 48000Hz

// Use a ring-buffer of 1 second (at 48000Hz) between the producer and the
// consumer. It too big, but it guarantees that the producer and the consumer
// will be able to access it in parallel without locking.
#define SC_BYTEBUF_SIZE_IN_SAMPLES 48000
#define SC_AV_SAMPLE_FMT AV_SAMPLE_FMT_S16
#define SC_SDL_SAMPLE_FMT AUDIO_S16

void
sc_audio_player_sdl_callback(void *userdata, uint8_t *stream, int len_int) {
```
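For orientation, the constants in the hunk above express latency budgets in samples at 48 kHz. The following standalone sketch (illustrative only, not part of the diff; it assumes stereo 16-bit output, matching `AV_SAMPLE_FMT_S16`/`AUDIO_S16`) recomputes the durations and byte sizes they imply:

```c
#include <stdio.h>

// Illustrative recomputation of the buffering constants above,
// assuming 48000 Hz, 2 channels, 2 bytes per sample (S16).
int main(void) {
    const int sample_rate = 48000;
    const int channels = 2;
    const int bytes_per_sample = 2;

    const int output_buffer = 480;          // SC_AUDIO_OUTPUT_BUFFER_SAMPLES
    const int target = 3 * output_buffer;   // SC_TARGET_BUFFERED_SAMPLES
    const int threshold = 2400;             // SC_BUFFERED_SAMPLES_THRESHOLD
    const int ring_samples = 48000;         // SC_BYTEBUF_SIZE_IN_SAMPLES

    printf("SDL output buffer: %d samples = %d ms\n",
           output_buffer, output_buffer * 1000 / sample_rate);          // 10 ms
    printf("target buffered:   %d samples = %d ms\n",
           target, target * 1000 / sample_rate);                        // 30 ms
    printf("skip threshold:    %d samples = %d ms\n",
           threshold, threshold * 1000 / sample_rate);                  // 50 ms
    printf("ring buffer:       %d samples = %d bytes (1 s)\n",
           ring_samples, ring_samples * channels * bytes_per_sample);   // 192000
    return 0;
}
```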
```diff
@@ -36,49 +20,21 @@ sc_audio_player_sdl_callback(void *userdata, uint8_t *stream, int len_int) {
    assert(len_int > 0);
    size_t len = len_int;

#ifndef SC_AUDIO_PLAYER_NDEBUG
    LOGD("[Audio] SDL callback requests %" SC_PRIsizet " samples",
         len / (ap->nb_channels * ap->out_bytes_per_sample));
#endif

    size_t read = sc_bytebuf_read_remaining(&ap->buf);
    size_t max_buffered_bytes = SC_BUFFERED_SAMPLES_THRESHOLD
                              * ap->nb_channels * ap->out_bytes_per_sample;
    if (read > max_buffered_bytes + len) {
        size_t skip = read - (max_buffered_bytes + len);
#ifndef SC_AUDIO_PLAYER_NDEBUG
        LOGD("[Audio] Buffered samples threshold exceeded: %" SC_PRIsizet
             " bytes, skipping %" SC_PRIsizet " bytes", read, skip);
#endif
        // After this callback, exactly max_buffered_bytes will remain
        sc_bytebuf_skip(&ap->buf, skip);
        read = max_buffered_bytes + len;
    }

    // Number of buffered samples (may be negative on underflow)
    float buffered_samples = ((float) read - len_int)
                           / (ap->nb_channels * ap->out_bytes_per_sample);
    sc_average_push(&ap->avg_buffered_samples, buffered_samples);

    if (read) {
        if (read > len) {
            read = len;
        }
        sc_bytebuf_read(&ap->buf, stream, read);
    }

    if (read < len) {
        // Insert silence
#ifndef SC_AUDIO_PLAYER_NDEBUG
        LOGD("[Audio] Buffer underflow, inserting silence: %" SC_PRIsizet
             " bytes", len - read);
#endif
        memset(stream + read, 0, len - read);
    }
}

static size_t
sc_audio_player_get_buf_size(struct sc_audio_player *ap, size_t samples) {
sc_audio_player_get_swr_buf_size(struct sc_audio_player *ap, size_t samples) {
    assert(ap->nb_channels);
    assert(ap->out_bytes_per_sample);
    return samples * ap->nb_channels * ap->out_bytes_per_sample;
@@ -86,7 +42,7 @@ sc_audio_player_get_buf_size(struct sc_audio_player *ap, size_t samples) {

static uint8_t *
sc_audio_player_get_swr_buf(struct sc_audio_player *ap, size_t min_samples) {
    size_t min_buf_size = sc_audio_player_get_buf_size(ap, min_samples);
    size_t min_buf_size = sc_audio_player_get_swr_buf_size(ap, min_samples);
    if (min_buf_size < ap->swr_buf_alloc_size) {
        size_t new_size = min_buf_size + 4096;
        uint8_t *buf = realloc(ap->swr_buf, new_size);
@@ -107,28 +63,8 @@ sc_audio_player_frame_sink_open(struct sc_frame_sink *sink,
                                const AVCodecContext *ctx) {
    struct sc_audio_player *ap = DOWNCAST(sink);

    SDL_AudioSpec desired = {
        .freq = ctx->sample_rate,
        .format = SC_SDL_SAMPLE_FMT,
        .channels = ctx->ch_layout.nb_channels,
        .samples = SC_AUDIO_OUTPUT_BUFFER_SAMPLES,
        .callback = sc_audio_player_sdl_callback,
        .userdata = ap,
    };
    SDL_AudioSpec obtained;

    ap->device = SDL_OpenAudioDevice(NULL, 0, &desired, &obtained, 0);
    if (!ap->device) {
        LOGE("Could not open audio device: %s", SDL_GetError());
        return false;
    }

    SwrContext *swr_ctx = swr_alloc();
    if (!swr_ctx) {
        LOG_OOM();
        goto error_close_audio_device;
    }
    ap->swr_ctx = swr_ctx;
    SwrContext *swr_ctx = ap->swr_ctx;
    assert(swr_ctx);

    assert(ctx->sample_rate > 0);
    assert(ctx->ch_layout.nb_channels > 0);
@@ -147,46 +83,39 @@ sc_audio_player_frame_sink_open(struct sc_frame_sink *sink,
    int ret = swr_init(swr_ctx);
    if (ret) {
        LOGE("Failed to initialize the resampling context");
        goto error_free_swr_ctx;
        return false;
    }

    ap->sample_rate = ctx->sample_rate;
    ap->nb_channels = ctx->ch_layout.nb_channels;
    ap->out_bytes_per_sample = out_bytes_per_sample;

    size_t bytebuf_size =
        sc_audio_player_get_buf_size(ap, SC_BYTEBUF_SIZE_IN_SAMPLES);

    bool ok = sc_bytebuf_init(&ap->buf, bytebuf_size);
    if (!ok) {
        goto error_free_swr_ctx;
    }

    ap->safe_empty_buffer = sc_bytebuf_write_remaining(&ap->buf);

    size_t initial_swr_buf_size = sc_audio_player_get_buf_size(ap, 4096);
    size_t initial_swr_buf_size = sc_audio_player_get_swr_buf_size(ap, 4096);
    ap->swr_buf = malloc(initial_swr_buf_size);
    if (!ap->swr_buf) {
        LOG_OOM();
        goto error_destroy_bytebuf;
        return false;
    }
    ap->swr_buf_alloc_size = initial_swr_buf_size;

    sc_average_init(&ap->avg_buffered_samples, 32);
    ap->samples_since_resync = 0;
    SDL_AudioSpec desired = {
        .freq = ctx->sample_rate,
        .format = SC_SDL_SAMPLE_FMT,
        .channels = ctx->ch_layout.nb_channels,
        .samples = 512, // ~10ms at 48000Hz
        .callback = sc_audio_player_sdl_callback,
        .userdata = ap,
    };
    SDL_AudioSpec obtained;

    ap->device = SDL_OpenAudioDevice(NULL, 0, &desired, &obtained, 0);
    if (!ap->device) {
        LOGE("Could not open audio device: %s", SDL_GetError());
        return false;
    }

    SDL_PauseAudioDevice(ap->device, 0);

    return true;

error_destroy_bytebuf:
    sc_bytebuf_destroy(&ap->buf);
error_free_swr_ctx:
    swr_free(&ap->swr_ctx);
error_close_audio_device:
    SDL_CloseAudioDevice(ap->device);

    return false;
}

static void
@@ -196,10 +125,6 @@ sc_audio_player_frame_sink_close(struct sc_frame_sink *sink) {
    assert(ap->device);
    SDL_PauseAudioDevice(ap->device, 1);
    SDL_CloseAudioDevice(ap->device);

    free(ap->swr_buf);
    sc_bytebuf_destroy(&ap->buf);
    swr_free(&ap->swr_ctx);
}

static bool
@@ -223,12 +148,12 @@ sc_audio_player_frame_sink_push(struct sc_frame_sink *sink, const AVFrame *frame
        LOGE("Resampling failed: %d", ret);
        return false;
    }
    LOGI("ret=%d dst_nb_samples=%d\n", ret, dst_nb_samples);

    size_t samples_written = ret;
    size_t swr_buf_size = sc_audio_player_get_buf_size(ap, samples_written);
#ifndef SC_AUDIO_PLAYER_NDEBUG
    LOGI("[Audio] %" SC_PRIsizet " samples written to buffer", samples_written);
#endif
    size_t swr_buf_size = sc_audio_player_get_swr_buf_size(ap, ret);
    LOGI("== swr_buf_size %lu", swr_buf_size);

    // TODO clock drift compensation

    // It should almost always be possible to write without lock
    bool can_write_without_lock = swr_buf_size <= ap->safe_empty_buffer;
@@ -245,39 +170,36 @@ sc_audio_player_frame_sink_push(struct sc_frame_sink *sink, const AVFrame *frame

    // The next time, it will remain at least the current empty space
    ap->safe_empty_buffer = sc_bytebuf_write_remaining(&ap->buf);

    // Read the value written by the SDL thread under lock
    float avg;
    bool has_avg = sc_average_get(&ap->avg_buffered_samples, &avg);

    SDL_UnlockAudioDevice(ap->device);

    if (has_avg) {
        ap->samples_since_resync += samples_written;
        if (ap->samples_since_resync >= ap->sample_rate) {
            // Resync every second
            ap->samples_since_resync = 0;

            int diff = SC_TARGET_BUFFERED_SAMPLES - avg;
#ifndef SC_AUDIO_PLAYER_NDEBUG
            LOGI("[Audio] Average buffered samples = %f, compensation %d",
                 avg, diff);
#endif
            // Compensate the diff over 3 seconds (but will be recomputed after
            // 1 second)
            int ret = swr_set_compensation(swr_ctx, diff, 3 * ap->sample_rate);
            if (ret < 0) {
                LOGW("Resampling compensation failed: %d", ret);
                // not fatal
            }
        }
    }

    return true;
}

void
sc_audio_player_init(struct sc_audio_player *ap) {
bool
sc_audio_player_init(struct sc_audio_player *ap,
                     const struct sc_audio_player_callbacks *cbs,
                     void *cbs_userdata) {
    bool ok = sc_bytebuf_init(&ap->buf, 128 * 1024);
    if (!ok) {
        return false;
    }

    ap->swr_ctx = swr_alloc();
    if (!ap->swr_ctx) {
        sc_bytebuf_destroy(&ap->buf);
        LOG_OOM();
        return false;
    }

    ap->safe_empty_buffer = sc_bytebuf_write_remaining(&ap->buf);

    ap->swr_buf = NULL;
    ap->swr_buf_alloc_size = 0;

    assert(cbs && cbs->on_ended);
    ap->cbs = cbs;
    ap->cbs_userdata = cbs_userdata;

    static const struct sc_frame_sink_ops ops = {
        .open = sc_audio_player_frame_sink_open,
        .close = sc_audio_player_frame_sink_close,
```
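The compensation logic in the hunk above steers libswresample so that the averaged fill level drifts back toward the target. A minimal standalone sketch of that idea (hypothetical helper for illustration; only `swr_set_compensation()` is a real libswresample call):

```c
#include <stdbool.h>
#include <libswresample/swresample.h>

// Hypothetical helper mirroring the resync logic above: compute how far the
// average buffered level is from the target and ask the resampler to make up
// the difference, spread over about 3 seconds of output. Returns false if the
// resampler rejects the compensation (not fatal for playback).
static bool
adjust_drift(SwrContext *swr_ctx, float avg_buffered_samples,
             int target_buffered_samples, int sample_rate) {
    // Positive diff: produce a few extra samples; negative: drop a few
    int diff = target_buffered_samples - (int) avg_buffered_samples;
    return swr_set_compensation(swr_ctx, diff, 3 * sample_rate) >= 0;
}
```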
```diff
@@ -285,4 +207,12 @@ sc_audio_player_init(struct sc_audio_player *ap) {
    };

    ap->frame_sink.ops = &ops;
    return true;
}

void
sc_audio_player_destroy(struct sc_audio_player *ap) {
    sc_bytebuf_destroy(&ap->buf);
    swr_free(&ap->swr_ctx);
    free(ap->swr_buf);
}
```
```diff
@@ -5,7 +5,6 @@

#include <stdbool.h>
#include "trait/frame_sink.h"
#include <util/average.h>
#include <util/bytebuf.h>
#include <util/thread.h>

@@ -36,10 +35,6 @@ struct sc_audio_player {
    uint8_t *swr_buf;
    size_t swr_buf_alloc_size;

    // Number of buffered samples (may be negative on underflow)
    struct sc_average avg_buffered_samples;
    unsigned samples_since_resync;

    const struct sc_audio_player_callbacks *cbs;
    void *cbs_userdata;
};
@@ -48,7 +43,12 @@ struct sc_audio_player_callbacks {
    void (*on_ended)(struct sc_audio_player *ap, bool success, void *userdata);
};

bool
sc_audio_player_init(struct sc_audio_player *ap,
                     const struct sc_audio_player_callbacks *cbs,
                     void *cbs_userdata);

void
sc_audio_player_init(struct sc_audio_player *ap);
sc_audio_player_destroy(struct sc_audio_player *ap);

#endif
```
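The header relies on the embedded frame-sink member that the `DOWNCAST()` macro in the .c file converts back to the outer struct. A generic, self-contained illustration of that pattern (simplified stand-in types, not the real scrcpy definitions):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

// Simplified stand-ins for the trait pattern: a struct embeds a generic sink,
// the ops receive a pointer to the embedded member, and the implementation
// recovers its own struct with container_of().
#define container_of(ptr, type, member) \
    ((type *) ((char *) (ptr) - offsetof(type, member)))

struct frame_sink;

struct frame_sink_ops {
    bool (*open)(struct frame_sink *sink);
};

struct frame_sink {
    const struct frame_sink_ops *ops;
};

struct audio_player {
    struct frame_sink frame_sink; // embedded "trait"
    int sample_rate;
};

static bool
audio_player_open(struct frame_sink *sink) {
    // Downcast from the embedded member to the containing struct
    struct audio_player *ap =
        container_of(sink, struct audio_player, frame_sink);
    printf("open at %d Hz\n", ap->sample_rate);
    return true;
}

int main(void) {
    static const struct frame_sink_ops ops = { .open = audio_player_open };
    struct audio_player ap = { .frame_sink.ops = &ops, .sample_rate = 48000 };
    return ap.frame_sink.ops->open(&ap.frame_sink) ? 0 : 1;
}
```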
```diff
@@ -217,6 +217,17 @@ sc_recorder_on_ended(struct sc_recorder *recorder, bool success,
    }
}

static void
sc_audio_player_on_ended(struct sc_audio_player *ap, bool success,
                         void *userdata) {
    (void) ap;
    (void) userdata;

    if (!success) {
        // TODO
    }
}

static void
sc_video_demuxer_on_ended(struct sc_demuxer *demuxer, bool eos,
                          void *userdata) {
@@ -303,6 +314,7 @@ scrcpy(struct scrcpy_options *options) {
    bool file_pusher_initialized = false;
    bool recorder_initialized = false;
    bool recorder_started = false;
    bool audio_player_initialized = false;
#ifdef HAVE_V4L2
    bool v4l2_sink_initialized = false;
#endif
@@ -674,7 +686,15 @@ aoa_hid_end:
    sc_decoder_add_sink(&s->video_decoder, &s->screen.frame_sink);

    if (options->audio) {
        sc_audio_player_init(&s->audio_player);
        static const struct sc_audio_player_callbacks audio_player_cbs = {
            .on_ended = sc_audio_player_on_ended,
        };
        if (!sc_audio_player_init(&s->audio_player,
                                  &audio_player_cbs, NULL)) {
            goto end;
        }
        audio_player_initialized = true;

        sc_decoder_add_sink(&s->audio_decoder, &s->audio_player.frame_sink);
    }
}
@@ -797,6 +817,10 @@ end:
        sc_recorder_destroy(&s->recorder);
    }

    if (audio_player_initialized) {
        sc_audio_player_destroy(&s->audio_player);
    }

    if (file_pusher_initialized) {
        sc_file_pusher_join(&s->file_pusher);
        sc_file_pusher_destroy(&s->file_pusher);
```
```diff
@@ -338,9 +338,9 @@ sc_v4l2_sink_push(struct sc_v4l2_sink *vs, const AVFrame *frame) {
}

static bool
sc_v4l2_frame_sink_open(struct sc_frame_sink *sink, const AVCodecContext *ctx) {
sc_v4l2_frame_sink_open(struct sc_frame_sink *sink) {
    struct sc_v4l2_sink *vs = DOWNCAST(sink);
    return sc_v4l2_sink_open(vs, ctx);
    return sc_v4l2_sink_open(vs);
}

static void
```
```diff
@@ -1,11 +1,7 @@
package com.genymobile.scrcpy;

import com.genymobile.scrcpy.wrappers.ServiceManager;

import android.annotation.SuppressLint;
import android.annotation.TargetApi;
import android.content.ComponentName;
import android.content.Intent;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.AudioTimestamp;
@@ -16,7 +12,6 @@ import android.os.Build;
import android.os.Handler;
import android.os.HandlerThread;
import android.os.Looper;
import android.os.SystemClock;

import java.io.IOException;
import java.nio.ByteBuffer;
@@ -45,13 +40,10 @@ public final class AudioEncoder {
    }

    private static final int SAMPLE_RATE = 48000;
    private static final int CHANNEL_CONFIG = AudioFormat.CHANNEL_IN_STEREO;
    private static final int CHANNELS = 2;
    private static final int FORMAT = AudioFormat.ENCODING_PCM_16BIT;
    private static final int BYTES_PER_SAMPLE = 2;

    private static final int BUFFER_MS = 5; // milliseconds
    private static final int BUFFER_SIZE = SAMPLE_RATE * CHANNELS * BYTES_PER_SAMPLE * BUFFER_MS / 1000;
    private static final int BUFFER_MS = 10; // milliseconds
    private static final int BUFFER_SIZE = SAMPLE_RATE * CHANNELS * BUFFER_MS / 1000;

    private final Streamer streamer;
    private final int bitRate;
@@ -80,9 +72,9 @@ public final class AudioEncoder {

    private static AudioFormat createAudioFormat() {
        AudioFormat.Builder builder = new AudioFormat.Builder();
        builder.setEncoding(FORMAT);
        builder.setEncoding(AudioFormat.ENCODING_PCM_16BIT);
        builder.setSampleRate(SAMPLE_RATE);
        builder.setChannelMask(CHANNEL_CONFIG);
        builder.setChannelMask(CHANNELS == 2 ? AudioFormat.CHANNEL_IN_STEREO : AudioFormat.CHANNEL_IN_MONO);
        return builder.build();
    }

@@ -96,8 +88,7 @@ public final class AudioEncoder {
    }
        builder.setAudioSource(MediaRecorder.AudioSource.REMOTE_SUBMIX);
        builder.setAudioFormat(createAudioFormat());
        int minBufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, CHANNEL_CONFIG, FORMAT);
        builder.setBufferSizeInBytes(minBufferSize);
        builder.setBufferSizeInBytes(1024 * 1024);
        return builder.build();
    }

@@ -220,32 +211,6 @@ public final class AudioEncoder {
        }
    }

    private static void startWorkaroundAndroid11() {
        if (Build.VERSION.SDK_INT == Build.VERSION_CODES.R) {
            // Android 11 requires Apps to be at foreground to record audio.
            // Normally, each App has its own user ID, so Android checks whether the requesting App has the user ID that's at the foreground.
            // But Scrcpy server is NOT an App, it's a Java application started from Android shell, so it has the same user ID (2000) with Android
            // shell ("com.android.shell").
            // If there is an Activity from Android shell running at foreground, then the permission system will believe Scrcpy is also in the
            // foreground.
        if (Build.VERSION.SDK_INT == Build.VERSION_CODES.R) {
            Intent intent = new Intent(Intent.ACTION_MAIN);
            intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
            intent.addCategory(Intent.CATEGORY_LAUNCHER);
            intent.setComponent(new ComponentName(FakeContext.PACKAGE_NAME, "com.android.shell.HeapDumpActivity"));
            ServiceManager.getActivityManager().startActivityAsUserWithFeature(intent);
            // Wait for activity to start
            SystemClock.sleep(150);
        }
        }
    }

    private static void stopWorkaroundAndroid11() {
        if (Build.VERSION.SDK_INT == Build.VERSION_CODES.R) {
            ServiceManager.getActivityManager().forceStopPackage(FakeContext.PACKAGE_NAME);
        }
    }

    @TargetApi(Build.VERSION_CODES.M)
    public void encode() throws IOException {
        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.R) {
@@ -263,6 +228,7 @@ public final class AudioEncoder {
        try {
            Codec codec = streamer.getCodec();
            mediaCodec = createMediaCodec(codec, encoderName);
            recorder = createAudioRecord();

            mediaCodecThread = new HandlerThread("AudioEncoder");
            mediaCodecThread.start();
@@ -271,19 +237,7 @@ public final class AudioEncoder {
            mediaCodec.setCallback(new EncoderCallback(), new Handler(mediaCodecThread.getLooper()));
            mediaCodec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);

            startWorkaroundAndroid11();
            try {
                recorder = createAudioRecord();
                recorder.startRecording();
            } catch (UnsupportedOperationException e) {
                if (Build.VERSION.SDK_INT == Build.VERSION_CODES.R) {
                    Ln.e("Failed to start audio capture");
                    Ln.e("On Android 11, it is only possible to capture in foreground, make sure that the device is unlocked when starting scrcpy.");
                    throw new ConfigurationException("Unsupported audio capture");
                }
            } finally {
                stopWorkaroundAndroid11();
            }
            recorder.startRecording();
            recorderStarted = true;

            final MediaCodec mediaCodecRef = mediaCodec;
```
```diff
@@ -1,14 +1,8 @@
package com.genymobile.scrcpy.wrappers;

import com.genymobile.scrcpy.FakeContext;
import com.genymobile.scrcpy.Ln;

import android.annotation.SuppressLint;
import android.annotation.TargetApi;
import android.content.Intent;
import android.os.Binder;
import android.os.Build;
import android.os.Bundle;
import android.os.IBinder;
import android.os.IInterface;
import android.os.Process;
@@ -17,15 +11,12 @@ import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

@SuppressLint("PrivateApi,DiscouragedPrivateApi")
public class ActivityManager {

    private final IInterface manager;
    private Method getContentProviderExternalMethod;
    private boolean getContentProviderExternalMethodNewVersion = true;
    private Method removeContentProviderExternalMethod;
    private Method startActivityAsUserWithFeatureMethod;
    private Method forceStopPackageMethod;

    public ActivityManager(IInterface manager) {
        this.manager = manager;
@@ -52,7 +43,6 @@ public class ActivityManager {
        return removeContentProviderExternalMethod;
    }

    @TargetApi(Build.VERSION_CODES.Q)
    private ContentProvider getContentProviderExternal(String name, IBinder token) {
        try {
            Method method = getGetContentProviderExternalMethod();
@@ -95,55 +85,4 @@ public class ActivityManager {
    public ContentProvider createSettingsProvider() {
        return getContentProviderExternal("settings", new Binder());
    }

    private Method getStartActivityAsUserWithFeatureMethod() throws NoSuchMethodException, ClassNotFoundException {
        if (startActivityAsUserWithFeatureMethod == null) {
            Class<?> iApplicationThreadClass = Class.forName("android.app.IApplicationThread");
            Class<?> profilerInfo = Class.forName("android.app.ProfilerInfo");
            startActivityAsUserWithFeatureMethod = manager.getClass()
                    .getMethod("startActivityAsUserWithFeature", iApplicationThreadClass, String.class, String.class, Intent.class, String.class,
                            IBinder.class, String.class, int.class, int.class, profilerInfo, Bundle.class, int.class);
        }
        return startActivityAsUserWithFeatureMethod;
    }

    @SuppressWarnings("ConstantConditions")
    public int startActivityAsUserWithFeature(Intent intent) {
        try {
            Method method = getStartActivityAsUserWithFeatureMethod();
            return (int) method.invoke(
                    /* this */ manager,
                    /* caller */ null,
                    /* callingPackage */ FakeContext.PACKAGE_NAME,
                    /* callingFeatureId */ null,
                    /* intent */ intent,
                    /* resolvedType */ null,
                    /* resultTo */ null,
                    /* resultWho */ null,
                    /* requestCode */ 0,
                    /* startFlags */ 0,
                    /* profilerInfo */ null,
                    /* bOptions */ null,
                    /* userId */ /* UserHandle.USER_CURRENT */ -2);
        } catch (Throwable e) {
            Ln.e("Could not invoke method", e);
            return 0;
        }
    }

    private Method getForceStopPackageMethod() throws NoSuchMethodException {
        if (forceStopPackageMethod == null) {
            forceStopPackageMethod = manager.getClass().getMethod("forceStopPackage", String.class, int.class);
        }
        return forceStopPackageMethod;
    }

    public void forceStopPackage(String packageName) {
        try {
            Method method = getForceStopPackageMethod();
            method.invoke(manager, packageName, /* userId */ /* UserHandle.USER_CURRENT */ -2);
        } catch (Throwable e) {
            Ln.e("Could not invoke method", e);
        }
    }
}
```