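//! Couples the ferretro libretro wrapper with the ffmpeg4 crate: `MyEmulator`
//! pumps a core's video frames and audio samples into a pair of ffmpeg
//! encoders, while the lower half of the file (the filter-graph helper,
//! `Transcoder`, and `main`) mirrors the ffmpeg crate's transcode-audio
//! example.
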
extern crate ferretro;
extern crate ffmpeg4 as ffmpeg;
extern crate libloading;

use std::collections::VecDeque;
use std::env;
use std::path::{Path, PathBuf};
use std::pin::Pin;

use ferretro::retro;
use ferretro::retro::ffi::{PixelFormat, GameGeometry, SystemAvInfo};
use ferretro::retro::wrapper::LibretroWrapper;

use ffmpeg::{codec, filter, format, frame, media, ChannelLayout};
use ffmpeg::{rescale, Rescale};

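/// State shared with the libretro callbacks: audio samples and video frames
/// buffered as the core produces them, plus the ffmpeg encoders they are
/// eventually fed into.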
struct MyEmulator {
    retro: retro::wrapper::LibretroWrapper,
    audio_buf: Vec<(i16, i16)>,
    video_pixfmt: format::Pixel,
    video_frames: VecDeque<frame::Video>,
    video_encoder: ffmpeg::encoder::Video,
    audio_encoder: ffmpeg::encoder::Audio,
    sys_path: Option<PathBuf>,
}

impl MyEmulator {
    pub fn new(core_path: impl AsRef<Path>, sys_path: &Option<impl AsRef<Path>>, ffstream: format::stream::StreamMut) -> Pin<Box<Self>> {
        let lib = libloading::Library::new(core_path.as_ref()).unwrap();
        let raw_retro = retro::loading::LibretroApi::from_library(lib).unwrap();
        let retro = retro::wrapper::LibretroWrapper::from(raw_retro);

        let mut av_info = retro.get_system_av_info();
        let video_pixfmt = format::Pixel::RGB555;

        // TODO: `video_encoder` and `audio_encoder` must be constructed here
        // (presumably from `ffstream` and `av_info`) before they can be moved
        // into the struct literal below; that construction is missing from
        // this listing.
        let emu = MyEmulator {
            retro,
            audio_buf: Default::default(),
            video_pixfmt,
            video_frames: Default::default(),
            video_encoder,
            audio_encoder,
            sys_path: sys_path.as_ref().map(|x| x.as_ref().to_path_buf()),
        };

        let mut pin_emu = Box::pin(emu);
        retro::wrapper::set_handler(pin_emu.as_mut());
        pin_emu.retro.init();
        pin_emu
    }

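    // Advance the core by one frame, then wrap its output (the queued video
    // frame and the accumulated stereo samples) in ffmpeg frames and hand
    // them to the encoders. As written, the encoded packet `out` is not yet
    // written to any output context.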
    pub fn run(&mut self) {
        self.retro.run();

        let vframe = self.video_frames.pop_front().unwrap();
        let mut aframe = frame::Audio::new(
            format::Sample::I16(format::sample::Type::Packed),
            self.audio_buf.len(),
            ChannelLayout::STEREO
        );
        let aplane: &mut [(i16, i16)] = aframe.plane_mut(0);
        aplane.copy_from_slice(self.audio_buf.as_ref());
        self.audio_buf.clear();

        let mut out = ffmpeg::Packet::empty();
        self.video_encoder.encode(&vframe, &mut out);
        self.audio_encoder.encode(&aframe, &mut out);
    }
}

impl retro::wrapper::Handler for MyEmulator {
    fn libretro_core(&mut self) -> &mut LibretroWrapper {
        &mut self.retro
    }

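    // Copy the core's frame buffer into an ffmpeg video frame. When the
    // core's pitch matches ffmpeg's stride the whole plane is copied at once;
    // otherwise each row is copied individually.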
    fn video_refresh(&mut self, data: &[u8], width: u32, height: u32, pitch: u32) {
        let mut vframe = frame::Video::new(self.video_pixfmt, width, height);

        let stride = vframe.stride(0);
        let pitch = pitch as usize;

        let vplane = vframe.data_mut(0);
        if data.len() == vplane.len() && pitch == stride {
            vplane.copy_from_slice(data);
        } else {
            for y in 0..(height as usize) {
                let ffbegin = y * stride;
                let lrbegin = y * pitch;
                let min = usize::min(stride, pitch);
                vplane[ffbegin..(ffbegin + min)].copy_from_slice(
                    &data[lrbegin..(lrbegin + min)]
                );
            }
        }

        self.video_frames.push_back(vframe);
    }

    fn audio_sample(&mut self, left: i16, right: i16) {
        self.audio_buf.push((left, right));
    }

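    // The batch callback delivers interleaved stereo PCM; split the even
    // (left) and odd (right) samples back into pairs before buffering them.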
    fn audio_sample_batch(&mut self, stereo_pcm: &[i16]) -> usize {
        let left_iter = stereo_pcm.iter().step_by(2).cloned();
        let right_iter = stereo_pcm.iter().skip(1).step_by(2).cloned();
        self.audio_buf.extend(Iterator::zip(left_iter, right_iter));
        stereo_pcm.len()
    }

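    // Map libretro's pixel format onto the ffmpeg pixel format used when
    // allocating frames in `video_refresh`.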
    fn set_pixel_format(&mut self, format: PixelFormat) -> bool {
        self.video_pixfmt = match format {
            PixelFormat::ARGB1555 => format::Pixel::RGB555,
            PixelFormat::ARGB8888 => format::Pixel::RGB32,
            PixelFormat::RGB565 => format::Pixel::RGB565,
        };
        true
    }

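    // Keep the encoders in sync with the AV parameters reported by the core:
    // frame rate and sample rate here, frame geometry in `set_geometry`.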
    fn set_system_av_info(&mut self, system_av_info: SystemAvInfo) -> bool {
        self.video_encoder.set_frame_rate(system_av_info.timing.fps.into());
        self.audio_encoder.set_rate(system_av_info.timing.sample_rate.round() as i32);
        self.set_geometry(system_av_info.geometry);
        true
    }

    fn set_geometry(&mut self, geometry: GameGeometry) -> bool {
        self.video_encoder.set_width(geometry.base_width);
        self.video_encoder.set_height(geometry.base_height);
        self.video_encoder.set_aspect_ratio(geometry.aspect_ratio as f64);
        true
    }
}

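/// Build the audio filter graph `abuffer -> <spec> -> abuffersink`: the
/// source is configured from the decoder, and the sink is constrained to what
/// the encoder accepts (sample format, channel layout, rate, and a fixed
/// frame size for codecs without variable frame size).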
fn filter(
    spec: &str,
    decoder: &codec::decoder::Audio,
    encoder: &codec::encoder::Audio,
) -> Result<filter::Graph, ffmpeg::Error> {
    let mut filter = filter::Graph::new();

    let args = format!(
        "time_base={}:sample_rate={}:sample_fmt={}:channel_layout=0x{:x}",
        decoder.time_base(),
        decoder.rate(),
        decoder.format().name(),
        decoder.channel_layout().bits()
    );

    filter.add(&filter::find("abuffer").unwrap(), "in", &args)?;
    filter.add(&filter::find("abuffersink").unwrap(), "out", "")?;

    {
        let mut out = filter.get("out").unwrap();

        out.set_sample_format(encoder.format());
        out.set_channel_layout(encoder.channel_layout());
        out.set_sample_rate(encoder.rate());
    }

    filter.output("in", 0)?.input("out", 0)?.parse(spec)?;
    filter.validate()?;

    println!("{}", filter.dump());

    if let Some(codec) = encoder.codec() {
        if !codec
            .capabilities()
            .contains(ffmpeg::codec::capabilities::Capabilities::VARIABLE_FRAME_SIZE)
        {
            filter
                .get("out")
                .unwrap()
                .sink()
                .set_frame_size(encoder.frame_size());
        }
    }

    Ok(filter)
}

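/// Everything needed to move one audio stream from input to output: the
/// input stream index, the filter graph, and the decoder/encoder pair.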
struct Transcoder {
    stream: usize,
    filter: filter::Graph,
    decoder: codec::decoder::Audio,
    encoder: codec::encoder::Audio,
}

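/// Pick the best audio stream of the input, set up a decoder for it, add a
/// matching stream and encoder to the output (mirroring the decoder's rate,
/// channel layout and bit rate), and wire both into a filter graph built
/// from `filter_spec`.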
fn transcoder<P: AsRef<Path>>(
    ictx: &mut format::context::Input,
    octx: &mut format::context::Output,
    path: &P,
    filter_spec: &str,
) -> Result<Transcoder, ffmpeg::Error> {
    let input = ictx
        .streams()
        .best(media::Type::Audio)
        .expect("could not find best audio stream");
    let mut decoder = input.codec().decoder().audio()?;
    let codec = ffmpeg::encoder::find(octx.format().codec(path, media::Type::Audio))
        .expect("failed to find encoder")
        .audio()?;
    let global = octx
        .format()
        .flags()
        .contains(ffmpeg::format::flag::Flags::GLOBAL_HEADER);

    decoder.set_parameters(input.parameters())?;

    let mut output = octx.add_stream(codec)?;
    let mut encoder = output.codec().encoder().audio()?;

    let channel_layout = codec
        .channel_layouts()
        .map(|cls| cls.best(decoder.channel_layout().channels()))
        .unwrap_or(ffmpeg::channel_layout::ChannelLayout::STEREO);

    if global {
        encoder.set_flags(ffmpeg::codec::flag::Flags::GLOBAL_HEADER);
    }

    encoder.set_rate(decoder.rate() as i32);
    encoder.set_channel_layout(channel_layout);
    encoder.set_channels(channel_layout.channels());
    encoder.set_format(
        codec
            .formats()
            .expect("unknown supported formats")
            .next()
            .unwrap(),
    );
    encoder.set_bit_rate(decoder.bit_rate());
    encoder.set_max_bit_rate(decoder.max_bit_rate());

    encoder.set_time_base((1, decoder.rate() as i32));
    output.set_time_base((1, decoder.rate() as i32));

    let encoder = encoder.open_as(codec)?;
    output.set_parameters(&encoder);

    let filter = filter(filter_spec, &decoder, &encoder)?;

    Ok(Transcoder {
        stream: input.index(),
        filter,
        decoder,
        encoder,
    })
}

// Transcode the `best` audio stream of the input file into the output file while applying a
// given filter. If no filter is specified, the stream is copied (`anull` filter).
//
// Example 1: Transcode *.mp3 file to *.wmv while speeding it up
// transcode-audio in.mp3 out.wmv "atempo=1.2"
//
// Example 2: Overlay an audio file
// transcode-audio in.mp3 out.mp3 "amovie=overlay.mp3 [ov]; [in][ov] amerge [out]"
//
// Example 3: Seek to a specified position (in seconds)
// transcode-audio in.mp3 out.mp3 anull 30
fn main() {
    ffmpeg::init().unwrap();

    let input = env::args().nth(1).expect("missing input");
    let output = env::args().nth(2).expect("missing output");
    let filter = env::args().nth(3).unwrap_or_else(|| "anull".to_owned());
    let seek = env::args().nth(4).and_then(|s| s.parse::<i64>().ok());

    let mut ictx = format::input(&input).unwrap();
    let mut octx = format::output(&output).unwrap();
    let mut transcoder = transcoder(&mut ictx, &mut octx, &output, &filter).unwrap();

    if let Some(position) = seek {
        // If the position was given in seconds, rescale it to ffmpeg's base time base.
        let position = position.rescale((1, 1), rescale::TIME_BASE);
        // If this seek were embedded in the transcoding loop, a call to `flush()`
        // for every opened buffer would be advisable after the successful seek.
        ictx.seek(position, ..position).unwrap();
    }

    octx.set_metadata(ictx.metadata().to_owned());
    octx.write_header().unwrap();

    let in_time_base = transcoder.decoder.time_base();
    let out_time_base = octx.stream(0).unwrap().time_base();

    let mut decoded = frame::Audio::empty();
    let mut encoded = ffmpeg::Packet::empty();

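    // Demux the input: for every packet of the selected audio stream, decode
    // it, push the decoded frame through the filter graph, and encode and
    // interleave whatever the sink produces.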
    for (stream, mut packet) in ictx.packets() {
        if stream.index() == transcoder.stream {
            packet.rescale_ts(stream.time_base(), in_time_base);

            if let Ok(true) = transcoder.decoder.decode(&packet, &mut decoded) {
                let timestamp = decoded.timestamp();
                decoded.set_pts(timestamp);

                transcoder
                    .filter
                    .get("in")
                    .unwrap()
                    .source()
                    .add(&decoded)
                    .unwrap();

                while let Ok(..) = transcoder
                    .filter
                    .get("out")
                    .unwrap()
                    .sink()
                    .frame(&mut decoded)
                {
                    if let Ok(true) = transcoder.encoder.encode(&decoded, &mut encoded) {
                        encoded.set_stream(0);
                        encoded.rescale_ts(in_time_base, out_time_base);
                        encoded.write_interleaved(&mut octx).unwrap();
                    }
                }
            }
        }
    }

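    // End of input: flush the filter graph, drain any frames still buffered
    // in the sink, then flush the encoder itself before writing the trailer.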
    transcoder
        .filter
        .get("in")
        .unwrap()
        .source()
        .flush()
        .unwrap();

    while let Ok(..) = transcoder
        .filter
        .get("out")
        .unwrap()
        .sink()
        .frame(&mut decoded)
    {
        if let Ok(true) = transcoder.encoder.encode(&decoded, &mut encoded) {
            encoded.set_stream(0);
            encoded.rescale_ts(in_time_base, out_time_base);
            encoded.write_interleaved(&mut octx).unwrap();
        }
    }

    if let Ok(true) = transcoder.encoder.flush(&mut encoded) {
        encoded.set_stream(0);
        encoded.rescale_ts(in_time_base, out_time_base);
        encoded.write_interleaved(&mut octx).unwrap();
    }

    octx.write_trailer().unwrap();
}