VoiceMac/Classes/FFmpeg/MGMFFmpeg.m

//
// MGMFFmpeg.m
// VoiceBase
//
// Created by Mr. Gecko on 2/25/11.
// MGMFFmpeg is a port of ffmpeg.c to Objective-C by Mr. Gecko's Media
// (James Coleman). FFmpeg can be found at http://ffmpeg.org/
// FFmpeg Copyright (c) 2000-2003 Fabrice Bellard
//
// MGMFFmpeg is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// MGMFFmpeg is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with MGMFFmpeg; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
#if MGMSIPENABLED
#import "MGMFFmpeg.h"
NSString * const MGMFFmpegErrorDomain = @"com.MrGeckosMedia.FFmpeg";
static BOOL FFmpegRegistered = NO;
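// The MGMPrivate category below mirrors the static helper functions of
// ffmpeg.c; OptionDef entries reference these selectors, which parse_options:
// invokes through NSInvocation in place of ffmpeg's C function pointers.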
@interface MGMFFmpeg (MGMPrivate)
- (void)parse_options:(int)argc argv:(char **)argv parseSelector:(SEL)parseArgument;
- (void)opt_output_file:(const char *)filename;
- (void)opt_input_file:(const char *)filename;
- (int)read_ffserver_streams:(AVFormatContext *)s file:(const char *)filename;
- (void)choose_sample_fmt:(AVStream *)st codec:(AVCodec *)codec;
- (void)choose_pixel_fmt:(AVStream *)st codec:(AVCodec *)codec;
- (void)new_video_stream:(AVFormatContext *)oc;
- (void)new_audio_stream:(AVFormatContext *)oc;
- (void)new_subtitle_stream:(AVFormatContext *)oc;
- (void)opt_video_codec:(const char *)arg;
- (void)opt_audio_codec:(const char *)arg;
- (void)opt_format:(const char *)arg;
- (void)opt_frame_size:(const char *)arg;
- (int)opt_frame_rate:(const char *)opt arg:(const char *)arg;
- (int)opt_default:(const char *)opt arg:(const char *)arg;
- (void)opt_frame_pix_fmt:(const char *)arg;
- (void)set_context_opts:(void *)ctx options:(void *)opts_ctx flags:(int)flags;
- (void)check_audio_video_sub_inputs:(int *)has_video_ptr audio:(int *)has_audio_ptr subs:(int *)has_subtitle_ptr;
- (enum CodecID)find_codec_or_die:(const char *)name type:(int)type encoder:(int)encoder strict:(int)strict;
- (int64_t)parse_time_or_die:(const char *)context time:(const char *)timestr duration:(int)is_duration;
- (double)parse_number_or_die:(const char *)context number:(const char *)numstr type:(int)type min:(double)min max:(double)max;
- (void)parse_matrix_coeffs:(uint16_t *)dest matrix:(const char *)str;
- (void)opt_codec:(int *)pstream_copy name:(char **)pcodec_name type:(int)codec_type arg:(const char *)arg;
@end
@implementation MGMFFmpeg
+ (id)FFmpeg {
return [[[self alloc] init] autorelease];
}
- (id)init {
if ((self = [super init])) {
last_asked_format = NULL;
nb_input_files = 0;
nb_output_files = 0;
frame_width = 0;
frame_height = 0;
frame_aspect_ratio = 0;
frame_pix_fmt = PIX_FMT_NONE;
audio_sample_fmt = SAMPLE_FMT_NONE;
frame_padtop = 0;
frame_padbottom = 0;
frame_padleft = 0;
frame_padright = 0;
padcolor[0] = 16;
padcolor[1] = 128;
padcolor[2] = 128;
frame_topBand = 0;
frame_bottomBand = 0;
frame_leftBand = 0;
frame_rightBand = 0;
max_frames[0] = INT_MAX;
max_frames[1] = INT_MAX;
max_frames[2] = INT_MAX;
max_frames[3] = INT_MAX;
video_qscale = 0;
intra_matrix = NULL;
inter_matrix = NULL;
video_rc_override_string = NULL;
video_disable = 0;
video_discard = 0;
video_codec_name = NULL;
video_codec_tag = 0;
video_language = NULL;
same_quality = 0;
do_deinterlace = 0;
top_field_first = -1;
me_threshold = 0;
intra_dc_precision = 8;
loop_input = 0;
loop_output = AVFMT_NOOUTPUTLOOP;
qp_hist = 0;
intra_only = 0;
audio_sample_rate = 44100;
channel_layout = 0;
audio_qscale = QSCALE_NONE;
audio_disable = 0;
audio_channels = 1;
audio_codec_name = NULL;
audio_codec_tag = 0;
audio_language = NULL;
subtitle_disable = 0;
subtitle_codec_name = NULL;
subtitle_language = NULL;
subtitle_codec_tag = 0;
mux_preload = 0.5;
mux_max_delay = 0.7;
recording_time = INT64_MAX;
start_time = 0;
rec_timestamp = 0;
input_ts_offset = 0;
do_benchmark = 0;
do_hex_dump = 0;
do_pkt_dump = 0;
do_psnr = 0;
do_pass = 0;
pass_logfilename_prefix = NULL;
audio_stream_copy = 0;
video_stream_copy = 0;
subtitle_stream_copy = 0;
video_sync_method = -1;
audio_sync_method = 0;
audio_drift_threshold = 0.1;
copy_ts = 0;
opt_shortest = 0;
video_global_header = 0;
opt_programid = 0;
copy_initial_nonkeyframes = 0;
rate_emu = 0;
video_channel = 0;
audio_volume = 256;
exit_on_error = 0;
verbose = 1;
thread_count = 1;
video_size = 0;
audio_size = 0;
extra_size = 0;
nb_frames_dup = 0;
nb_frames_drop = 0;
limit_filesize = 0;
force_fps = 0;
pgmyuv_compatibility_hack = 0;
dts_delta_threshold = 10;
sws_flags = SWS_BICUBIC;
samples_size = 0;
video_bitstream_filters = NULL;
audio_bitstream_filters = NULL;
subtitle_bitstream_filters = NULL;
bit_buffer_size = 1024*256;
bit_buffer = NULL;
last_time = -1;
subtitle_out = NULL;
input_tmp = NULL;
stopConverting = NO;
#if !TARGET_OS_IPHONE
stoppedByQuit = NO;
#endif
isConverting = NO;
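// Option table ported from ffmpeg.c's options[] array. Entries flagged
// OPT_BOOL/OPT_INT/OPT_INT64/OPT_FLOAT/OPT_STRING point directly at instance
// variables; the remaining entries carry a selector that parse_options:
// invokes. The NULL-named entry at the end is the sentinel find_option:
// stops on.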
options[0] = (OptionDef){ "f", HAS_ARG, {@selector(opt_format:)}, "force format", "fmt" };
options[1] = (OptionDef){ "i", HAS_ARG, {@selector(opt_input_file:)}, "input file name", "filename" };
options[2] = (OptionDef){ "map", HAS_ARG | OPT_EXPERT, {@selector(opt_map:)}, "set input stream mapping", "file:stream[:syncfile:syncstream]" };
options[3] = (OptionDef){ "map_meta_data", HAS_ARG | OPT_EXPERT, {@selector(opt_map_meta_data:)}, "set meta data information of outfile from infile", "outfile:infile" };
options[4] = (OptionDef){ "t", OPT_FUNC2 | HAS_ARG, {@selector(opt_recording_time:arg:)}, "record or transcode \"duration\" seconds of audio/video", "duration" };
options[5] = (OptionDef){ "fs", HAS_ARG | OPT_INT64, {(void*)&limit_filesize}, "set the limit file size in bytes", "limit_size" };
options[6] = (OptionDef){ "ss", OPT_FUNC2 | HAS_ARG, {@selector(opt_start_time:arg:)}, "set the start time offset", "time_off" };
options[7] = (OptionDef){ "itsoffset", OPT_FUNC2 | HAS_ARG, {@selector(opt_input_ts_offset:arg:)}, "set the input ts offset", "time_off" };
options[8] = (OptionDef){ "itsscale", HAS_ARG, {@selector(opt_input_ts_scale:)}, "set the input ts scale", "stream:scale" };
options[9] = (OptionDef){ "timestamp", OPT_FUNC2 | HAS_ARG, {@selector(opt_rec_timestamp:arg:)}, "set the timestamp ('now' to set the current time)", "time" };
options[10] = (OptionDef){ "metadata", OPT_FUNC2 | HAS_ARG, {@selector(opt_metadata:arg:)}, "add metadata", "string=string" };
options[11] = (OptionDef){ "dframes", OPT_INT | HAS_ARG, {(void*)&max_frames[AVMEDIA_TYPE_DATA]}, "set the number of data frames to record", "number" };
options[12] = (OptionDef){ "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark}, "add timings for benchmarking" };
options[13] = (OptionDef){ "timelimit", OPT_FUNC2 | HAS_ARG, {@selector(opt_timelimit:arg:)}, "set max runtime in seconds", "limit" };
options[14] = (OptionDef){ "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump}, "dump each input packet" };
options[15] = (OptionDef){ "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump}, "when dumping packets, also dump the payload" };
options[16] = (OptionDef){ "re", OPT_BOOL | OPT_EXPERT, {(void*)&rate_emu}, "read input at native frame rate", "" };
options[17] = (OptionDef){ "loop_input", OPT_BOOL | OPT_EXPERT, {(void*)&loop_input}, "loop (currently only works with images)" };
options[18] = (OptionDef){ "loop_output", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&loop_output}, "number of times to loop output in formats that support looping (0 loops forever)", "" };
options[19] = (OptionDef){ "v", HAS_ARG | OPT_FUNC2, {@selector(opt_verbose:arg:)}, "set ffmpeg verbosity level", "number" };
options[20] = (OptionDef){ "target", HAS_ARG, {@selector(opt_target:)}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" };
options[21] = (OptionDef){ "threads", OPT_FUNC2 | HAS_ARG | OPT_EXPERT, {@selector(opt_thread_count:arg:)}, "thread count", "count" };
options[22] = (OptionDef){ "vsync", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&video_sync_method}, "video sync method", "" };
options[23] = (OptionDef){ "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" };
options[24] = (OptionDef){ "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" };
options[25] = (OptionDef){ "vglobal", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&video_global_header}, "video global header storage type", "" };
options[26] = (OptionDef){ "copyts", OPT_BOOL | OPT_EXPERT, {(void*)&copy_ts}, "copy timestamps" };
options[27] = (OptionDef){ "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" };
options[28] = (OptionDef){ "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" };
options[29] = (OptionDef){ "programid", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&opt_programid}, "desired program number", "" };
options[30] = (OptionDef){ "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" };
options[31] = (OptionDef){ "copyinkf", OPT_BOOL | OPT_EXPERT, {(void*)&copy_initial_nonkeyframes}, "copy initial non-keyframes" };
/* video options */
options[32] = (OptionDef){ "b", OPT_FUNC2 | HAS_ARG | OPT_VIDEO, {@selector(opt_bitrate:arg:)}, "set bitrate (in bits/s)", "bitrate" };
options[33] = (OptionDef){ "vb", OPT_FUNC2 | HAS_ARG | OPT_VIDEO, {@selector(opt_bitrate:arg:)}, "set bitrate (in bits/s)", "bitrate" };
options[34] = (OptionDef){ "vframes", OPT_INT | HAS_ARG | OPT_VIDEO, {(void*)&max_frames[AVMEDIA_TYPE_VIDEO]}, "set the number of video frames to record", "number" };
options[35] = (OptionDef){ "r", OPT_FUNC2 | HAS_ARG | OPT_VIDEO, {@selector(opt_frame_rate:arg:)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" };
options[36] = (OptionDef){ "s", HAS_ARG | OPT_VIDEO, {@selector(opt_frame_size:)}, "set frame size (WxH or abbreviation)", "size" };
options[37] = (OptionDef){ "aspect", HAS_ARG | OPT_VIDEO, {@selector(opt_frame_aspect_ratio:)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" };
options[38] = (OptionDef){ "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {@selector(opt_frame_pix_fmt:)}, "set pixel format, 'list' as argument shows all the pixel formats supported", "format" };
options[39] = (OptionDef){ "croptop", HAS_ARG | OPT_VIDEO, {@selector(opt_frame_crop_top:)}, "set top crop band size (in pixels)", "size" };
options[40] = (OptionDef){ "cropbottom", HAS_ARG | OPT_VIDEO, {@selector(opt_frame_crop_bottom:)}, "set bottom crop band size (in pixels)", "size" };
options[41] = (OptionDef){ "cropleft", HAS_ARG | OPT_VIDEO, {@selector(opt_frame_crop_left:)}, "set left crop band size (in pixels)", "size" };
options[42] = (OptionDef){ "cropright", HAS_ARG | OPT_VIDEO, {@selector(opt_frame_crop_right:)}, "set right crop band size (in pixels)", "size" };
options[43] = (OptionDef){ "padtop", HAS_ARG | OPT_VIDEO, {@selector(opt_frame_pad_top:)}, "set top pad band size (in pixels)", "size" };
options[44] = (OptionDef){ "padbottom", HAS_ARG | OPT_VIDEO, {@selector(opt_frame_pad_bottom:)}, "set bottom pad band size (in pixels)", "size" };
options[45] = (OptionDef){ "padleft", HAS_ARG | OPT_VIDEO, {@selector(opt_frame_pad_left:)}, "set left pad band size (in pixels)", "size" };
options[46] = (OptionDef){ "padright", HAS_ARG | OPT_VIDEO, {@selector(opt_frame_pad_right:)}, "set right pad band size (in pixels)", "size" };
options[47] = (OptionDef){ "padcolor", HAS_ARG | OPT_VIDEO, {@selector(opt_pad_color:)}, "set color of pad bands (Hex 000000 thru FFFFFF)", "color" };
options[48] = (OptionDef){ "intra", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_only}, "use only intra frames"};
options[49] = (OptionDef){ "vn", OPT_BOOL | OPT_VIDEO, {(void*)&video_disable}, "disable video" };
options[50] = (OptionDef){ "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" };
options[51] = (OptionDef){ "qscale", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {@selector(opt_qscale:)}, "use fixed video quantizer scale (VBR)", "q" };
options[52] = (OptionDef){ "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {@selector(opt_video_rc_override_string:)}, "rate control override for specific intervals", "override" };
options[53] = (OptionDef){ "vcodec", HAS_ARG | OPT_VIDEO, {@selector(opt_video_codec:)}, "force video codec ('copy' to copy stream)", "codec" };
options[54] = (OptionDef){ "me_threshold", HAS_ARG | OPT_FUNC2 | OPT_EXPERT | OPT_VIDEO, {@selector(opt_me_threshold:arg:)}, "motion estimation threshold", "threshold" };
options[55] = (OptionDef){ "sameq", OPT_BOOL | OPT_VIDEO, {(void*)&same_quality}, "use same video quality as source (implies VBR)" };
options[56] = (OptionDef){ "pass", HAS_ARG | OPT_VIDEO, {@selector(opt_pass:)}, "select the pass number (1 or 2)", "n" };
options[57] = (OptionDef){ "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" };
options[58] = (OptionDef){ "deinterlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_deinterlace}, "deinterlace pictures" };
options[59] = (OptionDef){ "psnr", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_psnr}, "calculate PSNR of compressed frames" };
options[60] = (OptionDef){ "vstats", OPT_EXPERT | OPT_VIDEO, {@selector(opt_vstats)}, "dump video coding statistics to file" };
options[61] = (OptionDef){ "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {@selector(opt_vstats_file:)}, "dump video coding statistics to file", "file" };
options[62] = (OptionDef){ "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {@selector(opt_intra_matrix:)}, "specify intra matrix coeffs", "matrix" };
options[63] = (OptionDef){ "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {@selector(opt_inter_matrix:)}, "specify inter matrix coeffs", "matrix" };
options[64] = (OptionDef){ "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {@selector(opt_top_field_first:)}, "top=1/bottom=0/auto=-1 field first", "" };
options[65] = (OptionDef){ "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" };
options[66] = (OptionDef){ "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {@selector(opt_video_tag:)}, "force video tag/fourcc", "fourcc/tag" };
options[67] = (OptionDef){ "newvideo", OPT_VIDEO, {@selector(opt_new_video_stream)}, "add a new video stream to the current output stream" };
options[68] = (OptionDef){ "vlang", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void *)&video_language}, "set the ISO 639 language code (3 letters) of the current video stream" , "code" };
options[69] = (OptionDef){ "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" };
options[70] = (OptionDef){ "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&force_fps}, "force the selected framerate, disable the best supported framerate selection" };
/* audio options */
options[71] = (OptionDef){ "ab", OPT_FUNC2 | HAS_ARG | OPT_AUDIO, {@selector(opt_bitrate:arg:)}, "set bitrate (in bits/s)", "bitrate" };
options[72] = (OptionDef){ "aframes", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&max_frames[AVMEDIA_TYPE_AUDIO]}, "set the number of audio frames to record", "number" };
options[73] = (OptionDef){ "aq", OPT_FLOAT | HAS_ARG | OPT_AUDIO, {(void*)&audio_qscale}, "set audio quality (codec-specific)", "quality", };
options[74] = (OptionDef){ "ar", HAS_ARG | OPT_FUNC2 | OPT_AUDIO, {@selector(opt_audio_rate:arg:)}, "set audio sampling rate (in Hz)", "rate" };
options[75] = (OptionDef){ "ac", HAS_ARG | OPT_FUNC2 | OPT_AUDIO, {@selector(opt_audio_channels:arg:)}, "set number of audio channels", "channels" };
options[76] = (OptionDef){ "an", OPT_BOOL | OPT_AUDIO, {(void*)&audio_disable}, "disable audio" };
options[77] = (OptionDef){ "acodec", HAS_ARG | OPT_AUDIO, {@selector(opt_audio_codec:)}, "force audio codec ('copy' to copy stream)", "codec" };
options[78] = (OptionDef){ "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO, {@selector(opt_audio_tag:)}, "force audio tag/fourcc", "fourcc/tag" };
options[79] = (OptionDef){ "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" };
options[80] = (OptionDef){ "newaudio", OPT_AUDIO, {@selector(opt_new_audio_stream)}, "add a new audio stream to the current output stream" };
options[81] = (OptionDef){ "alang", HAS_ARG | OPT_STRING | OPT_AUDIO, {(void *)&audio_language}, "set the ISO 639 language code (3 letters) of the current audio stream" , "code" };
options[82] = (OptionDef){ "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO, {@selector(opt_audio_sample_fmt:)}, "set sample format, 'list' as argument shows all the sample formats supported", "format" };
/* subtitle options */
options[83] = (OptionDef){ "sn", OPT_BOOL | OPT_SUBTITLE, {(void*)&subtitle_disable}, "disable subtitle" };
options[84] = (OptionDef){ "scodec", HAS_ARG | OPT_SUBTITLE, {@selector(opt_subtitle_codec:)}, "force subtitle codec ('copy' to copy stream)", "codec" };
options[85] = (OptionDef){ "newsubtitle", OPT_SUBTITLE, {@selector(opt_new_subtitle_stream)}, "add a new subtitle stream to the current output stream" };
options[86] = (OptionDef){ "slang", HAS_ARG | OPT_STRING | OPT_SUBTITLE, {(void *)&subtitle_language}, "set the ISO 639 language code (3 letters) of the current subtitle stream" , "code" };
options[87] = (OptionDef){ "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE, {@selector(opt_subtitle_tag:)}, "force subtitle tag/fourcc", "fourcc/tag" };
/* grab options */
options[88] = (OptionDef){ "vc", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {@selector(opt_video_channel:)}, "set video grab channel (DV1394 only)", "channel" };
options[89] = (OptionDef){ "tvstd", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {@selector(opt_video_standard:)}, "set television standard (NTSC, PAL (SECAM))", "standard" };
options[90] = (OptionDef){ "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" };
/* muxer options */
options[91] = (OptionDef){ "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT, {(void*)&mux_max_delay}, "set the maximum demux-decode delay", "seconds" };
options[92] = (OptionDef){ "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT, {(void*)&mux_preload}, "set the initial demux-decode delay", "seconds" };
options[93] = (OptionDef){ "absf", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_EXPERT, {@selector(opt_bsf:arg:)}, "", "bitstream_filter" };
options[94] = (OptionDef){ "vbsf", OPT_FUNC2 | HAS_ARG | OPT_VIDEO | OPT_EXPERT, {@selector(opt_bsf:arg:)}, "", "bitstream_filter" };
options[95] = (OptionDef){ "sbsf", OPT_FUNC2 | HAS_ARG | OPT_SUBTITLE | OPT_EXPERT, {@selector(opt_bsf:arg:)}, "", "bitstream_filter" };
options[96] = (OptionDef){ "apre", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_EXPERT, {@selector(opt_preset:arg:)}, "set the audio options to the indicated preset", "preset" };
options[97] = (OptionDef){ "vpre", OPT_FUNC2 | HAS_ARG | OPT_VIDEO | OPT_EXPERT, {@selector(opt_preset:arg:)}, "set the video options to the indicated preset", "preset" };
options[98] = (OptionDef){ "spre", OPT_FUNC2 | HAS_ARG | OPT_SUBTITLE | OPT_EXPERT, {@selector(opt_preset:arg:)}, "set the subtitle options to the indicated preset", "preset" };
options[99] = (OptionDef){ "fpre", OPT_FUNC2 | HAS_ARG | OPT_EXPERT, {@selector(opt_preset:arg:)}, "set options from indicated preset file", "filename" };
options[100] = (OptionDef){ "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {@selector(opt_default:arg:)}, "generic catch all option", "" };
options[101] = (OptionDef){ NULL, };
if (!FFmpegRegistered) {
FFmpegRegistered = YES;
NSLog(@"Registered");
avcodec_register_all();
av_register_all();
}
for(int i=0; i<AVMEDIA_TYPE_NB; i++){
avcodec_opts[i]= avcodec_alloc_context2(i);
}
avformat_opts = avformat_alloc_context();
sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
}
return self;
}
+ (id)FFmpegWithDelegate:(id)theDelegate {
return [[[self alloc] initWithDelegate:theDelegate] autorelease];
}
- (id)initWithDelegate:(id)theDelegate {
if ((self = [self init])) {
delegate = theDelegate;
}
return self;
}
- (void)dealloc {
NSLog(@"Releasing FFmpeg");
if (isConverting) {
stopConverting = YES;
while (isConverting) {
[NSThread sleepUntilDate:[NSDate dateWithTimeIntervalSinceNow:1]];
}
}
/* close files */
for (int i=0; i<nb_output_files; i++) {
/* maybe av_close_output_file ??? */
AVFormatContext *s = output_files[i];
int j;
if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
url_fclose(s->pb);
for(j=0;j<s->nb_streams;j++) {
av_metadata_free(&s->streams[j]->metadata);
av_free(s->streams[j]->codec);
av_free(s->streams[j]);
}
for(j=0;j<s->nb_programs;j++) {
av_metadata_free(&s->programs[j]->metadata);
}
for(j=0;j<s->nb_chapters;j++) {
av_metadata_free(&s->chapters[j]->metadata);
}
av_metadata_free(&s->metadata);
av_free(s);
}
for(int i=0;i<nb_input_files;i++)
av_close_input_file(input_files[i]);
av_free(intra_matrix);
av_free(inter_matrix);
av_free(metadata);
av_free(pass_logfilename_prefix);
if (vstats_file)
fclose(vstats_file);
av_free(vstats_filename);
av_free(opt_names);
av_free(video_codec_name);
av_free(audio_codec_name);
av_free(subtitle_codec_name);
av_free(video_standard);
#if CONFIG_POWERPC_PERF
void powerpc_display_perf_report(void);
powerpc_display_perf_report();
#endif /* CONFIG_POWERPC_PERF */
for (int i=0;i<AVMEDIA_TYPE_NB;i++)
av_free(avcodec_opts[i]);
av_free(avformat_opts);
av_free(sws_opts);
av_free(audio_buf);
av_free(audio_out);
allocated_audio_buf_size= allocated_audio_out_size= 0;
av_free(samples);
av_free(subtitle_out);
av_free(input_tmp);
if (last_asked_format!=NULL)
free(last_asked_format);
if (video_rc_override_string!=NULL)
free(video_rc_override_string);
[super dealloc];
}
- (void)setDelegate:(id)theDelegate {
delegate = theDelegate;
}
- (id<MGMFFmpegDelegate>)delegate {
return delegate;
}
- (BOOL)isConverting {
return isConverting;
}
- (void)stopConverting {
if (isConverting)
stopConverting = YES;
}
- (void)sendErrorCode:(int)theCode message:(NSString *)format, ... {
va_list ap;
va_start(ap, format);
NSString *message = [[[NSString alloc] initWithFormat:format arguments:ap] autorelease];
va_end(ap);
NSError *error = [NSError errorWithDomain:MGMFFmpegErrorDomain code:theCode userInfo:[NSDictionary dictionaryWithObject:message forKey:NSLocalizedDescriptionKey]];
if ([delegate respondsToSelector:@selector(receivedError:)])
[delegate receivedError:error];
else
NSLog(@"%@", error);
}
- (void)sendErrorCode:(int)theCode reference:(const char *)theReference message:(int)theError {
char errbuf[128];
const char *errbuf_ptr = errbuf;
if (av_strerror(theError, errbuf, sizeof(errbuf)) < 0)
errbuf_ptr = strerror(AVUNERROR(theError));
[self sendErrorCode:theCode message:@"%s: %s", theReference, errbuf_ptr];
}
- (void)setOptions:(NSArray *)theOptions {
if (isConverting)
return;
char **array = malloc(sizeof(char *)*([theOptions count]+1));
array[0] = (char *)"ffmpeg";
for (int i=0; i<[theOptions count]; i++) {
array[i+1] = (char *)[[theOptions objectAtIndex:i] UTF8String];
}
[self parse_options:(int)[theOptions count]+1 argv:array parseSelector:@selector(opt_output_file:)];
free(array);
}
- (void)setOutputFile:(NSString *)theFile {
if (isConverting)
return;
[self opt_output_file:[theFile UTF8String]];
}
- (void)setOutputHandle:(NSFileHandle *)theHandle {
NSString *file = [NSString stringWithFormat:@"pipe:%d", [theHandle fileDescriptor]];
[self setOutputFile:file];
}
- (void)setInputFile:(NSString *)theFile {
if (isConverting)
return;
[self opt_input_file:[theFile UTF8String]];
}
- (void)setInputHandle:(NSFileHandle *)theHandle {
NSString *file = [NSString stringWithFormat:@"pipe:%d", [theHandle fileDescriptor]];
[self setInputFile:file];
}
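// Minimal usage sketch (manual retain/release, matching this file; assumes
// the caller adopts MGMFFmpegDelegate and that the paths are placeholders).
// The order mirrors the ffmpeg command line: input first, then output
// options, then the output file. How the conversion is actually started is
// not shown in this portion of the file.
//
//   MGMFFmpeg *ffmpeg = [MGMFFmpeg FFmpegWithDelegate:self];
//   [ffmpeg setInputFile:@"/tmp/recording.mp3"];
//   [ffmpeg setOptions:[NSArray arrayWithObjects:@"-ar", @"8000", @"-ac", @"1", nil]];
//   [ffmpeg setOutputFile:@"/tmp/recording.wav"];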
- (OptionDef *)find_option:(const OptionDef *)po name:(const char *)name {
while (po->name != NULL) {
if (!strcmp(name, po->name))
break;
po++;
}
return (OptionDef *)po;
}
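// Port of parse_options() from ffmpeg's cmdutils.c: walks argv, looks each
// "-opt" up in the table above, stores OPT_BOOL/OPT_INT/OPT_INT64/OPT_FLOAT/
// OPT_STRING values directly into the referenced variables, and dispatches
// selector-based options through NSInvocation. Bare (non-option) arguments
// are forwarded to parseSelector, which is opt_output_file: when invoked
// from setOptions:.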
- (void)parse_options:(int)argc argv:(char **)argv parseSelector:(SEL)parseArgument {
const char *opt, *arg;
int optindex, handleoptions=1;
const OptionDef *po;
/* parse options */
optindex = 1;
while (optindex < argc) {
opt = argv[optindex++];
if (handleoptions && opt[0] == '-' && opt[1] != '\0') {
int bool_val = 1;
if (opt[1] == '-' && opt[2] == '\0') {
handleoptions = 0;
continue;
}
opt++;
po = [self find_option:(const OptionDef *)&options name:opt];
if (!po->name && opt[0] == 'n' && opt[1] == 'o') {
/* handle 'no' bool option */
po = [self find_option:(const OptionDef *)&options name:opt + 2];
if (!(po->name && (po->flags & OPT_BOOL)))
goto unknown_opt;
bool_val = 0;
}
if (!po->name)
po = [self find_option:(const OptionDef *)&options name:"default"];
if (!po->name) {
unknown_opt:
[self sendErrorCode:86 message:@"%s: unrecognized option '%s'", argv[0], opt];
return;
}
arg = NULL;
if (po->flags & HAS_ARG) {
arg = argv[optindex++];
if (!arg) {
[self sendErrorCode:87 message:@"%s: missing argument for option '%s'", argv[0], opt];
return;
}
}
if (po->flags & OPT_STRING) {
char *str;
str = av_strdup(arg);
*po->u.str_arg = str;
} else if (po->flags & OPT_BOOL) {
*po->u.int_arg = bool_val;
} else if (po->flags & OPT_INT) {
*po->u.int_arg = [self parse_number_or_die:opt number:arg type:OPT_INT64 min:INT_MIN max:INT_MAX];
} else if (po->flags & OPT_INT64) {
*po->u.int64_arg = [self parse_number_or_die:opt number:arg type:OPT_INT64 min:INT64_MIN max:INT64_MAX];
} else if (po->flags & OPT_FLOAT) {
*po->u.float_arg = [self parse_number_or_die:opt number:arg type:OPT_FLOAT min:-1.0/0.0 max:1.0/0.0];
} else if (po->flags & OPT_FUNC2) {
if (po->u.func_arg!=NULL) {
NSMethodSignature *signature = [self methodSignatureForSelector:po->u.func_arg];
if (signature!=nil) {
NSInvocation *invocation = [NSInvocation invocationWithMethodSignature:signature];
[invocation setSelector:po->u.func_arg];
[invocation setArgument:&opt atIndex:2];
[invocation setArgument:&arg atIndex:3];
[invocation invokeWithTarget:self];
int returnValue = 0;
[invocation getReturnValue:&returnValue];
if (returnValue<0) {
[self sendErrorCode:88 message:@"%s: failed to set value '%s' for option '%s'", argv[0], arg, opt];
return;
}
}
}
} else {
if (po->u.func_arg!=NULL) {
NSMethodSignature *signature = [self methodSignatureForSelector:po->u.func_arg];
if (signature!=nil) {
NSInvocation *invocation = [NSInvocation invocationWithMethodSignature:signature];
[invocation setSelector:po->u.func_arg];
[invocation setArgument:&arg atIndex:2];
[invocation invokeWithTarget:self];
}
}
}
if(po->flags & OPT_EXIT)
return;
} else {
if (parseArgument!=NULL) {
NSMethodSignature *signature = [self methodSignatureForSelector:parseArgument];
if (signature!=nil) {
NSInvocation *invocation = [NSInvocation invocationWithMethodSignature:signature];
[invocation setSelector:parseArgument];
[invocation setArgument:&opt atIndex:2];
[invocation invokeWithTarget:self];
}
}
}
}
}
- (void)opt_format:(const char *)arg {
/* compatibility stuff for pgmyuv */
if (!strcmp(arg, "pgmyuv")) {
pgmyuv_compatibility_hack=1;
// opt_image_format(arg);
arg = "image2";
fprintf(stderr, "pgmyuv format is deprecated, use image2\n");
}
if (last_asked_format!=NULL)
free(last_asked_format);
last_asked_format = malloc(strlen(arg)+1);
strcpy(last_asked_format, arg);
}
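// Port of opt_output_file() from ffmpeg.c: picks the output format (the one
// forced by -f if set, otherwise guessed from the filename), creates the
// video/audio/subtitle output streams that apply, attaches any pending
// metadata, and opens the file unless the muxer is flagged AVFMT_NOFILE.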
- (void)opt_output_file:(const char *)filename {
AVFormatContext *oc;
int err, use_video, use_audio, use_subtitle;
int input_has_video, input_has_audio, input_has_subtitle;
AVFormatParameters params, *ap = &params;
AVOutputFormat *file_oformat;
if (!strcmp(filename, "-"))
filename = "pipe:";
oc = avformat_alloc_context();
if (!oc) {
[self sendErrorCode:1 reference:filename message:AVERROR(ENOMEM)];
return;
}
if (last_asked_format) {
file_oformat = av_guess_format(last_asked_format, NULL, NULL);
if (!file_oformat) {
[self sendErrorCode:2 message:@"Requested output format '%s' is not a suitable output format", last_asked_format];
return;
}
free(last_asked_format);
last_asked_format = NULL;
} else {
file_oformat = av_guess_format(NULL, filename, NULL);
if (!file_oformat) {
[self sendErrorCode:3 message:@"Unable to find a suitable output format for '%s'", filename];
return;
}
}
oc->oformat = file_oformat;
av_strlcpy(oc->filename, filename, sizeof(oc->filename));
if (!strcmp(file_oformat->name, "ffm") &&
av_strstart(filename, "http:", NULL)) {
/* special case for files sent to ffserver: we get the stream
parameters from ffserver */
int err = [self read_ffserver_streams:oc file:filename];
if (err < 0) {
[self sendErrorCode:4 reference:filename message:err];
return;
}
} else {
use_video = file_oformat->video_codec != CODEC_ID_NONE || video_stream_copy || video_codec_name;
use_audio = file_oformat->audio_codec != CODEC_ID_NONE || audio_stream_copy || audio_codec_name;
use_subtitle = file_oformat->subtitle_codec != CODEC_ID_NONE || subtitle_stream_copy || subtitle_codec_name;
/* disable if no corresponding type found and at least one
input file */
if (nb_input_files > 0) {
[self check_audio_video_sub_inputs:&input_has_video audio:&input_has_audio subs:&input_has_subtitle];
if (!input_has_video)
use_video = 0;
if (!input_has_audio)
use_audio = 0;
if (!input_has_subtitle)
use_subtitle = 0;
}
/* manual disable */
if (audio_disable) {
use_audio = 0;
}
if (video_disable) {
use_video = 0;
}
if (subtitle_disable) {
use_subtitle = 0;
}
if (use_video) {
[self new_video_stream:oc];
}
if (use_audio) {
[self new_audio_stream:oc];
}
if (use_subtitle) {
[self new_subtitle_stream:oc];
}
oc->timestamp = rec_timestamp;
for(; metadata_count>0; metadata_count--){
av_metadata_set2(&oc->metadata, metadata[metadata_count-1].key,
metadata[metadata_count-1].value, 0);
}
av_metadata_conv(oc, oc->oformat->metadata_conv, NULL);
}
output_files[nb_output_files++] = oc;
/* check the filename in case an image number is expected */
if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
if (!av_filename_number_test(oc->filename)) {
[self sendErrorCode:5 reference:oc->filename message:AVERROR_NUMEXPECTED];
return;
}
}
if (!(oc->oformat->flags & AVFMT_NOFILE)) {
/* open the file */
if ((err = url_fopen(&oc->pb, filename, URL_WRONLY)) < 0) {
[self sendErrorCode:6 reference:filename message:err];
return;
}
}
memset(ap, 0, sizeof(*ap));
if (av_set_parameters(oc, ap) < 0) {
[self sendErrorCode:7 message:@"%s: Invalid encoding parameters", oc->filename];
return;
}
oc->preload= (int)(mux_preload*AV_TIME_BASE);
oc->max_delay= (int)(mux_max_delay*AV_TIME_BASE);
oc->loop_output = loop_output;
oc->flags |= AVFMT_FLAG_NONBLOCK;
[self set_context_opts:oc options:avformat_opts flags:AV_OPT_FLAG_ENCODING_PARAM];
}
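// Port of opt_input_file() from ffmpeg.c: opens and probes the input file,
// performs the -ss seek if requested, and copies each stream's parameters
// (sample rate, channels, frame size, pixel format, frame rate) back into
// the option state so they become the defaults for the next output file.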
- (void)opt_input_file:(const char *)filename {
AVFormatContext *ic;
AVFormatParameters params, *ap = &params;
AVInputFormat *file_iformat = NULL;
int err, i, ret, rfps, rfps_base;
int64_t timestamp;
if (last_asked_format) {
if (!(file_iformat = av_find_input_format(last_asked_format))) {
[self sendErrorCode:8 message:@"Unknown input format: '%s'", last_asked_format];
return;
}
free(last_asked_format);
last_asked_format = NULL;
}
/* get default parameters from command line */
ic = avformat_alloc_context();
if (!ic) {
[self sendErrorCode:9 reference:filename message:AVERROR(ENOMEM)];
return;
}
memset(ap, 0, sizeof(*ap));
ap->prealloced_context = 1;
ap->sample_rate = audio_sample_rate;
ap->channels = audio_channels;
ap->time_base.den = frame_rate.num;
ap->time_base.num = frame_rate.den;
ap->width = frame_width + frame_padleft + frame_padright;
ap->height = frame_height + frame_padtop + frame_padbottom;
ap->pix_fmt = frame_pix_fmt;
// ap->sample_fmt = audio_sample_fmt; //FIXME:not implemented in libavformat
ap->channel = video_channel;
ap->standard = video_standard;
[self set_context_opts:ic options:avformat_opts flags:AV_OPT_FLAG_DECODING_PARAM];
ic->video_codec_id = [self find_codec_or_die:video_codec_name type:AVMEDIA_TYPE_VIDEO encoder:0 strict:avcodec_opts[AVMEDIA_TYPE_VIDEO]->strict_std_compliance];
ic->audio_codec_id = [self find_codec_or_die:audio_codec_name type:AVMEDIA_TYPE_AUDIO encoder:0 strict:avcodec_opts[AVMEDIA_TYPE_AUDIO]->strict_std_compliance];
ic->subtitle_codec_id = [self find_codec_or_die:subtitle_codec_name type:AVMEDIA_TYPE_SUBTITLE encoder:0 strict:avcodec_opts[AVMEDIA_TYPE_SUBTITLE]->strict_std_compliance];
ic->flags |= AVFMT_FLAG_NONBLOCK;
if(pgmyuv_compatibility_hack)
ic->video_codec_id= CODEC_ID_PGMYUV;
/* open the input file with generic libav function */
err = av_open_input_file(&ic, filename, file_iformat, 0, ap);
if (err < 0) {
[self sendErrorCode:10 reference:filename message:err];
return;
}
if(opt_programid) {
int i, j;
int found=0;
for(i=0; i<ic->nb_streams; i++){
ic->streams[i]->discard= AVDISCARD_ALL;
}
for(i=0; i<ic->nb_programs; i++){
AVProgram *p= ic->programs[i];
if(p->id != opt_programid){
p->discard = AVDISCARD_ALL;
}else{
found=1;
for(j=0; j<p->nb_stream_indexes; j++){
ic->streams[p->stream_index[j]]->discard= AVDISCARD_DEFAULT;
}
}
}
if(!found){
[self sendErrorCode:11 message:@"Specified program id not found"];
return;
}
opt_programid=0;
}
ic->loop_input = loop_input;
/* If not enough info to get the stream parameters, we decode the
first frames to get it. (used in mpeg case for example) */
ret = av_find_stream_info(ic);
if (ret < 0 && verbose >= 0) {
[self sendErrorCode:12 message:@"%s: could not find codec parameters", filename];
return;
}
timestamp = start_time;
/* add the stream start time */
if (ic->start_time != AV_NOPTS_VALUE)
timestamp += ic->start_time;
/* if seeking requested, we execute it */
if (start_time != 0) {
ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
if (ret < 0) {
fprintf(stderr, "%s: could not seek to position %0.3f\n",
filename, (double)timestamp / AV_TIME_BASE);
}
/* reset seek info */
start_time = 0;
}
/* update the current parameters so that they match those of the input stream */
for(i=0;i<ic->nb_streams;i++) {
AVStream *st = ic->streams[i];
AVCodecContext *enc = st->codec;
avcodec_thread_init(enc, thread_count);
switch(enc->codec_type) {
case AVMEDIA_TYPE_AUDIO:
[self set_context_opts:enc options:avcodec_opts[AVMEDIA_TYPE_AUDIO] flags:AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM];
//fprintf(stderr, "\nInput Audio channels: %d", enc->channels);
channel_layout = enc->channel_layout;
audio_channels = enc->channels;
audio_sample_rate = enc->sample_rate;
audio_sample_fmt = enc->sample_fmt;
input_codecs[nb_icodecs++] = avcodec_find_decoder_by_name(audio_codec_name);
if(audio_disable)
st->discard= AVDISCARD_ALL;
break;
case AVMEDIA_TYPE_VIDEO:
[self set_context_opts:enc options:avcodec_opts[AVMEDIA_TYPE_VIDEO] flags:AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM];
frame_height = enc->height;
frame_width = enc->width;
if(ic->streams[i]->sample_aspect_ratio.num)
frame_aspect_ratio=av_q2d(ic->streams[i]->sample_aspect_ratio);
else
frame_aspect_ratio=av_q2d(enc->sample_aspect_ratio);
frame_aspect_ratio *= (float) enc->width / enc->height;
frame_pix_fmt = enc->pix_fmt;
rfps = ic->streams[i]->r_frame_rate.num;
rfps_base = ic->streams[i]->r_frame_rate.den;
if(enc->lowres) {
enc->flags |= CODEC_FLAG_EMU_EDGE;
frame_height >>= enc->lowres;
frame_width >>= enc->lowres;
}
if(me_threshold)
enc->debug |= FF_DEBUG_MV;
if (enc->time_base.den != rfps*enc->ticks_per_frame || enc->time_base.num != rfps_base) {
if (verbose >= 0)
fprintf(stderr,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
i, (float)enc->time_base.den / enc->time_base.num, enc->time_base.den, enc->time_base.num,
(float)rfps / rfps_base, rfps, rfps_base);
}
/* update the current frame rate to match the stream frame rate */
frame_rate.num = rfps;
frame_rate.den = rfps_base;
input_codecs[nb_icodecs++] = avcodec_find_decoder_by_name(video_codec_name);
if(video_disable)
st->discard= AVDISCARD_ALL;
else if(video_discard)
st->discard= video_discard;
break;
case AVMEDIA_TYPE_DATA:
break;
case AVMEDIA_TYPE_SUBTITLE:
input_codecs[nb_icodecs++] = avcodec_find_decoder_by_name(subtitle_codec_name);
if(subtitle_disable)
st->discard = AVDISCARD_ALL;
break;
case AVMEDIA_TYPE_ATTACHMENT:
case AVMEDIA_TYPE_UNKNOWN:
nb_icodecs++;
break;
default:
abort();
}
}
input_files[nb_input_files] = ic;
input_files_ts_offset[nb_input_files] = input_ts_offset - (copy_ts ? 0 : timestamp);
/* dump the file content */
if (verbose >= 0)
dump_format(ic, nb_input_files, filename, 0);
nb_input_files++;
video_channel = 0;
av_freep(&video_codec_name);
av_freep(&audio_codec_name);
av_freep(&subtitle_codec_name);
}
- (int)read_ffserver_streams:(AVFormatContext *)s file:(const char *)filename {
int i, err;
AVFormatContext *ic;
int nopts = 0;
err = av_open_input_file(&ic, filename, NULL, FFM_PACKET_SIZE, NULL);
if (err < 0)
return err;
/* copy stream format */
s->nb_streams = ic->nb_streams;
for(i=0;i<ic->nb_streams;i++) {
AVStream *st;
AVCodec *codec;
// FIXME: a more elegant solution is needed
st = av_mallocz(sizeof(AVStream));
memcpy(st, ic->streams[i], sizeof(AVStream));
st->codec = avcodec_alloc_context();
if (!st->codec) {
[self sendErrorCode:13 reference:filename message:AVERROR(ENOMEM)];
return 1;
}
avcodec_copy_context(st->codec, ic->streams[i]->codec);
s->streams[i] = st;
codec = avcodec_find_encoder(st->codec->codec_id);
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
if (audio_stream_copy) {
st->stream_copy = 1;
} else
[self choose_sample_fmt:st codec:codec];
} else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
if (video_stream_copy) {
st->stream_copy = 1;
} else
[self choose_pixel_fmt:st codec:codec];
}
if(!st->codec->thread_count)
st->codec->thread_count = 1;
if(st->codec->thread_count>1)
avcodec_thread_init(st->codec, st->codec->thread_count);
if(st->codec->flags & CODEC_FLAG_BITEXACT)
nopts = 1;
}
if (!nopts)
s->timestamp = av_gettime();
av_close_input_file(ic);
return 0;
}
- (void)opt_map:(const char *)arg {
AVStreamMap *m;
char *p;
m = &stream_maps[nb_stream_maps++];
m->file_index = (int)strtol(arg, &p, 0);
if (*p)
p++;
m->stream_index = (int)strtol(p, &p, 0);
if (*p) {
p++;
m->sync_file_index = (int)strtol(p, &p, 0);
if (*p)
p++;
m->sync_stream_index = (int)strtol(p, &p, 0);
} else {
m->sync_file_index = m->file_index;
m->sync_stream_index = m->stream_index;
}
}
- (void)opt_map_meta_data:(const char *)arg {
AVMetaDataMap *m;
char *p;
m = &meta_data_maps[nb_meta_data_maps++];
m->out_file = (int)strtol(arg, &p, 0);
if (*p)
p++;
m->in_file = (int)strtol(p, &p, 0);
}
- (int)opt_recording_time:(const char *)opt arg:(const char *)arg {
recording_time = [self parse_time_or_die:opt time:arg duration:1];
return 0;
}
- (int)opt_start_time:(const char *)opt arg:(const char *)arg {
start_time = [self parse_time_or_die:opt time:arg duration:1];
return 0;
}
- (int)opt_input_ts_offset:(const char *)opt arg:(const char *)arg {
input_ts_offset = [self parse_time_or_die:opt time:arg duration:1];
return 0;
}
- (void)opt_input_ts_scale:(const char *)arg {
unsigned int stream;
double scale;
char *p;
stream = (int)strtol(arg, &p, 0);
if (*p)
p++;
scale= strtod(p, &p);
if(stream >= MAX_STREAMS)
return;
input_files_ts_scale[nb_input_files][stream]= scale;
}
- (int)opt_rec_timestamp:(const char *)opt arg:(const char *)arg {
rec_timestamp = [self parse_time_or_die:opt time:arg duration:0] / 1000000;
return 0;
}
- (int)opt_metadata:(const char *)opt arg:(const char *)arg {
char *mid= strchr(arg, '=');
if(!mid){
[self sendErrorCode:14 message:@"Missing ="];
return 1;
}
*mid++= 0;
metadata_count++;
metadata= av_realloc(metadata, sizeof(*metadata)*metadata_count);
metadata[metadata_count-1].key = av_strdup(arg);
metadata[metadata_count-1].value= av_strdup(mid);
return 0;
}
- (int)opt_timelimit:(const char *)opt arg:(const char *)arg {
#if HAVE_SETRLIMIT
int lim = [self parse_number_or_die:opt number:arg type:OPT_INT64 min:0 max:INT_MAX];
struct rlimit rl = { lim, lim + 1 };
if (setrlimit(RLIMIT_CPU, &rl))
perror("setrlimit");
#else
fprintf(stderr, "Warning: -%s not implemented on this OS\n", opt);
#endif
return 0;
}
- (int)opt_verbose:(const char *)opt arg:(const char *)arg {
verbose = [self parse_number_or_die:opt number:arg type:OPT_INT64 min:-10 max:10];
return 0;
}
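// Port of opt_target() from ffmpeg.c: expands the -target presets (vcd,
// svcd, dvd, dv/dv50, optionally prefixed with pal-/ntsc-/film-) into the
// individual codec, frame size, frame rate and bitrate options above.
// Roughly the equivalent of a command line such as
//   ffmpeg -i input.avi -target pal-dvd output.mpg
// where input.avi and output.mpg are placeholder names.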
- (void)opt_target:(const char *)arg {
enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
static const char *const frame_rates[] = {"25", "30000/1001", "24000/1001"};
if(!strncmp(arg, "pal-", 4)) {
norm = PAL;
arg += 4;
} else if(!strncmp(arg, "ntsc-", 5)) {
norm = NTSC;
arg += 5;
} else if(!strncmp(arg, "film-", 5)) {
norm = FILM;
arg += 5;
} else {
int fr;
/* Calculate FR via float to avoid int overflow */
fr = (int)(frame_rate.num * 1000.0 / frame_rate.den);
if(fr == 25000) {
norm = PAL;
} else if((fr == 29970) || (fr == 23976)) {
norm = NTSC;
} else {
/* Try to determine PAL/NTSC by peeking in the input files */
if(nb_input_files) {
int i, j;
for(j = 0; j < nb_input_files; j++) {
for(i = 0; i < input_files[j]->nb_streams; i++) {
AVCodecContext *c = input_files[j]->streams[i]->codec;
if(c->codec_type != AVMEDIA_TYPE_VIDEO)
continue;
fr = c->time_base.den * 1000 / c->time_base.num;
if(fr == 25000) {
norm = PAL;
break;
} else if((fr == 29970) || (fr == 23976)) {
norm = NTSC;
break;
}
}
if(norm != UNKNOWN)
break;
}
}
}
if(verbose && norm != UNKNOWN)
fprintf(stderr, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
}
if(norm == UNKNOWN) {
[self sendErrorCode:15 message:@"Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n"
"Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n"
"or set a framerate with \"-r xxx\"."];
return;
}
if(!strcmp(arg, "vcd")) {
[self opt_video_codec:"mpeg1video"];
[self opt_audio_codec:"mp2"];
[self opt_format:"vcd"];
[self opt_frame_size:(norm == PAL ? "352x288" : "352x240")];
[self opt_frame_rate:NULL arg:frame_rates[norm]];
[self opt_default:"g" arg:(norm == PAL ? "15" : "18")];
[self opt_default:"b" arg:"1150000"];
[self opt_default:"maxrate" arg:"1150000"];
[self opt_default:"minrate" arg:"1150000"];
[self opt_default:"bufsize" arg:"327680"]; // 40*1024*8;
[self opt_default:"ab" arg:"224000"];
audio_sample_rate = 44100;
audio_channels = 2;
[self opt_default:"packetsize" arg:"2324"];
[self opt_default:"muxrate" arg:"1411200"]; // 2352 * 75 * 8;
/* We have to offset the PTS, so that it is consistent with the SCR.
SCR starts at 36000, but the first two packs contain only padding
and the first pack from the other stream, respectively, may also have
been written before.
So the real data starts at SCR 36000+3*1200. */
mux_preload= (36000+3*1200) / 90000.0; //0.44
} else if(!strcmp(arg, "svcd")) {
[self opt_video_codec:"mpeg2video"];
[self opt_audio_codec:"mp2"];
[self opt_format:"svcd"];
[self opt_frame_size:(norm == PAL ? "480x576" : "480x480")];
[self opt_frame_rate:NULL arg:frame_rates[norm]];
[self opt_default:"g" arg:(norm == PAL ? "15" : "18")];
[self opt_default:"b" arg:"2040000"];
[self opt_default:"maxrate" arg:"2516000"];
[self opt_default:"minrate" arg:"0"]; //1145000;
[self opt_default:"bufsize" arg:"1835008"]; //224*1024*8;
[self opt_default:"flags" arg:"+scan_offset"];
[self opt_default:"ab" arg:"224000"];
audio_sample_rate = 44100;
[self opt_default:"packetsize" arg:"2324"];
} else if(!strcmp(arg, "dvd")) {
[self opt_video_codec:"mpeg2video"];
[self opt_audio_codec:"ac3"];
[self opt_format:"dvd"];
[self opt_frame_size:(norm == PAL ? "720x576" : "720x480")];
[self opt_frame_rate:NULL arg:frame_rates[norm]];
[self opt_default:"g" arg:(norm == PAL ? "15" : "18")];
[self opt_default:"b" arg:"6000000"];
[self opt_default:"maxrate" arg:"9000000"];
[self opt_default:"minrate" arg:"0"]; //1500000;
[self opt_default:"bufsize" arg:"1835008"]; //224*1024*8;
[self opt_default:"packetsize" arg:"2048"]; // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
[self opt_default:"muxrate" arg:"10080000"]; // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
[self opt_default:"ab" arg:"448000"];
audio_sample_rate = 48000;
} else if(!strncmp(arg, "dv", 2)) {
[self opt_format:"dv"];
[self opt_frame_size:(norm == PAL ? "720x576" : "720x480")];
[self opt_frame_pix_fmt:(!strncmp(arg, "dv50", 4) ? "yuv422p" : (norm == PAL ? "yuv420p" : "yuv411p"))];
[self opt_frame_rate:NULL arg:frame_rates[norm]];
audio_sample_rate = 48000;
audio_channels = 2;
} else {
[self sendErrorCode:16 message:@"Unknown target: %s", arg];
return;
}
}
- (int)opt_thread_count:(const char *)opt arg:(const char *)arg {
thread_count = [self parse_number_or_die:opt number:arg type:OPT_INT64 min:0 max:INT_MAX];
#if !HAVE_THREADS
if (verbose >= 0)
fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
return 0;
}
- (int)opt_bitrate:(const char *)opt arg:(const char *)arg {
int codec_type = opt[0]=='a' ? AVMEDIA_TYPE_AUDIO : AVMEDIA_TYPE_VIDEO;
[self opt_default:opt arg:arg];
if (av_get_int(avcodec_opts[codec_type], "b", NULL) < 1000)
fprintf(stderr, "WARNING: The bitrate parameter is set too low. It takes bits/s as argument, not kbits/s\n");
return 0;
}
- (int)opt_frame_rate:(const char *)opt arg:(const char *)arg {
if (av_parse_video_frame_rate(&frame_rate, arg) < 0) {
[self sendErrorCode:17 message:@"Incorrect value for %s: %s", opt, arg];
return 1;
}
return 0;
}
- (void)opt_frame_size:(const char *)arg {
if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
[self sendErrorCode:18 message:@"Incorrect frame size"];
return;
}
}
- (void)opt_frame_aspect_ratio:(const char *)arg {
int x = 0, y = 0;
double ar = 0;
const char *p;
char *end;
p = strchr(arg, ':');
if (p) {
x = (int)strtol(arg, &end, 10);
if (end == p)
y = (int)strtol(end+1, &end, 10);
if (x > 0 && y > 0)
ar = (double)x / (double)y;
} else
ar = strtod(arg, NULL);
if (!ar) {
[self sendErrorCode:19 message:@"Incorrect aspect ratio specification."];
return;
}
frame_aspect_ratio = ar;
}
- (void)show_pix_fmts {
enum PixelFormat pix_fmt;
printf(
"Pixel formats:\n"
"I.... = Supported Input format for conversion\n"
".O... = Supported Output format for conversion\n"
"..H.. = Hardware accelerated format\n"
"...P. = Paletted format\n"
"....B = Bitstream format\n"
"FLAGS NAME NB_COMPONENTS BITS_PER_PIXEL\n"
"-----\n");
#if !CONFIG_SWSCALE
# define sws_isSupportedInput(x) 0
# define sws_isSupportedOutput(x) 0
#endif
for (pix_fmt = 0; pix_fmt < PIX_FMT_NB; pix_fmt++) {
const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[pix_fmt];
printf("%c%c%c%c%c %-16s %d %2d\n",
sws_isSupportedInput (pix_fmt) ? 'I' : '.',
sws_isSupportedOutput(pix_fmt) ? 'O' : '.',
pix_desc->flags & PIX_FMT_HWACCEL ? 'H' : '.',
pix_desc->flags & PIX_FMT_PAL ? 'P' : '.',
pix_desc->flags & PIX_FMT_BITSTREAM ? 'B' : '.',
pix_desc->name,
pix_desc->nb_components,
av_get_bits_per_pixel(pix_desc));
}
}
- (void)opt_frame_pix_fmt:(const char *)arg {
if (strcmp(arg, "list")) {
frame_pix_fmt = av_get_pix_fmt(arg);
if (frame_pix_fmt == PIX_FMT_NONE) {
[self sendErrorCode:20 message:@"Unknown pixel format requested: %s", arg];
return;
}
} else {
[self show_pix_fmts];
}
}
- (void)opt_frame_crop_top:(const char *)arg {
frame_topBand = atoi(arg);
if (frame_topBand < 0) {
[self sendErrorCode:21 message:@"Incorrect top crop size"];
return;
}
if ((frame_topBand) >= frame_height){
[self sendErrorCode:22 message:@"Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second."];
return;
}
frame_height -= frame_topBand;
}
- (void)opt_frame_crop_bottom:(const char *)arg {
frame_bottomBand = atoi(arg);
if (frame_bottomBand < 0) {
[self sendErrorCode:23 message:@"Incorrect bottom crop size"];
return;
}
if ((frame_bottomBand) >= frame_height){
[self sendErrorCode:24 message:@"Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second."];
return;
}
frame_height -= frame_bottomBand;
}
- (void)opt_frame_crop_left:(const char *)arg {
frame_leftBand = atoi(arg);
if (frame_leftBand < 0) {
[self sendErrorCode:25 message:@"Incorrect left crop size"];
return;
}
if ((frame_leftBand) >= frame_width){
[self sendErrorCode:26 message:@"Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second."];
return;
}
frame_width -= frame_leftBand;
}
- (void)opt_frame_crop_right:(const char *)arg {
frame_rightBand = atoi(arg);
if (frame_rightBand < 0) {
[self sendErrorCode:27 message:@"Incorrect right crop size"];
return;
}
if ((frame_rightBand) >= frame_width){
[self sendErrorCode:28 message:@"Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second."];
return;
}
frame_width -= frame_rightBand;
}
- (void)opt_frame_pad_top:(const char *)arg {
frame_padtop = atoi(arg);
if (frame_padtop < 0) {
[self sendErrorCode:29 message:@"Incorrect top pad size"];
return;
}
}
- (void)opt_frame_pad_bottom:(const char *)arg {
frame_padbottom = atoi(arg);
if (frame_padbottom < 0) {
[self sendErrorCode:30 message:@"Incorrect bottom pad size"];
return;
}
}
- (void)opt_frame_pad_left:(const char *)arg {
frame_padleft = atoi(arg);
if (frame_padleft < 0) {
[self sendErrorCode:31 message:@"Incorrect left pad size"];
return;
}
}
- (void)opt_frame_pad_right:(const char *)arg {
frame_padright = atoi(arg);
if (frame_padright < 0) {
[self sendErrorCode:32 message:@"Incorrect right pad size"];
return;
}
}
- (void)opt_pad_color:(const char *)arg {
/* Input is expected to be six hex digits similar to
how colors are expressed in html tags (but without the #) */
int rgb = (int)strtol(arg, NULL, 16);
int r,g,b;
r = (rgb >> 16);
g = ((rgb >> 8) & 255);
b = (rgb & 255);
padcolor[0] = RGB_TO_Y(r,g,b);
padcolor[1] = RGB_TO_U(r,g,b,0);
padcolor[2] = RGB_TO_V(r,g,b,0);
}
- (void)opt_qscale:(const char *)arg {
video_qscale = atof(arg);
if (video_qscale <= 0 ||
video_qscale > 255) {
[self sendErrorCode:33 message:@"qscale must be > 0.0 and <= 255"];
return;
}
}
- (void)opt_video_rc_override_string:(const char *)arg {
if (video_rc_override_string!=NULL)
free(video_rc_override_string);
video_rc_override_string = malloc(strlen(arg)+1);
strcpy(video_rc_override_string, arg);
}
- (void)opt_video_codec:(const char *)arg {
[self opt_codec:&video_stream_copy name:&video_codec_name type:AVMEDIA_TYPE_VIDEO arg:arg];
}
- (int)opt_me_threshold:(const char *)opt arg:(const char *)arg {
me_threshold = [self parse_number_or_die:opt number:arg type:OPT_INT64 min:INT_MIN max:INT_MAX];
return 0;
}
- (void)opt_pass:(const char *)pass_str {
int pass;
pass = atoi(pass_str);
if (pass != 1 && pass != 2) {
[self sendErrorCode:34 message:@"pass number can be only 1 or 2"];
return;
}
do_pass = pass;
}
- (void)opt_vstats_file:(const char *)arg {
av_free (vstats_filename);
vstats_filename=av_strdup (arg);
}
- (void)opt_vstats {
char filename[40];
time_t today2 = time(NULL);
struct tm *today = localtime(&today2);
snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
today->tm_sec);
[self opt_vstats_file:filename];
}
- (void)opt_intra_matrix:(const char *)arg {
intra_matrix = av_mallocz(sizeof(uint16_t) * 64);
[self parse_matrix_coeffs:intra_matrix matrix:arg];
}
- (void)opt_inter_matrix:(const char *)arg {
inter_matrix = av_mallocz(sizeof(uint16_t) * 64);
[self parse_matrix_coeffs:inter_matrix matrix:arg];
}
- (void)opt_top_field_first:(const char *)arg {
top_field_first= atoi(arg);
}
- (void)opt_video_tag:(const char *)arg {
char *tail;
video_codec_tag= (int)strtol(arg, &tail, 0);
if(!tail || *tail)
video_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
}
- (void)choose_pixel_fmt:(AVStream *)st codec:(AVCodec *)codec {
if(codec && codec->pix_fmts){
const enum PixelFormat *p= codec->pix_fmts;
for(; *p!=-1; p++){
if(*p == st->codec->pix_fmt)
break;
}
if(*p == -1
&& !( st->codec->codec_id==CODEC_ID_MJPEG
&& st->codec->strict_std_compliance <= FF_COMPLIANCE_INOFFICIAL
&& ( st->codec->pix_fmt == PIX_FMT_YUV420P
|| st->codec->pix_fmt == PIX_FMT_YUV422P)))
st->codec->pix_fmt = codec->pix_fmts[0];
}
}
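// Port of new_video_stream() from ffmpeg.c: adds a video stream to the output
// context and either marks it as a plain stream copy (-vcodec copy) or
// configures the encoder from the accumulated option state (codec, frame
// rate, size, aspect ratio, pixel format, qscale, rc_override, two-pass
// flags).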
- (void)new_video_stream:(AVFormatContext *)oc {
AVStream *st;
AVCodecContext *video_enc;
enum CodecID codec_id;
st = av_new_stream(oc, oc->nb_streams);
if (!st) {
[self sendErrorCode:35 message:@"Could not alloc stream"];
return;
}
avcodec_get_context_defaults2(st->codec, AVMEDIA_TYPE_VIDEO);
bitstream_filters[nb_output_files][oc->nb_streams - 1]= video_bitstream_filters;
video_bitstream_filters= NULL;
avcodec_thread_init(st->codec, thread_count);
video_enc = st->codec;
if(video_codec_tag)
video_enc->codec_tag= video_codec_tag;
if( (video_global_header&1)
|| (video_global_header==0 && (oc->oformat->flags & AVFMT_GLOBALHEADER))){
video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
avcodec_opts[AVMEDIA_TYPE_VIDEO]->flags|= CODEC_FLAG_GLOBAL_HEADER;
}
if(video_global_header&2){
video_enc->flags2 |= CODEC_FLAG2_LOCAL_HEADER;
avcodec_opts[AVMEDIA_TYPE_VIDEO]->flags2|= CODEC_FLAG2_LOCAL_HEADER;
}
if (video_stream_copy) {
st->stream_copy = 1;
video_enc->codec_type = AVMEDIA_TYPE_VIDEO;
video_enc->sample_aspect_ratio =
st->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255);
} else {
const char *p;
int i;
AVCodec *codec;
AVRational fps= frame_rate.num ? frame_rate : (AVRational){25,1};
if (video_codec_name) {
codec_id = [self find_codec_or_die:video_codec_name type:AVMEDIA_TYPE_VIDEO encoder:1 strict:video_enc->strict_std_compliance];
codec = avcodec_find_encoder_by_name(video_codec_name);
output_codecs[nb_ocodecs] = codec;
} else {
codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_VIDEO);
codec = avcodec_find_encoder(codec_id);
}
video_enc->codec_id = codec_id;
[self set_context_opts:video_enc options:avcodec_opts[AVMEDIA_TYPE_VIDEO] flags:AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM];
if (codec && codec->supported_framerates && !force_fps)
fps = codec->supported_framerates[av_find_nearest_q_idx(fps, codec->supported_framerates)];
video_enc->time_base.den = fps.num;
video_enc->time_base.num = fps.den;
video_enc->width = frame_width + frame_padright + frame_padleft;
video_enc->height = frame_height + frame_padtop + frame_padbottom;
video_enc->sample_aspect_ratio = av_d2q(frame_aspect_ratio*video_enc->height/video_enc->width, 255);
video_enc->pix_fmt = frame_pix_fmt;
st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
[self choose_pixel_fmt:st codec:codec];
if (intra_only)
video_enc->gop_size = 0;
if (video_qscale || same_quality) {
video_enc->flags |= CODEC_FLAG_QSCALE;
video_enc->global_quality=
st->quality = FF_QP2LAMBDA * video_qscale;
}
if(intra_matrix)
video_enc->intra_matrix = intra_matrix;
if(inter_matrix)
video_enc->inter_matrix = inter_matrix;
p= video_rc_override_string;
for(i=0; p; i++){
int start, end, q;
int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
if(e!=3){
[self sendErrorCode:36 message:@"error parsing rc_override\n"];
return;
}
video_enc->rc_override=
av_realloc(video_enc->rc_override,
sizeof(RcOverride)*(i+1));
video_enc->rc_override[i].start_frame= start;
video_enc->rc_override[i].end_frame = end;
if(q>0){
video_enc->rc_override[i].qscale= q;
video_enc->rc_override[i].quality_factor= 1.0;
}
else{
video_enc->rc_override[i].qscale= 0;
video_enc->rc_override[i].quality_factor= -q/100.0;
}
p= strchr(p, '/');
if(p) p++;
}
video_enc->rc_override_count=i;
if (!video_enc->rc_initial_buffer_occupancy)
video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size*3/4;
video_enc->me_threshold= me_threshold;
video_enc->intra_dc_precision= intra_dc_precision - 8;
if (do_psnr)
video_enc->flags|= CODEC_FLAG_PSNR;
/* two pass mode */
if (do_pass) {
if (do_pass == 1) {
video_enc->flags |= CODEC_FLAG_PASS1;
} else {
video_enc->flags |= CODEC_FLAG_PASS2;
}
}
}
nb_ocodecs++;
if (video_language) {
av_metadata_set2(&st->metadata, "language", video_language, 0);
av_freep(&video_language);
}
/* reset some key parameters */
video_disable = 0;
av_freep(&video_codec_name);
video_stream_copy = 0;
frame_pix_fmt = PIX_FMT_NONE;
}
- (void)opt_new_video_stream {
AVFormatContext *oc;
if (nb_output_files <= 0) {
[self sendErrorCode:37 message:@"At least one output file must be specified"];
return;
}
oc = output_files[nb_output_files - 1];
[self new_video_stream:oc];
}
- (int)opt_audio_rate:(const char *)opt arg:(const char *)arg {
audio_sample_rate = [self parse_number_or_die:opt number:arg type:OPT_INT64 min:0 max:INT_MAX];
return 0;
}
- (int)opt_audio_channels:(const char *)opt arg:(const char *)arg {
audio_channels = [self parse_number_or_die:opt number:arg type:OPT_INT64 min:0 max:INT_MAX];
return 0;
}
- (void)opt_audio_codec:(const char *)arg {
[self opt_codec:&audio_stream_copy name:&audio_codec_name type:AVMEDIA_TYPE_AUDIO arg:arg];
}
- (void)opt_audio_tag:(const char *)arg {
char *tail;
audio_codec_tag= (int)strtol(arg, &tail, 0);
if(!tail || *tail)
audio_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
}
- (void)choose_sample_fmt:(AVStream *)st codec:(AVCodec *)codec {
if(codec && codec->sample_fmts){
const enum SampleFormat *p= codec->sample_fmts;
for(; *p!=-1; p++){
if(*p == st->codec->sample_fmt)
break;
}
if(*p == -1)
st->codec->sample_fmt = codec->sample_fmts[0];
}
}
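/* Clamp the requested sample rate to the closest rate the encoder supports. */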
- (void)choose_sample_rate:(AVStream *)st codec:(AVCodec *)codec {
if(codec && codec->supported_samplerates){
const int *p= codec->supported_samplerates;
int best = 0;
int best_dist=INT_MAX;
for(; *p; p++){
int dist= abs(st->codec->sample_rate - *p);
if(dist < best_dist){
best_dist= dist;
best= *p;
}
}
if(best_dist){
av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported, using closest supported (%d)\n", best);
}
st->codec->sample_rate= best;
}
}
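/* Create an audio output stream on the given output context, mirroring
   new_video_stream: codec tag, global headers, qscale, channels, sample
   format/rate and channel layout, then reset the per-stream option state. */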
- (void)new_audio_stream:(AVFormatContext *)oc {
AVStream *st;
AVCodecContext *audio_enc;
enum CodecID codec_id;
st = av_new_stream(oc, oc->nb_streams);
if (!st) {
[self sendErrorCode:38 message:@"Could not alloc stream"];
return;
}
avcodec_get_context_defaults2(st->codec, AVMEDIA_TYPE_AUDIO);
bitstream_filters[nb_output_files][oc->nb_streams - 1]= audio_bitstream_filters;
audio_bitstream_filters= NULL;
avcodec_thread_init(st->codec, thread_count);
audio_enc = st->codec;
audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
if(audio_codec_tag)
audio_enc->codec_tag= audio_codec_tag;
if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
avcodec_opts[AVMEDIA_TYPE_AUDIO]->flags|= CODEC_FLAG_GLOBAL_HEADER;
}
if (audio_stream_copy) {
st->stream_copy = 1;
audio_enc->channels = audio_channels;
audio_enc->sample_rate = audio_sample_rate;
} else {
AVCodec *codec;
[self set_context_opts:audio_enc options:avcodec_opts[AVMEDIA_TYPE_AUDIO] flags:AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM];
if (audio_codec_name) {
codec_id = [self find_codec_or_die:audio_codec_name type:AVMEDIA_TYPE_AUDIO encoder:1 strict:audio_enc->strict_std_compliance];
codec = avcodec_find_encoder_by_name(audio_codec_name);
output_codecs[nb_ocodecs] = codec;
} else {
codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_AUDIO);
codec = avcodec_find_encoder(codec_id);
}
audio_enc->codec_id = codec_id;
if (audio_qscale > QSCALE_NONE) {
audio_enc->flags |= CODEC_FLAG_QSCALE;
audio_enc->global_quality = st->quality = FF_QP2LAMBDA * audio_qscale;
}
audio_enc->channels = audio_channels;
audio_enc->sample_fmt = audio_sample_fmt;
audio_enc->sample_rate = audio_sample_rate;
audio_enc->channel_layout = channel_layout;
if (avcodec_channel_layout_num_channels(channel_layout) != audio_channels)
audio_enc->channel_layout = 0;
[self choose_sample_fmt:st codec:codec];
[self choose_sample_rate:st codec:codec];
}
nb_ocodecs++;
audio_enc->time_base= (AVRational){1, audio_sample_rate};
if (audio_language) {
av_metadata_set2(&st->metadata, "language", audio_language, 0);
av_freep(&audio_language);
}
/* reset some key parameters */
audio_disable = 0;
av_freep(&audio_codec_name);
audio_stream_copy = 0;
}
- (void)opt_new_audio_stream {
AVFormatContext *oc;
if (nb_output_files <= 0) {
[self sendErrorCode:39 message:@"At least one output file must be specified"];
return;
}
oc = output_files[nb_output_files - 1];
[self new_audio_stream:oc];
}
- (void)list_fmts:(void (*)(char *buf, int buf_size, int fmt))get_fmt_string format:(int)nb_fmts {
int i;
char fmt_str[128];
for (i=-1; i < nb_fmts; i++) {
get_fmt_string (fmt_str, sizeof(fmt_str), i);
fprintf(stdout, "%s\n", fmt_str);
}
}
- (void)opt_audio_sample_fmt:(const char *)arg {
if (strcmp(arg, "list"))
audio_sample_fmt = avcodec_get_sample_fmt(arg);
else {
[self list_fmts:avcodec_sample_fmt_string format:SAMPLE_FMT_NB];
}
}
- (void)opt_subtitle_codec:(const char *)arg {
[self opt_codec:&subtitle_stream_copy name:&subtitle_codec_name type:AVMEDIA_TYPE_SUBTITLE arg:arg];
}
- (void)new_subtitle_stream:(AVFormatContext *)oc {
AVStream *st;
AVCodecContext *subtitle_enc;
st = av_new_stream(oc, oc->nb_streams);
if (!st) {
[self sendErrorCode:40 message:@"Could not alloc stream"];
return;
}
avcodec_get_context_defaults2(st->codec, AVMEDIA_TYPE_SUBTITLE);
bitstream_filters[nb_output_files][oc->nb_streams - 1]= subtitle_bitstream_filters;
subtitle_bitstream_filters= NULL;
subtitle_enc = st->codec;
subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
if(subtitle_codec_tag)
subtitle_enc->codec_tag= subtitle_codec_tag;
if (subtitle_stream_copy) {
st->stream_copy = 1;
} else {
[self set_context_opts:subtitle_enc options:avcodec_opts[AVMEDIA_TYPE_SUBTITLE] flags:AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_ENCODING_PARAM];
subtitle_enc->codec_id = [self find_codec_or_die:subtitle_codec_name type:AVMEDIA_TYPE_SUBTITLE encoder:1 strict:subtitle_enc->strict_std_compliance];
output_codecs[nb_ocodecs] = avcodec_find_encoder_by_name(subtitle_codec_name);
}
nb_ocodecs++;
if (subtitle_language) {
av_metadata_set2(&st->metadata, "language", subtitle_language, 0);
av_freep(&subtitle_language);
}
subtitle_disable = 0;
av_freep(&subtitle_codec_name);
subtitle_stream_copy = 0;
}
- (void)opt_new_subtitle_stream {
AVFormatContext *oc;
if (nb_output_files <= 0) {
[self sendErrorCode:41 message:@"At least one output file must be specified"];
return;
}
oc = output_files[nb_output_files - 1];
[self new_subtitle_stream:oc];
}
- (void)opt_subtitle_tag:(const char *)arg {
char *tail;
subtitle_codec_tag= (int)strtol(arg, &tail, 0);
if(!tail || *tail)
subtitle_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
}
- (void)opt_video_channel:(const char *)arg {
video_channel = (int)strtol(arg, NULL, 0);
}
- (void)opt_video_standard:(const char *)arg {
video_standard = av_strdup(arg);
}
- (int)opt_bsf:(const char *)opt arg:(const char *)arg {
AVBitStreamFilterContext *bsfc= av_bitstream_filter_init(arg); //FIXME split name and args for filter at '='
AVBitStreamFilterContext **bsfp;
if(!bsfc){
[self sendErrorCode:42 message:@"Unknown bitstream filter %s", arg];
return 1;
}
bsfp= *opt == 'v' ? &video_bitstream_filters :
*opt == 'a' ? &audio_bitstream_filters :
&subtitle_bitstream_filters;
while(*bsfp)
bsfp= &(*bsfp)->next;
*bsfp= bsfc;
return 0;
}
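/* Load an ffpreset file. Unless the option is -fpre (explicit path), the file is
   searched as "<arg>.ffpreset" or "<codec_name>-<arg>.ffpreset" in $FFMPEG_DATADIR,
   $HOME/.ffmpeg and the compiled-in FFMPEG_DATADIR. Each line is "key=value";
   acodec, vcodec and scodec select codecs, everything else is handed to
   opt_default, e.g.:
       vcodec=libx264
       b=512k
 */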
- (int)opt_preset:(const char *)opt arg:(const char *)arg {
FILE *f=NULL;
char filename[1000], tmp[1000], tmp2[1000], line[1000];
int i;
const char *base[3]= { getenv("FFMPEG_DATADIR"),
getenv("HOME"),
FFMPEG_DATADIR,
};
if (*opt != 'f') {
for(i=0; i<3 && !f; i++){
if(!base[i])
continue;
snprintf(filename, sizeof(filename), "%s%s/%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", arg);
f= fopen(filename, "r");
if(!f){
char *codec_name= *opt == 'v' ? video_codec_name :
*opt == 'a' ? audio_codec_name :
subtitle_codec_name;
snprintf(filename, sizeof(filename), "%s%s/%s-%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", codec_name, arg);
f= fopen(filename, "r");
}
}
} else {
av_strlcpy(filename, arg, sizeof(filename));
f= fopen(filename, "r");
}
if(!f){
[self sendErrorCode:43 message:@"File for preset '%s' not found", arg];
return 1;
}
while(!feof(f)){
int e= fscanf(f, "%999[^\n]\n", line) - 1;
if(line[0] == '#' && !e)
continue;
e|= sscanf(line, "%999[^=]=%999[^\n]\n", tmp, tmp2) - 2;
if(e){
[self sendErrorCode:44 message:@"%s: Invalid syntax: '%s'", filename, line];
return 1;
}
if(!strcmp(tmp, "acodec")){
[self opt_audio_codec:tmp2];
}else if(!strcmp(tmp, "vcodec")){
[self opt_video_codec:tmp2];
}else if(!strcmp(tmp, "scodec")){
[self opt_subtitle_codec:tmp2];
}else if([self opt_default:tmp arg:tmp2] < 0){
[self sendErrorCode:45 message:@"%s: Invalid option or argument: '%s', parsed as '%s' = '%s'", filename, line, tmp, tmp2];
return 1;
}
}
fclose(f);
return 0;
}
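/* Generic "key value" option handler: try the option against the per-media-type
   codec option contexts, then the format and swscale option contexts; a leading
   'a', 'v' or 's' restricts it to the audio, video or subtitle codec context.
   Recognized option names are remembered in opt_names so set_context_opts can
   replay them later. */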
- (int)opt_default:(const char *)opt arg:(const char *)arg {
int type;
int ret= 0;
const AVOption *o= NULL;
int opt_types[]={AV_OPT_FLAG_VIDEO_PARAM, AV_OPT_FLAG_AUDIO_PARAM, 0, AV_OPT_FLAG_SUBTITLE_PARAM, 0};
for(type=0; type<AVMEDIA_TYPE_NB && ret>= 0; type++){
const AVOption *o2 = av_find_opt(avcodec_opts[0], opt, NULL, opt_types[type], opt_types[type]);
if(o2)
ret = av_set_string3(avcodec_opts[type], opt, arg, 1, &o);
}
if(!o)
ret = av_set_string3(avformat_opts, opt, arg, 1, &o);
if(!o && sws_opts)
ret = av_set_string3(sws_opts, opt, arg, 1, &o);
if(!o){
if(opt[0] == 'a')
ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_AUDIO], opt+1, arg, 1, &o);
else if(opt[0] == 'v')
ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_VIDEO], opt+1, arg, 1, &o);
else if(opt[0] == 's')
ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_SUBTITLE], opt+1, arg, 1, &o);
}
if (o && ret < 0) {
[self sendErrorCode:89 message:@"Invalid value '%s' for option '%s'", arg, opt];
return 1;
}
if (!o) {
[self sendErrorCode:90 message:@"Unrecognized option '%s'", opt];
return 1;
}
// av_log(NULL, AV_LOG_ERROR, "%s:%s: %f 0x%0X\n", opt, arg, av_get_double(avcodec_opts, opt, NULL), (int)av_get_int(avcodec_opts, opt, NULL));
//FIXME we should always use avcodec_opts, ... for storing options, so there is no need to keep track of what is set here
opt_names= av_realloc(opt_names, sizeof(void*)*(opt_name_count+1));
opt_names[opt_name_count++] = (char *)o->name;
if(avcodec_opts[0]->debug || avformat_opts->debug)
av_log_set_level(AV_LOG_DEBUG);
return 0;
}
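/* Copy every option recorded by opt_default from opts_ctx onto ctx, restricted
   to options whose flags match the requested flags. */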
- (void)set_context_opts:(void *)ctx options:(void *)opts_ctx flags:(int)flags {
int i;
for(i=0; i<opt_name_count; i++){
char buf[256];
const AVOption *opt;
const char *str= av_get_string(opts_ctx, opt_names[i], &opt, buf, sizeof(buf));
/* if an option with name opt_names[i] is present in opts_ctx then str is non-NULL */
if(str && ((opt->flags & flags) == flags))
av_set_string3(ctx, opt_names[i], str, 1, NULL);
}
}
- (void)check_audio_video_sub_inputs:(int *)has_video_ptr audio:(int *)has_audio_ptr subs:(int *)has_subtitle_ptr {
int has_video, has_audio, has_subtitle, i, j;
AVFormatContext *ic;
has_video = 0;
has_audio = 0;
has_subtitle = 0;
for(j=0;j<nb_input_files;j++) {
ic = input_files[j];
for(i=0;i<ic->nb_streams;i++) {
AVCodecContext *enc = ic->streams[i]->codec;
switch(enc->codec_type) {
case AVMEDIA_TYPE_AUDIO:
has_audio = 1;
break;
case AVMEDIA_TYPE_VIDEO:
has_video = 1;
break;
case AVMEDIA_TYPE_SUBTITLE:
has_subtitle = 1;
break;
case AVMEDIA_TYPE_DATA:
case AVMEDIA_TYPE_ATTACHMENT:
case AVMEDIA_TYPE_UNKNOWN:
break;
default:
abort();
}
}
}
*has_video_ptr = has_video;
*has_audio_ptr = has_audio;
*has_subtitle_ptr = has_subtitle;
}
- (enum CodecID)find_codec_or_die:(const char *)name type:(int)type encoder:(int)encoder strict:(int)strict {
const char *codec_string = encoder ? "encoder" : "decoder";
AVCodec *codec;
if(!name)
return CODEC_ID_NONE;
codec = encoder ?
avcodec_find_encoder_by_name(name) :
avcodec_find_decoder_by_name(name);
if(!codec) {
[self sendErrorCode:46 message:@"Unknown %s '%s'", codec_string, name];
return 0;
}
if(codec->type != type) {
[self sendErrorCode:47 message:@"Invalid %s type '%s'", codec_string, name];
return 0;
}
if(codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
strict > FF_COMPLIANCE_EXPERIMENTAL) {
NSString *message = [NSString stringWithFormat:@"%s '%s' is experimental and might produce bad "
"results.\nAdd '-strict experimental' if you want to use it.\n", codec_string, codec->name];
codec = encoder ?
avcodec_find_encoder(codec->id) :
avcodec_find_decoder(codec->id);
if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
message = [message stringByAppendingFormat:@"Or use the non experimental %s '%s'.\n", codec_string, codec->name];
[self sendErrorCode:48 message:message];
return 0;
}
return codec->id;
}
- (int64_t)parse_time_or_die:(const char *)context time:(const char *)timestr duration:(int)is_duration {
int64_t us = parse_date(timestr, is_duration);
if (us == INT64_MIN) {
[self sendErrorCode:91 message:@"Invalid %s specification for %s: %s", (is_duration ? "duration" : "date"), context, timestr];
return -1;
}
return us;
}
- (double)parse_number_or_die:(const char *)context number:(const char *)numstr type:(int)type min:(double)min max:(double)max {
char *tail;
const char *error;
double d = strtod(numstr, &tail);
if (*tail)
error= "Expected number for %s but found: %s\n";
else if (d < min || d > max)
error= "The value for %s was %s which is not within %f - %f\n";
else if(type == OPT_INT64 && (int64_t)d != d)
error= "Expected int64 for %s but found %s\n";
else
return d;
[self sendErrorCode:92 message:[NSString stringWithUTF8String:error], context, numstr, min, max];
return -1;
}
- (void)parse_matrix_coeffs:(uint16_t *)dest matrix:(const char *)str {
int i;
const char *p = str;
for(i = 0;; i++) {
dest[i] = atoi(p);
if(i == 63)
break;
p = strchr(p, ',');
if(!p) {
[self sendErrorCode:49 message:@"Syntax error in matrix \"%s\" at coeff %d", str, i];
return;
}
p++;
}
}
- (int)read_yesno {
int c = getchar();
int yesno = (toupper(c) == 'Y');
while (c != '\n' && c != EOF)
c = getchar();
return yesno;
}
- (void)opt_codec:(int *)pstream_copy name:(char **)pcodec_name type:(int)codec_type arg:(const char *)arg {
av_freep(pcodec_name);
if (!strcmp(arg, "copy")) {
*pstream_copy = 1;
} else {
*pcodec_name = av_strdup(arg);
}
}
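/* User CPU time of the current process, in microseconds. */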
- (int64_t)getutime {
#if HAVE_GETRUSAGE
struct rusage rusage;
getrusage(RUSAGE_SELF, &rusage);
return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
HANDLE proc;
FILETIME c, e, k, u;
proc = GetCurrentProcess();
GetProcessTimes(proc, &c, &e, &k, &u);
return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
return av_gettime();
#endif
}
- (int64_t)getmaxrss {
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
struct rusage rusage;
getrusage(RUSAGE_SELF, &rusage);
return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
HANDLE proc;
PROCESS_MEMORY_COUNTERS memcounters;
proc = GetCurrentProcess();
memcounters.cb = sizeof(memcounters);
GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
return memcounters.PeakPagefileUsage;
#else
return 0;
#endif
}
- (double)psnr:(double)d {
return -10.0*log(d)/log(10.0);
}
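/* Append one line per encoded video frame (frame number, quantizer, optional
   PSNR, frame size, bitrate) to the vstats file named by vstats_filename. */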
- (void)do_video_stats:(AVFormatContext *)os output:(AVOutputStream *)ost size:(int)frame_size {
AVCodecContext *enc;
int frame_number;
double ti1, bitrate, avg_bitrate;
/* this is executed just the first time do_video_stats is called */
if (!vstats_file) {
vstats_file = fopen(vstats_filename, "w");
if (!vstats_file) {
[self sendErrorCode:50 message:@"Error opening %s", vstats_filename];
stopConverting = YES;
return;
}
}
enc = ost->st->codec;
if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
frame_number = ost->frame_number;
fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
if (enc->flags&CODEC_FLAG_PSNR)
fprintf(vstats_file, "PSNR= %6.2f ", [self psnr:(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0))]);
fprintf(vstats_file,"f_size= %6d ", frame_size);
/* compute pts value */
ti1 = ost->sync_opts * av_q2d(enc->time_base);
if (ti1 < 0.01)
ti1 = 0.01;
bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
(double)video_size / 1024, ti1, bitrate, avg_bitrate);
fprintf(vstats_file,"type= %c\n", av_get_pict_type_char(enc->coded_frame->pict_type));
}
}
- (void)print_sdp:(AVFormatContext **)avc number:(int)n {
char sdp[2048];
avf_sdp_create(avc, n, sdp, sizeof(sdp));
printf("SDP:\n%s\n", sdp);
fflush(stdout);
}
- (double)get_sync_ipts:(const AVOutputStream *)ost {
const AVInputStream *ist = ost->sync_ist;
return (double)(ist->pts - start_time)/AV_TIME_BASE;
}
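/* Run the packet through any registered bitstream filters for this stream,
   then hand it to av_interleaved_write_frame(). */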
- (void)write_frame:(AVFormatContext *)s packet:(AVPacket *)pkt codec:(AVCodecContext *)avctx filter:(AVBitStreamFilterContext *)bsfc {
int ret;
while(bsfc){
AVPacket new_pkt= *pkt;
int a= av_bitstream_filter_filter(bsfc, avctx, NULL,
&new_pkt.data, &new_pkt.size,
pkt->data, pkt->size,
pkt->flags & AV_PKT_FLAG_KEY);
if(a>0){
av_free_packet(pkt);
new_pkt.destruct= av_destruct_packet;
} else if(a<0){
[self sendErrorCode:51 message:@"%s failed for stream %d, codec %s", bsfc->filter->name, pkt->stream_index, (avctx->codec ? avctx->codec->name : "copy")];
[self sendErrorCode:51 reference:"" message:a];
if (exit_on_error) {
stopConverting = YES;
return;
}
}
*pkt= new_pkt;
bsfc= bsfc->next;
}
ret= av_interleaved_write_frame(s, pkt);
if(ret < 0){
[self sendErrorCode:52 reference:"av_interleaved_write_frame()" message:ret];
stopConverting = YES;
return;
}
}
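/* Encode one decoded subtitle and mux it; DVB subtitles are emitted twice
   (once to draw, once to clear), with the pts shifted accordingly. */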
- (void)do_subtitle_out:(AVFormatContext *)s output:(AVOutputStream *)ost stream:(AVInputStream *)ist subs:(AVSubtitle *)sub pts:(int64_t)pts {
int subtitle_out_max_size = 1024 * 1024;
int subtitle_out_size, nb, i;
AVCodecContext *enc;
AVPacket pkt;
if (pts == AV_NOPTS_VALUE) {
[self sendErrorCode:53 message:@"Subtitle packets must have a pts"];
if (exit_on_error)
stopConverting = YES;
return;
}
enc = ost->st->codec;
if (!subtitle_out) {
subtitle_out = av_malloc(subtitle_out_max_size);
}
/* Note: DVB subtitles need one packet to draw them and another
packet to clear them */
/* XXX: signal it in the codec context ? */
if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
nb = 2;
else
nb = 1;
for(i = 0; i < nb; i++) {
sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
// start_display_time is required to be 0
sub->pts += av_rescale_q(sub->start_display_time, (AVRational){1, 1000}, AV_TIME_BASE_Q);
sub->end_display_time -= sub->start_display_time;
sub->start_display_time = 0;
subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
subtitle_out_max_size, sub);
if (subtitle_out_size < 0) {
[self sendErrorCode:54 message:@"Subtitle encoding failed"];
stopConverting = YES;
return;
}
av_init_packet(&pkt);
pkt.stream_index = ost->index;
pkt.data = subtitle_out;
pkt.size = subtitle_out_size;
pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
/* XXX: the pts correction is handled here. Maybe handling
it in the codec would be better */
if (i == 0)
pkt.pts += 90 * sub->start_display_time;
else
pkt.pts += 90 * sub->end_display_time;
}
[self write_frame:s packet:&pkt codec:ost->st->codec filter:bitstream_filters[ost->file_index][pkt.stream_index]];
}
}
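/* Encode one decoded video frame: apply video sync (dropping or duplicating
   frames as needed), crop/pad/rescale into the output geometry, then either
   pass the raw picture through (AVFMT_RAWPICTURE) or run the encoder and mux
   the resulting packet(s). */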
- (void)do_video_out:(AVFormatContext *)s output:(AVOutputStream *)ost stream:(AVInputStream *)ist picture:(AVFrame *)in_picture size:(int *)frame_size {
int nb_frames, i, ret;
int64_t topBand, bottomBand, leftBand, rightBand;
AVFrame *final_picture, *formatted_picture, *resampling_dst, *padding_src;
AVFrame picture_crop_temp, picture_pad_temp;
AVCodecContext *enc, *dec;
double sync_ipts;
avcodec_get_frame_defaults(&picture_crop_temp);
avcodec_get_frame_defaults(&picture_pad_temp);
enc = ost->st->codec;
dec = ist->st->codec;
sync_ipts = [self get_sync_ipts:ost] / av_q2d(enc->time_base);
/* by default, we output a single frame */
nb_frames = 1;
*frame_size = 0;
if(video_sync_method){
double vdelta = sync_ipts - ost->sync_opts;
//FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
if (vdelta < -1.1)
nb_frames = 0;
else if (video_sync_method == 2 || (video_sync_method<0 && (s->oformat->flags & AVFMT_VARIABLE_FPS))){
if(vdelta<=-0.6){
nb_frames=0;
}else if(vdelta>0.6)
ost->sync_opts= lrintf(sync_ipts);
}else if (vdelta > 1.1)
nb_frames = (int)lrintf(vdelta);
//fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, [self get_sync_ipts:ost], nb_frames);
if (nb_frames == 0){
++nb_frames_drop;
if (verbose>2)
fprintf(stderr, "*** drop!\n");
}else if (nb_frames > 1) {
nb_frames_dup += nb_frames - 1;
if (verbose>2)
fprintf(stderr, "*** %d dup!\n", nb_frames-1);
}
}else
ost->sync_opts= lrintf(sync_ipts);
nb_frames= FFMIN(nb_frames, max_frames[AVMEDIA_TYPE_VIDEO] - ost->frame_number);
if (nb_frames <= 0)
return;
if (ost->video_crop) {
if (av_picture_crop((AVPicture *)&picture_crop_temp, (AVPicture *)in_picture, dec->pix_fmt, ost->topBand, ost->leftBand) < 0) {
[self sendErrorCode:55 message:@"error cropping picture"];
if (exit_on_error)
stopConverting = YES;
return;
}
formatted_picture = &picture_crop_temp;
} else {
formatted_picture = in_picture;
}
final_picture = formatted_picture;
padding_src = formatted_picture;
resampling_dst = &ost->pict_tmp;
if (ost->video_pad) {
final_picture = &ost->pict_tmp;
if (ost->video_resample) {
if (av_picture_crop((AVPicture *)&picture_pad_temp, (AVPicture *)final_picture, enc->pix_fmt, ost->padtop, ost->padleft) < 0) {
[self sendErrorCode:56 message:@"error padding picture"];
if (exit_on_error)
stopConverting = YES;
return;
}
resampling_dst = &picture_pad_temp;
}
}
if( (ost->resample_height != (ist->st->codec->height - (ost->topBand + ost->bottomBand)))
|| (ost->resample_width != (ist->st->codec->width - (ost->leftBand + ost->rightBand)))
|| (ost->resample_pix_fmt!= ist->st->codec->pix_fmt) ) {
fprintf(stderr,"Input Stream #%d.%d frame size changed to %dx%d, %s\n", ist->file_index, ist->index, ist->st->codec->width, ist->st->codec->height,avcodec_get_pix_fmt_name(ist->st->codec->pix_fmt));
if(!ost->video_resample) {
stopConverting = YES;
return;
}
}
if (ost->video_resample) {
padding_src = NULL;
final_picture = &ost->pict_tmp;
if( (ost->resample_height != (ist->st->codec->height - (ost->topBand + ost->bottomBand)))
|| (ost->resample_width != (ist->st->codec->width - (ost->leftBand + ost->rightBand)))
|| (ost->resample_pix_fmt!= ist->st->codec->pix_fmt) ) {
/* keep bands proportional to the frame size */
topBand = ((int64_t)ist->st->codec->height * ost->original_topBand / ost->original_height) & ~1;
bottomBand = ((int64_t)ist->st->codec->height * ost->original_bottomBand / ost->original_height) & ~1;
leftBand = ((int64_t)ist->st->codec->width * ost->original_leftBand / ost->original_width) & ~1;
rightBand = ((int64_t)ist->st->codec->width * ost->original_rightBand / ost->original_width) & ~1;
/* sanity check to ensure no bad band sizes sneak in */
assert(topBand <= INT_MAX && topBand >= 0);
assert(bottomBand <= INT_MAX && bottomBand >= 0);
assert(leftBand <= INT_MAX && leftBand >= 0);
assert(rightBand <= INT_MAX && rightBand >= 0);
ost->topBand = (int)topBand;
ost->bottomBand = (int)bottomBand;
ost->leftBand = (int)leftBand;
ost->rightBand = (int)rightBand;
ost->resample_height = ist->st->codec->height - (ost->topBand + ost->bottomBand);
ost->resample_width = ist->st->codec->width - (ost->leftBand + ost->rightBand);
ost->resample_pix_fmt= ist->st->codec->pix_fmt;
/* initialize a new scaler context */
sws_freeContext(ost->img_resample_ctx);
sws_flags = (unsigned int)av_get_int(sws_opts, "sws_flags", NULL);
ost->img_resample_ctx = sws_getContext(
ist->st->codec->width - (ost->leftBand + ost->rightBand),
ist->st->codec->height - (ost->topBand + ost->bottomBand),
ist->st->codec->pix_fmt,
ost->st->codec->width - (ost->padleft + ost->padright),
ost->st->codec->height - (ost->padtop + ost->padbottom),
ost->st->codec->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (ost->img_resample_ctx == NULL) {
[self sendErrorCode:57 message:@"Cannot get resampling context"];
stopConverting = YES;
return;
}
}
sws_scale(ost->img_resample_ctx, (const uint8_t *const *)formatted_picture->data, formatted_picture->linesize,
0, ost->resample_height, resampling_dst->data, resampling_dst->linesize);
}
if (ost->video_pad) {
av_picture_pad((AVPicture*)final_picture, (AVPicture *)padding_src,
enc->height, enc->width, enc->pix_fmt,
ost->padtop, ost->padbottom, ost->padleft, ost->padright, padcolor);
}
/* duplicates frame if needed */
for(i=0;i<nb_frames;i++) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.stream_index= ost->index;
if (s->oformat->flags & AVFMT_RAWPICTURE) {
/* raw pictures are written as an AVPicture structure to
avoid any copies. We temporarily support the older
method. */
AVFrame* old_frame = enc->coded_frame;
enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
pkt.data= (uint8_t *)final_picture;
pkt.size= sizeof(AVPicture);
pkt.pts= av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
pkt.flags |= AV_PKT_FLAG_KEY;
[self write_frame:s packet:&pkt codec:ost->st->codec filter:bitstream_filters[ost->file_index][pkt.stream_index]];
enc->coded_frame = old_frame;
} else {
AVFrame big_picture;
big_picture= *final_picture;
/* better than nothing: use input picture interlaced
settings */
big_picture.interlaced_frame = in_picture->interlaced_frame;
if(avcodec_opts[AVMEDIA_TYPE_VIDEO]->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)){
if(top_field_first == -1)
big_picture.top_field_first = in_picture->top_field_first;
else
big_picture.top_field_first = top_field_first;
}
/* handle sameq here. This is not correct because it may
not be a global option */
if (same_quality) {
big_picture.quality = ist->st->quality;
}else
big_picture.quality = ost->st->quality;
if(!me_threshold)
big_picture.pict_type = 0;
// big_picture.pts = AV_NOPTS_VALUE;
big_picture.pts= ost->sync_opts;
// big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
//av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
ret = avcodec_encode_video(enc,
bit_buffer, bit_buffer_size,
&big_picture);
if (ret < 0) {
[self sendErrorCode:58 message:@"Video encoding failed"];
stopConverting = YES;
return;
}
if(ret>0){
pkt.data= bit_buffer;
pkt.size= ret;
if(enc->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
/*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n",
pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
if(enc->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
[self write_frame:s packet:&pkt codec:ost->st->codec filter:bitstream_filters[ost->file_index][pkt.stream_index]];
*frame_size = ret;
video_size += ret;
//fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
// enc->frame_number-1, ret, enc->pict_type);
/* if two pass, output log */
if (ost->logfile && enc->stats_out) {
fprintf(ost->logfile, "%s", enc->stats_out);
}
}
}
ost->sync_opts++;
ost->frame_number++;
}
}
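/* Encode decoded audio: grow the intermediate buffers, set up resampling and
   sample-format conversion on demand, compensate for drift when audio sync
   (audio_sync_method) is enabled, then feed the encoder either frame_size
   samples at a time through a FIFO or as a single PCM block. */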
- (void)do_audio_out:(AVFormatContext *)s output:(AVOutputStream *)ost stream:(AVInputStream *)ist buffer:(unsigned char *)buf size:(int)size {
uint8_t *buftmp;
int64_t audio_out_size, audio_buf_size;
int64_t allocated_for_size= size;
int size_out, frame_bytes, ret;
AVCodecContext *enc= ost->st->codec;
AVCodecContext *dec= ist->st->codec;
int osize= av_get_bits_per_sample_format(enc->sample_fmt)/8;
int isize= av_get_bits_per_sample_format(dec->sample_fmt)/8;
const int coded_bps = av_get_bits_per_sample(enc->codec->id);
need_realloc:
audio_buf_size= (allocated_for_size + isize*dec->channels - 1) / (isize*dec->channels);
audio_buf_size= (audio_buf_size*enc->sample_rate + dec->sample_rate) / dec->sample_rate;
audio_buf_size= audio_buf_size*2 + 10000; //safety factors for the deprecated resampling API
audio_buf_size*= osize*enc->channels;
audio_out_size= FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
if(coded_bps > 8*osize)
audio_out_size= audio_out_size * coded_bps / (8*osize);
audio_out_size += FF_MIN_BUFFER_SIZE;
if(audio_out_size > INT_MAX || audio_buf_size > INT_MAX){
[self sendErrorCode:59 message:@"Buffer sizes too large"];
stopConverting = YES;
return;
}
av_fast_malloc(&audio_buf, (unsigned int *)&allocated_audio_buf_size, (unsigned int)audio_buf_size);
av_fast_malloc(&audio_out, (unsigned int *)&allocated_audio_out_size, (unsigned int)audio_out_size);
if (!audio_buf || !audio_out){
[self sendErrorCode:60 message:@"Out of memory in do_audio_out"];
stopConverting = YES;
return;
}
if (enc->channels != dec->channels)
ost->audio_resample = 1;
if (ost->audio_resample && !ost->resample) {
if (dec->sample_fmt != SAMPLE_FMT_S16)
fprintf(stderr, "Warning, using s16 intermediate sample format for resampling\n");
ost->resample = av_audio_resample_init(enc->channels, dec->channels,
enc->sample_rate, dec->sample_rate,
enc->sample_fmt, dec->sample_fmt,
16, 10, 0, 0.8);
if (!ost->resample) {
[self sendErrorCode:61 message:@"Can not resample %d channels @ %d Hz to %d channels @ %d Hz", dec->channels, dec->sample_rate, enc->channels, enc->sample_rate];
stopConverting = YES;
return;
}
}
#define MAKE_SFMT_PAIR(a,b) ((a)+SAMPLE_FMT_NB*(b))
if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt &&
MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt)!=ost->reformat_pair) {
if (ost->reformat_ctx)
av_audio_convert_free(ost->reformat_ctx);
ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1,
dec->sample_fmt, 1, NULL, 0);
if (!ost->reformat_ctx) {
[self sendErrorCode:62 message:@"Cannot convert %s sample format to %s sample format", avcodec_get_sample_fmt_name(dec->sample_fmt), avcodec_get_sample_fmt_name(enc->sample_fmt)];
stopConverting = YES;
return;
}
ost->reformat_pair=MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);
}
if(audio_sync_method){
double delta = [self get_sync_ipts:ost] * enc->sample_rate - ost->sync_opts
- av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2);
double idelta= delta*ist->st->codec->sample_rate / enc->sample_rate;
int byte_delta= ((int)idelta)*2*ist->st->codec->channels;
//FIXME resample delay
if(fabs(delta) > 50){
if(ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate){
if(byte_delta < 0){
byte_delta= FFMAX(byte_delta, -size);
size += byte_delta;
buf -= byte_delta;
if(verbose > 2)
fprintf(stderr, "discarding %d audio samples\n", (int)-delta);
if(!size)
return;
ist->is_start=0;
}else{
input_tmp= av_realloc(input_tmp, byte_delta + size);
if(byte_delta > allocated_for_size - size){
allocated_for_size= byte_delta + (int64_t)size;
goto need_realloc;
}
ist->is_start=0;
memset(input_tmp, 0, byte_delta);
memcpy(input_tmp + byte_delta, buf, size);
buf= input_tmp;
size += byte_delta;
if(verbose > 2)
fprintf(stderr, "adding %d audio samples of silence\n", (int)delta);
}
}else if(audio_sync_method>1){
int comp= av_clip(delta, -audio_sync_method, audio_sync_method);
assert(ost->audio_resample);
if(verbose > 2)
fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
// fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)([self get_sync_ipts:ost] * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2));
av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
}
}
}else
ost->sync_opts= lrintf([self get_sync_ipts:ost] * enc->sample_rate)
- av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2); //FIXME wrong
if (ost->audio_resample) {
buftmp = audio_buf;
size_out = audio_resample(ost->resample,
(short *)buftmp, (short *)buf,
size / (ist->st->codec->channels * isize));
size_out = size_out * enc->channels * osize;
} else {
buftmp = buf;
size_out = size;
}
if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt) {
const void *ibuf[6]= {buftmp};
void *obuf[6]= {audio_buf};
int istride[6]= {isize};
int ostride[6]= {osize};
int len= size_out/istride[0];
if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
[self sendErrorCode:63 message:@"av_audio_convert() failed"];
if (exit_on_error)
stopConverting = YES;
return;
}
buftmp = audio_buf;
size_out = len*osize;
}
/* now encode as many frames as possible */
if (enc->frame_size > 1) {
/* output resampled raw samples */
if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
[self sendErrorCode:64 message:@"av_fifo_realloc2() failed"];
stopConverting = YES;
return;
}
av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);
frame_bytes = enc->frame_size * osize * enc->channels;
while (av_fifo_size(ost->fifo) >= frame_bytes) {
AVPacket pkt;
av_init_packet(&pkt);
av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
//FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
ret = avcodec_encode_audio(enc, audio_out, (int)audio_out_size,
(short *)audio_buf);
if (ret < 0) {
[self sendErrorCode:65 message:@"Audio encoding failed"];
stopConverting = YES;
return;
}
audio_size += ret;
pkt.stream_index= ost->index;
pkt.data= audio_out;
pkt.size= ret;
if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
pkt.flags |= AV_PKT_FLAG_KEY;
[self write_frame:s packet:&pkt codec:ost->st->codec filter:bitstream_filters[ost->file_index][pkt.stream_index]];
ost->sync_opts += enc->frame_size;
}
} else {
AVPacket pkt;
av_init_packet(&pkt);
ost->sync_opts += size_out / (osize * enc->channels);
/* output a pcm frame */
/* determine the size of the coded buffer */
size_out /= osize;
if (coded_bps)
size_out = size_out*coded_bps/8;
if(size_out > audio_out_size){
[self sendErrorCode:66 message:@"Internal error, buffer size too small"];
stopConverting = YES;
return;
}
//FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
ret = avcodec_encode_audio(enc, audio_out, size_out,
(short *)buftmp);
if (ret < 0) {
[self sendErrorCode:67 message:@"Audio encoding failed"];
stopConverting = YES;
return;
}
audio_size += ret;
pkt.stream_index= ost->index;
pkt.data= audio_out;
pkt.size= ret;
if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
pkt.flags |= AV_PKT_FLAG_KEY;
[self write_frame:s packet:&pkt codec:ost->st->codec filter:bitstream_filters[ost->file_index][pkt.stream_index]];
}
}
- (void)pre_process_video_frame:(AVInputStream *)ist picture:(AVPicture *)picture buffer:(void **)bufp {
AVCodecContext *dec;
AVPicture *picture2;
AVPicture picture_tmp;
uint8_t *buf = 0;
dec = ist->st->codec;
/* deinterlace : must be done before any resize */
if (do_deinterlace) {
int size;
/* create temporary picture */
size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
buf = av_malloc(size);
if (!buf)
return;
picture2 = &picture_tmp;
avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
if(avpicture_deinterlace(picture2, picture,
dec->pix_fmt, dec->width, dec->height) < 0) {
/* if error, do not deinterlace */
fprintf(stderr, "Deinterlacing failed\n");
av_free(buf);
buf = NULL;
picture2 = picture;
}
} else {
picture2 = picture;
}
if (picture != picture2)
*picture = *picture2;
*bufp = buf;
}
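/* Decode one input packet (or flush the decoder when pkt is NULL at EOF),
   update the input stream clock, and dispatch the decoded data to every output
   stream fed by this input: re-encode via do_audio_out/do_video_out/
   do_subtitle_out, or remux the packet directly when stream copy is used.
   At EOF the encoders themselves are flushed as well. */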
- (int)output_packet:(AVInputStream *)ist index:(int)ist_index table:(AVOutputStream **)ost_table streams:(int)nb_ostreams packet:(const AVPacket *)pkt {
AVFormatContext *os;
AVOutputStream *ost;
int ret, i;
int got_picture;
AVFrame picture;
void *buffer_to_free;
AVSubtitle subtitle, *subtitle_to_free;
int got_subtitle;
AVPacket avpkt;
int bps = av_get_bits_per_sample_format(ist->st->codec->sample_fmt)>>3;
if(ist->next_pts == AV_NOPTS_VALUE)
ist->next_pts= ist->pts;
if (pkt == NULL) {
/* EOF handling */
av_init_packet(&avpkt);
avpkt.data = NULL;
avpkt.size = 0;
goto handle_eof;
} else {
avpkt = *pkt;
}
if(pkt->dts != AV_NOPTS_VALUE)
ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
//while we have more to decode or while the decoder did output something on EOF
while (avpkt.size > 0 || (!pkt && ist->next_pts != ist->pts)) {
uint8_t *data_buf, *decoded_data_buf;
int data_size, decoded_data_size;
handle_eof:
ist->pts= ist->next_pts;
if(avpkt.size && avpkt.size != pkt->size &&
((!ist->showed_multi_packet_warning && verbose>0) || verbose>1)){
fprintf(stderr, "Multiple frames in a packet from stream %d\n", pkt->stream_index);
ist->showed_multi_packet_warning=1;
}
/* decode the packet if needed */
decoded_data_buf = NULL; /* fail safe */
decoded_data_size= 0;
data_buf = avpkt.data;
data_size = avpkt.size;
subtitle_to_free = NULL;
if (ist->decoding_needed) {
switch(ist->st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:{
if(pkt && samples_size < FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
samples_size = FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE);
av_free(samples);
samples= av_malloc(samples_size);
}
decoded_data_size= samples_size;
/* XXX: could avoid copy if PCM 16 bits with same
endianness as CPU */
ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
&avpkt);
if (ret < 0)
goto fail_decode;
avpkt.data += ret;
avpkt.size -= ret;
data_size = ret;
/* Some bug in the mpeg audio decoder gives decoded_data_size < 0;
it seems these are overflows */
if (decoded_data_size <= 0) {
/* no audio frame */
continue;
}
decoded_data_buf = (uint8_t *)samples;
ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
(ist->st->codec->sample_rate * ist->st->codec->channels);
break;}
case AVMEDIA_TYPE_VIDEO:
decoded_data_size = (ist->st->codec->width * ist->st->codec->height * 3) / 2;
/* XXX: allocate picture correctly */
avcodec_get_frame_defaults(&picture);
ret = avcodec_decode_video2(ist->st->codec,
&picture, &got_picture, &avpkt);
ist->st->quality= picture.quality;
if (ret < 0)
goto fail_decode;
if (!got_picture) {
/* no picture yet */
goto discard_packet;
}
if (ist->st->codec->time_base.num != 0) {
int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
ist->next_pts += ((int64_t)AV_TIME_BASE *
ist->st->codec->time_base.num * ticks) /
ist->st->codec->time_base.den;
}
avpkt.size = 0;
break;
case AVMEDIA_TYPE_SUBTITLE:
ret = avcodec_decode_subtitle2(ist->st->codec,
&subtitle, &got_subtitle, &avpkt);
if (ret < 0)
goto fail_decode;
if (!got_subtitle) {
goto discard_packet;
}
subtitle_to_free = &subtitle;
avpkt.size = 0;
break;
default:
goto fail_decode;
}
} else {
switch(ist->st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
ist->st->codec->sample_rate;
break;
case AVMEDIA_TYPE_VIDEO:
if (ist->st->codec->time_base.num != 0) {
int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
ist->next_pts += ((int64_t)AV_TIME_BASE *
ist->st->codec->time_base.num * ticks) /
ist->st->codec->time_base.den;
}
break;
default:
break;
}
ret = avpkt.size;
avpkt.size = 0;
}
buffer_to_free = NULL;
if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
[self pre_process_video_frame:ist picture:(AVPicture *)&picture buffer:&buffer_to_free];
}
// preprocess audio (volume)
if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
if (audio_volume != 256) {
short *volp;
volp = samples;
for(i=0;i<(decoded_data_size / sizeof(short));i++) {
int v = ((*volp) * audio_volume + 128) >> 8;
if (v < -32768) v = -32768;
if (v > 32767) v = 32767;
*volp++ = v;
}
}
}
/* frame rate emulation */
if (rate_emu) {
int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
int64_t now = av_gettime() - ist->start;
if (pts > now)
usleep((unsigned int)(pts - now));
}
/* if output time reached then transcode raw format,
encode packets and output them */
if (start_time == 0 || ist->pts >= start_time)
for(i=0;i<nb_ostreams;i++) {
int frame_size;
ost = ost_table[i];
if (ost->source_index == ist_index) {
os = output_files[ost->file_index];
/* set the input output pts pairs */
//ost->sync_ipts = (double)(ist->pts + input_files_ts_offset[ist->file_index] - start_time)/ AV_TIME_BASE;
if (ost->encoding_needed) {
assert(ist->decoding_needed);
switch(ost->st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
[self do_audio_out:os output:ost stream:ist buffer:decoded_data_buf size:decoded_data_size];
break;
case AVMEDIA_TYPE_VIDEO:
[self do_video_out:os output:ost stream:ist picture:&picture size:&frame_size];
if (vstats_filename && frame_size)
[self do_video_stats:os output:ost size:frame_size];
break;
case AVMEDIA_TYPE_SUBTITLE:
[self do_subtitle_out:os output:ost stream:ist subs:&subtitle pts:pkt->pts];
break;
default:
abort();
}
} else {
AVFrame avframe; //FIXME/XXX remove this
AVPacket opkt;
int64_t ost_tb_start_time= av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
av_init_packet(&opkt);
if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
continue;
/* no reencoding needed : output the packet directly */
/* force the input stream PTS */
avcodec_get_frame_defaults(&avframe);
ost->st->codec->coded_frame= &avframe;
avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY;
if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
audio_size += data_size;
else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
video_size += data_size;
ost->sync_opts++;
}
opkt.stream_index= ost->index;
if(pkt->pts != AV_NOPTS_VALUE)
opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
else
opkt.pts= AV_NOPTS_VALUE;
if (pkt->dts == AV_NOPTS_VALUE)
opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
else
opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
opkt.dts -= ost_tb_start_time;
opkt.duration = (int)av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
opkt.flags= pkt->flags;
//FIXME remove the following 2 lines they shall be replaced by the bitstream filters
if( ost->st->codec->codec_id != CODEC_ID_H264
&& ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
&& ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
) {
if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY))
opkt.destruct= av_destruct_packet;
} else {
opkt.data = data_buf;
opkt.size = data_size;
}
[self write_frame:os packet:&opkt codec:ost->st->codec filter:bitstream_filters[ost->file_index][opkt.stream_index]];
ost->st->codec->frame_number++;
ost->frame_number++;
av_free_packet(&opkt);
}
}
}
av_free(buffer_to_free);
/* XXX: allocate the subtitles in the codec ? */
if (subtitle_to_free) {
if (subtitle_to_free->rects != NULL) {
for (i = 0; i < subtitle_to_free->num_rects; i++) {
av_freep(&subtitle_to_free->rects[i]->pict.data[0]);
av_freep(&subtitle_to_free->rects[i]->pict.data[1]);
av_freep(&subtitle_to_free->rects[i]);
}
av_freep(&subtitle_to_free->rects);
}
subtitle_to_free->num_rects = 0;
subtitle_to_free = NULL;
}
}
discard_packet:
if (pkt == NULL) {
/* EOF handling */
for(i=0;i<nb_ostreams;i++) {
ost = ost_table[i];
if (ost->source_index == ist_index) {
AVCodecContext *enc= ost->st->codec;
os = output_files[ost->file_index];
if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
continue;
if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
continue;
if (ost->encoding_needed) {
for(;;) {
AVPacket pkt;
int fifo_bytes;
av_init_packet(&pkt);
pkt.stream_index= ost->index;
switch(ost->st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
fifo_bytes = av_fifo_size(ost->fifo);
ret = 0;
/* encode any samples remaining in fifo */
if (fifo_bytes > 0) {
int osize = av_get_bits_per_sample_format(enc->sample_fmt) >> 3;
int fs_tmp = enc->frame_size;
av_fifo_generic_read(ost->fifo, samples, fifo_bytes, NULL);
if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
enc->frame_size = fifo_bytes / (osize * enc->channels);
} else { /* pad */
int frame_bytes = enc->frame_size*osize*enc->channels;
if (samples_size < frame_bytes) {
stopConverting = YES;
goto fail_decode;
}
memset((uint8_t*)samples+fifo_bytes, 0, frame_bytes - fifo_bytes);
}
ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, samples);
pkt.duration = (int)av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
ost->st->time_base.num, enc->sample_rate);
enc->frame_size = fs_tmp;
}
if(ret <= 0) {
ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
}
if (ret < 0) {
[self sendErrorCode:68 message:@"Audio encoding failed"];
stopConverting = YES;
goto fail_decode;
}
audio_size += ret;
pkt.flags |= AV_PKT_FLAG_KEY;
break;
case AVMEDIA_TYPE_VIDEO:
ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
if (ret < 0) {
[self sendErrorCode:69 message:@"Video encoding failed"];
stopConverting = YES;
goto fail_decode;
}
video_size += ret;
if(enc->coded_frame && enc->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
if (ost->logfile && enc->stats_out) {
fprintf(ost->logfile, "%s", enc->stats_out);
}
break;
default:
ret=-1;
}
if(ret<=0)
break;
pkt.data= bit_buffer;
pkt.size= ret;
if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
[self write_frame:os packet:&pkt codec:ost->st->codec filter:bitstream_filters[ost->file_index][pkt.stream_index]];
}
}
}
}
}
return 0;
fail_decode:
return -1;
}
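/* Copy the chapters of an input file into an output file, shifting and clipping
   their timestamps to the start_time/recording_time window and copying their
   metadata. */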
- (int)copy_chapters:(int)infile output:(int)outfile {
AVFormatContext *is = input_files[infile];
AVFormatContext *os = output_files[outfile];
int i;
for (i = 0; i < is->nb_chapters; i++) {
AVChapter *in_ch = is->chapters[i], *out_ch;
AVMetadataTag *t = NULL;
int64_t ts_off = av_rescale_q(start_time - input_files_ts_offset[infile],
AV_TIME_BASE_Q, in_ch->time_base);
int64_t rt = (recording_time == INT64_MAX) ? INT64_MAX :
av_rescale_q(recording_time, AV_TIME_BASE_Q, in_ch->time_base);
if (in_ch->end < ts_off)
continue;
if (rt != INT64_MAX && in_ch->start > rt + ts_off)
break;
out_ch = av_mallocz(sizeof(AVChapter));
if (!out_ch)
return AVERROR(ENOMEM);
out_ch->id = in_ch->id;
out_ch->time_base = in_ch->time_base;
out_ch->start = FFMAX(0, in_ch->start - ts_off);
out_ch->end = FFMIN(rt, in_ch->end - ts_off);
while ((t = av_metadata_get(in_ch->metadata, "", t, AV_METADATA_IGNORE_SUFFIX)))
av_metadata_set2(&out_ch->metadata, t->key, t->value, 0);
os->nb_chapters++;
os->chapters = av_realloc(os->chapters, sizeof(AVChapter)*os->nb_chapters);
if (!os->chapters)
return AVERROR(ENOMEM);
os->chapters[os->nb_chapters - 1] = out_ch;
}
return 0;
}
- (int)read_file:(const char *)filename buffer:(char **)bufptr size:(size_t *)size {
FILE *f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "Cannot read file '%s': %s\n", filename, strerror(errno));
return AVERROR(errno);
}
fseek(f, 0, SEEK_END);
*size = ftell(f);
fseek(f, 0, SEEK_SET);
*bufptr = av_malloc((unsigned int)*size + 1);
if (!*bufptr) {
fprintf(stderr, "Could not allocate file buffer\n");
fclose(f);
return AVERROR(ENOMEM);
}
fread(*bufptr, 1, *size, f);
(*bufptr)[*size] = '\0';
fclose(f);
return 0;
}
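/* Progress reporting: at most every 0.5 s (or once at the end) compute output
   size, bitrate, frame number, fps, quantizer and the optional PSNR/qp
   histogram stats, print the status line, and notify the delegate with a
   completion percentage and time estimate. */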
- (void)send_report:(AVFormatContext **)theOutput_files inputFiles:(AVFormatContext **)theInput_files table:(AVOutputStream **)ost_table streams:(int)nb_ostreams lastReport:(int)is_last_report {
char buf[1024];
AVOutputStream *ost;
AVFormatContext *oc = theOutput_files[0], *input = theInput_files[0];
int64_t total_size;
AVCodecContext *enc;
int frame_number = 0, fps = 0, vid = 0;
double quality = 0, bitrate = 0, time = 0, pts = 0;
if (!is_last_report) {
int64_t cur_time;
/* display the report every 0.5 seconds */
cur_time = av_gettime();
if (last_time == -1) {
last_time = cur_time;
return;
}
if ((cur_time - last_time) < 500000)
return;
last_time = cur_time;
}
total_size = url_fsize(oc->pb);
if(total_size<0) // FIXME improve url_fsize() so it works with non seekable output too
total_size= url_ftell(oc->pb);
buf[0] = '\0';
time = 1e10;
for (int i=0; i<nb_ostreams; i++) {
ost = ost_table[i];
enc = ost->st->codec;
if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO)
quality = (!ost->st->stream_copy ? enc->coded_frame->quality/(float)FF_QP2LAMBDA : -1);
if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
float t = (av_gettime()-timer_start) / 1000000.0;
frame_number = ost->frame_number;
fps = ((t>1)?(int)(frame_number/t+0.5) : 0);
quality = (!ost->st->stream_copy ? enc->coded_frame->quality/(float)FF_QP2LAMBDA : -1);
if(qp_hist){
int j;
int qp= (int)lrintf(enc->coded_frame->quality/(float)FF_QP2LAMBDA);
if(qp>=0 && qp<FF_ARRAY_ELEMS(qp_histogram))
qp_histogram[qp]++;
for(j=0; j<32; j++)
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j]+1)/log(2)));
}
if (enc->flags&CODEC_FLAG_PSNR){
int j;
double error, error_sum=0;
double scale, scale_sum=0;
char type[3]= {'Y','U','V'};
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " PSNR=");
for(j=0; j<3; j++){
if(is_last_report){
error= enc->error[j];
scale= enc->width*enc->height*255.0*255.0*frame_number;
}else{
error= enc->coded_frame->error[j];
scale= enc->width*enc->height*255.0*255.0;
}
if(j) scale/=4;
error_sum += error;
scale_sum += scale;
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], [self psnr:(error/scale)]);
}
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", [self psnr:(error_sum/scale_sum)]);
}
vid = 1;
}
/* compute min output value */
pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
if ((pts < time) && (pts > 0))
time = pts;
}
if (time < 0.01)
time = 0.01;
bitrate = (double)(total_size * 8) / time / 1000.0;
if (verbose || is_last_report) {
if (verbose >= 0 && buf[0]!='\0')
fprintf(stderr, "%s \r", buf);
fflush(stderr);
}
if (is_last_report && verbose >= 0){
int64_t raw= audio_size + video_size + extra_size;
fprintf(stderr, "\n");
fprintf(stderr, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
video_size/1024.0,
audio_size/1024.0,
extra_size/1024.0,
100.0*(total_size - raw)/raw
);
}
if ([delegate respondsToSelector:@selector(updateStatus:estimatedTime:currentFrame:fps:quality:size:bitrate:time:video:)]) {
double percentDone, estimatedTime, duration = input->duration/1000000.0;
percentDone = time/duration;
estimatedTime = (duration-time)/(time-previousTime);
[delegate updateStatus:percentDone estimatedTime:estimatedTime currentFrame:frame_number fps:fps quality:quality size:total_size bitrate:bitrate time:time video:vid];
}
previousTime = time;
}
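/* Main transcode setup (ported from ffmpeg.c): build the input/output stream
   tables, validate the stream maps, pick a source stream for every output
   stream, and compute the encoding parameters (or copy them for stream copy)
   before decoding/encoding starts. */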
- (int)av_transcode:(AVFormatContext **)theOutput_files ouputCount:(int)theNB_output_files inputFiles:(AVFormatContext **)theInput_files inputCount:(int)theNB_input_files maps:(AVStreamMap *)theStream_maps mapsCount:(int)theNB_stream_maps {
int ret = 0, i, j, k, n, nb_istreams = 0, nb_ostreams = 0;
AVFormatContext *is, *os;
AVCodecContext *codec, *icodec;
AVOutputStream *ost, **ost_table = NULL;
AVInputStream *ist, **ist_table = NULL;
AVInputFile *file_table;
char error[1024];
int want_sdp = 1;
uint8_t no_packet[MAX_FILES]={0};
int no_packet_count=0;
file_table= av_mallocz(theNB_input_files * sizeof(AVInputFile));
if (!file_table)
goto fail;
/* input stream init */
j = 0;
for(i=0;i<theNB_input_files;i++) {
is = theInput_files[i];
file_table[i].ist_index = j;
file_table[i].nb_streams = is->nb_streams;
j += is->nb_streams;
}
nb_istreams = j;
ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *));
if (!ist_table)
goto fail;
for(i=0;i<nb_istreams;i++) {
ist = av_mallocz(sizeof(AVInputStream));
if (!ist)
goto fail;
ist_table[i] = ist;
}
j = 0;
for(i=0;i<theNB_input_files;i++) {
is = theInput_files[i];
for(k=0;k<is->nb_streams;k++) {
ist = ist_table[j++];
ist->st = is->streams[k];
ist->file_index = i;
ist->index = k;
ist->discard = 1; /* the stream is discarded by default
(changed later) */
if (rate_emu) {
ist->start = av_gettime();
}
}
}
/* output stream init */
nb_ostreams = 0;
for(i=0;i<theNB_output_files;i++) {
os = theOutput_files[i];
if (!os->nb_streams) {
dump_format(theOutput_files[i], i, theOutput_files[i]->filename, 1);
[self sendErrorCode:70 message:@"Output file #%d does not contain any stream", i];
stopConverting = YES;
return -1;
}
nb_ostreams += os->nb_streams;
}
if (theNB_stream_maps > 0 && theNB_stream_maps != nb_ostreams) {
[self sendErrorCode:71 message:@"Number of stream maps must match number of output streams"];
stopConverting = YES;
return -1;
}
/* Sanity check the mapping args -- do the input files & streams exist? */
for(i=0;i<theNB_stream_maps;i++) {
int fi = theStream_maps[i].file_index;
int si = theStream_maps[i].stream_index;
if (fi < 0 || fi > theNB_input_files - 1 ||
si < 0 || si > file_table[fi].nb_streams - 1) {
[self sendErrorCode:72 message:@"Could not find input stream #%d.%d", fi, si];
stopConverting = YES;
return -1;
}
fi = theStream_maps[i].sync_file_index;
si = theStream_maps[i].sync_stream_index;
if (fi < 0 || fi > theNB_input_files - 1 ||
si < 0 || si > file_table[fi].nb_streams - 1) {
[self sendErrorCode:73 message:@"Could not find sync stream #%d.%d", fi, si];
stopConverting = YES;
return -1;
}
}
ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams);
if (!ost_table)
goto fail;
for(i=0;i<nb_ostreams;i++) {
ost = av_mallocz(sizeof(AVOutputStream));
if (!ost)
goto fail;
ost_table[i] = ost;
}
n = 0;
for(k=0;k<theNB_output_files;k++) {
os = theOutput_files[k];
for(i=0;i<os->nb_streams;i++,n++) {
int found;
ost = ost_table[n];
ost->file_index = k;
ost->index = i;
ost->st = os->streams[i];
if (theNB_stream_maps > 0) {
ost->source_index = file_table[theStream_maps[n].file_index].ist_index +
theStream_maps[n].stream_index;
/* Sanity check that the stream types match */
if (ist_table[ost->source_index]->st->codec->codec_type != ost->st->codec->codec_type) {
int i= ost->file_index;
dump_format(theOutput_files[i], i, theOutput_files[i]->filename, 1);
[self sendErrorCode:74 message:@"Codec type mismatch for mapping #%d.%d -> #%d.%d", theStream_maps[n].file_index, theStream_maps[n].stream_index, ost->file_index, ost->index];
stopConverting = YES;
return -1;
}
} else {
int best_nb_frames=-1;
/* get corresponding input stream index : we select the first one with the right type */
found = 0;
for(j=0;j<nb_istreams;j++) {
int skip=0;
ist = ist_table[j];
if(opt_programid){
int pi,si;
AVFormatContext *f= theInput_files[ ist->file_index ];
skip=1;
for(pi=0; pi<f->nb_programs; pi++){
AVProgram *p= f->programs[pi];
if(p->id == opt_programid)
for(si=0; si<p->nb_stream_indexes; si++){
if(f->streams[ p->stream_index[si] ] == ist->st)
skip=0;
}
}
}
if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip &&
ist->st->codec->codec_type == ost->st->codec->codec_type) {
if(best_nb_frames < ist->st->codec_info_nb_frames){
best_nb_frames= ist->st->codec_info_nb_frames;
ost->source_index = j;
found = 1;
}
}
}
if (!found) {
if(! opt_programid) {
/* try again and reuse existing stream */
for(j=0;j<nb_istreams;j++) {
ist = ist_table[j];
if ( ist->st->codec->codec_type == ost->st->codec->codec_type
&& ist->st->discard != AVDISCARD_ALL) {
ost->source_index = j;
found = 1;
}
}
}
if (!found) {
int i= ost->file_index;
dump_format(theOutput_files[i], i, theOutput_files[i]->filename, 1);
[self sendErrorCode:75 message:@"Could not find input stream matching output stream #%d.%d", ost->file_index, ost->index];
stopConverting = YES;
return -1;
}
}
}
ist = ist_table[ost->source_index];
ist->discard = 0;
ost->sync_ist = (theNB_stream_maps > 0) ?
ist_table[file_table[theStream_maps[n].sync_file_index].ist_index +
theStream_maps[n].sync_stream_index] : ist;
}
}
/* for each output stream, we compute the right encoding parameters */
for(i=0;i<nb_ostreams;i++) {
AVMetadataTag *t = NULL;
ost = ost_table[i];
os = theOutput_files[ost->file_index];
ist = ist_table[ost->source_index];
codec = ost->st->codec;
icodec = ist->st->codec;
while ((t = av_metadata_get(ist->st->metadata, "", t, AV_METADATA_IGNORE_SUFFIX))) {
av_metadata_set2(&ost->st->metadata, t->key, t->value, AV_METADATA_DONT_OVERWRITE);
}
ost->st->disposition = ist->st->disposition;
codec->bits_per_raw_sample= icodec->bits_per_raw_sample;
codec->chroma_sample_location = icodec->chroma_sample_location;
if (ost->st->stream_copy) {
/* if stream_copy is selected, no need to decode or encode */
codec->codec_id = icodec->codec_id;
codec->codec_type = icodec->codec_type;
if(!codec->codec_tag){
if( !os->oformat->codec_tag
|| av_codec_get_id (os->oformat->codec_tag, icodec->codec_tag) == codec->codec_id
|| av_codec_get_tag(os->oformat->codec_tag, icodec->codec_id) <= 0)
codec->codec_tag = icodec->codec_tag;
}
codec->bit_rate = icodec->bit_rate;
codec->extradata= icodec->extradata;
codec->extradata_size= icodec->extradata_size;
if(av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/1000){
codec->time_base = icodec->time_base;
codec->time_base.num *= icodec->ticks_per_frame;
}else
codec->time_base = ist->st->time_base;
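/* stream copy: the remaining per-type fields are carried straight over
   from the input codec context */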
switch(codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
if(audio_volume != 256) {
[self sendErrorCode:76 message:@"-acodec copy and -vol are incompatible (frames are not decoded)"];
stopConverting = YES;
return -1;
}
codec->channel_layout = icodec->channel_layout;
codec->sample_rate = icodec->sample_rate;
codec->channels = icodec->channels;
codec->frame_size = icodec->frame_size;
codec->block_align= icodec->block_align;
if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3)
codec->block_align= 0;
if(codec->codec_id == CODEC_ID_AC3)
codec->block_align= 0;
break;
case AVMEDIA_TYPE_VIDEO:
codec->pix_fmt = icodec->pix_fmt;
codec->width = icodec->width;
codec->height = icodec->height;
codec->has_b_frames = icodec->has_b_frames;
break;
case AVMEDIA_TYPE_SUBTITLE:
codec->width = icodec->width;
codec->height = icodec->height;
break;
default:
abort();
}
} else {
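/* re-encoding path: the input must be decoded and the output encoded,
   with resampling or rescaling set up as needed below */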
switch(codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
ost->fifo= av_fifo_alloc(1024);
if(!ost->fifo)
goto fail;
ost->reformat_pair = MAKE_SFMT_PAIR(SAMPLE_FMT_NONE,SAMPLE_FMT_NONE);
ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1;
icodec->request_channels = codec->channels;
ist->decoding_needed = 1;
ost->encoding_needed = 1;
break;
case AVMEDIA_TYPE_VIDEO:
if (ost->st->codec->pix_fmt == PIX_FMT_NONE) {
[self sendErrorCode:77 message:@"Video pixel format is unknown, stream cannot be encoded"];
stopConverting = YES;
return -1;
}
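/* decide whether cropping, padding or rescaling is required by comparing
   the requested output geometry and pixel format with the input's */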
ost->video_crop = ((frame_leftBand + frame_rightBand + frame_topBand + frame_bottomBand) != 0);
ost->video_pad = ((frame_padleft + frame_padright + frame_padtop + frame_padbottom) != 0);
ost->video_resample = ((codec->width != icodec->width -
(frame_leftBand + frame_rightBand) +
(frame_padleft + frame_padright)) ||
(codec->height != icodec->height -
(frame_topBand + frame_bottomBand) +
(frame_padtop + frame_padbottom)) ||
(codec->pix_fmt != icodec->pix_fmt));
if (ost->video_crop) {
ost->topBand = ost->original_topBand = frame_topBand;
ost->bottomBand = ost->original_bottomBand = frame_bottomBand;
ost->leftBand = ost->original_leftBand = frame_leftBand;
ost->rightBand = ost->original_rightBand = frame_rightBand;
}
if (ost->video_pad) {
ost->padtop = frame_padtop;
ost->padleft = frame_padleft;
ost->padbottom = frame_padbottom;
ost->padright = frame_padright;
if (!ost->video_resample) {
avcodec_get_frame_defaults(&ost->pict_tmp);
if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
codec->width, codec->height))
goto fail;
}
}
if (ost->video_resample) {
avcodec_get_frame_defaults(&ost->pict_tmp);
if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
codec->width, codec->height)) {
[self sendErrorCode:78 message:@"Cannot allocate temp picture, check pix fmt"];
stopConverting = YES;
return -1;
}
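/* build a swscale context converting the cropped input frame size to
   the output size minus padding */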
sws_flags = (unsigned int)av_get_int(sws_opts, "sws_flags", NULL);
ost->img_resample_ctx = sws_getContext(
icodec->width - (frame_leftBand + frame_rightBand),
icodec->height - (frame_topBand + frame_bottomBand),
icodec->pix_fmt,
codec->width - (frame_padleft + frame_padright),
codec->height - (frame_padtop + frame_padbottom),
codec->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (ost->img_resample_ctx == NULL) {
[self sendErrorCode:79 message:@"Cannot get resampling context"];
stopConverting = YES;
return -1;
}
ost->original_height = icodec->height;
ost->original_width = icodec->width;
codec->bits_per_raw_sample= 0;
}
ost->resample_height = icodec->height - (frame_topBand + frame_bottomBand);
ost->resample_width = icodec->width - (frame_leftBand + frame_rightBand);
ost->resample_pix_fmt= icodec->pix_fmt;
ost->encoding_needed = 1;
ist->decoding_needed = 1;
break;
case AVMEDIA_TYPE_SUBTITLE:
ost->encoding_needed = 1;
ist->decoding_needed = 1;
break;
default:
abort();
break;
}
/* two pass mode */
if (ost->encoding_needed &&
(codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
char logfilename[1024];
FILE *f;
snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
i);
if (codec->flags & CODEC_FLAG_PASS1) {
f = fopen(logfilename, "wb");
if (!f) {
[self sendErrorCode:80 message:@"Cannot write log file '%s' for pass-1 encoding: %s", logfilename, strerror(errno)];
stopConverting = YES;
return -1;
}
ost->logfile = f;
} else {
char *logbuffer;
size_t logbuffer_size;
if ([self read_file:logfilename buffer:&logbuffer size:&logbuffer_size] < 0) {
[self sendErrorCode:81 message:@"Error reading log file '%s' for pass-2 encoding", logfilename];
stopConverting = YES;
return -1;
}
codec->stats_in = logbuffer;
}
}
}
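/* track the largest raw video frame so the shared encode output buffer
   allocated below is big enough */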
if(codec->codec_type == AVMEDIA_TYPE_VIDEO){
int size= codec->width * codec->height;
bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 200);
}
}
if (!bit_buffer)
bit_buffer = av_malloc(bit_buffer_size);
if (!bit_buffer) {
fprintf(stderr, "Cannot allocate %d bytes output buffer\n",
bit_buffer_size);
ret = AVERROR(ENOMEM);
goto fail;
}
/* open each encoder */
for(i=0;i<nb_ostreams;i++) {
ost = ost_table[i];
if (ost->encoding_needed) {
AVCodec *codec = output_codecs[i];
if (!codec)
codec = avcodec_find_encoder(ost->st->codec->codec_id);
if (!codec) {
snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d.%d",
ost->st->codec->codec_id, ost->file_index, ost->index);
ret = AVERROR(EINVAL);
goto dump_format;
}
if (avcodec_open(ost->st->codec, codec) < 0) {
snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height",
ost->file_index, ost->index);
ret = AVERROR(EINVAL);
goto dump_format;
}
extra_size += ost->st->codec->extradata_size;
}
}
/* open each decoder */
for(i=0;i<nb_istreams;i++) {
ist = ist_table[i];
if (ist->decoding_needed) {
AVCodec *codec = input_codecs[i];
if (!codec)
codec = avcodec_find_decoder(ist->st->codec->codec_id);
if (!codec) {
snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d",
ist->st->codec->codec_id, ist->file_index, ist->index);
ret = AVERROR(EINVAL);
goto dump_format;
}
if (avcodec_open(ist->st->codec, codec) < 0) {
snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d",
ist->file_index, ist->index);
ret = AVERROR(EINVAL);
goto dump_format;
}
//if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
// ist->st->codec->flags |= CODEC_FLAG_REPEAT_FIELD;
}
}
/* init pts */
for(i=0;i<nb_istreams;i++) {
AVStream *st;
ist = ist_table[i];
st= ist->st;
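/* start the decode pts slightly negative to absorb b-frame reordering delay
   when the average frame rate is known */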
ist->pts = st->avg_frame_rate.num ? - st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0;
ist->next_pts = AV_NOPTS_VALUE;
ist->is_start = 1;
}
/* set meta data information from input file if required */
for (i=0;i<nb_meta_data_maps;i++) {
AVFormatContext *out_file;
AVFormatContext *in_file;
AVMetadataTag *mtag;
int out_file_index = meta_data_maps[i].out_file;
int in_file_index = meta_data_maps[i].in_file;
if (out_file_index < 0 || out_file_index >= theNB_output_files) {
snprintf(error, sizeof(error), "Invalid output file index %d map_meta_data(%d,%d)",
out_file_index, out_file_index, in_file_index);
ret = AVERROR(EINVAL);
goto dump_format;
}
if (in_file_index < 0 || in_file_index >= theNB_input_files) {
snprintf(error, sizeof(error), "Invalid input file index %d map_meta_data(%d,%d)",
in_file_index, out_file_index, in_file_index);
ret = AVERROR(EINVAL);
goto dump_format;
}
out_file = theOutput_files[out_file_index];
in_file = theInput_files[in_file_index];
mtag=NULL;
while((mtag=av_metadata_get(in_file->metadata, "", mtag, AV_METADATA_IGNORE_SUFFIX)))
av_metadata_set2(&out_file->metadata, mtag->key, mtag->value, AV_METADATA_DONT_OVERWRITE);
av_metadata_conv(out_file, out_file->oformat->metadata_conv,
in_file->iformat->metadata_conv);
}
/* copy chapters from the first input file that has them */
for (i = 0; i < theNB_input_files; i++) {
if (!theInput_files[i]->nb_chapters)
continue;
for (j = 0; j < theNB_output_files; j++)
if ((ret = [self copy_chapters:i output:j]) < 0)
goto dump_format;
}
/* open files and write file headers */
for(i=0;i<theNB_output_files;i++) {
os = theOutput_files[i];
if (av_write_header(os) < 0) {
snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
ret = AVERROR(EINVAL);
goto dump_format;
}
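/* an SDP description is only wanted when every output format is RTP */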
if (strcmp(theOutput_files[i]->oformat->name, "rtp")) {
want_sdp = 0;
}
}
dump_format:
/* dump the file output parameters - cannot be done before in case
of stream copy */
for(i=0;i<theNB_output_files;i++) {
dump_format(theOutput_files[i], i, theOutput_files[i]->filename, 1);
}
/* dump the stream mapping */
if (verbose >= 0) {
fprintf(stderr, "Stream mapping:\n");
for(i=0;i<nb_ostreams;i++) {
ost = ost_table[i];
fprintf(stderr, " Stream #%d.%d -> #%d.%d",
ist_table[ost->source_index]->file_index,
ist_table[ost->source_index]->index,
ost->file_index,
ost->index);
if (ost->sync_ist != ist_table[ost->source_index])
fprintf(stderr, " [sync #%d.%d]",
ost->sync_ist->file_index,
ost->sync_ist->index);
fprintf(stderr, "\n");
}
}
if (ret) {
fprintf(stderr, "%s\n", error);
goto fail;
}
if (want_sdp) {
[self print_sdp:theOutput_files number:theNB_output_files];
}
timer_start = av_gettime();
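/* main loop: pick the input file feeding the output stream with the smallest
   pts, read one packet from it and push it through the decode/encode pipeline */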
while(!stopConverting) {
int file_index, ist_index;
AVPacket pkt;
double ipts_min;
double opts_min;
redo:
ipts_min= 1e100;
opts_min= 1e100;
/* select the stream that we must read now by looking at the
smallest output pts */
file_index = -1;
for(i=0;i<nb_ostreams;i++) {
double ipts, opts;
ost = ost_table[i];
os = theOutput_files[ost->file_index];
ist = ist_table[ost->source_index];
if(ist->is_past_recording_time || no_packet[ist->file_index])
continue;
opts = ost->st->pts.val * av_q2d(ost->st->time_base);
ipts = (double)ist->pts;
if (!file_table[ist->file_index].eof_reached){
if(ipts < ipts_min) {
ipts_min = ipts;
if(input_sync ) file_index = ist->file_index;
}
if(opts < opts_min) {
opts_min = opts;
if(!input_sync) file_index = ist->file_index;
}
}
if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){
file_index= -1;
break;
}
}
/* if none, it is finished */
if (file_index < 0) {
if(no_packet_count){
no_packet_count=0;
memset(no_packet, 0, sizeof(no_packet));
usleep(10000);
continue;
}
break;
}
/* finish if limit size exhausted */
if (limit_filesize != 0 && limit_filesize < url_ftell(theOutput_files[0]->pb))
break;
/* read a frame from it and output it in the fifo */
is = theInput_files[file_index];
ret= av_read_frame(is, &pkt);
if(ret == AVERROR(EAGAIN)){
no_packet[file_index]=1;
no_packet_count++;
continue;
}
if (ret < 0) {
file_table[file_index].eof_reached = 1;
if (opt_shortest)
break;
else
continue;
}
no_packet_count=0;
memset(no_packet, 0, sizeof(no_packet));
if (do_pkt_dump) {
av_pkt_dump_log(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump);
}
/* the following test is needed in case new streams appear
 dynamically in the stream: we ignore them */
if (pkt.stream_index >= file_table[file_index].nb_streams)
goto discard_packet;
ist_index = file_table[file_index].ist_index + pkt.stream_index;
ist = ist_table[ist_index];
if (ist->discard)
goto discard_packet;
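/* shift the packet timestamps by the per-input-file offset and apply any
   per-stream timestamp scale */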
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts += av_rescale_q(input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts += av_rescale_q(input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ist->st->time_base);
if(input_files_ts_scale[file_index][pkt.stream_index]){
if(pkt.pts != AV_NOPTS_VALUE)
pkt.pts *= input_files_ts_scale[file_index][pkt.stream_index];
if(pkt.dts != AV_NOPTS_VALUE)
pkt.dts *= input_files_ts_scale[file_index][pkt.stream_index];
}
// fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files_ts_offset[ist->file_index], ist->st->codec->codec_type);
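/* compensate for timestamp discontinuities in formats that allow them
   (AVFMT_TS_DISCONT) by adjusting the file's timestamp offset */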
if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE
&& (is->iformat->flags & AVFMT_TS_DISCONT)) {
int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
int64_t delta= pkt_dts - ist->next_pts;
if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1<ist->pts)&& !copy_ts){
input_files_ts_offset[ist->file_index]-= delta;
if (verbose > 2)
fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, input_files_ts_offset[ist->file_index]);
pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if(pkt.pts != AV_NOPTS_VALUE)
pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
}
}
/* finish if recording time exhausted */
if (recording_time != INT64_MAX &&
av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) >= 0) {
ist->is_past_recording_time = 1;
goto discard_packet;
}
//fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->index, pkt.size);
if ([self output_packet:ist index:ist_index table:ost_table streams:nb_ostreams packet:&pkt] < 0) {
if (verbose >= 0)
[self sendErrorCode:82 message:@"Error while decoding stream #%d.%d",
ist->file_index, ist->index];
if (exit_on_error)
stopConverting = YES;
av_free_packet(&pkt);
goto redo;
}
discard_packet:
av_free_packet(&pkt);
/* dump report using the first output video and audio streams */
[self send_report:theOutput_files inputFiles:(AVFormatContext **)theInput_files table:ost_table streams:nb_ostreams lastReport:0];
}
/* at the end of stream, we must flush the decoder buffers */
for(i=0;i<nb_istreams;i++) {
ist = ist_table[i];
if (ist->decoding_needed) {
[self output_packet:ist index:i table:ost_table streams:nb_ostreams packet:NULL];
}
}
/* write the trailer if needed and close file */
for(i=0;i<theNB_output_files;i++) {
os = theOutput_files[i];
av_write_trailer(os);
}
/* dump report by using the first video and audio streams */
[self send_report:theOutput_files inputFiles:(AVFormatContext **)theInput_files table:ost_table streams:nb_ostreams lastReport:1];
/* close each encoder */
for(i=0;i<nb_ostreams;i++) {
ost = ost_table[i];
if (ost->encoding_needed) {
av_freep(&ost->st->codec->stats_in);
avcodec_close(ost->st->codec);
}
}
/* close each decoder */
for(i=0;i<nb_istreams;i++) {
ist = ist_table[i];
if (ist->decoding_needed) {
avcodec_close(ist->st->codec);
}
}
/* finished! */
ret = 0;
fail:
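/* common cleanup path: release the bit buffer, the input/output stream tables
   and any per-stream scaling or resampling state */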
av_freep(&bit_buffer);
av_free(file_table);
if (ist_table) {
for(i=0;i<nb_istreams;i++) {
ist = ist_table[i];
av_free(ist);
}
av_free(ist_table);
}
if (ost_table) {
for(i=0;i<nb_ostreams;i++) {
ost = ost_table[i];
if (ost) {
if (ost->logfile) {
fclose(ost->logfile);
ost->logfile = NULL;
}
av_fifo_free(ost->fifo); /* works even if fifo is not
initialized but set to zero */
av_free(ost->pict_tmp.data[0]);
if (ost->video_resample)
sws_freeContext(ost->img_resample_ctx);
if (ost->resample)
audio_resample_close(ost->resample);
if (ost->reformat_ctx)
av_audio_convert_free(ost->reformat_ctx);
av_free(ost);
}
}
av_free(ost_table);
}
return ret;
}
#if !TARGET_OS_IPHONE
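/* when the application quits mid-conversion, ask the transcode loop to stop and
   defer termination until the background thread reports back */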
- (NSApplicationTerminateReply)applicationShouldTerminate:(NSApplication *)sender {
if (isConverting) {
stopConverting = YES;
stoppedByQuit = YES;
return NSTerminateLater;
}
return NSTerminateNow;
}
#endif
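/* run the conversion on a background thread so the caller's thread is not blocked */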
- (void)startConverting {
[NSThread detachNewThreadSelector:@selector(startConvertingBackground) toTarget:self withObject:nil];
}
- (void)startConvertingBackground {
NSAutoreleasePool *pool = [NSAutoreleasePool new];
int64_t ti;
if(nb_output_files <= 0 && nb_input_files == 0) {
[self sendErrorCode:83 message:@"No input or output specified"];
goto cleanup;
}
/* file converter / grab */
if (nb_output_files <= 0) {
[self sendErrorCode:84 message:@"At least one output file must be specified"];
goto cleanup;
}
if (nb_input_files == 0) {
[self sendErrorCode:85 message:@"At least one input file must be specified"];
goto cleanup;
}
isConverting = YES;
ti = [self getutime];
if ([self av_transcode:output_files ouputCount:nb_output_files inputFiles:input_files inputCount:nb_input_files maps:stream_maps mapsCount:nb_stream_maps] < 0)
goto cleanup;
ti = [self getutime] - ti;
if (do_benchmark) {
int maxrss = (int)([self getmaxrss] / 1024);
printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
}
cleanup:
isConverting = NO;
stopConverting = NO;
#if !TARGET_OS_IPHONE
if (stoppedByQuit)
[[NSApplication sharedApplication] replyToApplicationShouldTerminate:YES];
#endif
if ([delegate respondsToSelector:@selector(conversionFinished)])
[delegate conversionFinished];
[pool drain];
}
@end
#endif