VirtualBox

source: vbox/trunk/src/libs/ffmpeg-20060710/libavformat/utils.c @ 11537

Last change on this file since 11537 was 5776, checked in by vboxsync, 17 years ago

ffmpeg: exported to OSE

File size: 93.8 KB
1/*
2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19#include "avformat.h"
20#include "allformats.h"
21
22#undef NDEBUG
23#include <assert.h>
24
25/**
26 * @file libavformat/utils.c
27 * Various utility functions for using the ffmpeg library.
28 */
29
30static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
31static void av_frac_add(AVFrac *f, int64_t incr);
32static void av_frac_set(AVFrac *f, int64_t val);
33
34/** head of registered input format linked list. */
35AVInputFormat *first_iformat = NULL;
36/** head of registered output format linked list. */
37AVOutputFormat *first_oformat = NULL;
38/** head of registered image format linked list. */
39AVImageFormat *first_image_format = NULL;
40
41void av_register_input_format(AVInputFormat *format)
42{
43 AVInputFormat **p;
44 p = &first_iformat;
45 while (*p != NULL) p = &(*p)->next;
46 *p = format;
47 format->next = NULL;
48}
49
50void av_register_output_format(AVOutputFormat *format)
51{
52 AVOutputFormat **p;
53 p = &first_oformat;
54 while (*p != NULL) p = &(*p)->next;
55 *p = format;
56 format->next = NULL;
57}
58
59int match_ext(const char *filename, const char *extensions)
60{
61 const char *ext, *p;
62 char ext1[32], *q;
63
64 if(!filename)
65 return 0;
66
67 ext = strrchr(filename, '.');
68 if (ext) {
69 ext++;
70 p = extensions;
71 for(;;) {
72 q = ext1;
73 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
74 *q++ = *p++;
75 *q = '\0';
76 if (!strcasecmp(ext1, ext))
77 return 1;
78 if (*p == '\0')
79 break;
80 p++;
81 }
82 }
83 return 0;
84}
85
86AVOutputFormat *guess_format(const char *short_name, const char *filename,
87 const char *mime_type)
88{
89 AVOutputFormat *fmt, *fmt_found;
90 int score_max, score;
91
92 /* specific test for image sequences */
93 if (!short_name && filename &&
94 filename_number_test(filename) >= 0 &&
95 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
96 return guess_format("image2", NULL, NULL);
97 }
98 if (!short_name && filename &&
99 filename_number_test(filename) >= 0 &&
100 guess_image_format(filename)) {
101 return guess_format("image", NULL, NULL);
102 }
103
104 /* find the proper file type */
105 fmt_found = NULL;
106 score_max = 0;
107 fmt = first_oformat;
108 while (fmt != NULL) {
109 score = 0;
110 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
111 score += 100;
112 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
113 score += 10;
114 if (filename && fmt->extensions &&
115 match_ext(filename, fmt->extensions)) {
116 score += 5;
117 }
118 if (score > score_max) {
119 score_max = score;
120 fmt_found = fmt;
121 }
122 fmt = fmt->next;
123 }
124 return fmt_found;
125}
126
127AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
128 const char *mime_type)
129{
130 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
131
132 if (fmt) {
133 AVOutputFormat *stream_fmt;
134 char stream_format_name[64];
135
136 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
137 stream_fmt = guess_format(stream_format_name, NULL, NULL);
138
139 if (stream_fmt)
140 fmt = stream_fmt;
141 }
142
143 return fmt;
144}
145
146/**
147 * Guesses the codec id based upon muxer and filename.
148 */
149enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
150 const char *filename, const char *mime_type, enum CodecType type){
151 if(type == CODEC_TYPE_VIDEO){
152 enum CodecID codec_id= CODEC_ID_NONE;
153
154 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
155 codec_id= av_guess_image2_codec(filename);
156 }
157 if(codec_id == CODEC_ID_NONE)
158 codec_id= fmt->video_codec;
159 return codec_id;
160 }else if(type == CODEC_TYPE_AUDIO)
161 return fmt->audio_codec;
162 else
163 return CODEC_ID_NONE;
164}
165
166/**
167 * finds AVInputFormat based on input format's short name.
168 */
169AVInputFormat *av_find_input_format(const char *short_name)
170{
171 AVInputFormat *fmt;
172 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
173 if (!strcmp(fmt->name, short_name))
174 return fmt;
175 }
176 return NULL;
177}
178
179/* memory handling */
180
181/**
182 * Default packet destructor.
183 */
184void av_destruct_packet(AVPacket *pkt)
185{
186 av_free(pkt->data);
187 pkt->data = NULL; pkt->size = 0;
188}
189
190/**
191 * Allocate the payload of a packet and initialize its fields to default values.
192 *
193 * @param pkt packet
194 * @param size wanted payload size
195 * @return 0 if OK. AVERROR_xxx otherwise.
196 */
197int av_new_packet(AVPacket *pkt, int size)
198{
199 void *data;
200 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
201 return AVERROR_NOMEM;
202 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
203 if (!data)
204 return AVERROR_NOMEM;
205 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
206
207 av_init_packet(pkt);
208 pkt->data = data;
209 pkt->size = size;
210 pkt->destruct = av_destruct_packet;
211 return 0;
212}
213
214/**
215 * Allocate and read the payload of a packet and initialize its fields to default values.
216 *
217 * @param pkt packet
218 * @param size wanted payload size
219 * @return >0 (read size) if OK. AVERROR_xxx otherwise.
220 */
221int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
222{
223 int ret= av_new_packet(pkt, size);
224
225 if(ret<0)
226 return ret;
227
228 pkt->pos= url_ftell(s);
229
230 ret= get_buffer(s, pkt->data, size);
231 if(ret<=0)
232 av_free_packet(pkt);
233 else
234 pkt->size= ret;
235
236 return ret;
237}
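/*
 * Usage sketch (relying only on av_new_packet() above and the
 * av_free_packet()/AVERROR_NOMEM helpers from avformat.h already used in
 * this file): allocate a padded packet, fill the payload, release it.
 */
#if 0
static int example_packet_alloc(void)
{
    AVPacket pkt;
    int i;

    if (av_new_packet(&pkt, 1024) < 0)      /* payload + zeroed padding */
        return AVERROR_NOMEM;
    for (i = 0; i < pkt.size; i++)
        pkt.data[i] = i & 0xff;             /* fill the payload */
    av_free_packet(&pkt);                   /* calls pkt.destruct (av_destruct_packet) */
    return 0;
}
#endif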
238
239/* This is a hack - the packet memory allocation stuff is broken. The
240 packet data is duplicated here if it was not really allocated by us */
241int av_dup_packet(AVPacket *pkt)
242{
243 if (pkt->destruct != av_destruct_packet) {
244 uint8_t *data;
245 /* we duplicate the packet and don't forget to put the padding
246 again */
247 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
248 return AVERROR_NOMEM;
249 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
250 if (!data) {
251 return AVERROR_NOMEM;
252 }
253 memcpy(data, pkt->data, pkt->size);
254 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
255 pkt->data = data;
256 pkt->destruct = av_destruct_packet;
257 }
258 return 0;
259}
260
261/* fifo handling */
262
263int fifo_init(FifoBuffer *f, int size)
264{
265 f->buffer = av_malloc(size);
266 if (!f->buffer)
267 return -1;
268 f->end = f->buffer + size;
269 f->wptr = f->rptr = f->buffer;
270 return 0;
271}
272
273void fifo_free(FifoBuffer *f)
274{
275 av_free(f->buffer);
276}
277
278int fifo_size(FifoBuffer *f, uint8_t *rptr)
279{
280 int size;
281
282 if(!rptr)
283 rptr= f->rptr;
284
285 if (f->wptr >= rptr) {
286 size = f->wptr - rptr;
287 } else {
288 size = (f->end - rptr) + (f->wptr - f->buffer);
289 }
290 return size;
291}
292
293/**
294 * Get data from the fifo (returns -1 if not enough data).
295 */
296int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
297{
298 uint8_t *rptr;
299 int size, len;
300
301 if(!rptr_ptr)
302 rptr_ptr= &f->rptr;
303 rptr = *rptr_ptr;
304
305 if (f->wptr >= rptr) {
306 size = f->wptr - rptr;
307 } else {
308 size = (f->end - rptr) + (f->wptr - f->buffer);
309 }
310
311 if (size < buf_size)
312 return -1;
313 while (buf_size > 0) {
314 len = f->end - rptr;
315 if (len > buf_size)
316 len = buf_size;
317 memcpy(buf, rptr, len);
318 buf += len;
319 rptr += len;
320 if (rptr >= f->end)
321 rptr = f->buffer;
322 buf_size -= len;
323 }
324 *rptr_ptr = rptr;
325 return 0;
326}
327
328/**
329 * Resizes a FIFO.
330 */
331void fifo_realloc(FifoBuffer *f, unsigned int new_size){
332 unsigned int old_size= f->end - f->buffer;
333
334 if(old_size < new_size){
335 uint8_t *old= f->buffer;
336
337 f->buffer= av_realloc(f->buffer, new_size);
338
339 f->rptr += f->buffer - old;
340 f->wptr += f->buffer - old;
341
342 if(f->wptr < f->rptr){
343 memmove(f->rptr + new_size - old_size, f->rptr, f->buffer + old_size - f->rptr);
344 f->rptr += new_size - old_size;
345 }
346 f->end= f->buffer + new_size;
347 }
348}
349
350void fifo_write(FifoBuffer *f, const uint8_t *buf, int size, uint8_t **wptr_ptr)
351{
352 int len;
353 uint8_t *wptr;
354
355 if(!wptr_ptr)
356 wptr_ptr= &f->wptr;
357 wptr = *wptr_ptr;
358
359 while (size > 0) {
360 len = f->end - wptr;
361 if (len > size)
362 len = size;
363 memcpy(wptr, buf, len);
364 wptr += len;
365 if (wptr >= f->end)
366 wptr = f->buffer;
367 buf += len;
368 size -= len;
369 }
370 *wptr_ptr = wptr;
371}
372
373/* write buf_size bytes from the fifo to 'pb' (return -1 if not enough data) */
374int put_fifo(ByteIOContext *pb, FifoBuffer *f, int buf_size, uint8_t **rptr_ptr)
375{
376 uint8_t *rptr = *rptr_ptr;
377 int size, len;
378
379 if (f->wptr >= rptr) {
380 size = f->wptr - rptr;
381 } else {
382 size = (f->end - rptr) + (f->wptr - f->buffer);
383 }
384
385 if (size < buf_size)
386 return -1;
387 while (buf_size > 0) {
388 len = f->end - rptr;
389 if (len > buf_size)
390 len = buf_size;
391 put_buffer(pb, rptr, len);
392 rptr += len;
393 if (rptr >= f->end)
394 rptr = f->buffer;
395 buf_size -= len;
396 }
397 *rptr_ptr = rptr;
398 return 0;
399}
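/*
 * Usage sketch for the FIFO helpers above (sizes here are arbitrary): a
 * NULL read/write pointer argument means the FifoBuffer's own rptr/wptr
 * are used and advanced.
 */
#if 0
static void example_fifo(void)
{
    FifoBuffer f;
    uint8_t in[16], out[16];

    memset(in, 0xAA, sizeof(in));
    if (fifo_init(&f, 64) < 0)                  /* 64 byte ring buffer */
        return;
    fifo_write(&f, in, sizeof(in), NULL);       /* queue 16 bytes */
    /* fifo_size(&f, NULL) now returns 16 */
    if (fifo_read(&f, out, sizeof(out), NULL) == 0) {
        /* 16 bytes copied out, FIFO is empty again */
    }
    fifo_free(&f);
}
#endif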
400
401int filename_number_test(const char *filename)
402{
403 char buf[1024];
404 if(!filename)
405 return -1;
406 return get_frame_filename(buf, sizeof(buf), filename, 1);
407}
408
409/**
410 * Guess file format.
411 */
412AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
413{
414 AVInputFormat *fmt1, *fmt;
415 int score, score_max;
416
417 fmt = NULL;
418 score_max = 0;
419 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
420 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
421 continue;
422 score = 0;
423 if (fmt1->read_probe) {
424 score = fmt1->read_probe(pd);
425 } else if (fmt1->extensions) {
426 if (match_ext(pd->filename, fmt1->extensions)) {
427 score = 50;
428 }
429 }
430 if (score > score_max) {
431 score_max = score;
432 fmt = fmt1;
433 }
434 }
435 return fmt;
436}
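/*
 * Usage sketch: probing the format from the first bytes of a stream. The
 * buffer and filename are supplied by the caller; is_opened = 1 also
 * considers formats that need a real file.
 */
#if 0
static AVInputFormat *example_probe(const char *filename,
                                    uint8_t *header_bytes, int header_size)
{
    AVProbeData pd;

    pd.filename = filename;     /* used for extension matching */
    pd.buf      = header_bytes; /* first bytes read from the stream */
    pd.buf_size = header_size;
    return av_probe_input_format(&pd, 1);
}
#endif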
437
438/************************************************************/
439/* input media file */
440
441/**
442 * Open a media file from an IO stream. 'fmt' must be specified.
443 */
444static const char* format_to_name(void* ptr)
445{
446 AVFormatContext* fc = (AVFormatContext*) ptr;
447 if(fc->iformat) return fc->iformat->name;
448 else if(fc->oformat) return fc->oformat->name;
449 else return "NULL";
450}
451
452static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
453
454AVFormatContext *av_alloc_format_context(void)
455{
456 AVFormatContext *ic;
457 ic = av_mallocz(sizeof(AVFormatContext));
458 if (!ic) return ic;
459 ic->av_class = &av_format_context_class;
460 return ic;
461}
462
463/**
464 * Allocates all the structures needed to read an input stream.
465 * This does not open the needed codecs for decoding the stream[s].
466 */
467int av_open_input_stream(AVFormatContext **ic_ptr,
468 ByteIOContext *pb, const char *filename,
469 AVInputFormat *fmt, AVFormatParameters *ap)
470{
471 int err;
472 AVFormatContext *ic;
473 AVFormatParameters default_ap;
474
475 if(!ap){
476 ap=&default_ap;
477 memset(ap, 0, sizeof(default_ap));
478 }
479
480 ic = av_alloc_format_context();
481 if (!ic) {
482 err = AVERROR_NOMEM;
483 goto fail;
484 }
485 ic->iformat = fmt;
486 if (pb)
487 ic->pb = *pb;
488 ic->duration = AV_NOPTS_VALUE;
489 ic->start_time = AV_NOPTS_VALUE;
490 pstrcpy(ic->filename, sizeof(ic->filename), filename);
491
492 /* allocate private data */
493 if (fmt->priv_data_size > 0) {
494 ic->priv_data = av_mallocz(fmt->priv_data_size);
495 if (!ic->priv_data) {
496 err = AVERROR_NOMEM;
497 goto fail;
498 }
499 } else {
500 ic->priv_data = NULL;
501 }
502
503 err = ic->iformat->read_header(ic, ap);
504 if (err < 0)
505 goto fail;
506
507 if (pb)
508 ic->data_offset = url_ftell(&ic->pb);
509
510 *ic_ptr = ic;
511 return 0;
512 fail:
513 if (ic) {
514 av_freep(&ic->priv_data);
515 }
516 av_free(ic);
517 *ic_ptr = NULL;
518 return err;
519}
520
521/** Size of probe buffer, for guessing file type from file contents. */
522#define PROBE_BUF_MIN 2048
523#define PROBE_BUF_MAX (1<<20)
524
525/**
526 * Open a media file as input. The codecs are not opened. Only the file
527 * header (if present) is read.
528 *
529 * @param ic_ptr the opened media file handle is put here
530 * @param filename filename to open.
531 * @param fmt if non NULL, force the file format to use
532 * @param buf_size optional buffer size (zero if default is OK)
533 * @param ap additional parameters needed when opening the file (NULL if default)
534 * @return 0 if OK. AVERROR_xxx otherwise.
535 */
536int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
537 AVInputFormat *fmt,
538 int buf_size,
539 AVFormatParameters *ap)
540{
541 int err, must_open_file, file_opened, probe_size;
542 AVProbeData probe_data, *pd = &probe_data;
543 ByteIOContext pb1, *pb = &pb1;
544
545 file_opened = 0;
546 pd->filename = "";
547 if (filename)
548 pd->filename = filename;
549 pd->buf = NULL;
550 pd->buf_size = 0;
551
552 if (!fmt) {
553 /* guess format if no file can be opened */
554 fmt = av_probe_input_format(pd, 0);
555 }
556
557 /* do not open file if the format does not need it. XXX: specific
558 hack needed to handle RTSP/TCP */
559 must_open_file = 1;
560 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
561 must_open_file = 0;
562 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it's uninitialized
563 }
564
565 if (!fmt || must_open_file) {
566 /* if no file needed do not try to open one */
567 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
568 err = AVERROR_IO;
569 goto fail;
570 }
571 file_opened = 1;
572 if (buf_size > 0) {
573 url_setbufsize(pb, buf_size);
574 }
575
576 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
577 /* read probe data */
578 pd->buf= av_realloc(pd->buf, probe_size);
579 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
580 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
581 url_fclose(pb);
582 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
583 file_opened = 0;
584 err = AVERROR_IO;
585 goto fail;
586 }
587 }
588 /* guess file format */
589 fmt = av_probe_input_format(pd, 1);
590 }
591 av_freep(&pd->buf);
592 }
593
594 /* if still no format found, error */
595 if (!fmt) {
596 err = AVERROR_NOFMT;
597 goto fail;
598 }
599
600 /* XXX: suppress this hack for redirectors */
601#ifdef CONFIG_NETWORK
602 if (fmt == &redir_demuxer) {
603 err = redir_open(ic_ptr, pb);
604 url_fclose(pb);
605 return err;
606 }
607#endif
608
609 /* check the filename in case an image number is expected */
610 if (fmt->flags & AVFMT_NEEDNUMBER) {
611 if (filename_number_test(filename) < 0) {
612 err = AVERROR_NUMEXPECTED;
613 goto fail;
614 }
615 }
616 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
617 if (err)
618 goto fail;
619 return 0;
620 fail:
621 av_freep(&pd->buf);
622 if (file_opened)
623 url_fclose(pb);
624 *ic_ptr = NULL;
625 return err;
626
627}
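/*
 * Usage sketch (assuming the usual av_register_all() and dump_format()
 * entry points from avformat.h): the typical call sequence around
 * av_open_input_file(); the filename is whatever the caller passes in.
 */
#if 0
static int example_open(const char *filename)
{
    AVFormatContext *ic;
    int err;

    av_register_all();                               /* register demuxers once at startup */
    err = av_open_input_file(&ic, filename, NULL, 0, NULL);
    if (err < 0)
        return err;                                  /* AVERROR_IO, AVERROR_NOFMT, ... */
    err = av_find_stream_info(ic);                   /* fill in codec parameters and durations */
    if (err < 0) {
        av_close_input_file(ic);
        return err;
    }
    dump_format(ic, 0, filename, 0);                 /* last argument: 0 = input */
    av_close_input_file(ic);
    return 0;
}
#endif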
628
629/*******************************************************/
630
631/**
632 * Read a transport packet from a media file.
633 *
634 * This function is obsolete and should never be used.
635 * Use av_read_frame() instead.
636 *
637 * @param s media file handle
638 * @param pkt is filled
639 * @return 0 if OK. AVERROR_xxx if error.
640 */
641int av_read_packet(AVFormatContext *s, AVPacket *pkt)
642{
643 return s->iformat->read_packet(s, pkt);
644}
645
646/**********************************************************/
647
648/**
649 * Get the number of samples of an audio frame. Return (-1) if error.
650 */
651static int get_audio_frame_size(AVCodecContext *enc, int size)
652{
653 int frame_size;
654
655 if (enc->frame_size <= 1) {
656 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
657
658 if (bits_per_sample) {
659 if (enc->channels == 0)
660 return -1;
661 frame_size = (size << 3) / (bits_per_sample * enc->channels);
662 } else {
663 /* used for example by ADPCM codecs */
664 if (enc->bit_rate == 0)
665 return -1;
666 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
667 }
668 } else {
669 frame_size = enc->frame_size;
670 }
671 return frame_size;
672}
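/*
 * Worked example for get_audio_frame_size(): for 16 bit stereo PCM,
 * bits_per_sample = 16 and channels = 2, so a 4096 byte packet yields
 * (4096 << 3) / (16 * 2) = 1024 samples per channel.
 */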
673
674
675/**
676 * Return the frame duration as the fraction *pnum / *pden in seconds; both are set to 0 if not available.
677 */
678static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
679 AVCodecParserContext *pc, AVPacket *pkt)
680{
681 int frame_size;
682
683 *pnum = 0;
684 *pden = 0;
685 switch(st->codec->codec_type) {
686 case CODEC_TYPE_VIDEO:
687 if(st->time_base.num*1000LL > st->time_base.den){
688 *pnum = st->time_base.num;
689 *pden = st->time_base.den;
690 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
691 *pnum = st->codec->time_base.num;
692 *pden = st->codec->time_base.den;
693 if (pc && pc->repeat_pict) {
694 *pden *= 2;
695 *pnum = (*pnum) * (2 + pc->repeat_pict);
696 }
697 }
698 break;
699 case CODEC_TYPE_AUDIO:
700 frame_size = get_audio_frame_size(st->codec, pkt->size);
701 if (frame_size < 0)
702 break;
703 *pnum = frame_size;
704 *pden = st->codec->sample_rate;
705 break;
706 default:
707 break;
708 }
709}
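/*
 * Worked example for compute_frame_duration(): a 25 fps video stream with
 * codec->time_base = 1/25 and st->time_base = 1/90000 yields *pnum = 1 and
 * *pden = 25, so compute_pkt_fields() below derives
 * pkt->duration = av_rescale(1, 1 * 90000, 25 * 1) = 3600 ticks per frame.
 */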
710
711static int is_intra_only(AVCodecContext *enc){
712 if(enc->codec_type == CODEC_TYPE_AUDIO){
713 return 1;
714 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
715 switch(enc->codec_id){
716 case CODEC_ID_MJPEG:
717 case CODEC_ID_MJPEGB:
718 case CODEC_ID_LJPEG:
719 case CODEC_ID_RAWVIDEO:
720 case CODEC_ID_DVVIDEO:
721 case CODEC_ID_HUFFYUV:
722 case CODEC_ID_FFVHUFF:
723 case CODEC_ID_ASV1:
724 case CODEC_ID_ASV2:
725 case CODEC_ID_VCR1:
726 return 1;
727 default: break;
728 }
729 }
730 return 0;
731}
732
733static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
734 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
735 int64_t delta= last_ts - mask/2;
736 return ((lsb - delta)&mask) + delta;
737}
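/*
 * Worked example for lsb2full(): with 33 bit MPEG timestamps the mask is
 * 2^33 - 1. If the previous dts was 8589934000 and the new wrapped 33 bit
 * value is 100, the full timestamp 2^33 + 100 is recovered.
 */
#if 0
static void example_lsb2full(void)
{
    assert(lsb2full(100, 8589934000LL, 33) == (1LL << 33) + 100);
}
#endif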
738
739static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
740 AVCodecParserContext *pc, AVPacket *pkt)
741{
742 int num, den, presentation_delayed;
743 /* handle wrapping */
744 if(st->cur_dts != AV_NOPTS_VALUE){
745 if(pkt->pts != AV_NOPTS_VALUE)
746 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
747 if(pkt->dts != AV_NOPTS_VALUE)
748 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
749 }
750
751 if (pkt->duration == 0) {
752 compute_frame_duration(&num, &den, st, pc, pkt);
753 if (den && num) {
754 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
755 }
756 }
757
758 if(is_intra_only(st->codec))
759 pkt->flags |= PKT_FLAG_KEY;
760
761 /* do we have a video B frame ? */
762 presentation_delayed = 0;
763 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
764 /* XXX: need has_b_frame, but cannot get it if the codec is
765 not initialized */
766 if (( st->codec->codec_id == CODEC_ID_H264
767 || st->codec->has_b_frames) &&
768 pc && pc->pict_type != FF_B_TYPE)
769 presentation_delayed = 1;
770 /* this may be redundant, but it shouldn't hurt */
771 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
772 presentation_delayed = 1;
773 }
774
775 if(st->cur_dts == AV_NOPTS_VALUE){
776 if(presentation_delayed) st->cur_dts = -pkt->duration;
777 else st->cur_dts = 0;
778 }
779
780// av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
781 /* interpolate PTS and DTS if they are not present */
782 if (presentation_delayed) {
783 /* DTS = decompression time stamp */
784 /* PTS = presentation time stamp */
785 if (pkt->dts == AV_NOPTS_VALUE) {
786 /* if we know the last pts, use it */
787 if(st->last_IP_pts != AV_NOPTS_VALUE)
788 st->cur_dts = pkt->dts = st->last_IP_pts;
789 else
790 pkt->dts = st->cur_dts;
791 } else {
792 st->cur_dts = pkt->dts;
793 }
794 /* this is tricky: the dts must be incremented by the duration
795 of the frame we are displaying, i.e. the last I or P frame */
796 if (st->last_IP_duration == 0)
797 st->cur_dts += pkt->duration;
798 else
799 st->cur_dts += st->last_IP_duration;
800 st->last_IP_duration = pkt->duration;
801 st->last_IP_pts= pkt->pts;
802 /* cannot compute PTS if not present (we can compute it only
803 by knowing the future) */
804 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
805 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
806 int64_t old_diff= ABS(st->cur_dts - pkt->duration - pkt->pts);
807 int64_t new_diff= ABS(st->cur_dts - pkt->pts);
808 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
809 pkt->pts += pkt->duration;
810// av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
811 }
812 }
813
814 /* presentation is not delayed : PTS and DTS are the same */
815 if (pkt->pts == AV_NOPTS_VALUE) {
816 if (pkt->dts == AV_NOPTS_VALUE) {
817 pkt->pts = st->cur_dts;
818 pkt->dts = st->cur_dts;
819 }
820 else {
821 st->cur_dts = pkt->dts;
822 pkt->pts = pkt->dts;
823 }
824 } else {
825 st->cur_dts = pkt->pts;
826 pkt->dts = pkt->pts;
827 }
828 st->cur_dts += pkt->duration;
829 }
830// av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
831
832 /* update flags */
833 if (pc) {
834 pkt->flags = 0;
835 /* key frame computation */
836 switch(st->codec->codec_type) {
837 case CODEC_TYPE_VIDEO:
838 if (pc->pict_type == FF_I_TYPE)
839 pkt->flags |= PKT_FLAG_KEY;
840 break;
841 case CODEC_TYPE_AUDIO:
842 pkt->flags |= PKT_FLAG_KEY;
843 break;
844 default:
845 break;
846 }
847 }
848}
849
850void av_destruct_packet_nofree(AVPacket *pkt)
851{
852 pkt->data = NULL; pkt->size = 0;
853}
854
855static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
856{
857 AVStream *st;
858 int len, ret, i;
859
860 for(;;) {
861 /* select current input stream component */
862 st = s->cur_st;
863 if (st) {
864 if (!st->need_parsing || !st->parser) {
865 /* no parsing needed: we just output the packet as is */
866 /* raw data support */
867 *pkt = s->cur_pkt;
868 compute_pkt_fields(s, st, NULL, pkt);
869 s->cur_st = NULL;
870 return 0;
871 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
872 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
873 s->cur_ptr, s->cur_len,
874 s->cur_pkt.pts, s->cur_pkt.dts);
875 s->cur_pkt.pts = AV_NOPTS_VALUE;
876 s->cur_pkt.dts = AV_NOPTS_VALUE;
877 /* increment read pointer */
878 s->cur_ptr += len;
879 s->cur_len -= len;
880
881 /* return packet if any */
882 if (pkt->size) {
883 got_packet:
884 pkt->duration = 0;
885 pkt->stream_index = st->index;
886 pkt->pts = st->parser->pts;
887 pkt->dts = st->parser->dts;
888 pkt->destruct = av_destruct_packet_nofree;
889 compute_pkt_fields(s, st, st->parser, pkt);
890 return 0;
891 }
892 } else {
893 /* free packet */
894 av_free_packet(&s->cur_pkt);
895 s->cur_st = NULL;
896 }
897 } else {
898 /* read next packet */
899 ret = av_read_packet(s, &s->cur_pkt);
900 if (ret < 0) {
901 if (ret == -EAGAIN)
902 return ret;
903 /* return the last frames, if any */
904 for(i = 0; i < s->nb_streams; i++) {
905 st = s->streams[i];
906 if (st->parser && st->need_parsing) {
907 av_parser_parse(st->parser, st->codec,
908 &pkt->data, &pkt->size,
909 NULL, 0,
910 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
911 if (pkt->size)
912 goto got_packet;
913 }
914 }
915 /* no more packets: really terminates parsing */
916 return ret;
917 }
918
919 st = s->streams[s->cur_pkt.stream_index];
920
921 s->cur_st = st;
922 s->cur_ptr = s->cur_pkt.data;
923 s->cur_len = s->cur_pkt.size;
924 if (st->need_parsing && !st->parser) {
925 st->parser = av_parser_init(st->codec->codec_id);
926 if (!st->parser) {
927 /* no parser available : just output the raw packets */
928 st->need_parsing = 0;
929 }else if(st->need_parsing == 2){
930 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
931 }
932 }
933 }
934 }
935}
936
937/**
938 * Return the next frame of a stream.
939 *
940 * The returned packet is valid
941 * until the next av_read_frame() or until av_close_input_file() and
942 * must be freed with av_free_packet. For video, the packet contains
943 * exactly one frame. For audio, it contains an integer number of
944 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
945 * data). If the audio frames have a variable size (e.g. MPEG audio),
946 * then it contains one frame.
947 *
948 * pkt->pts, pkt->dts and pkt->duration are always set to correct
949 * values in AVStream.time_base units (and guessed if the format cannot
950 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
951 * has B frames, so it is better to rely on pkt->dts if you do not
952 * decompress the payload.
953 *
954 * @return 0 if OK, < 0 if error or end of file.
955 */
956int av_read_frame(AVFormatContext *s, AVPacket *pkt)
957{
958 AVPacketList *pktl;
959 int eof=0;
960 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
961
962 for(;;){
963 pktl = s->packet_buffer;
964 if (pktl) {
965 AVPacket *next_pkt= &pktl->pkt;
966
967 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
968 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
969 if( pktl->pkt.stream_index == next_pkt->stream_index
970 && next_pkt->dts < pktl->pkt.dts
971 && pktl->pkt.pts != pktl->pkt.dts //not b frame
972 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
973 next_pkt->pts= pktl->pkt.dts;
974 }
975 pktl= pktl->next;
976 }
977 pktl = s->packet_buffer;
978 }
979
980 if( next_pkt->pts != AV_NOPTS_VALUE
981 || next_pkt->dts == AV_NOPTS_VALUE
982 || !genpts || eof){
983 /* read packet from packet buffer, if there is data */
984 *pkt = *next_pkt;
985 s->packet_buffer = pktl->next;
986 av_free(pktl);
987 return 0;
988 }
989 }
990 if(genpts){
991 AVPacketList **plast_pktl= &s->packet_buffer;
992 int ret= av_read_frame_internal(s, pkt);
993 if(ret<0){
994 if(pktl && ret != -EAGAIN){
995 eof=1;
996 continue;
997 }else
998 return ret;
999 }
1000
1001 /* duplicate the packet */
1002 if (av_dup_packet(pkt) < 0)
1003 return AVERROR_NOMEM;
1004
1005 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
1006
1007 pktl = av_mallocz(sizeof(AVPacketList));
1008 if (!pktl)
1009 return AVERROR_NOMEM;
1010
1011 /* add the packet in the buffered packet list */
1012 *plast_pktl = pktl;
1013 pktl->pkt= *pkt;
1014 }else{
1015 assert(!s->packet_buffer);
1016 return av_read_frame_internal(s, pkt);
1017 }
1018 }
1019}
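/*
 * Usage sketch for av_read_frame(): the usual demuxing loop on an already
 * opened AVFormatContext. handle_video() is a placeholder for whatever the
 * caller does with the payload; timestamps are in the stream's time_base.
 */
#if 0
static void example_read_loop(AVFormatContext *ic, int video_index)
{
    AVPacket pkt;

    while (av_read_frame(ic, &pkt) >= 0) {
        if (pkt.stream_index == video_index)
            handle_video(pkt.data, pkt.size);   /* placeholder consumer */
        av_free_packet(&pkt);                   /* mandatory: releases the payload */
    }
}
#endif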
1020
1021/* XXX: suppress the packet queue */
1022static void flush_packet_queue(AVFormatContext *s)
1023{
1024 AVPacketList *pktl;
1025
1026 for(;;) {
1027 pktl = s->packet_buffer;
1028 if (!pktl)
1029 break;
1030 s->packet_buffer = pktl->next;
1031 av_free_packet(&pktl->pkt);
1032 av_free(pktl);
1033 }
1034}
1035
1036/*******************************************************/
1037/* seek support */
1038
1039int av_find_default_stream_index(AVFormatContext *s)
1040{
1041 int i;
1042 AVStream *st;
1043
1044 if (s->nb_streams <= 0)
1045 return -1;
1046 for(i = 0; i < s->nb_streams; i++) {
1047 st = s->streams[i];
1048 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1049 return i;
1050 }
1051 }
1052 return 0;
1053}
1054
1055/**
1056 * Flush the frame reader.
1057 */
1058static void av_read_frame_flush(AVFormatContext *s)
1059{
1060 AVStream *st;
1061 int i;
1062
1063 flush_packet_queue(s);
1064
1065 /* free previous packet */
1066 if (s->cur_st) {
1067 if (s->cur_st->parser)
1068 av_free_packet(&s->cur_pkt);
1069 s->cur_st = NULL;
1070 }
1071 /* fail safe */
1072 s->cur_ptr = NULL;
1073 s->cur_len = 0;
1074
1075 /* for each stream, reset read state */
1076 for(i = 0; i < s->nb_streams; i++) {
1077 st = s->streams[i];
1078
1079 if (st->parser) {
1080 av_parser_close(st->parser);
1081 st->parser = NULL;
1082 }
1083 st->last_IP_pts = AV_NOPTS_VALUE;
1084 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
1085 }
1086}
1087
1088/**
1089 * Updates cur_dts of all streams based on given timestamp and AVStream.
1090 *
1091 * Stream ref_st is unchanged; the other streams get cur_dts set in their native time base.
1092 * Only needed for timestamp wrapping or if (dts not set and pts != dts).
1093 * @param timestamp new dts expressed in time_base of param ref_st
1094 * @param ref_st reference stream giving time_base of param timestamp
1095 */
1096static void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1097 int i;
1098
1099 for(i = 0; i < s->nb_streams; i++) {
1100 AVStream *st = s->streams[i];
1101
1102 st->cur_dts = av_rescale(timestamp,
1103 st->time_base.den * (int64_t)ref_st->time_base.num,
1104 st->time_base.num * (int64_t)ref_st->time_base.den);
1105 }
1106}
1107
1108/**
1109 * Add an index entry into a sorted list, updating it if it is already there.
1110 *
1111 * @param timestamp timestamp in the timebase of the given stream
1112 */
1113int av_add_index_entry(AVStream *st,
1114 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1115{
1116 AVIndexEntry *entries, *ie;
1117 int index;
1118
1119 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1120 return -1;
1121
1122 entries = av_fast_realloc(st->index_entries,
1123 &st->index_entries_allocated_size,
1124 (st->nb_index_entries + 1) *
1125 sizeof(AVIndexEntry));
1126 if(!entries)
1127 return -1;
1128
1129 st->index_entries= entries;
1130
1131 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1132
1133 if(index<0){
1134 index= st->nb_index_entries++;
1135 ie= &entries[index];
1136 assert(index==0 || ie[-1].timestamp < timestamp);
1137 }else{
1138 ie= &entries[index];
1139 if(ie->timestamp != timestamp){
1140 if(ie->timestamp <= timestamp)
1141 return -1;
1142 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1143 st->nb_index_entries++;
1144 }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1145 distance= ie->min_distance;
1146 }
1147
1148 ie->pos = pos;
1149 ie->timestamp = timestamp;
1150 ie->min_distance= distance;
1151 ie->size= size;
1152 ie->flags = flags;
1153
1154 return index;
1155}
1156
1157/**
1158 * build an index for raw streams using a parser.
1159 */
1160static void av_build_index_raw(AVFormatContext *s)
1161{
1162 AVPacket pkt1, *pkt = &pkt1;
1163 int ret;
1164 AVStream *st;
1165
1166 st = s->streams[0];
1167 av_read_frame_flush(s);
1168 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1169
1170 for(;;) {
1171 ret = av_read_frame(s, pkt);
1172 if (ret < 0)
1173 break;
1174 if (pkt->stream_index == 0 && st->parser &&
1175 (pkt->flags & PKT_FLAG_KEY)) {
1176 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1177 0, 0, AVINDEX_KEYFRAME);
1178 }
1179 av_free_packet(pkt);
1180 }
1181}
1182
1183/**
1184 * Returns TRUE if we deal with a raw stream.
1185 *
1186 * Raw codec data and parsing needed.
1187 */
1188static int is_raw_stream(AVFormatContext *s)
1189{
1190 AVStream *st;
1191
1192 if (s->nb_streams != 1)
1193 return 0;
1194 st = s->streams[0];
1195 if (!st->need_parsing)
1196 return 0;
1197 return 1;
1198}
1199
1200/**
1201 * Gets the index for a specific timestamp.
1202 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
1203 * the timestamp which is <= the requested one, if backward is 0
1204 * then it will be >=
1205 * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
1206 * @return < 0 if no such timestamp could be found
1207 */
1208int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1209 int flags)
1210{
1211 AVIndexEntry *entries= st->index_entries;
1212 int nb_entries= st->nb_index_entries;
1213 int a, b, m;
1214 int64_t timestamp;
1215
1216 a = - 1;
1217 b = nb_entries;
1218
1219 while (b - a > 1) {
1220 m = (a + b) >> 1;
1221 timestamp = entries[m].timestamp;
1222 if(timestamp >= wanted_timestamp)
1223 b = m;
1224 if(timestamp <= wanted_timestamp)
1225 a = m;
1226 }
1227 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1228
1229 if(!(flags & AVSEEK_FLAG_ANY)){
1230 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1231 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1232 }
1233 }
1234
1235 if(m == nb_entries)
1236 return -1;
1237 return m;
1238}
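/*
 * Usage sketch for the index helpers above: positions and timestamps are
 * made-up values in the stream's time_base.
 */
#if 0
static void example_index(AVStream *st)
{
    int idx;

    /* keyframes at byte offsets 4096 and 65536, unknown packet size */
    av_add_index_entry(st, 4096, 90000, 0, 0, AVINDEX_KEYFRAME);
    av_add_index_entry(st, 65536, 180000, 0, 0, AVINDEX_KEYFRAME);

    /* nearest keyframe at or before timestamp 170000 -> entry 0 (pos 4096) */
    idx = av_index_search_timestamp(st, 170000, AVSEEK_FLAG_BACKWARD);
    if (idx >= 0) {
        /* st->index_entries[idx].pos is the byte offset to seek to */
    }
}
#endif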
1239
1240#define DEBUG_SEEK
1241
1242/**
1243 * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
1244 * This isn't supposed to be called directly by a user application, but by demuxers.
1245 * @param target_ts target timestamp in the time base of the given stream
1246 * @param stream_index stream number
1247 */
1248int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1249 AVInputFormat *avif= s->iformat;
1250 int64_t pos_min, pos_max, pos, pos_limit;
1251 int64_t ts_min, ts_max, ts;
1252 int64_t start_pos, filesize;
1253 int index, no_change;
1254 AVStream *st;
1255
1256 if (stream_index < 0)
1257 return -1;
1258
1259#ifdef DEBUG_SEEK
1260 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1261#endif
1262
1263 ts_max=
1264 ts_min= AV_NOPTS_VALUE;
1265 pos_limit= -1; //gcc falsely says it may be uninitialized
1266
1267 st= s->streams[stream_index];
1268 if(st->index_entries){
1269 AVIndexEntry *e;
1270
1271 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1272 index= FFMAX(index, 0);
1273 e= &st->index_entries[index];
1274
1275 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1276 pos_min= e->pos;
1277 ts_min= e->timestamp;
1278#ifdef DEBUG_SEEK
1279 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1280 pos_min,ts_min);
1281#endif
1282 }else{
1283 assert(index==0);
1284 }
1285
1286 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1287 assert(index < st->nb_index_entries);
1288 if(index >= 0){
1289 e= &st->index_entries[index];
1290 assert(e->timestamp >= target_ts);
1291 pos_max= e->pos;
1292 ts_max= e->timestamp;
1293 pos_limit= pos_max - e->min_distance;
1294#ifdef DEBUG_SEEK
1295 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1296 pos_max,pos_limit, ts_max);
1297#endif
1298 }
1299 }
1300
1301 if(ts_min == AV_NOPTS_VALUE){
1302 pos_min = s->data_offset;
1303 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1304 if (ts_min == AV_NOPTS_VALUE)
1305 return -1;
1306 }
1307
1308 if(ts_max == AV_NOPTS_VALUE){
1309 int step= 1024;
1310 filesize = url_fsize(&s->pb);
1311 pos_max = filesize - 1;
1312 do{
1313 pos_max -= step;
1314 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1315 step += step;
1316 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1317 if (ts_max == AV_NOPTS_VALUE)
1318 return -1;
1319
1320 for(;;){
1321 int64_t tmp_pos= pos_max + 1;
1322 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1323 if(tmp_ts == AV_NOPTS_VALUE)
1324 break;
1325 ts_max= tmp_ts;
1326 pos_max= tmp_pos;
1327 if(tmp_pos >= filesize)
1328 break;
1329 }
1330 pos_limit= pos_max;
1331 }
1332
1333 if(ts_min > ts_max){
1334 return -1;
1335 }else if(ts_min == ts_max){
1336 pos_limit= pos_min;
1337 }
1338
1339 no_change=0;
1340 while (pos_min < pos_limit) {
1341#ifdef DEBUG_SEEK
1342 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1343 pos_min, pos_max,
1344 ts_min, ts_max);
1345#endif
1346 assert(pos_limit <= pos_max);
1347
1348 if(no_change==0){
1349 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1350 // interpolate position (better than dichotomy)
1351 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1352 + pos_min - approximate_keyframe_distance;
1353 }else if(no_change==1){
1354 // bisection, if interpolation failed to change min or max pos last time
1355 pos = (pos_min + pos_limit)>>1;
1356 }else{
1357 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1358 pos=pos_min;
1359 }
1360 if(pos <= pos_min)
1361 pos= pos_min + 1;
1362 else if(pos > pos_limit)
1363 pos= pos_limit;
1364 start_pos= pos;
1365
1366 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1367 if(pos == pos_max)
1368 no_change++;
1369 else
1370 no_change=0;
1371#ifdef DEBUG_SEEK
1372av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1373#endif
1374 assert(ts != AV_NOPTS_VALUE);
1375 if (target_ts <= ts) {
1376 pos_limit = start_pos - 1;
1377 pos_max = pos;
1378 ts_max = ts;
1379 }
1380 if (target_ts >= ts) {
1381 pos_min = pos;
1382 ts_min = ts;
1383 }
1384 }
1385
1386 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1387 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1388#ifdef DEBUG_SEEK
1389 pos_min = pos;
1390 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1391 pos_min++;
1392 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1393 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1394 pos, ts_min, target_ts, ts_max);
1395#endif
1396 /* do the seek */
1397 url_fseek(&s->pb, pos, SEEK_SET);
1398
1399 av_update_cur_dts(s, st, ts);
1400
1401 return 0;
1402}
1403
1404static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1405 int64_t pos_min, pos_max;
1406#if 0
1407 AVStream *st;
1408
1409 if (stream_index < 0)
1410 return -1;
1411
1412 st= s->streams[stream_index];
1413#endif
1414
1415 pos_min = s->data_offset;
1416 pos_max = url_fsize(&s->pb) - 1;
1417
1418 if (pos < pos_min) pos= pos_min;
1419 else if(pos > pos_max) pos= pos_max;
1420
1421 url_fseek(&s->pb, pos, SEEK_SET);
1422
1423#if 0
1424 av_update_cur_dts(s, st, ts);
1425#endif
1426 return 0;
1427}
1428
1429static int av_seek_frame_generic(AVFormatContext *s,
1430 int stream_index, int64_t timestamp, int flags)
1431{
1432 int index;
1433 AVStream *st;
1434 AVIndexEntry *ie;
1435
1436 if (!s->index_built) {
1437 if (is_raw_stream(s)) {
1438 av_build_index_raw(s);
1439 } else {
1440 return -1;
1441 }
1442 s->index_built = 1;
1443 }
1444
1445 st = s->streams[stream_index];
1446 index = av_index_search_timestamp(st, timestamp, flags);
1447 if (index < 0)
1448 return -1;
1449
1450 /* now we have found the index, we can seek */
1451 ie = &st->index_entries[index];
1452 av_read_frame_flush(s);
1453 url_fseek(&s->pb, ie->pos, SEEK_SET);
1454
1455 av_update_cur_dts(s, st, ie->timestamp);
1456
1457 return 0;
1458}
1459
1460/**
1461 * Seek to the key frame at timestamp.
1462 * 'timestamp' in 'stream_index'.
1463 * @param stream_index If stream_index is (-1), a default
1464 * stream is selected, and timestamp is automatically converted
1465 * from AV_TIME_BASE units to the stream specific time_base.
1466 * @param timestamp timestamp in AVStream.time_base units
1467 * or if there is no stream specified then in AV_TIME_BASE units
1468 * @param flags flags which select direction and seeking mode
1469 * @return >= 0 on success
1470 */
1471int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1472{
1473 int ret;
1474 AVStream *st;
1475
1476 av_read_frame_flush(s);
1477
1478 if(flags & AVSEEK_FLAG_BYTE)
1479 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1480
1481 if(stream_index < 0){
1482 stream_index= av_find_default_stream_index(s);
1483 if(stream_index < 0)
1484 return -1;
1485
1486 st= s->streams[stream_index];
1487 /* timestamp for default must be expressed in AV_TIME_BASE units */
1488 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1489 }
1490 st= s->streams[stream_index];
1491
1492 /* first, we try the format specific seek */
1493 if (s->iformat->read_seek)
1494 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1495 else
1496 ret = -1;
1497 if (ret >= 0) {
1498 return 0;
1499 }
1500
1501 if(s->iformat->read_timestamp)
1502 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1503 else
1504 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1505}
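/*
 * Usage sketch for av_seek_frame(): seeking to a position given in seconds.
 * With stream_index = -1 the timestamp is in AV_TIME_BASE units; with an
 * explicit stream it must be in that stream's time_base.
 */
#if 0
static int example_seek(AVFormatContext *ic, double seconds)
{
    int64_t ts = (int64_t)(seconds * AV_TIME_BASE);

    /* land on the keyframe at or before the requested position */
    return av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD);
}
#endif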
1506
1507/*******************************************************/
1508
1509/**
1510 * Returns TRUE if any stream has accurate timings.
1511 *
1512 * @return TRUE if the stream has accurate timings for at least one component.
1513 */
1514static int av_has_timings(AVFormatContext *ic)
1515{
1516 int i;
1517 AVStream *st;
1518
1519 for(i = 0;i < ic->nb_streams; i++) {
1520 st = ic->streams[i];
1521 if (st->start_time != AV_NOPTS_VALUE &&
1522 st->duration != AV_NOPTS_VALUE)
1523 return 1;
1524 }
1525 return 0;
1526}
1527
1528/**
1529 * Estimate the stream timings from those of each component.
1530 *
1531 * Also computes the global bitrate if possible.
1532 */
1533static void av_update_stream_timings(AVFormatContext *ic)
1534{
1535 int64_t start_time, start_time1, end_time, end_time1;
1536 int i;
1537 AVStream *st;
1538
1539 start_time = MAXINT64;
1540 end_time = MININT64;
1541 for(i = 0;i < ic->nb_streams; i++) {
1542 st = ic->streams[i];
1543 if (st->start_time != AV_NOPTS_VALUE) {
1544 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1545 if (start_time1 < start_time)
1546 start_time = start_time1;
1547 if (st->duration != AV_NOPTS_VALUE) {
1548 end_time1 = start_time1
1549 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1550 if (end_time1 > end_time)
1551 end_time = end_time1;
1552 }
1553 }
1554 }
1555 if (start_time != MAXINT64) {
1556 ic->start_time = start_time;
1557 if (end_time != MININT64) {
1558 ic->duration = end_time - start_time;
1559 if (ic->file_size > 0) {
1560 /* compute the bit rate */
1561 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1562 (double)ic->duration;
1563 }
1564 }
1565 }
1566
1567}
1568
1569static void fill_all_stream_timings(AVFormatContext *ic)
1570{
1571 int i;
1572 AVStream *st;
1573
1574 av_update_stream_timings(ic);
1575 for(i = 0;i < ic->nb_streams; i++) {
1576 st = ic->streams[i];
1577 if (st->start_time == AV_NOPTS_VALUE) {
1578 if(ic->start_time != AV_NOPTS_VALUE)
1579 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1580 if(ic->duration != AV_NOPTS_VALUE)
1581 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1582 }
1583 }
1584}
1585
1586static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1587{
1588 int64_t filesize, duration;
1589 int bit_rate, i;
1590 AVStream *st;
1591
1592 /* if bit_rate is already set, we believe it */
1593 if (ic->bit_rate == 0) {
1594 bit_rate = 0;
1595 for(i=0;i<ic->nb_streams;i++) {
1596 st = ic->streams[i];
1597 bit_rate += st->codec->bit_rate;
1598 }
1599 ic->bit_rate = bit_rate;
1600 }
1601
1602 /* if duration is already set, we believe it */
1603 if (ic->duration == AV_NOPTS_VALUE &&
1604 ic->bit_rate != 0 &&
1605 ic->file_size != 0) {
1606 filesize = ic->file_size;
1607 if (filesize > 0) {
1608 for(i = 0; i < ic->nb_streams; i++) {
1609 st = ic->streams[i];
1610 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1611 if (st->start_time == AV_NOPTS_VALUE ||
1612 st->duration == AV_NOPTS_VALUE) {
1613 st->start_time = 0;
1614 st->duration = duration;
1615 }
1616 }
1617 }
1618 }
1619}
1620
1621#define DURATION_MAX_READ_SIZE 250000
1622
1623/* only usable for MPEG-PS streams */
1624static void av_estimate_timings_from_pts(AVFormatContext *ic)
1625{
1626 AVPacket pkt1, *pkt = &pkt1;
1627 AVStream *st;
1628 int read_size, i, ret;
1629 int64_t end_time;
1630 int64_t filesize, offset, duration;
1631
1632 /* free previous packet */
1633 if (ic->cur_st && ic->cur_st->parser)
1634 av_free_packet(&ic->cur_pkt);
1635 ic->cur_st = NULL;
1636
1637 /* flush packet queue */
1638 flush_packet_queue(ic);
1639
1640 for(i=0;i<ic->nb_streams;i++) {
1641 st = ic->streams[i];
1642 if (st->parser) {
1643 av_parser_close(st->parser);
1644 st->parser= NULL;
1645 }
1646 }
1647
1648 /* we read the first packets to get the first PTS (not fully
1649 accurate, but it is enough now) */
1650 url_fseek(&ic->pb, 0, SEEK_SET);
1651 read_size = 0;
1652 for(;;) {
1653 if (read_size >= DURATION_MAX_READ_SIZE)
1654 break;
1655 /* if all info is available, we can stop */
1656 for(i = 0;i < ic->nb_streams; i++) {
1657 st = ic->streams[i];
1658 if (st->start_time == AV_NOPTS_VALUE)
1659 break;
1660 }
1661 if (i == ic->nb_streams)
1662 break;
1663
1664 ret = av_read_packet(ic, pkt);
1665 if (ret != 0)
1666 break;
1667 read_size += pkt->size;
1668 st = ic->streams[pkt->stream_index];
1669 if (pkt->pts != AV_NOPTS_VALUE) {
1670 if (st->start_time == AV_NOPTS_VALUE)
1671 st->start_time = pkt->pts;
1672 }
1673 av_free_packet(pkt);
1674 }
1675
1676 /* estimate the end time (duration) */
1677 /* XXX: may need to support wrapping */
1678 filesize = ic->file_size;
1679 offset = filesize - DURATION_MAX_READ_SIZE;
1680 if (offset < 0)
1681 offset = 0;
1682
1683 url_fseek(&ic->pb, offset, SEEK_SET);
1684 read_size = 0;
1685 for(;;) {
1686 if (read_size >= DURATION_MAX_READ_SIZE)
1687 break;
1688 /* if all info is available, we can stop */
1689 for(i = 0;i < ic->nb_streams; i++) {
1690 st = ic->streams[i];
1691 if (st->duration == AV_NOPTS_VALUE)
1692 break;
1693 }
1694 if (i == ic->nb_streams)
1695 break;
1696
1697 ret = av_read_packet(ic, pkt);
1698 if (ret != 0)
1699 break;
1700 read_size += pkt->size;
1701 st = ic->streams[pkt->stream_index];
1702 if (pkt->pts != AV_NOPTS_VALUE) {
1703 end_time = pkt->pts;
1704 duration = end_time - st->start_time;
1705 if (duration > 0) {
1706 if (st->duration == AV_NOPTS_VALUE ||
1707 st->duration < duration)
1708 st->duration = duration;
1709 }
1710 }
1711 av_free_packet(pkt);
1712 }
1713
1714 fill_all_stream_timings(ic);
1715
1716 url_fseek(&ic->pb, 0, SEEK_SET);
1717}
1718
1719static void av_estimate_timings(AVFormatContext *ic)
1720{
1721 int64_t file_size;
1722
1723 /* get the file size, if possible */
1724 if (ic->iformat->flags & AVFMT_NOFILE) {
1725 file_size = 0;
1726 } else {
1727 file_size = url_fsize(&ic->pb);
1728 if (file_size < 0)
1729 file_size = 0;
1730 }
1731 ic->file_size = file_size;
1732
1733 if ((ic->iformat == &mpegps_demuxer || ic->iformat == &mpegts_demuxer) && file_size && !ic->pb.is_streamed) {
1734 /* get accurate estimate from the PTSes */
1735 av_estimate_timings_from_pts(ic);
1736 } else if (av_has_timings(ic)) {
1737 /* at least one component has timings - we use them for all
1738 the components */
1739 fill_all_stream_timings(ic);
1740 } else {
1741 /* less precise: use bit rate info */
1742 av_estimate_timings_from_bit_rate(ic);
1743 }
1744 av_update_stream_timings(ic);
1745
1746#if 0
1747 {
1748 int i;
1749 AVStream *st;
1750 for(i = 0;i < ic->nb_streams; i++) {
1751 st = ic->streams[i];
1752 printf("%d: start_time: %0.3f duration: %0.3f\n",
1753 i, (double)st->start_time / AV_TIME_BASE,
1754 (double)st->duration / AV_TIME_BASE);
1755 }
1756 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1757 (double)ic->start_time / AV_TIME_BASE,
1758 (double)ic->duration / AV_TIME_BASE,
1759 ic->bit_rate / 1000);
1760 }
1761#endif
1762}
1763
1764static int has_codec_parameters(AVCodecContext *enc)
1765{
1766 int val;
1767 switch(enc->codec_type) {
1768 case CODEC_TYPE_AUDIO:
1769 val = enc->sample_rate;
1770 break;
1771 case CODEC_TYPE_VIDEO:
1772 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1773 break;
1774 default:
1775 val = 1;
1776 break;
1777 }
1778 return (val != 0);
1779}
1780
1781static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1782{
1783 int16_t *samples;
1784 AVCodec *codec;
1785 int got_picture, ret=0;
1786 AVFrame picture;
1787
1788 if(!st->codec->codec){
1789 codec = avcodec_find_decoder(st->codec->codec_id);
1790 if (!codec)
1791 return -1;
1792 ret = avcodec_open(st->codec, codec);
1793 if (ret < 0)
1794 return ret;
1795 }
1796
1797 if(!has_codec_parameters(st->codec)){
1798 switch(st->codec->codec_type) {
1799 case CODEC_TYPE_VIDEO:
1800 ret = avcodec_decode_video(st->codec, &picture,
1801 &got_picture, (uint8_t *)data, size);
1802 break;
1803 case CODEC_TYPE_AUDIO:
1804 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1805 if (!samples)
1806 goto fail;
1807 ret = avcodec_decode_audio(st->codec, samples,
1808 &got_picture, (uint8_t *)data, size);
1809 av_free(samples);
1810 break;
1811 default:
1812 break;
1813 }
1814 }
1815 fail:
1816 return ret;
1817}
1818
1819/* absolute maximum size we read until we abort */
1820#define MAX_READ_SIZE 5000000
1821
1822/* maximum duration until we stop analysing the stream */
1823#define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 3.0))
1824
1825/**
1826 * Read the beginning of a media file to get stream information. This
1827 * is useful for file formats with no headers such as MPEG. This
1828 * function also compute the real frame rate in case of mpeg2 repeat
1829 * frame mode.
1830 *
1831 * @param ic media file handle
1832 * @return >=0 if OK. AVERROR_xxx if error.
1833 * @todo let the user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
1834 */
1835int av_find_stream_info(AVFormatContext *ic)
1836{
1837 int i, count, ret, read_size, j;
1838 AVStream *st;
1839 AVPacket pkt1, *pkt;
1840 AVPacketList *pktl=NULL, **ppktl;
1841 int64_t last_dts[MAX_STREAMS];
1842 int64_t duration_sum[MAX_STREAMS];
1843 int duration_count[MAX_STREAMS]={0};
1844
1845 for(i=0;i<ic->nb_streams;i++) {
1846 st = ic->streams[i];
1847 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1848/* if(!st->time_base.num)
1849 st->time_base= */
1850 if(!st->codec->time_base.num)
1851 st->codec->time_base= st->time_base;
1852 }
1853 //only for the split stuff
1854 if (!st->parser) {
1855 st->parser = av_parser_init(st->codec->codec_id);
1856 if(st->need_parsing == 2 && st->parser){
1857 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1858 }
1859 }
1860 }
1861
1862 for(i=0;i<MAX_STREAMS;i++){
1863 last_dts[i]= AV_NOPTS_VALUE;
1864 duration_sum[i]= INT64_MAX;
1865 }
1866
1867 count = 0;
1868 read_size = 0;
1869 ppktl = &ic->packet_buffer;
1870 for(;;) {
1871 /* check if one codec still needs to be handled */
1872 for(i=0;i<ic->nb_streams;i++) {
1873 st = ic->streams[i];
1874 if (!has_codec_parameters(st->codec))
1875 break;
1876 /* variable fps and no guess at the real fps */
1877 if( st->codec->time_base.den >= 101LL*st->codec->time_base.num
1878 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1879 break;
1880 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1881 break;
1882 }
1883 if (i == ic->nb_streams) {
1884 /* NOTE: if the format has no header, then we need to read
1885 some packets to get most of the streams, so we cannot
1886 stop here */
1887 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1888 /* if we found the info for all the codecs, we can stop */
1889 ret = count;
1890 break;
1891 }
1892 } else {
1893 /* we did not get all the codec info, but we read too much data */
1894 if (read_size >= MAX_READ_SIZE) {
1895 ret = count;
1896 break;
1897 }
1898 }
1899
1900 /* NOTE: a new stream can be added there if no header in file
1901 (AVFMTCTX_NOHEADER) */
1902 ret = av_read_frame_internal(ic, &pkt1);
1903 if (ret < 0) {
1904 /* EOF or error */
1905 ret = -1; /* we could not get all the codec parameters before EOF */
1906 for(i=0;i<ic->nb_streams;i++) {
1907 st = ic->streams[i];
1908 if (!has_codec_parameters(st->codec)){
1909 char buf[256];
1910 avcodec_string(buf, sizeof(buf), st->codec, 0);
1911 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1912 } else {
1913 ret = 0;
1914 }
1915 }
1916 break;
1917 }
1918
1919 pktl = av_mallocz(sizeof(AVPacketList));
1920 if (!pktl) {
1921 ret = AVERROR_NOMEM;
1922 break;
1923 }
1924
1925 /* add the packet in the buffered packet list */
1926 *ppktl = pktl;
1927 ppktl = &pktl->next;
1928
1929 pkt = &pktl->pkt;
1930 *pkt = pkt1;
1931
1932 /* duplicate the packet */
1933 if (av_dup_packet(pkt) < 0) {
1934 ret = AVERROR_NOMEM;
1935 break;
1936 }
1937
1938 read_size += pkt->size;
1939
1940 st = ic->streams[pkt->stream_index];
1941 st->codec_info_duration += pkt->duration;
1942 if (pkt->duration != 0)
1943 st->codec_info_nb_frames++;
1944
1945 {
1946 int index= pkt->stream_index;
1947 int64_t last= last_dts[index];
1948 int64_t duration= pkt->dts - last;
1949
1950 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1951 if(duration*duration_count[index]*10/9 < duration_sum[index]){
1952 duration_sum[index]= duration;
1953 duration_count[index]=1;
1954 }else{
1955 int factor= av_rescale(duration, duration_count[index], duration_sum[index]);
1956 duration_sum[index] += duration;
1957 duration_count[index]+= factor;
1958 }
1959 if(st->codec_info_nb_frames == 0 && 0)
1960 st->codec_info_duration += duration;
1961 }
1962 last_dts[pkt->stream_index]= pkt->dts;
1963 }
1964 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1965 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1966 if(i){
1967 st->codec->extradata_size= i;
1968 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1969 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1970 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1971 }
1972 }
1973
1974 /* if still no information, we try to open the codec and to
1975 decompress the frame. We try to avoid that in most cases as
1976 it takes longer and uses more memory. For MPEG4, we need to
1977 decompress for Quicktime. */
1978 if (!has_codec_parameters(st->codec) /*&&
1979 (st->codec->codec_id == CODEC_ID_FLV1 ||
1980 st->codec->codec_id == CODEC_ID_H264 ||
1981 st->codec->codec_id == CODEC_ID_H263 ||
1982 st->codec->codec_id == CODEC_ID_H261 ||
1983 st->codec->codec_id == CODEC_ID_VORBIS ||
1984 st->codec->codec_id == CODEC_ID_MJPEG ||
1985 st->codec->codec_id == CODEC_ID_PNG ||
1986 st->codec->codec_id == CODEC_ID_PAM ||
1987 st->codec->codec_id == CODEC_ID_PGM ||
1988 st->codec->codec_id == CODEC_ID_PGMYUV ||
1989 st->codec->codec_id == CODEC_ID_PBM ||
1990 st->codec->codec_id == CODEC_ID_PPM ||
1991 st->codec->codec_id == CODEC_ID_SHORTEN ||
1992 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1993 try_decode_frame(st, pkt->data, pkt->size);
1994
1995 if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
1996 break;
1997 }
1998 count++;
1999 }
2000
2001 // close codecs which were opened in try_decode_frame()
2002 for(i=0;i<ic->nb_streams;i++) {
2003 st = ic->streams[i];
2004 if(st->codec->codec)
2005 avcodec_close(st->codec);
2006 }
2007 for(i=0;i<ic->nb_streams;i++) {
2008 st = ic->streams[i];
2009 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2010 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2011 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2012
2013 if(duration_count[i] && st->codec->time_base.num*101LL <= st->codec->time_base.den &&
2014 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den){
2015 int64_t num, den, error, best_error;
2016
2017 num= st->time_base.den*duration_count[i];
2018 den= st->time_base.num*duration_sum[i];
2019
2020 best_error= INT64_MAX;
2021 for(j=1; j<60*12; j++){
2022 error= ABS(1001*12*num - 1001*j*den);
2023 if(error < best_error){
2024 best_error= error;
2025 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, j, 12, INT_MAX);
2026 }
2027 }
2028 for(j=24; j<=30; j+=6){
2029 error= ABS(1001*12*num - 1000*12*j*den);
2030 if(error < best_error){
2031 best_error= error;
2032 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, j*1000, 1001, INT_MAX);
2033 }
2034 }
2035 }
2036
2037 /* set real frame rate info */
2038 /* compute the real frame rate for telecine */
2039 if ((st->codec->codec_id == CODEC_ID_MPEG1VIDEO ||
2040 st->codec->codec_id == CODEC_ID_MPEG2VIDEO) &&
2041 st->codec->sub_id == 2) {
2042 if (st->codec_info_nb_frames >= 20) {
2043 float coded_frame_rate, est_frame_rate;
2044 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
2045 (double)st->codec_info_duration ;
2046 coded_frame_rate = 1.0/av_q2d(st->codec->time_base);
2047#if 0
2048 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
2049 coded_frame_rate, est_frame_rate);
2050#endif
2051 /* if we detect that it could be a telecine, we
2052 signal it. It would be better to do it at a
2053 higher level as it can change in a film */
2054 if (coded_frame_rate >= 24.97 &&
2055 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
2056 st->r_frame_rate = (AVRational){24000, 1001};
2057 }
2058 }
2059 }
2060 /* if no real frame rate, use the codec one */
2061 if (!st->r_frame_rate.num){
2062 st->r_frame_rate.num = st->codec->time_base.den;
2063 st->r_frame_rate.den = st->codec->time_base.num;
2064 }
2065 }
2066 }
2067
2068 av_estimate_timings(ic);
2069#if 0
2070 /* correct DTS for b frame streams with no timestamps */
2071 for(i=0;i<ic->nb_streams;i++) {
2072 st = ic->streams[i];
2073 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2074 if(b-frames){
2075 ppktl = &ic->packet_buffer;
2076 while(ppkt1){
2077 if(ppkt1->stream_index != i)
2078 continue;
2079 if(ppkt1->pkt->dts < 0)
2080 break;
2081 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2082 break;
2083 ppkt1->pkt->dts -= delta;
2084 ppkt1= ppkt1->next;
2085 }
2086 if(ppkt1)
2087 continue;
2088 st->cur_dts -= delta;
2089 }
2090 }
2091 }
2092#endif
2093 return ret;
2094}
2095
2096/*******************************************************/
2097
2098/**
2099 * Start playing a network-based stream (e.g. an RTSP stream) at the
2100 * current position.
2101 */
2102int av_read_play(AVFormatContext *s)
2103{
2104 if (!s->iformat->read_play)
2105 return AVERROR_NOTSUPP;
2106 return s->iformat->read_play(s);
2107}
2108
2109/**
2110 * Pause a network-based stream (e.g. an RTSP stream).
2111 *
2112 * Use av_read_play() to resume it.
2113 */
2114int av_read_pause(AVFormatContext *s)
2115{
2116 if (!s->iformat->read_pause)
2117 return AVERROR_NOTSUPP;
2118 return s->iformat->read_pause(s);
2119}
2120
2121/**
2122 * Close a media file (but not its codecs).
2123 *
2124 * @param s media file handle
2125 */
2126void av_close_input_file(AVFormatContext *s)
2127{
2128 int i, must_open_file;
2129 AVStream *st;
2130
2131 /* free previous packet */
2132 if (s->cur_st && s->cur_st->parser)
2133 av_free_packet(&s->cur_pkt);
2134
2135 if (s->iformat->read_close)
2136 s->iformat->read_close(s);
2137 for(i=0;i<s->nb_streams;i++) {
2138 /* free all data in a stream component */
2139 st = s->streams[i];
2140 if (st->parser) {
2141 av_parser_close(st->parser);
2142 }
2143 av_free(st->index_entries);
2144 av_free(st->codec->extradata);
2145 av_free(st->codec);
2146 av_free(st);
2147 }
2148 flush_packet_queue(s);
2149 must_open_file = 1;
2150 if (s->iformat->flags & AVFMT_NOFILE) {
2151 must_open_file = 0;
2152 }
2153 if (must_open_file) {
2154 url_fclose(&s->pb);
2155 }
2156 av_freep(&s->priv_data);
2157 av_free(s);
2158}
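
/*
 * A minimal usage sketch of the demuxing lifecycle around av_close_input_file():
 * open, probe stream info, read packets, close. The helper demux_example() and
 * its error handling are illustrative only.
 */
#if 0
static int demux_example(const char *filename)
{
    AVFormatContext *ic;
    AVPacket pkt;
    int err;

    /* open the container and let the demuxer parse the header */
    err = av_open_input_file(&ic, filename, NULL, 0, NULL);
    if (err < 0)
        return err;

    /* fill in missing codec parameters by reading a few packets */
    err = av_find_stream_info(ic);
    if (err < 0) {
        av_close_input_file(ic);
        return err;
    }

    /* read demuxed packets until end of file */
    while (av_read_frame(ic, &pkt) >= 0) {
        /* ... decode or remux pkt here ... */
        av_free_packet(&pkt);
    }

    /* frees streams, parsers and closes the underlying ByteIOContext */
    av_close_input_file(ic);
    return 0;
}
#endif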
2159
2160/**
2161 * Add a new stream to a media file.
2162 *
2163 * Can only be called in the read_header() function. If the flag
2164 * AVFMTCTX_NOHEADER is in the format context, then new streams
2165 * can be added in read_packet too.
2166 *
2167 * @param s media file handle
2168 * @param id file format dependent stream id
2169 */
2170AVStream *av_new_stream(AVFormatContext *s, int id)
2171{
2172 AVStream *st;
2173
2174 if (s->nb_streams >= MAX_STREAMS)
2175 return NULL;
2176
2177 st = av_mallocz(sizeof(AVStream));
2178 if (!st)
2179 return NULL;
2180
2181 st->codec= avcodec_alloc_context();
2182 if (s->iformat) {
2183 /* no default bitrate if decoding */
2184 st->codec->bit_rate = 0;
2185 }
2186 st->index = s->nb_streams;
2187 st->id = id;
2188 st->start_time = AV_NOPTS_VALUE;
2189 st->duration = AV_NOPTS_VALUE;
2190 st->cur_dts = AV_NOPTS_VALUE;
2191
2192 /* default pts settings is MPEG like */
2193 av_set_pts_info(st, 33, 1, 90000);
2194 st->last_IP_pts = AV_NOPTS_VALUE;
2195
2196 s->streams[s->nb_streams++] = st;
2197 return st;
2198}
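
/*
 * A sketch of how a demuxer's read_header() callback typically uses
 * av_new_stream() together with av_set_pts_info(); the codec id, picture
 * size and time base below are placeholders, not values required by any
 * real demuxer.
 */
#if 0
static int example_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVStream *st = av_new_stream(s, 0);
    if (!st)
        return AVERROR_NOMEM;

    /* describe the elementary stream found in the container */
    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_MPEG1VIDEO;
    st->codec->width      = 352;
    st->codec->height     = 288;

    /* override the MPEG-like default set above: here timestamps are
       counted in milliseconds */
    av_set_pts_info(st, 64, 1, 1000);
    return 0;
}
#endif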
2199
2200/************************************************************/
2201/* output media file */
2202
2203int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2204{
2205 int ret;
2206
2207 if (s->oformat->priv_data_size > 0) {
2208 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2209 if (!s->priv_data)
2210 return AVERROR_NOMEM;
2211 } else
2212 s->priv_data = NULL;
2213
2214 if (s->oformat->set_parameters) {
2215 ret = s->oformat->set_parameters(s, ap);
2216 if (ret < 0)
2217 return ret;
2218 }
2219 return 0;
2220}
2221
2222/**
2223 * Allocate the stream private data and write the stream header to an
2224 * output media file
2225 *
2226 * @param s media file handle
2227 * @return 0 if OK. AVERROR_xxx if error.
2228 */
2229int av_write_header(AVFormatContext *s)
2230{
2231 int ret, i;
2232 AVStream *st;
2233
2234 // some sanity checks
2235 for(i=0;i<s->nb_streams;i++) {
2236 st = s->streams[i];
2237
2238 switch (st->codec->codec_type) {
2239 case CODEC_TYPE_AUDIO:
2240 if(st->codec->sample_rate<=0){
2241 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2242 return -1;
2243 }
2244 break;
2245 case CODEC_TYPE_VIDEO:
2246 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2247 av_log(s, AV_LOG_ERROR, "time base not set\n");
2248 return -1;
2249 }
2250 if(st->codec->width<=0 || st->codec->height<=0){
2251 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2252 return -1;
2253 }
2254 break;
2255 }
2256 }
2257
2258 if(s->oformat->write_header){
2259 ret = s->oformat->write_header(s);
2260 if (ret < 0)
2261 return ret;
2262 }
2263
2264 /* init PTS generation */
2265 for(i=0;i<s->nb_streams;i++) {
2266 int64_t den = AV_NOPTS_VALUE;
2267 st = s->streams[i];
2268
2269 switch (st->codec->codec_type) {
2270 case CODEC_TYPE_AUDIO:
2271 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2272 break;
2273 case CODEC_TYPE_VIDEO:
2274 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2275 break;
2276 default:
2277 break;
2278 }
2279 if (den != AV_NOPTS_VALUE) {
2280 if (den <= 0)
2281 return AVERROR_INVALIDDATA;
2282 av_frac_init(&st->pts, 0, 0, den);
2283 }
2284 }
2285 return 0;
2286}
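
/*
 * A sketch of the muxer-side stream setup that satisfies the sanity checks
 * above before av_write_header() is called. The codec ids are taken from the
 * output format defaults; the dimensions, rates and channel count are
 * illustrative.
 */
#if 0
static int setup_and_write_header(AVFormatContext *oc)
{
    AVStream *video_st, *audio_st;

    video_st = av_new_stream(oc, 0);
    audio_st = av_new_stream(oc, 1);
    if (!video_st || !audio_st)
        return AVERROR_NOMEM;

    /* video stream: time base and dimensions are mandatory */
    video_st->codec->codec_type = CODEC_TYPE_VIDEO;
    video_st->codec->codec_id   = oc->oformat->video_codec;
    video_st->codec->time_base  = (AVRational){1, 25};
    video_st->codec->width      = 352;
    video_st->codec->height     = 288;

    /* audio stream: the sample rate is mandatory */
    audio_st->codec->codec_type  = CODEC_TYPE_AUDIO;
    audio_st->codec->codec_id    = oc->oformat->audio_codec;
    audio_st->codec->sample_rate = 44100;
    audio_st->codec->channels    = 2;

    if (av_set_parameters(oc, NULL) < 0)
        return -1;
    return av_write_header(oc);
}
#endif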
2287
2288//FIXME merge with compute_pkt_fields
2289static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2290 int b_frames = FFMAX(st->codec->has_b_frames, st->codec->max_b_frames);
2291 int num, den, frame_size;
2292
2293// av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
2294
2295/* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2296 return -1;*/
2297
2298 /* duration field */
2299 if (pkt->duration == 0) {
2300 compute_frame_duration(&num, &den, st, NULL, pkt);
2301 if (den && num) {
2302 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2303 }
2304 }
2305
2306 //XXX/FIXME this is a temporary hack until all encoders output pts
2307 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
2308 pkt->dts=
2309// pkt->pts= st->cur_dts;
2310 pkt->pts= st->pts.val;
2311 }
2312
2313 //calculate dts from pts
2314 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2315 if(b_frames){
2316 if(st->last_IP_pts == AV_NOPTS_VALUE){
2317 st->last_IP_pts= -pkt->duration;
2318 }
2319 if(st->last_IP_pts < pkt->pts){
2320 pkt->dts= st->last_IP_pts;
2321 st->last_IP_pts= pkt->pts;
2322 }else
2323 pkt->dts= pkt->pts;
2324 }else
2325 pkt->dts= pkt->pts;
2326 }
2327
2328 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2329 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2330 return -1;
2331 }
2332 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2333 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2334 return -1;
2335 }
2336
2337// av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
2338 st->cur_dts= pkt->dts;
2339 st->pts.val= pkt->dts;
2340
2341 /* update pts */
2342 switch (st->codec->codec_type) {
2343 case CODEC_TYPE_AUDIO:
2344 frame_size = get_audio_frame_size(st->codec, pkt->size);
2345
2346 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2347 but it would be better if we had the real timestamps from the encoder */
2348 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2349 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2350 }
2351 break;
2352 case CODEC_TYPE_VIDEO:
2353 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2354 break;
2355 default:
2356 break;
2357 }
2358 return 0;
2359}
2360
2361static void truncate_ts(AVStream *st, AVPacket *pkt){
2362 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2363
2364// if(pkt->dts < 0)
2365// pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2366
2367 pkt->pts &= pts_mask;
2368 pkt->dts &= pts_mask;
2369}
2370
2371/**
2372 * Write a packet to an output media file.
2373 *
2374 * The packet shall contain one audio or video frame.
2375 *
2376 * @param s media file handle
2377 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2378 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2379 */
2380int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2381{
2382 int ret;
2383
2384 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2385 if(ret<0)
2386 return ret;
2387
2388 truncate_ts(s->streams[pkt->stream_index], pkt);
2389
2390 ret= s->oformat->write_packet(s, pkt);
2391 if(!ret)
2392 ret= url_ferror(&s->pb);
2393 return ret;
2394}
2395
2396/**
2397 * interleave_packet implementation which will interleave per DTS.
2398 * Packets with pkt->destruct == av_destruct_packet will be freed inside this function,
2399 * so they cannot be used after it; note that calling av_free_packet() on them is still safe.
2400 */
2401static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2402 AVPacketList *pktl, **next_point, *this_pktl;
2403 int stream_count=0;
2404 int streams[MAX_STREAMS];
2405
2406 if(pkt){
2407 AVStream *st= s->streams[ pkt->stream_index];
2408
2409// assert(pkt->destruct != av_destruct_packet); //FIXME
2410
2411 this_pktl = av_mallocz(sizeof(AVPacketList));
2412 this_pktl->pkt= *pkt;
2413 if(pkt->destruct == av_destruct_packet)
2414 pkt->destruct= NULL; // non shared -> must keep original from being freed
2415 else
2416 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2417
2418 next_point = &s->packet_buffer;
2419 while(*next_point){
2420 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2421 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2422 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2423 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2424 break;
2425 next_point= &(*next_point)->next;
2426 }
2427 this_pktl->next= *next_point;
2428 *next_point= this_pktl;
2429 }
2430
2431 memset(streams, 0, sizeof(streams));
2432 pktl= s->packet_buffer;
2433 while(pktl){
2434//av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2435 if(streams[ pktl->pkt.stream_index ] == 0)
2436 stream_count++;
2437 streams[ pktl->pkt.stream_index ]++;
2438 pktl= pktl->next;
2439 }
2440
2441 if(s->nb_streams == stream_count || (flush && stream_count)){
2442 pktl= s->packet_buffer;
2443 *out= pktl->pkt;
2444
2445 s->packet_buffer= pktl->next;
2446 av_freep(&pktl);
2447 return 1;
2448 }else{
2449 av_init_packet(out);
2450 return 0;
2451 }
2452}
2453
2454/**
2455 * Interleaves an AVPacket correctly so it can be muxed.
2456 * @param out the interleaved packet will be output here
2457 * @param in the input packet
2458 * @param flush 1 if no further packets are available as input and all
2459 * remaining packets should be output
2460 * @return 1 if a packet was output, 0 if no packet could be output,
2461 * < 0 if an error occurred
2462 */
2463static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2464 if(s->oformat->interleave_packet)
2465 return s->oformat->interleave_packet(s, out, in, flush);
2466 else
2467 return av_interleave_packet_per_dts(s, out, in, flush);
2468}
2469
2470/**
2471 * Writes a packet to an output media file ensuring correct interleaving.
2472 *
2473 * The packet must contain one audio or video frame.
2474 * If the packets are already correctly interleaved, the application should
2475 * call av_write_frame() instead, as it is slightly faster. It is also important
2476 * to keep in mind that completely non-interleaved input will need huge amounts
2477 * of memory to interleave with this, so it is preferable to interleave at the
2478 * demuxer level.
2479 *
2480 * @param s media file handle
2481 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2482 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2483 */
2484int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2485 AVStream *st= s->streams[ pkt->stream_index];
2486
2487 //FIXME/XXX/HACK drop zero sized packets
2488 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2489 return 0;
2490
2491//av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts);
2492 if(compute_pkt_fields2(st, pkt) < 0)
2493 return -1;
2494
2495 if(pkt->dts == AV_NOPTS_VALUE)
2496 return -1;
2497
2498 for(;;){
2499 AVPacket opkt;
2500 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2501 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2502 return ret;
2503
2504 truncate_ts(s->streams[opkt.stream_index], &opkt);
2505 ret= s->oformat->write_packet(s, &opkt);
2506
2507 av_free_packet(&opkt);
2508 pkt= NULL;
2509
2510 if(ret<0)
2511 return ret;
2512 if(url_ferror(&s->pb))
2513 return url_ferror(&s->pb);
2514 }
2515}
2516
2517/**
2518 * @brief Write the stream trailer to an output media file and
2519 * free the file private data.
2520 *
2521 * @param s media file handle
2522 * @return 0 if OK. AVERROR_xxx if error.
2523 */
2524int av_write_trailer(AVFormatContext *s)
2525{
2526 int ret, i;
2527
2528 for(;;){
2529 AVPacket pkt;
2530 ret= av_interleave_packet(s, &pkt, NULL, 1);
2531 if(ret<0) //FIXME cleanup needed for ret<0 ?
2532 goto fail;
2533 if(!ret)
2534 break;
2535
2536 truncate_ts(s->streams[pkt.stream_index], &pkt);
2537 ret= s->oformat->write_packet(s, &pkt);
2538
2539 av_free_packet(&pkt);
2540
2541 if(ret<0)
2542 goto fail;
2543 if(url_ferror(&s->pb))
2544 goto fail;
2545 }
2546
2547 if(s->oformat->write_trailer)
2548 ret = s->oformat->write_trailer(s);
2549fail:
2550 if(ret == 0)
2551 ret=url_ferror(&s->pb);
2552 for(i=0;i<s->nb_streams;i++)
2553 av_freep(&s->streams[i]->priv_data);
2554 av_freep(&s->priv_data);
2555 return ret;
2556}
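
/*
 * A condensed muxing sketch tying together guess_format(), av_write_header(),
 * av_interleaved_write_frame() and av_write_trailer(). Stream creation is
 * elided and get_next_encoded_packet() is a hypothetical packet producer.
 */
#if 0
static int mux_example(const char *filename)
{
    AVOutputFormat *fmt = guess_format(NULL, filename, NULL);
    AVFormatContext *oc = av_alloc_format_context();
    AVPacket pkt;

    if (!fmt || !oc)
        return -1;
    oc->oformat = fmt;

    /* ... create streams with av_new_stream() and fill st->codec ... */

    if (av_set_parameters(oc, NULL) < 0 ||
        url_fopen(&oc->pb, filename, URL_WRONLY) < 0 ||
        av_write_header(oc) < 0)
        return -1;

    while (get_next_encoded_packet(&pkt) > 0) {
        /* buffers and reorders packets so the muxer sees monotone dts */
        if (av_interleaved_write_frame(oc, &pkt) < 0)
            break;
    }

    av_write_trailer(oc);   /* flushes the interleaving buffer */
    url_fclose(&oc->pb);
    av_free(oc);
    return 0;
}
#endif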
2557
2558/* "user interface" functions */
2559
2560void dump_format(AVFormatContext *ic,
2561 int index,
2562 const char *url,
2563 int is_output)
2564{
2565 int i, flags;
2566 char buf[256];
2567
2568 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2569 is_output ? "Output" : "Input",
2570 index,
2571 is_output ? ic->oformat->name : ic->iformat->name,
2572 is_output ? "to" : "from", url);
2573 if (!is_output) {
2574 av_log(NULL, AV_LOG_INFO, " Duration: ");
2575 if (ic->duration != AV_NOPTS_VALUE) {
2576 int hours, mins, secs, us;
2577 secs = ic->duration / AV_TIME_BASE;
2578 us = ic->duration % AV_TIME_BASE;
2579 mins = secs / 60;
2580 secs %= 60;
2581 hours = mins / 60;
2582 mins %= 60;
2583 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2584 (10 * us) / AV_TIME_BASE);
2585 } else {
2586 av_log(NULL, AV_LOG_INFO, "N/A");
2587 }
2588 if (ic->start_time != AV_NOPTS_VALUE) {
2589 int secs, us;
2590 av_log(NULL, AV_LOG_INFO, ", start: ");
2591 secs = ic->start_time / AV_TIME_BASE;
2592 us = ic->start_time % AV_TIME_BASE;
2593 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2594 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2595 }
2596 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2597 if (ic->bit_rate) {
2598 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2599 } else {
2600 av_log(NULL, AV_LOG_INFO, "N/A");
2601 }
2602 av_log(NULL, AV_LOG_INFO, "\n");
2603 }
2604 for(i=0;i<ic->nb_streams;i++) {
2605 AVStream *st = ic->streams[i];
2606 int g= ff_gcd(st->time_base.num, st->time_base.den);
2607 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2608 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2609 /* the PID is important information, so we display it */
2610 /* XXX: add a generic system */
2611 if (is_output)
2612 flags = ic->oformat->flags;
2613 else
2614 flags = ic->iformat->flags;
2615 if (flags & AVFMT_SHOW_IDS) {
2616 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2617 }
2618 if (strlen(st->language) > 0) {
2619 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2620 }
2621 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2622 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2623 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2624 if(st->r_frame_rate.den && st->r_frame_rate.num)
2625 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
2626/* else if(st->time_base.den && st->time_base.num)
2627 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
2628 else
2629 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
2630 }
2631 av_log(NULL, AV_LOG_INFO, "\n");
2632 }
2633}
2634
2635typedef struct {
2636 const char *abv;
2637 int width, height;
2638 int frame_rate, frame_rate_base;
2639} AbvEntry;
2640
2641static AbvEntry frame_abvs[] = {
2642 { "ntsc", 720, 480, 30000, 1001 },
2643 { "pal", 720, 576, 25, 1 },
2644 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2645 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2646 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2647 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2648 { "film", 352, 240, 24, 1 },
2649 { "ntsc-film", 352, 240, 24000, 1001 },
2650 { "sqcif", 128, 96, 0, 0 },
2651 { "qcif", 176, 144, 0, 0 },
2652 { "cif", 352, 288, 0, 0 },
2653 { "4cif", 704, 576, 0, 0 },
2654};
2655
2656/**
2657 * Parses width and height out of string str.
2658 */
2659int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2660{
2661 int i;
2662 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2663 const char *p;
2664 int frame_width = 0, frame_height = 0;
2665
2666 for(i=0;i<n;i++) {
2667 if (!strcmp(frame_abvs[i].abv, str)) {
2668 frame_width = frame_abvs[i].width;
2669 frame_height = frame_abvs[i].height;
2670 break;
2671 }
2672 }
2673 if (i == n) {
2674 p = str;
2675 frame_width = strtol(p, (char **)&p, 10);
2676 if (*p)
2677 p++;
2678 frame_height = strtol(p, (char **)&p, 10);
2679 }
2680 if (frame_width <= 0 || frame_height <= 0)
2681 return -1;
2682 *width_ptr = frame_width;
2683 *height_ptr = frame_height;
2684 return 0;
2685}
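
/*
 * Usage sketch: both the abbreviations from frame_abvs[] and explicit
 * "<width>x<height>" strings are accepted. The helper below is illustrative.
 */
#if 0
static void image_size_example(void)
{
    int w, h;
    parse_image_size(&w, &h, "qcif");    /* -> 176x144 */
    parse_image_size(&w, &h, "640x480"); /* -> 640x480 */
}
#endif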
2686
2687/**
2688 * Converts frame rate from string to a fraction.
2689 *
2690 * First we try to get an exact integer or fractional frame rate.
2691 * If this fails we convert the frame rate to a double and return
2692 * an approximate fraction using the DEFAULT_FRAME_RATE_BASE.
2693 */
2694int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2695{
2696 int i;
2697 char* cp;
2698
2699 /* First, we check our abbreviation table */
2700 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2701 if (!strcmp(frame_abvs[i].abv, arg)) {
2702 *frame_rate = frame_abvs[i].frame_rate;
2703 *frame_rate_base = frame_abvs[i].frame_rate_base;
2704 return 0;
2705 }
2706
2707 /* Then, we try to parse it as fraction */
2708 cp = strchr(arg, '/');
2709 if (!cp)
2710 cp = strchr(arg, ':');
2711 if (cp) {
2712 char* cpp;
2713 *frame_rate = strtol(arg, &cpp, 10);
2714 if (cpp != arg || cpp == cp)
2715 *frame_rate_base = strtol(cp+1, &cpp, 10);
2716 else
2717 *frame_rate = 0;
2718 }
2719 else {
2720 /* Finally we give up and parse it as double */
2721 AVRational time_base = av_d2q(strtod(arg, 0), DEFAULT_FRAME_RATE_BASE);
2722 *frame_rate_base = time_base.den;
2723 *frame_rate = time_base.num;
2724 }
2725 if (!*frame_rate || !*frame_rate_base)
2726 return -1;
2727 else
2728 return 0;
2729}
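
/*
 * Usage sketch covering the three accepted notations: abbreviation, fraction
 * and decimal. The helper below is illustrative.
 */
#if 0
static void frame_rate_example(void)
{
    int num, den;
    parse_frame_rate(&num, &den, "ntsc");        /* num=30000, den=1001 */
    parse_frame_rate(&num, &den, "24000/1001");  /* exact fraction */
    parse_frame_rate(&num, &den, "23.976");      /* approximated via av_d2q() */
}
#endif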
2730
2731/**
2732 * Converts a date string to a number of microseconds since Jan 1st, 1970.
2733 *
2734 * @code
2735 * Syntax:
2736 * - If not a duration:
2737 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2738 * Time is local time unless Z is suffixed to the end, in which case it is UTC (GMT).
2739 * Returns the date in microseconds since 1970.
2740 *
2741 * - If a duration:
2742 * HH[:MM[:SS[.m...]]]
2743 * S+[.m...]
2744 * @endcode
2745 */
2746#ifndef CONFIG_WINCE
2747int64_t parse_date(const char *datestr, int duration)
2748{
2749 const char *p;
2750 int64_t t;
2751 struct tm dt;
2752 int i;
2753 static const char *date_fmt[] = {
2754 "%Y-%m-%d",
2755 "%Y%m%d",
2756 };
2757 static const char *time_fmt[] = {
2758 "%H:%M:%S",
2759 "%H%M%S",
2760 };
2761 const char *q;
2762 int is_utc, len;
2763 char lastch;
2764 int negative = 0;
2765
2766#undef time
2767 time_t now = time(0);
2768
2769 len = strlen(datestr);
2770 if (len > 0)
2771 lastch = datestr[len - 1];
2772 else
2773 lastch = '\0';
2774 is_utc = (lastch == 'z' || lastch == 'Z');
2775
2776 memset(&dt, 0, sizeof(dt));
2777
2778 p = datestr;
2779 q = NULL;
2780 if (!duration) {
2781 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2782 q = small_strptime(p, date_fmt[i], &dt);
2783 if (q) {
2784 break;
2785 }
2786 }
2787
2788 if (!q) {
2789 if (is_utc) {
2790 dt = *gmtime(&now);
2791 } else {
2792 dt = *localtime(&now);
2793 }
2794 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2795 } else {
2796 p = q;
2797 }
2798
2799 if (*p == 'T' || *p == 't' || *p == ' ')
2800 p++;
2801
2802 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2803 q = small_strptime(p, time_fmt[i], &dt);
2804 if (q) {
2805 break;
2806 }
2807 }
2808 } else {
2809 if (p[0] == '-') {
2810 negative = 1;
2811 ++p;
2812 }
2813 q = small_strptime(p, time_fmt[0], &dt);
2814 if (!q) {
2815 dt.tm_sec = strtol(p, (char **)&q, 10);
2816 dt.tm_min = 0;
2817 dt.tm_hour = 0;
2818 }
2819 }
2820
2821 /* Now we have all the fields that we can get */
2822 if (!q) {
2823 if (duration)
2824 return 0;
2825 else
2826 return now * int64_t_C(1000000);
2827 }
2828
2829 if (duration) {
2830 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2831 } else {
2832 dt.tm_isdst = -1; /* unknown */
2833 if (is_utc) {
2834 t = mktimegm(&dt);
2835 } else {
2836 t = mktime(&dt);
2837 }
2838 }
2839
2840 t *= 1000000;
2841
2842 if (*q == '.') {
2843 int val, n;
2844 q++;
2845 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2846 if (!isdigit(*q))
2847 break;
2848 val += n * (*q - '0');
2849 }
2850 t += val;
2851 }
2852 return negative ? -t : t;
2853}
2854#endif /* CONFIG_WINCE */
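
/*
 * Usage sketch for parse_date(); both forms return microseconds, the duration
 * form is selected with the second argument. The values are examples only.
 */
#if 0
static void parse_date_example(void)
{
    int64_t when = parse_date("2006-07-10T12:00:00Z", 0); /* absolute date, UTC */
    int64_t len  = parse_date("01:30:00.500", 1);         /* duration: 5400500000 us */
}
#endif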
2855
2856/**
2857 * Attempts to find a specific tag in a URL.
2858 *
2859 * Syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done.
2860 * Returns 1 if found.
2861 */
2862int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2863{
2864 const char *p;
2865 char tag[128], *q;
2866
2867 p = info;
2868 if (*p == '?')
2869 p++;
2870 for(;;) {
2871 q = tag;
2872 while (*p != '\0' && *p != '=' && *p != '&') {
2873 if ((q - tag) < sizeof(tag) - 1)
2874 *q++ = *p;
2875 p++;
2876 }
2877 *q = '\0';
2878 q = arg;
2879 if (*p == '=') {
2880 p++;
2881 while (*p != '&' && *p != '\0') {
2882 if ((q - arg) < arg_size - 1) {
2883 if (*p == '+')
2884 *q++ = ' ';
2885 else
2886 *q++ = *p;
2887 }
2888 p++;
2889 }
2890 *q = '\0';
2891 }
2892 if (!strcmp(tag, tag1))
2893 return 1;
2894 if (*p != '&')
2895 break;
2896 p++;
2897 }
2898 return 0;
2899}
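
/*
 * Usage sketch: extracting one option from an RTSP-style parameter string.
 * The tag name and value are made up.
 */
#if 0
static void info_tag_example(void)
{
    char buf[64];
    /* returns 1 and copies "10" into buf; a '+' would be decoded as a space */
    if (find_info_tag(buf, sizeof(buf), "ttl", "?ttl=10&multicast=1")) {
        /* ... use buf ... */
    }
}
#endif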
2900
2901/**
2902 * Returns in 'buf' the path with '%d' replaced by number.
2903 *
2904 * Also handles the '%0nd' format where 'n' is the total number
2905 * of digits and '%%'. Return 0 if OK, and -1 if format error.
2906 */
2907int get_frame_filename(char *buf, int buf_size,
2908 const char *path, int number)
2909{
2910 const char *p;
2911 char *q, buf1[20], c;
2912 int nd, len, percentd_found;
2913
2914 q = buf;
2915 p = path;
2916 percentd_found = 0;
2917 for(;;) {
2918 c = *p++;
2919 if (c == '\0')
2920 break;
2921 if (c == '%') {
2922 do {
2923 nd = 0;
2924 while (isdigit(*p)) {
2925 nd = nd * 10 + *p++ - '0';
2926 }
2927 c = *p++;
2928 } while (isdigit(c));
2929
2930 switch(c) {
2931 case '%':
2932 goto addchar;
2933 case 'd':
2934 if (percentd_found)
2935 goto fail;
2936 percentd_found = 1;
2937 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2938 len = strlen(buf1);
2939 if ((q - buf + len) > buf_size - 1)
2940 goto fail;
2941 memcpy(q, buf1, len);
2942 q += len;
2943 break;
2944 default:
2945 goto fail;
2946 }
2947 } else {
2948 addchar:
2949 if ((q - buf) < buf_size - 1)
2950 *q++ = c;
2951 }
2952 }
2953 if (!percentd_found)
2954 goto fail;
2955 *q = '\0';
2956 return 0;
2957 fail:
2958 *q = '\0';
2959 return -1;
2960}
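
/*
 * Usage sketch: expanding an image sequence pattern. A pattern without '%d'
 * (or with more than one) makes the function fail.
 */
#if 0
static void frame_filename_example(void)
{
    char name[1024];
    get_frame_filename(name, sizeof(name), "frame%04d.ppm", 7); /* -> "frame0007.ppm" */
    get_frame_filename(name, sizeof(name), "frame.ppm", 7);     /* -> -1, no '%d' */
}
#endif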
2961
2962/**
2963 * Print a nice hex dump of a buffer
2964 * @param f stream for output
2965 * @param buf buffer
2966 * @param size buffer size
2967 */
2968void av_hex_dump(FILE *f, uint8_t *buf, int size)
2969{
2970 int len, i, j, c;
2971
2972 for(i=0;i<size;i+=16) {
2973 len = size - i;
2974 if (len > 16)
2975 len = 16;
2976 fprintf(f, "%08x ", i);
2977 for(j=0;j<16;j++) {
2978 if (j < len)
2979 fprintf(f, " %02x", buf[i+j]);
2980 else
2981 fprintf(f, " ");
2982 }
2983 fprintf(f, " ");
2984 for(j=0;j<len;j++) {
2985 c = buf[i+j];
2986 if (c < ' ' || c > '~')
2987 c = '.';
2988 fprintf(f, "%c", c);
2989 }
2990 fprintf(f, "\n");
2991 }
2992}
2993
2994/**
2995 * Print on 'f' a nice dump of a packet
2996 * @param f stream for output
2997 * @param pkt packet to dump
2998 * @param dump_payload true if the payload must be displayed too
2999 */
3000 //FIXME needs to know the time_base
3001void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3002{
3003 fprintf(f, "stream #%d:\n", pkt->stream_index);
3004 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3005 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3006 /* DTS is _always_ valid after av_read_frame() */
3007 fprintf(f, " dts=");
3008 if (pkt->dts == AV_NOPTS_VALUE)
3009 fprintf(f, "N/A");
3010 else
3011 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
3012 /* PTS may not be known if B-frames are present */
3013 fprintf(f, " pts=");
3014 if (pkt->pts == AV_NOPTS_VALUE)
3015 fprintf(f, "N/A");
3016 else
3017 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
3018 fprintf(f, "\n");
3019 fprintf(f, " size=%d\n", pkt->size);
3020 if (dump_payload)
3021 av_hex_dump(f, pkt->data, pkt->size);
3022}
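
/*
 * Usage sketch: dumping every packet of an already opened AVFormatContext.
 * As the FIXME above notes, the timestamps are printed as if they were in
 * AV_TIME_BASE units.
 */
#if 0
static void dump_all_packets(AVFormatContext *ic)
{
    AVPacket pkt;

    while (av_read_frame(ic, &pkt) >= 0) {
        av_pkt_dump(stderr, &pkt, 0); /* pass 1 to also hex-dump the payload */
        av_free_packet(&pkt);
    }
}
#endif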
3023
3024void url_split(char *proto, int proto_size,
3025 char *authorization, int authorization_size,
3026 char *hostname, int hostname_size,
3027 int *port_ptr,
3028 char *path, int path_size,
3029 const char *url)
3030{
3031 const char *p;
3032 char *q;
3033 int port;
3034
3035 port = -1;
3036
3037 p = url;
3038 q = proto;
3039 while (*p != ':' && *p != '\0') {
3040 if ((q - proto) < proto_size - 1)
3041 *q++ = *p;
3042 p++;
3043 }
3044 if (proto_size > 0)
3045 *q = '\0';
3046 if (authorization_size > 0)
3047 authorization[0] = '\0';
3048 if (*p == '\0') {
3049 if (proto_size > 0)
3050 proto[0] = '\0';
3051 if (hostname_size > 0)
3052 hostname[0] = '\0';
3053 p = url;
3054 } else {
3055 char *at,*slash; // PETR: position of '@' character and '/' character
3056
3057 p++;
3058 if (*p == '/')
3059 p++;
3060 if (*p == '/')
3061 p++;
3062 at = strchr(p,'@'); // PETR: get the position of '@'
3063 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
3064 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
3065
3066 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
3067
3068 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
3069 if (*p == '@') { // PETR: passed '@'
3070 if (authorization_size > 0)
3071 *q = '\0';
3072 q = hostname;
3073 at = NULL;
3074 } else if (!at) { // PETR: hostname
3075 if ((q - hostname) < hostname_size - 1)
3076 *q++ = *p;
3077 } else {
3078 if ((q - authorization) < authorization_size - 1)
3079 *q++ = *p;
3080 }
3081 p++;
3082 }
3083 if (hostname_size > 0)
3084 *q = '\0';
3085 if (*p == ':') {
3086 p++;
3087 port = strtoul(p, (char **)&p, 10);
3088 }
3089 }
3090 if (port_ptr)
3091 *port_ptr = port;
3092 pstrcpy(path, path_size, p);
3093}
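
/*
 * Usage sketch: splitting an RTSP URL into its components. The URL and the
 * buffer sizes are arbitrary examples.
 */
#if 0
static void url_split_example(void)
{
    char proto[16], auth[128], host[128], path[256];
    int port;

    url_split(proto, sizeof(proto), auth, sizeof(auth),
              host, sizeof(host), &port, path, sizeof(path),
              "rtsp://user:pass@example.com:554/stream/1");
    /* proto="rtsp", auth="user:pass", host="example.com", port=554, path="/stream/1" */
}
#endif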
3094
3095/**
3096 * Set the pts for a given stream.
3097 *
3098 * @param s stream
3099 * @param pts_wrap_bits number of bits effectively used by the pts
3100 * (used for wrap control, 33 is the value for MPEG)
3101 * @param pts_num numerator to convert to seconds (MPEG: 1)
3102 * @param pts_den denominator to convert to seconds (MPEG: 90000)
3103 */
3104void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3105 int pts_num, int pts_den)
3106{
3107 s->pts_wrap_bits = pts_wrap_bits;
3108 s->time_base.num = pts_num;
3109 s->time_base.den = pts_den;
3110}
3111
3112/* fraction handling */
3113
3114/**
3115 * f = val + (num / den) + 0.5.
3116 *
3117 * 'num' is normalized so that 0 <= num < den.
3118 *
3119 * @param f fractional number
3120 * @param val integer value
3121 * @param num must be >= 0
3122 * @param den must be >= 1
3123 */
3124static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3125{
3126 num += (den >> 1);
3127 if (num >= den) {
3128 val += num / den;
3129 num = num % den;
3130 }
3131 f->val = val;
3132 f->num = num;
3133 f->den = den;
3134}
3135
3136/**
3137 * Set f to (val + 0.5).
3138 */
3139static void av_frac_set(AVFrac *f, int64_t val)
3140{
3141 f->val = val;
3142 f->num = f->den >> 1;
3143}
3144
3145/**
3146 * Fractional addition to f: f = f + (incr / f->den).
3147 *
3148 * @param f fractional number
3149 * @param incr increment, can be positive or negative
3150 */
3151static void av_frac_add(AVFrac *f, int64_t incr)
3152{
3153 int64_t num, den;
3154
3155 num = f->num + incr;
3156 den = f->den;
3157 if (num < 0) {
3158 f->val += num / den;
3159 num = num % den;
3160 if (num < 0) {
3161 num += den;
3162 f->val--;
3163 }
3164 } else if (num >= den) {
3165 f->val += num / den;
3166 num = num % den;
3167 }
3168 f->num = num;
3169}
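
/*
 * Worked example of the fraction helpers as used for audio PTS generation:
 * with time_base = 1/90000 and sample_rate = 44100, av_write_header() sets
 * den = 44100 and each 1152-sample frame advances the pts by
 * 90000*1152/44100 = ~2351.02 ticks without accumulating rounding error.
 */
#if 0
static void frac_example(void)
{
    AVFrac pts;
    int i;

    av_frac_init(&pts, 0, 0, 44100);        /* den as set up by av_write_header() */
    for (i = 0; i < 3; i++)
        av_frac_add(&pts, 90000LL * 1152);  /* one 1152-sample frame in 1/90000 units */
    /* pts.val is now 7053 = floor(3*90000*1152/44100 + 0.5); the fractional
       remainder is carried in pts.num / pts.den */
}
#endif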
3170
3171/**
3172 * Register a new image format.
3173 * @param img_fmt Image format descriptor
3174 */
3175void av_register_image_format(AVImageFormat *img_fmt)
3176{
3177 AVImageFormat **p;
3178
3179 p = &first_image_format;
3180 while (*p != NULL) p = &(*p)->next;
3181 *p = img_fmt;
3182 img_fmt->next = NULL;
3183}
3184
3185/**
3186 * Guesses image format based on data in the image.
3187 */
3188AVImageFormat *av_probe_image_format(AVProbeData *pd)
3189{
3190 AVImageFormat *fmt1, *fmt;
3191 int score, score_max;
3192
3193 fmt = NULL;
3194 score_max = 0;
3195 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
3196 if (fmt1->img_probe) {
3197 score = fmt1->img_probe(pd);
3198 if (score > score_max) {
3199 score_max = score;
3200 fmt = fmt1;
3201 }
3202 }
3203 }
3204 return fmt;
3205}
3206
3207/**
3208 * Guesses image format based on file name extensions.
3209 */
3210AVImageFormat *guess_image_format(const char *filename)
3211{
3212 AVImageFormat *fmt1;
3213
3214 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
3215 if (fmt1->extensions && match_ext(filename, fmt1->extensions))
3216 return fmt1;
3217 }
3218 return NULL;
3219}
3220
3221/**
3222 * Read an image from a stream.
3223 * @param pb byte stream containing the image
3224 * @param fmt image format, NULL if probing is required
3225 */
3226int av_read_image(ByteIOContext *pb, const char *filename,
3227 AVImageFormat *fmt,
3228 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
3229{
3230 uint8_t buf[PROBE_BUF_MIN];
3231 AVProbeData probe_data, *pd = &probe_data;
3232 offset_t pos;
3233 int ret;
3234
3235 if (!fmt) {
3236 pd->filename = filename;
3237 pd->buf = buf;
3238 pos = url_ftell(pb);
3239 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_MIN);
3240 url_fseek(pb, pos, SEEK_SET);
3241 fmt = av_probe_image_format(pd);
3242 }
3243 if (!fmt)
3244 return AVERROR_NOFMT;
3245 ret = fmt->img_read(pb, alloc_cb, opaque);
3246 return ret;
3247}
3248
3249/**
3250 * Write an image to a stream.
3251 * @param pb byte stream for the image output
3252 * @param fmt image format
3253 * @param img image data and information
3254 */
3255int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
3256{
3257 return fmt->img_write(pb, img);
3258}
3259