VirtualBox

source: vbox/trunk/src/libs/ffmpeg-20060710/libavcodec/vp3.c@ 9441

Last change on this file since 9441 was 5776, checked in by vboxsync, 17 years ago

ffmpeg: exported to OSE

File size: 103.3 KB
1/*
2 * Copyright (C) 2003-2004 the ffmpeg project
3 *
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
8 *
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with this library; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 *
18 */
19
20/**
21 * @file vp3.c
22 * On2 VP3 Video Decoder
23 *
24 * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
25 * For more information about the VP3 coding process, visit:
26 * http://multimedia.cx/
27 *
28 * Theora decoder by Alex Beregszaszi
29 */
30
31#include <stdio.h>
32#include <stdlib.h>
33#include <string.h>
34#include <unistd.h>
35
36#include "common.h"
37#include "avcodec.h"
38#include "dsputil.h"
39#include "mpegvideo.h"
40
41#include "vp3data.h"
42
43#define FRAGMENT_PIXELS 8
44
45/*
46 * Debugging Variables
47 *
48 * Define one or more of the following compile-time variables to 1 to obtain
49 * elaborate information about certain aspects of the decoding process.
50 *
51 * KEYFRAMES_ONLY: set this to 1 to only see keyframes (VP3 slideshow mode)
52 * DEBUG_VP3: high-level decoding flow
53 * DEBUG_INIT: initialization parameters
54 * DEBUG_DEQUANTIZERS: display how the dequantization tables are built
55 * DEBUG_BLOCK_CODING: unpacking the superblock/macroblock/fragment coding
56 * DEBUG_MODES: unpacking the coding modes for individual fragments
57 * DEBUG_VECTORS: display the motion vectors
58 * DEBUG_TOKEN: display exhaustive information about each DCT token
59 * DEBUG_VLC: display the VLCs as they are extracted from the stream
60 * DEBUG_DC_PRED: display the process of reversing DC prediction
61 * DEBUG_IDCT: show every detail of the IDCT process
62 */
63
64#define KEYFRAMES_ONLY 0
65
66#define DEBUG_VP3 0
67#define DEBUG_INIT 0
68#define DEBUG_DEQUANTIZERS 0
69#define DEBUG_BLOCK_CODING 0
70#define DEBUG_MODES 0
71#define DEBUG_VECTORS 0
72#define DEBUG_TOKEN 0
73#define DEBUG_VLC 0
74#define DEBUG_DC_PRED 0
75#define DEBUG_IDCT 0
76
77#if DEBUG_VP3
78#define debug_vp3(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
79#else
80static inline void debug_vp3(const char *format, ...) { }
81#endif
82
83#if DEBUG_INIT
84#define debug_init(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
85#else
86static inline void debug_init(const char *format, ...) { }
87#endif
88
89#if DEBUG_DEQUANTIZERS
90#define debug_dequantizers(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
91#else
92static inline void debug_dequantizers(const char *format, ...) { }
93#endif
94
95#if DEBUG_BLOCK_CODING
96#define debug_block_coding(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
97#else
98static inline void debug_block_coding(const char *format, ...) { }
99#endif
100
101#if DEBUG_MODES
102#define debug_modes(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
103#else
104static inline void debug_modes(const char *format, ...) { }
105#endif
106
107#if DEBUG_VECTORS
108#define debug_vectors(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
109#else
110static inline void debug_vectors(const char *format, ...) { }
111#endif
112
113#if DEBUG_TOKEN
114#define debug_token(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
115#else
116static inline void debug_token(const char *format, ...) { }
117#endif
118
119#if DEBUG_VLC
120#define debug_vlc(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
121#else
122static inline void debug_vlc(const char *format, ...) { }
123#endif
124
125#if DEBUG_DC_PRED
126#define debug_dc_pred(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
127#else
128static inline void debug_dc_pred(const char *format, ...) { }
129#endif
130
131#if DEBUG_IDCT
132#define debug_idct(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
133#else
134static inline void debug_idct(const char *format, ...) { }
135#endif
136
137typedef struct Coeff {
138 struct Coeff *next;
139 DCTELEM coeff;
140 uint8_t index;
141} Coeff;
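
/*
 * Annotation (summary added for readability; derived from the code below):
 * decoded DCT coefficients are stored sparsely. Each coded fragment owns a
 * singly linked list of Coeff nodes, where `index` is the (permutated)
 * position of the coefficient within the 8x8 block and `coeff` is the value
 * decoded from the bitstream. The list head for fragment i is s->coeffs[i];
 * additional nodes are drawn from the pool that starts at
 * s->coeffs + s->fragment_count (see unpack_vlcs() and
 * reverse_dc_prediction()).
 */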
142
143//FIXME split things out into their own arrays
144typedef struct Vp3Fragment {
145 Coeff *next_coeff;
146 /* address of first pixel taking into account which plane the fragment
147 * lives on as well as the plane stride */
148 int first_pixel;
149 /* this is the macroblock that the fragment belongs to */
150 uint16_t macroblock;
151 uint8_t coding_method;
152 uint8_t coeff_count;
153 int8_t motion_x;
154 int8_t motion_y;
155} Vp3Fragment;
156
157#define SB_NOT_CODED 0
158#define SB_PARTIALLY_CODED 1
159#define SB_FULLY_CODED 2
160
161#define MODE_INTER_NO_MV 0
162#define MODE_INTRA 1
163#define MODE_INTER_PLUS_MV 2
164#define MODE_INTER_LAST_MV 3
165#define MODE_INTER_PRIOR_LAST 4
166#define MODE_USING_GOLDEN 5
167#define MODE_GOLDEN_MV 6
168#define MODE_INTER_FOURMV 7
169#define CODING_MODE_COUNT 8
170
171/* special internal mode */
172#define MODE_COPY 8
173
174/* There are 6 preset schemes, plus a free-form scheme */
175static int ModeAlphabet[7][CODING_MODE_COUNT] =
176{
177 /* this is the custom scheme */
178 { 0, 0, 0, 0, 0, 0, 0, 0 },
179
180 /* scheme 1: Last motion vector dominates */
181 { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
182 MODE_INTER_PLUS_MV, MODE_INTER_NO_MV,
183 MODE_INTRA, MODE_USING_GOLDEN,
184 MODE_GOLDEN_MV, MODE_INTER_FOURMV },
185
186 /* scheme 2 */
187 { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
188 MODE_INTER_NO_MV, MODE_INTER_PLUS_MV,
189 MODE_INTRA, MODE_USING_GOLDEN,
190 MODE_GOLDEN_MV, MODE_INTER_FOURMV },
191
192 /* scheme 3 */
193 { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV,
194 MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV,
195 MODE_INTRA, MODE_USING_GOLDEN,
196 MODE_GOLDEN_MV, MODE_INTER_FOURMV },
197
198 /* scheme 4 */
199 { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV,
200 MODE_INTER_NO_MV, MODE_INTER_PRIOR_LAST,
201 MODE_INTRA, MODE_USING_GOLDEN,
202 MODE_GOLDEN_MV, MODE_INTER_FOURMV },
203
204 /* scheme 5: No motion vector dominates */
205 { MODE_INTER_NO_MV, MODE_INTER_LAST_MV,
206 MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV,
207 MODE_INTRA, MODE_USING_GOLDEN,
208 MODE_GOLDEN_MV, MODE_INTER_FOURMV },
209
210 /* scheme 6 */
211 { MODE_INTER_NO_MV, MODE_USING_GOLDEN,
212 MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
213 MODE_INTER_PLUS_MV, MODE_INTRA,
214 MODE_GOLDEN_MV, MODE_INTER_FOURMV },
215
216};
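
/*
 * Annotation: row 0 (the custom scheme) is filled in from the bitstream by
 * unpack_modes() whenever a frame signals scheme 0; rows 1..6 list the eight
 * coding modes of each preset scheme roughly in order of decreasing expected
 * frequency, which is the order the mode-code VLC indexes into.
 */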
217
218#define MIN_DEQUANT_VAL 2
219
220typedef struct Vp3DecodeContext {
221 AVCodecContext *avctx;
222 int theora, theora_tables;
223 int version;
224 int width, height;
225 AVFrame golden_frame;
226 AVFrame last_frame;
227 AVFrame current_frame;
228 int keyframe;
229 DSPContext dsp;
230 int flipped_image;
231
232 int quality_index;
233 int last_quality_index;
234
235 int superblock_count;
236 int superblock_width;
237 int superblock_height;
238 int y_superblock_width;
239 int y_superblock_height;
240 int c_superblock_width;
241 int c_superblock_height;
242 int u_superblock_start;
243 int v_superblock_start;
244 unsigned char *superblock_coding;
245
246 int macroblock_count;
247 int macroblock_width;
248 int macroblock_height;
249
250 int fragment_count;
251 int fragment_width;
252 int fragment_height;
253
254 Vp3Fragment *all_fragments;
255 Coeff *coeffs;
256 Coeff *next_coeff;
257 int u_fragment_start;
258 int v_fragment_start;
259
260 ScanTable scantable;
261
262 /* tables */
263 uint16_t coded_dc_scale_factor[64];
264 uint32_t coded_ac_scale_factor[64];
265 uint16_t coded_intra_y_dequant[64];
266 uint16_t coded_intra_c_dequant[64];
267 uint16_t coded_inter_dequant[64];
268
269 /* this is a list of indices into the all_fragments array indicating
270 * which of the fragments are coded */
271 int *coded_fragment_list;
272 int coded_fragment_list_index;
273 int pixel_addresses_inited;
274
275 VLC dc_vlc[16];
276 VLC ac_vlc_1[16];
277 VLC ac_vlc_2[16];
278 VLC ac_vlc_3[16];
279 VLC ac_vlc_4[16];
280
281 VLC superblock_run_length_vlc;
282 VLC fragment_run_length_vlc;
283 VLC mode_code_vlc;
284 VLC motion_vector_vlc;
285
286 /* these arrays need to be on 16-byte boundaries since SSE2 operations
287 * index into them */
288 DECLARE_ALIGNED_16(int16_t, intra_y_dequant[64]);
289 DECLARE_ALIGNED_16(int16_t, intra_c_dequant[64]);
290 DECLARE_ALIGNED_16(int16_t, inter_dequant[64]);
291
292 /* This table contains superblock_count * 16 entries. Each set of 16
293 * numbers corresponds to the fragment indices 0..15 of the superblock.
294 * An entry will be -1 to indicate that no fragment corresponds to that
295 * index. */
296 int *superblock_fragments;
297
298 /* This table contains superblock_count * 4 entries. Each set of 4
299 * numbers corresponds to the macroblock indices 0..3 of the superblock.
300 * An entry will be -1 to indicate that no macroblock corresponds to that
301 * index. */
302 int *superblock_macroblocks;
303
304 /* This table contains macroblock_count * 6 entries. Each set of 6
305 * numbers corresponds to the fragment indices 0..5 which comprise
306 * the macroblock (4 Y fragments and 2 C fragments). */
307 int *macroblock_fragments;
308 /* This is an array that indicates how a particular macroblock
309 * is coded. */
310 unsigned char *macroblock_coding;
311
312 int first_coded_y_fragment;
313 int first_coded_c_fragment;
314 int last_coded_y_fragment;
315 int last_coded_c_fragment;
316
317 uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc
318 uint8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16
319
320 /* Huffman decode */
321 int hti;
322 unsigned int hbits;
323 int entries;
324 int huff_code_size;
325 uint16_t huffman_table[80][32][2];
326
327 uint32_t filter_limit_values[64];
328 int bounding_values_array[256];
329} Vp3DecodeContext;
330
331static int theora_decode_tables(AVCodecContext *avctx, GetBitContext gb);
332
333/************************************************************************
334 * VP3 specific functions
335 ************************************************************************/
336
337/*
338 * This function sets up all of the various block mappings:
339 * superblocks <-> fragments, macroblocks <-> fragments,
340 * superblocks <-> macroblocks
341 *
342 * Returns 0 if successful; returns 1 if *anything* went wrong.
343 */
344static int init_block_mapping(Vp3DecodeContext *s)
345{
346 int i, j;
347 signed int hilbert_walk_y[16];
348 signed int hilbert_walk_c[16];
349 signed int hilbert_walk_mb[4];
350
351 int current_fragment = 0;
352 int current_width = 0;
353 int current_height = 0;
354 int right_edge = 0;
355 int bottom_edge = 0;
356 int superblock_row_inc = 0;
357 int *hilbert = NULL;
358 int mapping_index = 0;
359
360 int current_macroblock;
361 int c_fragment;
362
363 signed char travel_width[16] = {
364 1, 1, 0, -1,
365 0, 0, 1, 0,
366 1, 0, 1, 0,
367 0, -1, 0, 1
368 };
369
370 signed char travel_height[16] = {
371 0, 0, 1, 0,
372 1, 1, 0, -1,
373 0, 1, 0, -1,
374 -1, 0, -1, 0
375 };
376
377 signed char travel_width_mb[4] = {
378 1, 0, 1, 0
379 };
380
381 signed char travel_height_mb[4] = {
382 0, 1, 0, -1
383 };
384
385 debug_vp3(" vp3: initialize block mapping tables\n");
386
387 /* figure out hilbert pattern per these frame dimensions */
388 hilbert_walk_y[0] = 1;
389 hilbert_walk_y[1] = 1;
390 hilbert_walk_y[2] = s->fragment_width;
391 hilbert_walk_y[3] = -1;
392 hilbert_walk_y[4] = s->fragment_width;
393 hilbert_walk_y[5] = s->fragment_width;
394 hilbert_walk_y[6] = 1;
395 hilbert_walk_y[7] = -s->fragment_width;
396 hilbert_walk_y[8] = 1;
397 hilbert_walk_y[9] = s->fragment_width;
398 hilbert_walk_y[10] = 1;
399 hilbert_walk_y[11] = -s->fragment_width;
400 hilbert_walk_y[12] = -s->fragment_width;
401 hilbert_walk_y[13] = -1;
402 hilbert_walk_y[14] = -s->fragment_width;
403 hilbert_walk_y[15] = 1;
404
405 hilbert_walk_c[0] = 1;
406 hilbert_walk_c[1] = 1;
407 hilbert_walk_c[2] = s->fragment_width / 2;
408 hilbert_walk_c[3] = -1;
409 hilbert_walk_c[4] = s->fragment_width / 2;
410 hilbert_walk_c[5] = s->fragment_width / 2;
411 hilbert_walk_c[6] = 1;
412 hilbert_walk_c[7] = -s->fragment_width / 2;
413 hilbert_walk_c[8] = 1;
414 hilbert_walk_c[9] = s->fragment_width / 2;
415 hilbert_walk_c[10] = 1;
416 hilbert_walk_c[11] = -s->fragment_width / 2;
417 hilbert_walk_c[12] = -s->fragment_width / 2;
418 hilbert_walk_c[13] = -1;
419 hilbert_walk_c[14] = -s->fragment_width / 2;
420 hilbert_walk_c[15] = 1;
421
422 hilbert_walk_mb[0] = 1;
423 hilbert_walk_mb[1] = s->macroblock_width;
424 hilbert_walk_mb[2] = 1;
425 hilbert_walk_mb[3] = -s->macroblock_width;
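
    /* Annotation: with the deltas above, the 16 fragments of a full 4x4
     * superblock are visited in this order (entries are the step index j,
     * laid out on the fragment grid with x increasing rightward and y
     * increasing downward):
     *
     *     0  1 14 15
     *     3  2 13 12
     *     4  7  8 11
     *     5  6  9 10
     *
     * hilbert_walk_y/_c supply the fragment-index delta for each step, while
     * travel_width/travel_height track the (x, y) position so that steps
     * falling outside a partial superblock can be recorded as -1 below. */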
426
427 /* iterate through each superblock (all planes) and map the fragments */
428 for (i = 0; i < s->superblock_count; i++) {
429 debug_init(" superblock %d (u starts @ %d, v starts @ %d)\n",
430 i, s->u_superblock_start, s->v_superblock_start);
431
432 /* time to re-assign the limits? */
433 if (i == 0) {
434
435 /* start of Y superblocks */
436 right_edge = s->fragment_width;
437 bottom_edge = s->fragment_height;
438 current_width = -1;
439 current_height = 0;
440 superblock_row_inc = 3 * s->fragment_width -
441 (s->y_superblock_width * 4 - s->fragment_width);
442 hilbert = hilbert_walk_y;
443
444 /* the first operation for this variable is to advance by 1 */
445 current_fragment = -1;
446
447 } else if (i == s->u_superblock_start) {
448
449 /* start of U superblocks */
450 right_edge = s->fragment_width / 2;
451 bottom_edge = s->fragment_height / 2;
452 current_width = -1;
453 current_height = 0;
454 superblock_row_inc = 3 * (s->fragment_width / 2) -
455 (s->c_superblock_width * 4 - s->fragment_width / 2);
456 hilbert = hilbert_walk_c;
457
458 /* the first operation for this variable is to advance by 1 */
459 current_fragment = s->u_fragment_start - 1;
460
461 } else if (i == s->v_superblock_start) {
462
463 /* start of V superblocks */
464 right_edge = s->fragment_width / 2;
465 bottom_edge = s->fragment_height / 2;
466 current_width = -1;
467 current_height = 0;
468 superblock_row_inc = 3 * (s->fragment_width / 2) -
469 (s->c_superblock_width * 4 - s->fragment_width / 2);
470 hilbert = hilbert_walk_c;
471
472 /* the first operation for this variable is to advance by 1 */
473 current_fragment = s->v_fragment_start - 1;
474
475 }
476
477 if (current_width >= right_edge - 1) {
478 /* reset width and move to next superblock row */
479 current_width = -1;
480 current_height += 4;
481
482 /* fragment is now at the start of a new superblock row */
483 current_fragment += superblock_row_inc;
484 }
485
486 /* iterate through all 16 fragments in a superblock */
487 for (j = 0; j < 16; j++) {
488 current_fragment += hilbert[j];
489 current_width += travel_width[j];
490 current_height += travel_height[j];
491
492 /* check if the fragment is in bounds */
493 if ((current_width < right_edge) &&
494 (current_height < bottom_edge)) {
495 s->superblock_fragments[mapping_index] = current_fragment;
496 debug_init(" mapping fragment %d to superblock %d, position %d (%d/%d x %d/%d)\n",
497 s->superblock_fragments[mapping_index], i, j,
498 current_width, right_edge, current_height, bottom_edge);
499 } else {
500 s->superblock_fragments[mapping_index] = -1;
501 debug_init(" superblock %d, position %d has no fragment (%d/%d x %d/%d)\n",
502 i, j,
503 current_width, right_edge, current_height, bottom_edge);
504 }
505
506 mapping_index++;
507 }
508 }
509
510 /* initialize the superblock <-> macroblock mapping; iterate through
511 * all of the Y plane superblocks to build this mapping */
512 right_edge = s->macroblock_width;
513 bottom_edge = s->macroblock_height;
514 current_width = -1;
515 current_height = 0;
516 superblock_row_inc = s->macroblock_width -
517 (s->y_superblock_width * 2 - s->macroblock_width);
518 hilbert = hilbert_walk_mb;
519 mapping_index = 0;
520 current_macroblock = -1;
521 for (i = 0; i < s->u_superblock_start; i++) {
522
523 if (current_width >= right_edge - 1) {
524 /* reset width and move to next superblock row */
525 current_width = -1;
526 current_height += 2;
527
528 /* macroblock is now at the start of a new superblock row */
529 current_macroblock += superblock_row_inc;
530 }
531
532 /* iterate through each potential macroblock in the superblock */
533 for (j = 0; j < 4; j++) {
534 current_macroblock += hilbert_walk_mb[j];
535 current_width += travel_width_mb[j];
536 current_height += travel_height_mb[j];
537
538 /* check if the macroblock is in bounds */
539 if ((current_width < right_edge) &&
540 (current_height < bottom_edge)) {
541 s->superblock_macroblocks[mapping_index] = current_macroblock;
542 debug_init(" mapping macroblock %d to superblock %d, position %d (%d/%d x %d/%d)\n",
543 s->superblock_macroblocks[mapping_index], i, j,
544 current_width, right_edge, current_height, bottom_edge);
545 } else {
546 s->superblock_macroblocks[mapping_index] = -1;
547 debug_init(" superblock %d, position %d has no macroblock (%d/%d x %d/%d)\n",
548 i, j,
549 current_width, right_edge, current_height, bottom_edge);
550 }
551
552 mapping_index++;
553 }
554 }
555
556 /* initialize the macroblock <-> fragment mapping */
557 current_fragment = 0;
558 current_macroblock = 0;
559 mapping_index = 0;
560 for (i = 0; i < s->fragment_height; i += 2) {
561
562 for (j = 0; j < s->fragment_width; j += 2) {
563
564 debug_init(" macroblock %d contains fragments: ", current_macroblock);
565 s->all_fragments[current_fragment].macroblock = current_macroblock;
566 s->macroblock_fragments[mapping_index++] = current_fragment;
567 debug_init("%d ", current_fragment);
568
569 if (j + 1 < s->fragment_width) {
570 s->all_fragments[current_fragment + 1].macroblock = current_macroblock;
571 s->macroblock_fragments[mapping_index++] = current_fragment + 1;
572 debug_init("%d ", current_fragment + 1);
573 } else
574 s->macroblock_fragments[mapping_index++] = -1;
575
576 if (i + 1 < s->fragment_height) {
577 s->all_fragments[current_fragment + s->fragment_width].macroblock =
578 current_macroblock;
579 s->macroblock_fragments[mapping_index++] =
580 current_fragment + s->fragment_width;
581 debug_init("%d ", current_fragment + s->fragment_width);
582 } else
583 s->macroblock_fragments[mapping_index++] = -1;
584
585 if ((j + 1 < s->fragment_width) && (i + 1 < s->fragment_height)) {
586 s->all_fragments[current_fragment + s->fragment_width + 1].macroblock =
587 current_macroblock;
588 s->macroblock_fragments[mapping_index++] =
589 current_fragment + s->fragment_width + 1;
590 debug_init("%d ", current_fragment + s->fragment_width + 1);
591 } else
592 s->macroblock_fragments[mapping_index++] = -1;
593
594 /* C planes */
595 c_fragment = s->u_fragment_start +
596 (i * s->fragment_width / 4) + (j / 2);
597 s->all_fragments[c_fragment].macroblock = s->macroblock_count;
598 s->macroblock_fragments[mapping_index++] = c_fragment;
599 debug_init("%d ", c_fragment);
600
601 c_fragment = s->v_fragment_start +
602 (i * s->fragment_width / 4) + (j / 2);
603 s->all_fragments[c_fragment].macroblock = s->macroblock_count;
604 s->macroblock_fragments[mapping_index++] = c_fragment;
605 debug_init("%d ", c_fragment);
606
607 debug_init("\n");
608
609 if (j + 2 <= s->fragment_width)
610 current_fragment += 2;
611 else
612 current_fragment++;
613 current_macroblock++;
614 }
615
616 current_fragment += s->fragment_width;
617 }
618
619 return 0; /* successful path out */
620}
621
622/*
623 * This function wipes out all of the fragment data.
624 */
625static void init_frame(Vp3DecodeContext *s, GetBitContext *gb)
626{
627 int i;
628
629 /* zero out all of the fragment information */
630 s->coded_fragment_list_index = 0;
631 for (i = 0; i < s->fragment_count; i++) {
632 s->all_fragments[i].coeff_count = 0;
633 s->all_fragments[i].motion_x = 127;
634 s->all_fragments[i].motion_y = 127;
635 s->all_fragments[i].next_coeff= NULL;
636 s->coeffs[i].index=
637 s->coeffs[i].coeff=0;
638 s->coeffs[i].next= NULL;
639 }
640}
641
642/*
643 * This function sets up the dequantization tables used for a particular
644 * frame.
645 */
646static void init_dequantizer(Vp3DecodeContext *s)
647{
648
649 int ac_scale_factor = s->coded_ac_scale_factor[s->quality_index];
650 int dc_scale_factor = s->coded_dc_scale_factor[s->quality_index];
651 int i, j;
652
653 debug_vp3(" vp3: initializing dequantization tables\n");
654
655 /*
656 * Scale dequantizers:
657 *
658 * quantizer * sf
659 * --------------
660 * 100
661 *
662 * where sf = dc_scale_factor for DC quantizer
663 * or ac_scale_factor for AC quantizer
664 *
665 * Then, saturate the result to a lower limit of MIN_DEQUANT_VAL.
666 */
667#define SCALER 4
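
/* Annotation (worked example with arbitrary values): a coded DC quantizer of
 * 30 with a dc_scale_factor of 50 scales to 30 * 50 / 100 = 15; that is above
 * the MIN_DEQUANT_VAL * 2 floor of 4, so no clamping occurs, and the stored
 * table entry becomes 15 * SCALER = 60. */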
668
669 /* scale DC quantizers */
670 s->intra_y_dequant[0] = s->coded_intra_y_dequant[0] * dc_scale_factor / 100;
671 if (s->intra_y_dequant[0] < MIN_DEQUANT_VAL * 2)
672 s->intra_y_dequant[0] = MIN_DEQUANT_VAL * 2;
673 s->intra_y_dequant[0] *= SCALER;
674
675 s->intra_c_dequant[0] = s->coded_intra_c_dequant[0] * dc_scale_factor / 100;
676 if (s->intra_c_dequant[0] < MIN_DEQUANT_VAL * 2)
677 s->intra_c_dequant[0] = MIN_DEQUANT_VAL * 2;
678 s->intra_c_dequant[0] *= SCALER;
679
680 s->inter_dequant[0] = s->coded_inter_dequant[0] * dc_scale_factor / 100;
681 if (s->inter_dequant[0] < MIN_DEQUANT_VAL * 4)
682 s->inter_dequant[0] = MIN_DEQUANT_VAL * 4;
683 s->inter_dequant[0] *= SCALER;
684
685 /* scale AC quantizers, zigzag at the same time in preparation for
686 * the dequantization phase */
687 for (i = 1; i < 64; i++) {
688 int k= s->scantable.scantable[i];
689 j = s->scantable.permutated[i];
690
691 s->intra_y_dequant[j] = s->coded_intra_y_dequant[k] * ac_scale_factor / 100;
692 if (s->intra_y_dequant[j] < MIN_DEQUANT_VAL)
693 s->intra_y_dequant[j] = MIN_DEQUANT_VAL;
694 s->intra_y_dequant[j] *= SCALER;
695
696 s->intra_c_dequant[j] = s->coded_intra_c_dequant[k] * ac_scale_factor / 100;
697 if (s->intra_c_dequant[j] < MIN_DEQUANT_VAL)
698 s->intra_c_dequant[j] = MIN_DEQUANT_VAL;
699 s->intra_c_dequant[j] *= SCALER;
700
701 s->inter_dequant[j] = s->coded_inter_dequant[k] * ac_scale_factor / 100;
702 if (s->inter_dequant[j] < MIN_DEQUANT_VAL * 2)
703 s->inter_dequant[j] = MIN_DEQUANT_VAL * 2;
704 s->inter_dequant[j] *= SCALER;
705 }
706
707 memset(s->qscale_table, (FFMAX(s->intra_y_dequant[1], s->intra_c_dequant[1])+8)/16, 512); //FIXME finetune
708
709 /* print debug information as requested */
710 debug_dequantizers("intra Y dequantizers:\n");
711 for (i = 0; i < 8; i++) {
712 for (j = i * 8; j < i * 8 + 8; j++) {
713 debug_dequantizers(" %4d,", s->intra_y_dequant[j]);
714 }
715 debug_dequantizers("\n");
716 }
717 debug_dequantizers("\n");
718
719 debug_dequantizers("intra C dequantizers:\n");
720 for (i = 0; i < 8; i++) {
721 for (j = i * 8; j < i * 8 + 8; j++) {
722 debug_dequantizers(" %4d,", s->intra_c_dequant[j]);
723 }
724 debug_dequantizers("\n");
725 }
726 debug_dequantizers("\n");
727
728 debug_dequantizers("interframe dequantizers:\n");
729 for (i = 0; i < 8; i++) {
730 for (j = i * 8; j < i * 8 + 8; j++) {
731 debug_dequantizers(" %4d,", s->inter_dequant[j]);
732 }
733 debug_dequantizers("\n");
734 }
735 debug_dequantizers("\n");
736}
737
738/*
739 * This function initializes the loop filter boundary limits if the frame's
740 * quality index is different from the previous frame's.
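 *
 * Annotation: for a filter limit L the table built here is a symmetric ramp
 * around zero -- bounding_values[d] = d for |d| <= L, then the magnitude
 * falls back linearly, reaching 0 at |d| = 2*L and staying 0 beyond. For
 * example, with L = 2 the entries for d = -3..3 are -1 -2 -1 0 1 2 1.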
741 */
742static void init_loop_filter(Vp3DecodeContext *s)
743{
744 int *bounding_values= s->bounding_values_array+127;
745 int filter_limit;
746 int x;
747
748 filter_limit = s->filter_limit_values[s->quality_index];
749
750 /* set up the bounding values */
751 memset(s->bounding_values_array, 0, 256 * sizeof(int));
752 for (x = 0; x < filter_limit; x++) {
753 bounding_values[-x - filter_limit] = -filter_limit + x;
754 bounding_values[-x] = -x;
755 bounding_values[x] = x;
756 bounding_values[x + filter_limit] = filter_limit - x;
757 }
758}
759
760/*
761 * This function unpacks all of the superblock/macroblock/fragment coding
762 * information from the bitstream.
763 */
764static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
765{
766 int bit = 0;
767 int current_superblock = 0;
768 int current_run = 0;
769 int decode_fully_flags = 0;
770 int decode_partial_blocks = 0;
771 int first_c_fragment_seen;
772
773 int i, j;
774 int current_fragment;
775
776 debug_vp3(" vp3: unpacking superblock coding\n");
777
778 if (s->keyframe) {
779
780 debug_vp3(" keyframe-- all superblocks are fully coded\n");
781 memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
782
783 } else {
784
785 /* unpack the list of partially-coded superblocks */
786 bit = get_bits(gb, 1);
787 /* toggle the bit because as soon as the first run length is
788 * fetched the bit will be toggled again */
789 bit ^= 1;
790 while (current_superblock < s->superblock_count) {
791 if (current_run-- == 0) {
792 bit ^= 1;
793 current_run = get_vlc2(gb,
794 s->superblock_run_length_vlc.table, 6, 2);
795 if (current_run == 33)
796 current_run += get_bits(gb, 12);
797 debug_block_coding(" setting superblocks %d..%d to %s\n",
798 current_superblock,
799 current_superblock + current_run - 1,
800 (bit) ? "partially coded" : "not coded");
801
802 /* if any of the superblocks are not partially coded, flag
803 * a boolean to decode the list of fully-coded superblocks */
804 if (bit == 0) {
805 decode_fully_flags = 1;
806 } else {
807
808 /* make a note of the fact that there are partially coded
809 * superblocks */
810 decode_partial_blocks = 1;
811 }
812 }
813 s->superblock_coding[current_superblock++] = bit;
814 }
815
816 /* unpack the list of fully coded superblocks if any of the blocks were
817 * not marked as partially coded in the previous step */
818 if (decode_fully_flags) {
819
820 current_superblock = 0;
821 current_run = 0;
822 bit = get_bits(gb, 1);
823 /* toggle the bit because as soon as the first run length is
824 * fetched the bit will be toggled again */
825 bit ^= 1;
826 while (current_superblock < s->superblock_count) {
827
828 /* skip any superblocks already marked as partially coded */
829 if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
830
831 if (current_run-- == 0) {
832 bit ^= 1;
833 current_run = get_vlc2(gb,
834 s->superblock_run_length_vlc.table, 6, 2);
835 if (current_run == 33)
836 current_run += get_bits(gb, 12);
837 }
838
839 debug_block_coding(" setting superblock %d to %s\n",
840 current_superblock,
841 (bit) ? "fully coded" : "not coded");
842 s->superblock_coding[current_superblock] = 2*bit;
843 }
844 current_superblock++;
845 }
846 }
847
848 /* if there were partial blocks, initialize bitstream for
849 * unpacking fragment codings */
850 if (decode_partial_blocks) {
851
852 current_run = 0;
853 bit = get_bits(gb, 1);
854 /* toggle the bit because as soon as the first run length is
855 * fetched the bit will be toggled again */
856 bit ^= 1;
857 }
858 }
859
860 /* figure out which fragments are coded; iterate through each
861 * superblock (all planes) */
862 s->coded_fragment_list_index = 0;
863 s->next_coeff= s->coeffs + s->fragment_count;
864 s->first_coded_y_fragment = s->first_coded_c_fragment = 0;
865 s->last_coded_y_fragment = s->last_coded_c_fragment = -1;
866 first_c_fragment_seen = 0;
867 memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
868 for (i = 0; i < s->superblock_count; i++) {
869
870 /* iterate through all 16 fragments in a superblock */
871 for (j = 0; j < 16; j++) {
872
873 /* if the fragment is in bounds, check its coding status */
874 current_fragment = s->superblock_fragments[i * 16 + j];
875 if (current_fragment >= s->fragment_count) {
876 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_superblocks(): bad fragment number (%d >= %d)\n",
877 current_fragment, s->fragment_count);
878 return 1;
879 }
880 if (current_fragment != -1) {
881 if (s->superblock_coding[i] == SB_NOT_CODED) {
882
883 /* copy all the fragments from the prior frame */
884 s->all_fragments[current_fragment].coding_method =
885 MODE_COPY;
886
887 } else if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {
888
889 /* fragment may or may not be coded; this is the case
890 * that cares about the fragment coding runs */
891 if (current_run-- == 0) {
892 bit ^= 1;
893 current_run = get_vlc2(gb,
894 s->fragment_run_length_vlc.table, 5, 2);
895 }
896
897 if (bit) {
898 /* default mode; actual mode will be decoded in
899 * the next phase */
900 s->all_fragments[current_fragment].coding_method =
901 MODE_INTER_NO_MV;
902 s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment;
903 s->coded_fragment_list[s->coded_fragment_list_index] =
904 current_fragment;
905 if ((current_fragment >= s->u_fragment_start) &&
906 (s->last_coded_y_fragment == -1) &&
907 (!first_c_fragment_seen)) {
908 s->first_coded_c_fragment = s->coded_fragment_list_index;
909 s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
910 first_c_fragment_seen = 1;
911 }
912 s->coded_fragment_list_index++;
913 s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV;
914 debug_block_coding(" superblock %d is partially coded, fragment %d is coded\n",
915 i, current_fragment);
916 } else {
917 /* not coded; copy this fragment from the prior frame */
918 s->all_fragments[current_fragment].coding_method =
919 MODE_COPY;
920 debug_block_coding(" superblock %d is partially coded, fragment %d is not coded\n",
921 i, current_fragment);
922 }
923
924 } else {
925
926 /* fragments are fully coded in this superblock; actual
927 * coding will be determined in next step */
928 s->all_fragments[current_fragment].coding_method =
929 MODE_INTER_NO_MV;
930 s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment;
931 s->coded_fragment_list[s->coded_fragment_list_index] =
932 current_fragment;
933 if ((current_fragment >= s->u_fragment_start) &&
934 (s->last_coded_y_fragment == -1) &&
935 (!first_c_fragment_seen)) {
936 s->first_coded_c_fragment = s->coded_fragment_list_index;
937 s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
938 first_c_fragment_seen = 1;
939 }
940 s->coded_fragment_list_index++;
941 s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV;
942 debug_block_coding(" superblock %d is fully coded, fragment %d is coded\n",
943 i, current_fragment);
944 }
945 }
946 }
947 }
948
949 if (!first_c_fragment_seen)
950 /* only Y fragments coded in this frame */
951 s->last_coded_y_fragment = s->coded_fragment_list_index - 1;
952 else
953 /* end the list of coded C fragments */
954 s->last_coded_c_fragment = s->coded_fragment_list_index - 1;
955
956 debug_block_coding(" %d total coded fragments, y: %d -> %d, c: %d -> %d\n",
957 s->coded_fragment_list_index,
958 s->first_coded_y_fragment,
959 s->last_coded_y_fragment,
960 s->first_coded_c_fragment,
961 s->last_coded_c_fragment);
962
963 return 0;
964}
965
966/*
967 * This function unpacks all the coding mode data for individual macroblocks
968 * from the bitstream.
969 */
970static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
971{
972 int i, j, k;
973 int scheme;
974 int current_macroblock;
975 int current_fragment;
976 int coding_mode;
977
978 debug_vp3(" vp3: unpacking encoding modes\n");
979
980 if (s->keyframe) {
981 debug_vp3(" keyframe-- all blocks are coded as INTRA\n");
982
983 for (i = 0; i < s->fragment_count; i++)
984 s->all_fragments[i].coding_method = MODE_INTRA;
985
986 } else {
987
988 /* fetch the mode coding scheme for this frame */
989 scheme = get_bits(gb, 3);
990 debug_modes(" using mode alphabet %d\n", scheme);
991
992 /* is it a custom coding scheme? */
993 if (scheme == 0) {
994 debug_modes(" custom mode alphabet ahead:\n");
995 for (i = 0; i < 8; i++)
996 ModeAlphabet[scheme][get_bits(gb, 3)] = i;
997 }
998
999 for (i = 0; i < 8; i++)
1000 debug_modes(" mode[%d][%d] = %d\n", scheme, i,
1001 ModeAlphabet[scheme][i]);
1002
1003 /* iterate through all of the macroblocks that contain 1 or more
1004 * coded fragments */
1005 for (i = 0; i < s->u_superblock_start; i++) {
1006
1007 for (j = 0; j < 4; j++) {
1008 current_macroblock = s->superblock_macroblocks[i * 4 + j];
1009 if ((current_macroblock == -1) ||
1010 (s->macroblock_coding[current_macroblock] == MODE_COPY))
1011 continue;
1012 if (current_macroblock >= s->macroblock_count) {
1013 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad macroblock number (%d >= %d)\n",
1014 current_macroblock, s->macroblock_count);
1015 return 1;
1016 }
1017
1018 /* mode 7 means get 3 bits for each coding mode */
1019 if (scheme == 7)
1020 coding_mode = get_bits(gb, 3);
1021 else
1022 coding_mode = ModeAlphabet[scheme]
1023 [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
1024
1025 s->macroblock_coding[current_macroblock] = coding_mode;
1026 for (k = 0; k < 6; k++) {
1027 current_fragment =
1028 s->macroblock_fragments[current_macroblock * 6 + k];
1029 if (current_fragment == -1)
1030 continue;
1031 if (current_fragment >= s->fragment_count) {
1032 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad fragment number (%d >= %d)\n",
1033 current_fragment, s->fragment_count);
1034 return 1;
1035 }
1036 if (s->all_fragments[current_fragment].coding_method !=
1037 MODE_COPY)
1038 s->all_fragments[current_fragment].coding_method =
1039 coding_mode;
1040 }
1041
1042 debug_modes(" coding method for macroblock starting @ fragment %d = %d\n",
1043 s->macroblock_fragments[current_macroblock * 6], coding_mode);
1044 }
1045 }
1046 }
1047
1048 return 0;
1049}
1050
1051/*
1052 * This function unpacks all the motion vectors for the individual
1053 * macroblocks from the bitstream.
1054 */
1055static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
1056{
1057 int i, j, k;
1058 int coding_mode;
1059 int motion_x[6];
1060 int motion_y[6];
1061 int last_motion_x = 0;
1062 int last_motion_y = 0;
1063 int prior_last_motion_x = 0;
1064 int prior_last_motion_y = 0;
1065 int current_macroblock;
1066 int current_fragment;
1067
1068 debug_vp3(" vp3: unpacking motion vectors\n");
1069 if (s->keyframe) {
1070
1071 debug_vp3(" keyframe-- there are no motion vectors\n");
1072
1073 } else {
1074
1075 memset(motion_x, 0, 6 * sizeof(int));
1076 memset(motion_y, 0, 6 * sizeof(int));
1077
1078 /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
1079 coding_mode = get_bits(gb, 1);
1080 debug_vectors(" using %s scheme for unpacking motion vectors\n",
1081 (coding_mode == 0) ? "VLC" : "fixed-length");
1082
1083 /* iterate through all of the macroblocks that contain 1 or more
1084 * coded fragments */
1085 for (i = 0; i < s->u_superblock_start; i++) {
1086
1087 for (j = 0; j < 4; j++) {
1088 current_macroblock = s->superblock_macroblocks[i * 4 + j];
1089 if ((current_macroblock == -1) ||
1090 (s->macroblock_coding[current_macroblock] == MODE_COPY))
1091 continue;
1092 if (current_macroblock >= s->macroblock_count) {
1093 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad macroblock number (%d >= %d)\n",
1094 current_macroblock, s->macroblock_count);
1095 return 1;
1096 }
1097
1098 current_fragment = s->macroblock_fragments[current_macroblock * 6];
1099 if (current_fragment >= s->fragment_count) {
1100 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d)\n",
1101 current_fragment, s->fragment_count);
1102 return 1;
1103 }
1104 switch (s->macroblock_coding[current_macroblock]) {
1105
1106 case MODE_INTER_PLUS_MV:
1107 case MODE_GOLDEN_MV:
1108 /* all 6 fragments use the same motion vector */
1109 if (coding_mode == 0) {
1110 motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
1111 motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
1112 } else {
1113 motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
1114 motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
1115 }
1116
1117 for (k = 1; k < 6; k++) {
1118 motion_x[k] = motion_x[0];
1119 motion_y[k] = motion_y[0];
1120 }
1121
1122 /* vector maintenance, only on MODE_INTER_PLUS_MV */
1123 if (s->macroblock_coding[current_macroblock] ==
1124 MODE_INTER_PLUS_MV) {
1125 prior_last_motion_x = last_motion_x;
1126 prior_last_motion_y = last_motion_y;
1127 last_motion_x = motion_x[0];
1128 last_motion_y = motion_y[0];
1129 }
1130 break;
1131
1132 case MODE_INTER_FOURMV:
1133 /* fetch 4 vectors from the bitstream, one for each
1134 * Y fragment, then average for the C fragment vectors */
1135 motion_x[4] = motion_y[4] = 0;
1136 for (k = 0; k < 4; k++) {
1137 if (coding_mode == 0) {
1138 motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
1139 motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
1140 } else {
1141 motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
1142 motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
1143 }
1144 motion_x[4] += motion_x[k];
1145 motion_y[4] += motion_y[k];
1146 }
1147
1148 if (motion_x[4] >= 0)
1149 motion_x[4] = (motion_x[4] + 2) / 4;
1150 else
1151 motion_x[4] = (motion_x[4] - 2) / 4;
1152 motion_x[5] = motion_x[4];
1153
1154 if (motion_y[4] >= 0)
1155 motion_y[4] = (motion_y[4] + 2) / 4;
1156 else
1157 motion_y[4] = (motion_y[4] - 2) / 4;
1158 motion_y[5] = motion_y[4];
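
                /* Annotation: the (sum +/- 2) / 4 above averages the four
                 * Y-plane vectors to the nearest integer, rounding halves
                 * away from zero; e.g. Y x-components {1, 2, 2, 1} sum to 6
                 * and give a chroma x-component of (6 + 2) / 4 = 2. */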
1159
1160 /* vector maintenance; vector[3] is treated as the
1161 * last vector in this case */
1162 prior_last_motion_x = last_motion_x;
1163 prior_last_motion_y = last_motion_y;
1164 last_motion_x = motion_x[3];
1165 last_motion_y = motion_y[3];
1166 break;
1167
1168 case MODE_INTER_LAST_MV:
1169 /* all 6 fragments use the last motion vector */
1170 motion_x[0] = last_motion_x;
1171 motion_y[0] = last_motion_y;
1172 for (k = 1; k < 6; k++) {
1173 motion_x[k] = motion_x[0];
1174 motion_y[k] = motion_y[0];
1175 }
1176
1177 /* no vector maintenance (last vector remains the
1178 * last vector) */
1179 break;
1180
1181 case MODE_INTER_PRIOR_LAST:
1182 /* all 6 fragments use the motion vector prior to the
1183 * last motion vector */
1184 motion_x[0] = prior_last_motion_x;
1185 motion_y[0] = prior_last_motion_y;
1186 for (k = 1; k < 6; k++) {
1187 motion_x[k] = motion_x[0];
1188 motion_y[k] = motion_y[0];
1189 }
1190
1191 /* vector maintenance */
1192 prior_last_motion_x = last_motion_x;
1193 prior_last_motion_y = last_motion_y;
1194 last_motion_x = motion_x[0];
1195 last_motion_y = motion_y[0];
1196 break;
1197
1198 default:
1199 /* covers intra, inter without MV, golden without MV */
1200 memset(motion_x, 0, 6 * sizeof(int));
1201 memset(motion_y, 0, 6 * sizeof(int));
1202
1203 /* no vector maintenance */
1204 break;
1205 }
1206
1207 /* assign the motion vectors to the correct fragments */
1208 debug_vectors(" vectors for macroblock starting @ fragment %d (coding method %d):\n",
1209 current_fragment,
1210 s->macroblock_coding[current_macroblock]);
1211 for (k = 0; k < 6; k++) {
1212 current_fragment =
1213 s->macroblock_fragments[current_macroblock * 6 + k];
1214 if (current_fragment == -1)
1215 continue;
1216 if (current_fragment >= s->fragment_count) {
1217 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d)\n",
1218 current_fragment, s->fragment_count);
1219 return 1;
1220 }
1221 s->all_fragments[current_fragment].motion_x = motion_x[k];
1222 s->all_fragments[current_fragment].motion_y = motion_y[k];
1223 debug_vectors(" vector %d: fragment %d = (%d, %d)\n",
1224 k, current_fragment, motion_x[k], motion_y[k]);
1225 }
1226 }
1227 }
1228 }
1229
1230 return 0;
1231}
1232
1233/*
1234 * This function is called by unpack_dct_coeffs() to extract the VLCs from
1235 * the bitstream. The VLCs encode tokens which are used to unpack DCT
1236 * data. This function unpacks all the VLCs for either the Y plane or both
1237 * C planes, and is called for DC coefficients or different AC coefficient
1238 * levels (since different coefficient types require different VLC tables).
1239 *
1240 * This function returns a residual eob run. E.g., if a particular token gave
1241 * instructions to EOB the next 5 fragments and there were only 2 fragments
1242 * left in the current fragment range, 3 would be returned so that it could
1243 * be passed into the next call to this same function.
1244 */
1245static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
1246 VLC *table, int coeff_index,
1247 int first_fragment, int last_fragment,
1248 int eob_run)
1249{
1250 int i;
1251 int token;
1252 int zero_run = 0;
1253 DCTELEM coeff = 0;
1254 Vp3Fragment *fragment;
1255 uint8_t *perm= s->scantable.permutated;
1256 int bits_to_get;
1257
1258 if ((first_fragment >= s->fragment_count) ||
1259 (last_fragment >= s->fragment_count)) {
1260
1261 av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vlcs(): bad fragment number (%d -> %d ?)\n",
1262 first_fragment, last_fragment);
1263 return 0;
1264 }
1265
1266 for (i = first_fragment; i <= last_fragment; i++) {
1267
1268 fragment = &s->all_fragments[s->coded_fragment_list[i]];
1269 if (fragment->coeff_count > coeff_index)
1270 continue;
1271
1272 if (!eob_run) {
1273 /* decode a VLC into a token */
1274 token = get_vlc2(gb, table->table, 5, 3);
1275 debug_vlc(" token = %2d, ", token);
1276 /* use the token to get a zero run, a coefficient, and an eob run */
1277 if (token <= 6) {
1278 eob_run = eob_run_base[token];
1279 if (eob_run_get_bits[token])
1280 eob_run += get_bits(gb, eob_run_get_bits[token]);
1281 coeff = zero_run = 0;
1282 } else {
1283 bits_to_get = coeff_get_bits[token];
1284 if (!bits_to_get)
1285 coeff = coeff_tables[token][0];
1286 else
1287 coeff = coeff_tables[token][get_bits(gb, bits_to_get)];
1288
1289 zero_run = zero_run_base[token];
1290 if (zero_run_get_bits[token])
1291 zero_run += get_bits(gb, zero_run_get_bits[token]);
1292 }
1293 }
1294
1295 if (!eob_run) {
1296 fragment->coeff_count += zero_run;
1297 if (fragment->coeff_count < 64){
1298 fragment->next_coeff->coeff= coeff;
1299 fragment->next_coeff->index= perm[fragment->coeff_count++]; //FIXME perm here already?
1300 fragment->next_coeff->next= s->next_coeff;
1301 s->next_coeff->next=NULL;
1302 fragment->next_coeff= s->next_coeff++;
1303 }
1304 debug_vlc(" fragment %d coeff = %d\n",
1305 s->coded_fragment_list[i], fragment->next_coeff[coeff_index]);
1306 } else {
1307 fragment->coeff_count |= 128;
1308 debug_vlc(" fragment %d eob with %d coefficients\n",
1309 s->coded_fragment_list[i], fragment->coeff_count&127);
1310 eob_run--;
1311 }
1312 }
1313
1314 return eob_run;
1315}
1316
1317/*
1318 * This function unpacks all of the DCT coefficient data from the
1319 * bitstream.
1320 */
1321static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
1322{
1323 int i;
1324 int dc_y_table;
1325 int dc_c_table;
1326 int ac_y_table;
1327 int ac_c_table;
1328 int residual_eob_run = 0;
1329
1330 /* fetch the DC table indices */
1331 dc_y_table = get_bits(gb, 4);
1332 dc_c_table = get_bits(gb, 4);
1333
1334 /* unpack the Y plane DC coefficients */
1335 debug_vp3(" vp3: unpacking Y plane DC coefficients using table %d\n",
1336 dc_y_table);
1337 residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
1338 s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1339
1340 /* unpack the C plane DC coefficients */
1341 debug_vp3(" vp3: unpacking C plane DC coefficients using table %d\n",
1342 dc_c_table);
1343 residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
1344 s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1345
1346 /* fetch the AC table indices */
1347 ac_y_table = get_bits(gb, 4);
1348 ac_c_table = get_bits(gb, 4);
1349
1350 /* unpack the group 1 AC coefficients (coeffs 1-5) */
1351 for (i = 1; i <= 5; i++) {
1352
1353 debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
1354 i, ac_y_table);
1355 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_y_table], i,
1356 s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1357
1358 debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
1359 i, ac_c_table);
1360 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_c_table], i,
1361 s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1362 }
1363
1364 /* unpack the group 2 AC coefficients (coeffs 6-14) */
1365 for (i = 6; i <= 14; i++) {
1366
1367 debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
1368 i, ac_y_table);
1369 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_y_table], i,
1370 s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1371
1372 debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
1373 i, ac_c_table);
1374 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_c_table], i,
1375 s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1376 }
1377
1378 /* unpack the group 3 AC coefficients (coeffs 15-27) */
1379 for (i = 15; i <= 27; i++) {
1380
1381 debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
1382 i, ac_y_table);
1383 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_y_table], i,
1384 s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1385
1386 debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
1387 i, ac_c_table);
1388 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_c_table], i,
1389 s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1390 }
1391
1392 /* unpack the group 4 AC coefficients (coeffs 28-63) */
1393 for (i = 28; i <= 63; i++) {
1394
1395 debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
1396 i, ac_y_table);
1397 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_y_table], i,
1398 s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
1399
1400 debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
1401 i, ac_c_table);
1402 residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_c_table], i,
1403 s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
1404 }
1405
1406 return 0;
1407}
1408
1409/*
1410 * This function reverses the DC prediction for each coded fragment in
1411 * the frame. Much of this function is adapted directly from the original
1412 * VP3 source code.
1413 */
1414#define COMPATIBLE_FRAME(x) \
1415 (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1416#define FRAME_CODED(x) (s->all_fragments[x].coding_method != MODE_COPY)
1417#define DC_COEFF(u) (s->coeffs[u].index ? 0 : s->coeffs[u].coeff) //FIXME do something to simplify this
1418static inline int iabs (int x) { return ((x < 0) ? -x : x); }
1419
1420static void reverse_dc_prediction(Vp3DecodeContext *s,
1421 int first_fragment,
1422 int fragment_width,
1423 int fragment_height)
1424{
1425
1426#define PUL 8
1427#define PU 4
1428#define PUR 2
1429#define PL 1
1430
1431 int x, y;
1432 int i = first_fragment;
1433
1434 /*
1435 * Fragment prediction groups:
1436 *
1437 * 32222222226
1438 * 10000000004
1439 * 10000000004
1440 * 10000000004
1441 * 10000000004
1442 *
1443 * Note: Groups 5 and 7 do not exist as it would mean that the
1444 * fragment's x coordinate is both 0 and (width - 1) at the same time.
1445 */
1446 int predictor_group;
1447 short predicted_dc;
1448
1449 /* validity flags for the left, up-left, up, and up-right fragments */
1450 int fl, ful, fu, fur;
1451
1452 /* DC values for the left, up-left, up, and up-right fragments */
1453 int vl, vul, vu, vur;
1454
1455 /* indices for the left, up-left, up, and up-right fragments */
1456 int l, ul, u, ur;
1457
1458 /*
1459 * The 6 fields mean:
1460 * 0: up-left multiplier
1461 * 1: up multiplier
1462 * 2: up-right multiplier
1463 * 3: left multiplier
1464 * 4: mask
1465 * 5: right bit shift divisor (e.g., 7 means >>=7, a.k.a. div by 128)
1466 */
1467 int predictor_transform[16][6] = {
1468 { 0, 0, 0, 0, 0, 0 },
1469 { 0, 0, 0, 1, 0, 0 }, // PL
1470 { 0, 0, 1, 0, 0, 0 }, // PUR
1471 { 0, 0, 53, 75, 127, 7 }, // PUR|PL
1472 { 0, 1, 0, 0, 0, 0 }, // PU
1473 { 0, 1, 0, 1, 1, 1 }, // PU|PL
1474 { 0, 1, 0, 0, 0, 0 }, // PU|PUR
1475 { 0, 0, 53, 75, 127, 7 }, // PU|PUR|PL
1476 { 1, 0, 0, 0, 0, 0 }, // PUL
1477 { 0, 0, 0, 1, 0, 0 }, // PUL|PL
1478 { 1, 0, 1, 0, 1, 1 }, // PUL|PUR
1479 { 0, 0, 53, 75, 127, 7 }, // PUL|PUR|PL
1480 { 0, 1, 0, 0, 0, 0 }, // PUL|PU
1481 {-26, 29, 0, 29, 31, 5 }, // PUL|PU|PL
1482 { 3, 10, 3, 0, 15, 4 }, // PUL|PU|PUR
1483 {-26, 29, 0, 29, 31, 5 } // PUL|PU|PUR|PL
1484 };
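
    /* Annotation (worked example with arbitrary DC values): for transform 13
     * (PUL|PU|PL) the row {-26, 29, 0, 29, 31, 5} computes
     *     predicted_dc = (-26*vul + 29*vu + 29*vl) / 32,
     * where the mask 31 and shift 5 implement the divide-by-32 with
     * truncation toward zero for negative sums; e.g. vul = 10, vu = vl = 20
     * gives 900 >> 5 = 28, which then passes the +/-128 outranging check. */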
1485
1486 /* This table shows which types of blocks can use other blocks for
1487 * prediction. For example, INTRA is the only mode in this table to
1488 * have a frame number of 0. That means INTRA blocks can only predict
1489 * from other INTRA blocks. There are 2 golden frame coding types;
1490 * blocks encoded in these modes can only predict from other blocks
1491 * that were encoded with one of these 2 modes. */
1492 unsigned char compatible_frame[8] = {
1493 1, /* MODE_INTER_NO_MV */
1494 0, /* MODE_INTRA */
1495 1, /* MODE_INTER_PLUS_MV */
1496 1, /* MODE_INTER_LAST_MV */
1497 1, /* MODE_INTER_PRIOR_LAST */
1498 2, /* MODE_USING_GOLDEN */
1499 2, /* MODE_GOLDEN_MV */
1500 1 /* MODE_INTER_FOURMV */
1501 };
1502 int current_frame_type;
1503
1504 /* there is a last DC predictor for each of the 3 frame types */
1505 short last_dc[3];
1506
1507 int transform = 0;
1508
1509 debug_vp3(" vp3: reversing DC prediction\n");
1510
1511 vul = vu = vur = vl = 0;
1512 last_dc[0] = last_dc[1] = last_dc[2] = 0;
1513
1514 /* for each fragment row... */
1515 for (y = 0; y < fragment_height; y++) {
1516
1517 /* for each fragment in a row... */
1518 for (x = 0; x < fragment_width; x++, i++) {
1519
1520 /* reverse prediction if this block was coded */
1521 if (s->all_fragments[i].coding_method != MODE_COPY) {
1522
1523 current_frame_type =
1524 compatible_frame[s->all_fragments[i].coding_method];
1525 predictor_group = (x == 0) + ((y == 0) << 1) +
1526 ((x + 1 == fragment_width) << 2);
1527 debug_dc_pred(" frag %d: group %d, orig DC = %d, ",
1528 i, predictor_group, DC_COEFF(i));
1529
1530 switch (predictor_group) {
1531
1532 case 0:
1533 /* main body of fragments; consider all 4 possible
1534 * fragments for prediction */
1535
1536 /* calculate the indices of the predicting fragments */
1537 ul = i - fragment_width - 1;
1538 u = i - fragment_width;
1539 ur = i - fragment_width + 1;
1540 l = i - 1;
1541
1542 /* fetch the DC values for the predicting fragments */
1543 vul = DC_COEFF(ul);
1544 vu = DC_COEFF(u);
1545 vur = DC_COEFF(ur);
1546 vl = DC_COEFF(l);
1547
1548 /* figure out which fragments are valid */
1549 ful = FRAME_CODED(ul) && COMPATIBLE_FRAME(ul);
1550 fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u);
1551 fur = FRAME_CODED(ur) && COMPATIBLE_FRAME(ur);
1552 fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l);
1553
1554 /* decide which predictor transform to use */
1555 transform = (fl*PL) | (fu*PU) | (ful*PUL) | (fur*PUR);
1556
1557 break;
1558
1559 case 1:
1560 /* left column of fragments, not including top corner;
1561 * only consider up and up-right fragments */
1562
1563 /* calculate the indices of the predicting fragments */
1564 u = i - fragment_width;
1565 ur = i - fragment_width + 1;
1566
1567 /* fetch the DC values for the predicting fragments */
1568 vu = DC_COEFF(u);
1569 vur = DC_COEFF(ur);
1570
1571 /* figure out which fragments are valid */
1572 fur = FRAME_CODED(ur) && COMPATIBLE_FRAME(ur);
1573 fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u);
1574
1575 /* decide which predictor transform to use */
1576 transform = (fu*PU) | (fur*PUR);
1577
1578 break;
1579
1580 case 2:
1581 case 6:
1582 /* top row of fragments, not including top-left frag;
1583 * only consider the left fragment for prediction */
1584
1585 /* calculate the indices of the predicting fragments */
1586 l = i - 1;
1587
1588 /* fetch the DC values for the predicting fragments */
1589 vl = DC_COEFF(l);
1590
1591 /* figure out which fragments are valid */
1592 fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l);
1593
1594 /* decide which predictor transform to use */
1595 transform = (fl*PL);
1596
1597 break;
1598
1599 case 3:
1600 /* top-left fragment */
1601
1602 /* nothing to predict from in this case */
1603 transform = 0;
1604
1605 break;
1606
1607 case 4:
1608 /* right column of fragments, not including top corner;
1609 * consider up-left, up, and left fragments for
1610 * prediction */
1611
1612 /* calculate the indices of the predicting fragments */
1613 ul = i - fragment_width - 1;
1614 u = i - fragment_width;
1615 l = i - 1;
1616
1617 /* fetch the DC values for the predicting fragments */
1618 vul = DC_COEFF(ul);
1619 vu = DC_COEFF(u);
1620 vl = DC_COEFF(l);
1621
1622 /* figure out which fragments are valid */
1623 ful = FRAME_CODED(ul) && COMPATIBLE_FRAME(ul);
1624 fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u);
1625 fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l);
1626
1627 /* decide which predictor transform to use */
1628 transform = (fl*PL) | (fu*PU) | (ful*PUL);
1629
1630 break;
1631
1632 }
1633
1634 debug_dc_pred("transform = %d, ", transform);
1635
1636 if (transform == 0) {
1637
1638 /* if there were no fragments to predict from, use last
1639 * DC saved */
1640 predicted_dc = last_dc[current_frame_type];
1641 debug_dc_pred("from last DC (%d) = %d\n",
1642 current_frame_type, DC_COEFF(i));
1643
1644 } else {
1645
1646 /* apply the appropriate predictor transform */
1647 predicted_dc =
1648 (predictor_transform[transform][0] * vul) +
1649 (predictor_transform[transform][1] * vu) +
1650 (predictor_transform[transform][2] * vur) +
1651 (predictor_transform[transform][3] * vl);
1652
1653 /* if there is a shift value in the transform, add
1654 * the sign bit before the shift */
1655 if (predictor_transform[transform][5] != 0) {
1656 predicted_dc += ((predicted_dc >> 15) &
1657 predictor_transform[transform][4]);
1658 predicted_dc >>= predictor_transform[transform][5];
1659 }
1660
1661 /* check for outranging on the [ul u l] and
1662 * [ul u ur l] predictors */
1663 if ((transform == 13) || (transform == 15)) {
1664 if (iabs(predicted_dc - vu) > 128)
1665 predicted_dc = vu;
1666 else if (iabs(predicted_dc - vl) > 128)
1667 predicted_dc = vl;
1668 else if (iabs(predicted_dc - vul) > 128)
1669 predicted_dc = vul;
1670 }
1671
1672 debug_dc_pred("from pred DC = %d\n",
1673 DC_COEFF(i));
1674 }
1675
1676 /* at long last, apply the predictor */
1677 if(s->coeffs[i].index){
1678 *s->next_coeff= s->coeffs[i];
1679 s->coeffs[i].index=0;
1680 s->coeffs[i].coeff=0;
1681 s->coeffs[i].next= s->next_coeff++;
1682 }
1683 s->coeffs[i].coeff += predicted_dc;
1684 /* save the DC */
1685 last_dc[current_frame_type] = DC_COEFF(i);
1686 if(DC_COEFF(i) && !(s->all_fragments[i].coeff_count&127)){
1687 s->all_fragments[i].coeff_count= 129;
1688// s->all_fragments[i].next_coeff= s->next_coeff;
1689 s->coeffs[i].next= s->next_coeff;
1690 (s->next_coeff++)->next=NULL;
1691 }
1692 }
1693 }
1694 }
1695}
1696
1697
1698static void horizontal_filter(unsigned char *first_pixel, int stride,
1699 int *bounding_values);
1700static void vertical_filter(unsigned char *first_pixel, int stride,
1701 int *bounding_values);
1702
1703/*
1704 * Perform the final rendering for a particular slice of data.
1705 * The slice number ranges from 0..(macroblock_height - 1).
1706 */
1707static void render_slice(Vp3DecodeContext *s, int slice)
1708{
1709 int x, y;
1710 int m, n;
1711 int i; /* indicates current fragment */
1712 int16_t *dequantizer;
1713 DECLARE_ALIGNED_16(DCTELEM, block[64]);
1714 unsigned char *output_plane;
1715 unsigned char *last_plane;
1716 unsigned char *golden_plane;
1717 int stride;
1718 int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
1719 int upper_motion_limit, lower_motion_limit;
1720 int motion_halfpel_index;
1721 uint8_t *motion_source;
1722 int plane;
1723 int plane_width;
1724 int plane_height;
1725 int slice_height;
1726 int current_macroblock_entry = slice * s->macroblock_width * 6;
1727 int fragment_width;
1728
1729 if (slice >= s->macroblock_height)
1730 return;
1731
1732 for (plane = 0; plane < 3; plane++) {
1733
1734 /* set up plane-specific parameters */
1735 if (plane == 0) {
1736 output_plane = s->current_frame.data[0];
1737 last_plane = s->last_frame.data[0];
1738 golden_plane = s->golden_frame.data[0];
1739 stride = s->current_frame.linesize[0];
1740 if (!s->flipped_image) stride = -stride;
1741 upper_motion_limit = 7 * s->current_frame.linesize[0];
1742 lower_motion_limit = s->height * s->current_frame.linesize[0] + s->width - 8;
1743 y = slice * FRAGMENT_PIXELS * 2;
1744 plane_width = s->width;
1745 plane_height = s->height;
1746 slice_height = y + FRAGMENT_PIXELS * 2;
1747 i = s->macroblock_fragments[current_macroblock_entry + 0];
1748 } else if (plane == 1) {
1749 output_plane = s->current_frame.data[1];
1750 last_plane = s->last_frame.data[1];
1751 golden_plane = s->golden_frame.data[1];
1752 stride = s->current_frame.linesize[1];
1753 if (!s->flipped_image) stride = -stride;
1754 upper_motion_limit = 7 * s->current_frame.linesize[1];
1755 lower_motion_limit = (s->height / 2) * s->current_frame.linesize[1] + (s->width / 2) - 8;
1756 y = slice * FRAGMENT_PIXELS;
1757 plane_width = s->width / 2;
1758 plane_height = s->height / 2;
1759 slice_height = y + FRAGMENT_PIXELS;
1760 i = s->macroblock_fragments[current_macroblock_entry + 4];
1761 } else {
1762 output_plane = s->current_frame.data[2];
1763 last_plane = s->last_frame.data[2];
1764 golden_plane = s->golden_frame.data[2];
1765 stride = s->current_frame.linesize[2];
1766 if (!s->flipped_image) stride = -stride;
1767 upper_motion_limit = 7 * s->current_frame.linesize[2];
1768 lower_motion_limit = (s->height / 2) * s->current_frame.linesize[2] + (s->width / 2) - 8;
1769 y = slice * FRAGMENT_PIXELS;
1770 plane_width = s->width / 2;
1771 plane_height = s->height / 2;
1772 slice_height = y + FRAGMENT_PIXELS;
1773 i = s->macroblock_fragments[current_macroblock_entry + 5];
1774 }
1775 fragment_width = plane_width / FRAGMENT_PIXELS;
1776
1777 if(ABS(stride) > 2048)
1778 return; //various tables are fixed size
1779
1780 /* for each fragment row in the slice (both of them)... */
1781 for (; y < slice_height; y += 8) {
1782
1783 /* for each fragment in a row... */
1784 for (x = 0; x < plane_width; x += 8, i++) {
1785
1786 if ((i < 0) || (i >= s->fragment_count)) {
1787 av_log(s->avctx, AV_LOG_ERROR, " vp3:render_slice(): bad fragment number (%d)\n", i);
1788 return;
1789 }
1790
1791 /* transform if this block was coded */
1792 if ((s->all_fragments[i].coding_method != MODE_COPY) &&
1793 !((s->avctx->flags & CODEC_FLAG_GRAY) && plane)) {
1794
1795 if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
1796 (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
1797 motion_source= golden_plane;
1798 else
1799 motion_source= last_plane;
1800
1801 motion_source += s->all_fragments[i].first_pixel;
1802 motion_halfpel_index = 0;
1803
1804 /* sort out the motion vector if this fragment is coded
1805 * using a motion vector method */
1806 if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
1807 (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
1808 int src_x, src_y;
1809 motion_x = s->all_fragments[i].motion_x;
1810 motion_y = s->all_fragments[i].motion_y;
1811 if(plane){
1812 motion_x= (motion_x>>1) | (motion_x&1);
1813 motion_y= (motion_y>>1) | (motion_y&1);
1814 }
1815
1816 src_x= (motion_x>>1) + x;
1817 src_y= (motion_y>>1) + y;
1818 if ((motion_x == 127) || (motion_y == 127))
1819 av_log(s->avctx, AV_LOG_ERROR, " help! got invalid motion vector! (%X, %X)\n", motion_x, motion_y);
1820
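                    /* bit 0 of motion_halfpel_index is the horizontal half-pel flag,
                     * bit 1 the vertical one; together they select one of the four
                     * no-rounding motion compensation routines used below */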
1821 motion_halfpel_index = motion_x & 0x01;
1822 motion_source += (motion_x >> 1);
1823
1824 motion_halfpel_index |= (motion_y & 0x01) << 1;
1825 motion_source += ((motion_y >> 1) * stride);
1826
1827 if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
1828 uint8_t *temp= s->edge_emu_buffer;
1829 if(stride<0) temp -= 9*stride;
1830 else temp += 9*stride;
1831
1832 ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
1833 motion_source= temp;
1834 }
1835 }
1836
1837
1838 /* first, take care of copying a block from either the
1839 * previous or the golden frame */
1840 if (s->all_fragments[i].coding_method != MODE_INTRA) {
1841 /* Note, it is possible to implement all MC cases with
1842 put_no_rnd_pixels_l2 which would look more like the
1843 VP3 source but this would be slower as
1844 put_no_rnd_pixels_tab is better optimized */
1845 if(motion_halfpel_index != 3){
1846 s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
1847 output_plane + s->all_fragments[i].first_pixel,
1848 motion_source, stride, 8);
1849 }else{
1850 int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
1851 s->dsp.put_no_rnd_pixels_l2[1](
1852 output_plane + s->all_fragments[i].first_pixel,
1853 motion_source - d,
1854 motion_source + stride + 1 + d,
1855 stride, 8);
1856 }
1857 dequantizer = s->inter_dequant;
1858 }else{
1859 if (plane == 0)
1860 dequantizer = s->intra_y_dequant;
1861 else
1862 dequantizer = s->intra_c_dequant;
1863 }
1864
1865 /* dequantize the DCT coefficients */
1866 debug_idct("fragment %d, coding mode %d, DC = %d, dequant = %d:\n",
1867 i, s->all_fragments[i].coding_method,
1868 DC_COEFF(i), dequantizer[0]);
1869
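                    /* the VP3 IDCT path uses the dequantized products directly; the other
                     * IDCTs expect them scaled down by 4 with rounding ((x + 2) >> 2),
                     * presumably because the dequant tables are premultiplied for the
                     * VP3 IDCT */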
1870 if(s->avctx->idct_algo==FF_IDCT_VP3){
1871 Coeff *coeff= s->coeffs + i;
1872 memset(block, 0, sizeof(block));
1873 while(coeff->next){
1874 block[coeff->index]= coeff->coeff * dequantizer[coeff->index];
1875 coeff= coeff->next;
1876 }
1877 }else{
1878 Coeff *coeff= s->coeffs + i;
1879 memset(block, 0, sizeof(block));
1880 while(coeff->next){
1881 block[coeff->index]= (coeff->coeff * dequantizer[coeff->index] + 2)>>2;
1882 coeff= coeff->next;
1883 }
1884 }
1885
1886 /* invert DCT and place (or add) in final output */
1887
1888 if (s->all_fragments[i].coding_method == MODE_INTRA) {
1889 if(s->avctx->idct_algo!=FF_IDCT_VP3)
1890 block[0] += 128<<3;
1891 s->dsp.idct_put(
1892 output_plane + s->all_fragments[i].first_pixel,
1893 stride,
1894 block);
1895 } else {
1896 s->dsp.idct_add(
1897 output_plane + s->all_fragments[i].first_pixel,
1898 stride,
1899 block);
1900 }
1901
1902 debug_idct("block after idct_%s():\n",
1903 (s->all_fragments[i].coding_method == MODE_INTRA)?
1904 "put" : "add");
1905 for (m = 0; m < 8; m++) {
1906 for (n = 0; n < 8; n++) {
1907 debug_idct(" %3d", *(output_plane +
1908 s->all_fragments[i].first_pixel + (m * stride + n)));
1909 }
1910 debug_idct("\n");
1911 }
1912 debug_idct("\n");
1913
1914 } else {
1915
1916 /* copy directly from the previous frame */
1917 s->dsp.put_pixels_tab[1][0](
1918 output_plane + s->all_fragments[i].first_pixel,
1919 last_plane + s->all_fragments[i].first_pixel,
1920 stride, 8);
1921
1922 }
1923#if 0
1924 /* perform the left edge filter if:
1925 * - the fragment is not on the left column
1926 * - the fragment is coded in this frame
1927 * - the fragment is not coded in this frame but the left
1928 * fragment is coded in this frame (this is done instead
1929 * of a right edge filter when rendering the left fragment
1930 * since this fragment is not available yet) */
1931 if ((x > 0) &&
1932 ((s->all_fragments[i].coding_method != MODE_COPY) ||
1933 ((s->all_fragments[i].coding_method == MODE_COPY) &&
1934 (s->all_fragments[i - 1].coding_method != MODE_COPY)) )) {
1935 horizontal_filter(
1936 output_plane + s->all_fragments[i].first_pixel + 7*stride,
1937 -stride, bounding_values);
1938 }
1939
1940 /* perform the top edge filter if:
1941 * - the fragment is not on the top row
1942 * - the fragment is coded in this frame
1943 * - the fragment is not coded in this frame but the above
1944 * fragment is coded in this frame (this is done instead
1945 * of a bottom edge filter when rendering the above
1946 * fragment since this fragment is not available yet) */
1947 if ((y > 0) &&
1948 ((s->all_fragments[i].coding_method != MODE_COPY) ||
1949 ((s->all_fragments[i].coding_method == MODE_COPY) &&
1950 (s->all_fragments[i - fragment_width].coding_method != MODE_COPY)) )) {
1951 vertical_filter(
1952 output_plane + s->all_fragments[i].first_pixel - stride,
1953 -stride, bounding_values);
1954 }
1955#endif
1956 }
1957 }
1958 }
1959
1960 /* this looks like a good place for slice dispatch... */
1961 /* algorithm:
1962 * if (slice == s->macroblock_height - 1)
1963 * dispatch (both last slice & 2nd-to-last slice);
1964 * else if (slice > 0)
1965 * dispatch (slice - 1);
1966 */
1967
1968 emms_c();
1969}
1970
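/* Both edge filters compute a weighted difference across the block edge
 * ((p[-2] - p[1]) + 3*(p[0] - p[-1]) in the horizontal case), scale it by 1/8
 * with rounding, clamp it through bounding_values[], and apply the resulting
 * correction to the two pixels adjacent to the edge. */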
1971static void horizontal_filter(unsigned char *first_pixel, int stride,
1972 int *bounding_values)
1973{
1974 unsigned char *end;
1975 int filter_value;
1976
1977 for (end= first_pixel + 8*stride; first_pixel < end; first_pixel += stride) {
1978 filter_value =
1979 (first_pixel[-2] - first_pixel[ 1])
1980 +3*(first_pixel[ 0] - first_pixel[-1]);
1981 filter_value = bounding_values[(filter_value + 4) >> 3];
1982 first_pixel[-1] = clip_uint8(first_pixel[-1] + filter_value);
1983 first_pixel[ 0] = clip_uint8(first_pixel[ 0] - filter_value);
1984 }
1985}
1986
1987static void vertical_filter(unsigned char *first_pixel, int stride,
1988 int *bounding_values)
1989{
1990 unsigned char *end;
1991 int filter_value;
1992 const int nstride= -stride;
1993
1994 for (end= first_pixel + 8; first_pixel < end; first_pixel++) {
1995 filter_value =
1996 (first_pixel[2 * nstride] - first_pixel[ stride])
1997 +3*(first_pixel[0 ] - first_pixel[nstride]);
1998 filter_value = bounding_values[(filter_value + 4) >> 3];
1999 first_pixel[nstride] = clip_uint8(first_pixel[nstride] + filter_value);
2000 first_pixel[0] = clip_uint8(first_pixel[0] - filter_value);
2001 }
2002}
2003
2004static void apply_loop_filter(Vp3DecodeContext *s)
2005{
2006 int x, y, plane;
2007 int width, height;
2008 int fragment;
2009 int stride;
2010 unsigned char *plane_data;
2011 int *bounding_values= s->bounding_values_array+127;
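    /* bounding_values_array is indexed around its centre (offset +127) so that
     * the signed, possibly negative filter values can be looked up directly */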
2012
2013#if 0
2014 int bounding_values_array[256];
2015 int filter_limit;
2016
2017 /* find the right loop limit value */
2018 for (x = 63; x >= 0; x--) {
2019 if (vp31_ac_scale_factor[x] >= s->quality_index)
2020 break;
2021 }
2022 filter_limit = vp31_filter_limit_values[s->quality_index];
2023
2024 /* set up the bounding values */
2025 memset(bounding_values_array, 0, 256 * sizeof(int));
2026 for (x = 0; x < filter_limit; x++) {
2027 bounding_values[-x - filter_limit] = -filter_limit + x;
2028 bounding_values[-x] = -x;
2029 bounding_values[x] = x;
2030 bounding_values[x + filter_limit] = filter_limit - x;
2031 }
2032#endif
2033
2034 for (plane = 0; plane < 3; plane++) {
2035
2036 if (plane == 0) {
2037 /* Y plane parameters */
2038 fragment = 0;
2039 width = s->fragment_width;
2040 height = s->fragment_height;
2041 stride = s->current_frame.linesize[0];
2042 plane_data = s->current_frame.data[0];
2043 } else if (plane == 1) {
2044 /* U plane parameters */
2045 fragment = s->u_fragment_start;
2046 width = s->fragment_width / 2;
2047 height = s->fragment_height / 2;
2048 stride = s->current_frame.linesize[1];
2049 plane_data = s->current_frame.data[1];
2050 } else {
2051 /* V plane parameters */
2052 fragment = s->v_fragment_start;
2053 width = s->fragment_width / 2;
2054 height = s->fragment_height / 2;
2055 stride = s->current_frame.linesize[2];
2056 plane_data = s->current_frame.data[2];
2057 }
2058
2059 for (y = 0; y < height; y++) {
2060
2061 for (x = 0; x < width; x++) {
2062START_TIMER
2063 /* do not perform left edge filter for left column fragments */
2064 if ((x > 0) &&
2065 (s->all_fragments[fragment].coding_method != MODE_COPY)) {
2066 horizontal_filter(
2067 plane_data + s->all_fragments[fragment].first_pixel - 7*stride,
2068 stride, bounding_values);
2069 }
2070
2071 /* do not perform top edge filter for top row fragments */
2072 if ((y > 0) &&
2073 (s->all_fragments[fragment].coding_method != MODE_COPY)) {
2074 vertical_filter(
2075 plane_data + s->all_fragments[fragment].first_pixel + stride,
2076 stride, bounding_values);
2077 }
2078
2079 /* do not perform right edge filter for right column
2080 * fragments or if right fragment neighbor is also coded
2081 * in this frame (it will be filtered in next iteration) */
2082 if ((x < width - 1) &&
2083 (s->all_fragments[fragment].coding_method != MODE_COPY) &&
2084 (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
2085 horizontal_filter(
2086 plane_data + s->all_fragments[fragment + 1].first_pixel - 7*stride,
2087 stride, bounding_values);
2088 }
2089
2090 /* do not perform bottom edge filter for bottom row
2091 * fragments or if bottom fragment neighbor is also coded
2092 * in this frame (it will be filtered in the next row) */
2093 if ((y < height - 1) &&
2094 (s->all_fragments[fragment].coding_method != MODE_COPY) &&
2095 (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
2096 vertical_filter(
2097 plane_data + s->all_fragments[fragment + width].first_pixel + stride,
2098 stride, bounding_values);
2099 }
2100
2101 fragment++;
2102STOP_TIMER("loop filter")
2103 }
2104 }
2105 }
2106}
2107
2108/*
2109 * This function computes the first pixel addresses for each fragment.
2110 * It needs to be invoked after the first frame is allocated
2111 * so that it has access to the plane strides.
2112 */
2113static void vp3_calculate_pixel_addresses(Vp3DecodeContext *s)
2114{
2115
2116 int i, x, y;
2117
2118 /* figure out the first pixel addresses for each of the fragments */
2119 /* Y plane */
2120 i = 0;
2121 for (y = s->fragment_height; y > 0; y--) {
2122 for (x = 0; x < s->fragment_width; x++) {
2123 s->all_fragments[i++].first_pixel =
2124 s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS -
2125 s->golden_frame.linesize[0] +
2126 x * FRAGMENT_PIXELS;
2127 debug_init(" fragment %d, first pixel @ %d\n",
2128 i-1, s->all_fragments[i-1].first_pixel);
2129 }
2130 }
2131
2132 /* U plane */
2133 i = s->u_fragment_start;
2134 for (y = s->fragment_height / 2; y > 0; y--) {
2135 for (x = 0; x < s->fragment_width / 2; x++) {
2136 s->all_fragments[i++].first_pixel =
2137 s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS -
2138 s->golden_frame.linesize[1] +
2139 x * FRAGMENT_PIXELS;
2140 debug_init(" fragment %d, first pixel @ %d\n",
2141 i-1, s->all_fragments[i-1].first_pixel);
2142 }
2143 }
2144
2145 /* V plane */
2146 i = s->v_fragment_start;
2147 for (y = s->fragment_height / 2; y > 0; y--) {
2148 for (x = 0; x < s->fragment_width / 2; x++) {
2149 s->all_fragments[i++].first_pixel =
2150 s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS -
2151 s->golden_frame.linesize[2] +
2152 x * FRAGMENT_PIXELS;
2153 debug_init(" fragment %d, first pixel @ %d\n",
2154 i-1, s->all_fragments[i-1].first_pixel);
2155 }
2156 }
2157}
2158
2159/* FIXME: this should be merged with the above! */
2160static void theora_calculate_pixel_addresses(Vp3DecodeContext *s)
2161{
2162
2163 int i, x, y;
2164
2165 /* figure out the first pixel addresses for each of the fragments */
2166 /* Y plane */
2167 i = 0;
2168 for (y = 1; y <= s->fragment_height; y++) {
2169 for (x = 0; x < s->fragment_width; x++) {
2170 s->all_fragments[i++].first_pixel =
2171 s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS -
2172 s->golden_frame.linesize[0] +
2173 x * FRAGMENT_PIXELS;
2174 debug_init(" fragment %d, first pixel @ %d\n",
2175 i-1, s->all_fragments[i-1].first_pixel);
2176 }
2177 }
2178
2179 /* U plane */
2180 i = s->u_fragment_start;
2181 for (y = 1; y <= s->fragment_height / 2; y++) {
2182 for (x = 0; x < s->fragment_width / 2; x++) {
2183 s->all_fragments[i++].first_pixel =
2184 s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS -
2185 s->golden_frame.linesize[1] +
2186 x * FRAGMENT_PIXELS;
2187 debug_init(" fragment %d, first pixel @ %d\n",
2188 i-1, s->all_fragments[i-1].first_pixel);
2189 }
2190 }
2191
2192 /* V plane */
2193 i = s->v_fragment_start;
2194 for (y = 1; y <= s->fragment_height / 2; y++) {
2195 for (x = 0; x < s->fragment_width / 2; x++) {
2196 s->all_fragments[i++].first_pixel =
2197 s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS -
2198 s->golden_frame.linesize[2] +
2199 x * FRAGMENT_PIXELS;
2200 debug_init(" fragment %d, first pixel @ %d\n",
2201 i-1, s->all_fragments[i-1].first_pixel);
2202 }
2203 }
2204}
2205
2206/*
2207 * This is the ffmpeg/libavcodec API init function.
2208 */
2209static int vp3_decode_init(AVCodecContext *avctx)
2210{
2211 Vp3DecodeContext *s = avctx->priv_data;
2212 int i;
2213 int c_width;
2214 int c_height;
2215 int y_superblock_count;
2216 int c_superblock_count;
2217
2218 if (avctx->codec_tag == MKTAG('V','P','3','0'))
2219 s->version = 0;
2220 else
2221 s->version = 1;
2222
2223 s->avctx = avctx;
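    /* round the coded dimensions up to a multiple of 16 (the macroblock size) */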
2224 s->width = (avctx->width + 15) & 0xFFFFFFF0;
2225 s->height = (avctx->height + 15) & 0xFFFFFFF0;
2226 avctx->pix_fmt = PIX_FMT_YUV420P;
2227 avctx->has_b_frames = 0;
2228 if(avctx->idct_algo==FF_IDCT_AUTO)
2229 avctx->idct_algo=FF_IDCT_VP3;
2230 dsputil_init(&s->dsp, avctx);
2231
2232 ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
2233
2234 /* initialize to an impossible value which will force a recalculation
2235 * in the first frame decode */
2236 s->quality_index = -1;
2237
2238 s->y_superblock_width = (s->width + 31) / 32;
2239 s->y_superblock_height = (s->height + 31) / 32;
2240 y_superblock_count = s->y_superblock_width * s->y_superblock_height;
2241
2242 /* work out the dimensions for the C planes */
2243 c_width = s->width / 2;
2244 c_height = s->height / 2;
2245 s->c_superblock_width = (c_width + 31) / 32;
2246 s->c_superblock_height = (c_height + 31) / 32;
2247 c_superblock_count = s->c_superblock_width * s->c_superblock_height;
2248
2249 s->superblock_count = y_superblock_count + (c_superblock_count * 2);
2250 s->u_superblock_start = y_superblock_count;
2251 s->v_superblock_start = s->u_superblock_start + c_superblock_count;
2252 s->superblock_coding = av_malloc(s->superblock_count);
2253
2254 s->macroblock_width = (s->width + 15) / 16;
2255 s->macroblock_height = (s->height + 15) / 16;
2256 s->macroblock_count = s->macroblock_width * s->macroblock_height;
2257
2258 s->fragment_width = s->width / FRAGMENT_PIXELS;
2259 s->fragment_height = s->height / FRAGMENT_PIXELS;
2260
2261 /* fragment count covers all 8x8 blocks for all 3 planes */
2262 s->fragment_count = s->fragment_width * s->fragment_height * 3 / 2;
2263 s->u_fragment_start = s->fragment_width * s->fragment_height;
2264 s->v_fragment_start = s->fragment_width * s->fragment_height * 5 / 4;
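    /* e.g. for a 320x240 frame: 40x30 = 1200 Y fragments plus 20x15 = 300 each
     * for U and V gives 1800 fragments in total, with U starting at index 1200
     * and V at index 1500 */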
2265
2266 debug_init(" Y plane: %d x %d\n", s->width, s->height);
2267 debug_init(" C plane: %d x %d\n", c_width, c_height);
2268 debug_init(" Y superblocks: %d x %d, %d total\n",
2269 s->y_superblock_width, s->y_superblock_height, y_superblock_count);
2270 debug_init(" C superblocks: %d x %d, %d total\n",
2271 s->c_superblock_width, s->c_superblock_height, c_superblock_count);
2272 debug_init(" total superblocks = %d, U starts @ %d, V starts @ %d\n",
2273 s->superblock_count, s->u_superblock_start, s->v_superblock_start);
2274 debug_init(" macroblocks: %d x %d, %d total\n",
2275 s->macroblock_width, s->macroblock_height, s->macroblock_count);
2276 debug_init(" %d fragments, %d x %d, u starts @ %d, v starts @ %d\n",
2277 s->fragment_count,
2278 s->fragment_width,
2279 s->fragment_height,
2280 s->u_fragment_start,
2281 s->v_fragment_start);
2282
2283 s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
2284 s->coeffs = av_malloc(s->fragment_count * sizeof(Coeff) * 65);
2285 s->coded_fragment_list = av_malloc(s->fragment_count * sizeof(int));
2286 s->pixel_addresses_inited = 0;
2287
2288 if (!s->theora_tables)
2289 {
2290 for (i = 0; i < 64; i++)
2291 s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
2292 for (i = 0; i < 64; i++)
2293 s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
2294 for (i = 0; i < 64; i++)
2295 s->coded_intra_y_dequant[i] = vp31_intra_y_dequant[i];
2296 for (i = 0; i < 64; i++)
2297 s->coded_intra_c_dequant[i] = vp31_intra_c_dequant[i];
2298 for (i = 0; i < 64; i++)
2299 s->coded_inter_dequant[i] = vp31_inter_dequant[i];
2300 for (i = 0; i < 64; i++)
2301 s->filter_limit_values[i] = vp31_filter_limit_values[i];
2302
2303 /* init VLC tables */
2304 for (i = 0; i < 16; i++) {
2305
2306 /* DC histograms */
2307 init_vlc(&s->dc_vlc[i], 5, 32,
2308 &dc_bias[i][0][1], 4, 2,
2309 &dc_bias[i][0][0], 4, 2, 0);
2310
2311 /* group 1 AC histograms */
2312 init_vlc(&s->ac_vlc_1[i], 5, 32,
2313 &ac_bias_0[i][0][1], 4, 2,
2314 &ac_bias_0[i][0][0], 4, 2, 0);
2315
2316 /* group 2 AC histograms */
2317 init_vlc(&s->ac_vlc_2[i], 5, 32,
2318 &ac_bias_1[i][0][1], 4, 2,
2319 &ac_bias_1[i][0][0], 4, 2, 0);
2320
2321 /* group 3 AC histograms */
2322 init_vlc(&s->ac_vlc_3[i], 5, 32,
2323 &ac_bias_2[i][0][1], 4, 2,
2324 &ac_bias_2[i][0][0], 4, 2, 0);
2325
2326 /* group 4 AC histograms */
2327 init_vlc(&s->ac_vlc_4[i], 5, 32,
2328 &ac_bias_3[i][0][1], 4, 2,
2329 &ac_bias_3[i][0][0], 4, 2, 0);
2330 }
2331 } else {
2332 for (i = 0; i < 16; i++) {
2333
2334 /* DC histograms */
2335 init_vlc(&s->dc_vlc[i], 5, 32,
2336 &s->huffman_table[i][0][1], 4, 2,
2337 &s->huffman_table[i][0][0], 4, 2, 0);
2338
2339 /* group 1 AC histograms */
2340 init_vlc(&s->ac_vlc_1[i], 5, 32,
2341 &s->huffman_table[i+16][0][1], 4, 2,
2342 &s->huffman_table[i+16][0][0], 4, 2, 0);
2343
2344 /* group 2 AC histograms */
2345 init_vlc(&s->ac_vlc_2[i], 5, 32,
2346 &s->huffman_table[i+16*2][0][1], 4, 2,
2347 &s->huffman_table[i+16*2][0][0], 4, 2, 0);
2348
2349 /* group 3 AC histograms */
2350 init_vlc(&s->ac_vlc_3[i], 5, 32,
2351 &s->huffman_table[i+16*3][0][1], 4, 2,
2352 &s->huffman_table[i+16*3][0][0], 4, 2, 0);
2353
2354 /* group 4 AC histograms */
2355 init_vlc(&s->ac_vlc_4[i], 5, 32,
2356 &s->huffman_table[i+16*4][0][1], 4, 2,
2357 &s->huffman_table[i+16*4][0][0], 4, 2, 0);
2358 }
2359 }
2360
2361 init_vlc(&s->superblock_run_length_vlc, 6, 34,
2362 &superblock_run_length_vlc_table[0][1], 4, 2,
2363 &superblock_run_length_vlc_table[0][0], 4, 2, 0);
2364
2365 init_vlc(&s->fragment_run_length_vlc, 5, 30,
2366 &fragment_run_length_vlc_table[0][1], 4, 2,
2367 &fragment_run_length_vlc_table[0][0], 4, 2, 0);
2368
2369 init_vlc(&s->mode_code_vlc, 3, 8,
2370 &mode_code_vlc_table[0][1], 2, 1,
2371 &mode_code_vlc_table[0][0], 2, 1, 0);
2372
2373 init_vlc(&s->motion_vector_vlc, 6, 63,
2374 &motion_vector_vlc_table[0][1], 2, 1,
2375 &motion_vector_vlc_table[0][0], 2, 1, 0);
2376
2377 /* work out the block mapping tables */
2378 s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
2379 s->superblock_macroblocks = av_malloc(s->superblock_count * 4 * sizeof(int));
2380 s->macroblock_fragments = av_malloc(s->macroblock_count * 6 * sizeof(int));
2381 s->macroblock_coding = av_malloc(s->macroblock_count + 1);
2382 init_block_mapping(s);
2383
2384 for (i = 0; i < 3; i++) {
2385 s->current_frame.data[i] = NULL;
2386 s->last_frame.data[i] = NULL;
2387 s->golden_frame.data[i] = NULL;
2388 }
2389
2390 return 0;
2391}
2392
2393/*
2394 * This is the ffmpeg/libavcodec API frame decode function.
2395 */
2396static int vp3_decode_frame(AVCodecContext *avctx,
2397 void *data, int *data_size,
2398 uint8_t *buf, int buf_size)
2399{
2400 Vp3DecodeContext *s = avctx->priv_data;
2401 GetBitContext gb;
2402 static int counter = 0;
2403 int i;
2404
2405 init_get_bits(&gb, buf, buf_size * 8);
2406
2407 if (s->theora && get_bits1(&gb))
2408 {
2409#if 1
2410 av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
2411 return -1;
2412#else
2413 int ptype = get_bits(&gb, 7);
2414
2415 skip_bits(&gb, 6*8); /* "theora" */
2416
2417 switch(ptype)
2418 {
2419 case 1:
2420 theora_decode_comments(avctx, gb);
2421 break;
2422 case 2:
2423 theora_decode_tables(avctx, gb);
2424 init_dequantizer(s);
2425 break;
2426 default:
2427 av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype);
2428 }
2429 return buf_size;
2430#endif
2431 }
2432
2433 s->keyframe = !get_bits1(&gb);
2434 if (!s->theora)
2435 skip_bits(&gb, 1);
2436 s->last_quality_index = s->quality_index;
2437 s->quality_index = get_bits(&gb, 6);
2438 if (s->theora >= 0x030200)
2439 skip_bits1(&gb);
2440
2441 if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2442 av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
2443 s->keyframe?"key":"", counter, s->quality_index);
2444 counter++;
2445
2446 if (s->quality_index != s->last_quality_index) {
2447 init_dequantizer(s);
2448 init_loop_filter(s);
2449 }
2450
2451 if (s->keyframe) {
2452 if (!s->theora)
2453 {
2454 skip_bits(&gb, 4); /* width code */
2455 skip_bits(&gb, 4); /* height code */
2456 if (s->version)
2457 {
2458 s->version = get_bits(&gb, 5);
2459 if (counter == 1)
2460 av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
2461 }
2462 }
2463 if (s->version || s->theora)
2464 {
2465 if (get_bits1(&gb))
2466 av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
2467 skip_bits(&gb, 2); /* reserved? */
2468 }
2469
2470 if (s->last_frame.data[0] == s->golden_frame.data[0]) {
2471 if (s->golden_frame.data[0])
2472 avctx->release_buffer(avctx, &s->golden_frame);
2473 s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */
2474 } else {
2475 if (s->golden_frame.data[0])
2476 avctx->release_buffer(avctx, &s->golden_frame);
2477 if (s->last_frame.data[0])
2478 avctx->release_buffer(avctx, &s->last_frame);
2479 }
2480
2481 s->golden_frame.reference = 3;
2482 if(avctx->get_buffer(avctx, &s->golden_frame) < 0) {
2483 av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
2484 return -1;
2485 }
2486
2487 /* golden frame is also the current frame */
2488 memcpy(&s->current_frame, &s->golden_frame, sizeof(AVFrame));
2489
2490 /* time to figure out pixel addresses? */
2491 if (!s->pixel_addresses_inited)
2492 {
2493 if (!s->flipped_image)
2494 vp3_calculate_pixel_addresses(s);
2495 else
2496 theora_calculate_pixel_addresses(s);
2497 }
2498 } else {
2499 /* allocate a new current frame */
2500 s->current_frame.reference = 3;
2501 if(avctx->get_buffer(avctx, &s->current_frame) < 0) {
2502 av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
2503 return -1;
2504 }
2505 }
2506
2507 s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame
2508 s->current_frame.qstride= 0;
2509
2510 {START_TIMER
2511 init_frame(s, &gb);
2512 STOP_TIMER("init_frame")}
2513
2514#if KEYFRAMES_ONLY
2515if (!s->keyframe) {
2516
2517 memcpy(s->current_frame.data[0], s->golden_frame.data[0],
2518 s->current_frame.linesize[0] * s->height);
2519 memcpy(s->current_frame.data[1], s->golden_frame.data[1],
2520 s->current_frame.linesize[1] * s->height / 2);
2521 memcpy(s->current_frame.data[2], s->golden_frame.data[2],
2522 s->current_frame.linesize[2] * s->height / 2);
2523
2524} else {
2525#endif
2526
2527 {START_TIMER
2528 if (unpack_superblocks(s, &gb)){
2529 av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2530 return -1;
2531 }
2532 STOP_TIMER("unpack_superblocks")}
2533 {START_TIMER
2534 if (unpack_modes(s, &gb)){
2535 av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2536 return -1;
2537 }
2538 STOP_TIMER("unpack_modes")}
2539 {START_TIMER
2540 if (unpack_vectors(s, &gb)){
2541 av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2542 return -1;
2543 }
2544 STOP_TIMER("unpack_vectors")}
2545 {START_TIMER
2546 if (unpack_dct_coeffs(s, &gb)){
2547 av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2548 return -1;
2549 }
2550 STOP_TIMER("unpack_dct_coeffs")}
2551 {START_TIMER
2552
2553 reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);
2554 if ((avctx->flags & CODEC_FLAG_GRAY) == 0) {
2555 reverse_dc_prediction(s, s->u_fragment_start,
2556 s->fragment_width / 2, s->fragment_height / 2);
2557 reverse_dc_prediction(s, s->v_fragment_start,
2558 s->fragment_width / 2, s->fragment_height / 2);
2559 }
2560 STOP_TIMER("reverse_dc_prediction")}
2561 {START_TIMER
2562
2563 for (i = 0; i < s->macroblock_height; i++)
2564 render_slice(s, i);
2565 STOP_TIMER("render_fragments")}
2566
2567 {START_TIMER
2568 apply_loop_filter(s);
2569 STOP_TIMER("apply_loop_filter")}
2570#if KEYFRAMES_ONLY
2571}
2572#endif
2573
2574 *data_size=sizeof(AVFrame);
2575 *(AVFrame*)data= s->current_frame;
2576
2577 /* release the last frame, if it is allocated and if it is not the
2578 * golden frame */
2579 if ((s->last_frame.data[0]) &&
2580 (s->last_frame.data[0] != s->golden_frame.data[0]))
2581 avctx->release_buffer(avctx, &s->last_frame);
2582
2583 /* shuffle frames (last = current) */
2584 memcpy(&s->last_frame, &s->current_frame, sizeof(AVFrame));
2585 s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */
2586
2587 return buf_size;
2588}
2589
2590/*
2591 * This is the ffmpeg/libavcodec API module cleanup function.
2592 */
2593static int vp3_decode_end(AVCodecContext *avctx)
2594{
2595 Vp3DecodeContext *s = avctx->priv_data;
2596
2597 av_free(s->all_fragments);
2598 av_free(s->coeffs);
2599 av_free(s->coded_fragment_list);
2600 av_free(s->superblock_fragments);
2601 av_free(s->superblock_macroblocks);
2602 av_free(s->macroblock_fragments);
2603 av_free(s->macroblock_coding);
2604
2605 /* release all frames */
2606 if (s->golden_frame.data[0] && s->golden_frame.data[0] != s->last_frame.data[0])
2607 avctx->release_buffer(avctx, &s->golden_frame);
2608 if (s->last_frame.data[0])
2609 avctx->release_buffer(avctx, &s->last_frame);
2610 /* no need to release the current_frame since it will always be pointing
2611 * to the same frame as either the golden or last frame */
2612
2613 return 0;
2614}
2615
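/* Recursively reads one Huffman tree from a Theora setup header: a 1 bit
 * introduces a leaf carrying a 5-bit token whose code is the path accumulated
 * in s->hbits / s->huff_code_size, while a 0 bit introduces an internal node
 * whose 0- and 1-subtrees are read in turn. */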
2616static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
2617{
2618 Vp3DecodeContext *s = avctx->priv_data;
2619
2620 if (get_bits(gb, 1)) {
2621 int token;
2622 if (s->entries >= 32) { /* overflow */
2623 av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2624 return -1;
2625 }
2626 token = get_bits(gb, 5);
2627 //av_log(avctx, AV_LOG_DEBUG, "hti %d hbits %x token %d entry : %d size %d\n", s->hti, s->hbits, token, s->entries, s->huff_code_size);
2628 s->huffman_table[s->hti][token][0] = s->hbits;
2629 s->huffman_table[s->hti][token][1] = s->huff_code_size;
2630 s->entries++;
2631 }
2632 else {
2633 if (s->huff_code_size >= 32) {/* overflow */
2634 av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2635 return -1;
2636 }
2637 s->huff_code_size++;
2638 s->hbits <<= 1;
2639 read_huffman_tree(avctx, gb);
2640 s->hbits |= 1;
2641 read_huffman_tree(avctx, gb);
2642 s->hbits >>= 1;
2643 s->huff_code_size--;
2644 }
2645 return 0;
2646}
2647
2648static int theora_decode_header(AVCodecContext *avctx, GetBitContext gb)
2649{
2650 Vp3DecodeContext *s = avctx->priv_data;
2651 int major, minor, micro;
2652
2653 major = get_bits(&gb, 8); /* version major */
2654 minor = get_bits(&gb, 8); /* version minor */
2655 micro = get_bits(&gb, 8); /* version micro */
2656 av_log(avctx, AV_LOG_INFO, "Theora bitstream version %d.%d.%d\n",
2657 major, minor, micro);
2658
2659 /* FIXME: endianness? */
2660 s->theora = (major << 16) | (minor << 8) | micro;
2661
2662 /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
2663 /* but previous versions have the image flipped relative to vp3 */
2664 if (s->theora < 0x030200)
2665 {
2666 s->flipped_image = 1;
2667 av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
2668 }
2669
2670 s->width = get_bits(&gb, 16) << 4;
2671 s->height = get_bits(&gb, 16) << 4;
2672
2673 if(avcodec_check_dimensions(avctx, s->width, s->height)){
2674 av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
2675 s->width= s->height= 0;
2676 return -1;
2677 }
2678
2679 if (s->theora >= 0x030400)
2680 {
2681 skip_bits(&gb, 32); /* total number of superblocks in a frame */
2682 // FIXME: the next field is 36 bits long
2683 skip_bits(&gb, 32); /* total number of blocks in a frame (high 32 bits) */
2684 skip_bits(&gb, 4); /* total number of blocks in a frame (low 4 bits) */
2685 skip_bits(&gb, 32); /* total number of macroblocks in a frame */
2686
2687 skip_bits(&gb, 24); /* frame width */
2688 skip_bits(&gb, 24); /* frame height */
2689 }
2690 else
2691 {
2692 skip_bits(&gb, 24); /* frame width */
2693 skip_bits(&gb, 24); /* frame height */
2694 }
2695
2696 skip_bits(&gb, 8); /* offset x */
2697 skip_bits(&gb, 8); /* offset y */
2698
2699 skip_bits(&gb, 32); /* fps numerator */
2700 skip_bits(&gb, 32); /* fps denominator */
2701 skip_bits(&gb, 24); /* aspect numerator */
2702 skip_bits(&gb, 24); /* aspect denominator */
2703
2704 if (s->theora < 0x030200)
2705 skip_bits(&gb, 5); /* keyframe frequency force */
2706 skip_bits(&gb, 8); /* colorspace */
2707 if (s->theora >= 0x030400)
2708 skip_bits(&gb, 2); /* pixel format: 420,res,422,444 */
2709 skip_bits(&gb, 24); /* bitrate */
2710
2711 skip_bits(&gb, 6); /* quality hint */
2712
2713 if (s->theora >= 0x030200)
2714 {
2715 skip_bits(&gb, 5); /* keyframe frequency force */
2716
2717 if (s->theora < 0x030400)
2718 skip_bits(&gb, 5); /* spare bits */
2719 }
2720
2721// align_get_bits(&gb);
2722
2723 avctx->width = s->width;
2724 avctx->height = s->height;
2725
2726 return 0;
2727}
2728
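/* Assembles a 32-bit little-endian value from the next four bytes in the
 * bitstream. */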
2729static inline int theora_get_32bit(GetBitContext gb)
2730{
2731 int ret = get_bits(&gb, 8);
2732 ret += get_bits(&gb, 8) << 8;
2733 ret += get_bits(&gb, 8) << 16;
2734 ret += get_bits(&gb, 8) << 24;
2735
2736 return ret;
2737}
2738
2739static int theora_decode_tables(AVCodecContext *avctx, GetBitContext gb)
2740{
2741 Vp3DecodeContext *s = avctx->priv_data;
2742 int i, n, matrices;
2743
2744 if (s->theora >= 0x030200) {
2745 n = get_bits(&gb, 3);
2746 /* loop filter limit values table */
2747 for (i = 0; i < 64; i++)
2748 s->filter_limit_values[i] = get_bits(&gb, n);
2749 }
2750
2751 if (s->theora >= 0x030200)
2752 n = get_bits(&gb, 4) + 1;
2753 else
2754 n = 16;
2755 /* quality threshold table */
2756 for (i = 0; i < 64; i++)
2757 s->coded_ac_scale_factor[i] = get_bits(&gb, n);
2758
2759 if (s->theora >= 0x030200)
2760 n = get_bits(&gb, 4) + 1;
2761 else
2762 n = 16;
2763 /* dc scale factor table */
2764 for (i = 0; i < 64; i++)
2765 s->coded_dc_scale_factor[i] = get_bits(&gb, n);
2766
2767 if (s->theora >= 0x030200)
2768 matrices = get_bits(&gb, 9) + 1;
2769 else
2770 matrices = 3;
2771 if (matrices != 3) {
2772 av_log(avctx,AV_LOG_ERROR, "unsupported matrices: %d\n", matrices);
2773// return -1;
2774 }
2775 /* y coeffs */
2776 for (i = 0; i < 64; i++)
2777 s->coded_intra_y_dequant[i] = get_bits(&gb, 8);
2778
2779 /* uv coeffs */
2780 for (i = 0; i < 64; i++)
2781 s->coded_intra_c_dequant[i] = get_bits(&gb, 8);
2782
2783 /* inter coeffs */
2784 for (i = 0; i < 64; i++)
2785 s->coded_inter_dequant[i] = get_bits(&gb, 8);
2786
2787 /* skip unknown matrices */
2788 n = matrices - 3;
2789 while(n--)
2790 for (i = 0; i < 64; i++)
2791 skip_bits(&gb, 8);
2792
2793 for (i = 0; i <= 1; i++) {
2794 for (n = 0; n <= 2; n++) {
2795 int newqr;
2796 if (i > 0 || n > 0)
2797 newqr = get_bits(&gb, 1);
2798 else
2799 newqr = 1;
2800 if (!newqr) {
2801 if (i > 0)
2802 get_bits(&gb, 1);
2803 }
2804 else {
2805 int qi = 0;
2806 skip_bits(&gb, av_log2(matrices-1)+1);
2807 while (qi < 63) {
2808 qi += get_bits(&gb, av_log2(63-qi)+1) + 1;
2809 skip_bits(&gb, av_log2(matrices-1)+1);
2810 }
2811 if (qi > 63) {
2812 av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
2813 return -1;
2814 }
2815 }
2816 }
2817 }
2818
2819 /* Huffman tables */
2820 for (s->hti = 0; s->hti < 80; s->hti++) {
2821 s->entries = 0;
2822 s->huff_code_size = 1;
2823 if (!get_bits(&gb, 1)) {
2824 s->hbits = 0;
2825 read_huffman_tree(avctx, &gb);
2826 s->hbits = 1;
2827 read_huffman_tree(avctx, &gb);
2828 }
2829 }
2830
2831 s->theora_tables = 1;
2832
2833 return 0;
2834}
2835
2836static int theora_decode_init(AVCodecContext *avctx)
2837{
2838 Vp3DecodeContext *s = avctx->priv_data;
2839 GetBitContext gb;
2840 int ptype;
2841 uint8_t *p= avctx->extradata;
2842 int op_bytes, i;
2843
2844 s->theora = 1;
2845
2846 if (!avctx->extradata_size)
2847 {
2848 av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
2849 return -1;
2850 }
2851
2852 for(i=0;i<3;i++) {
2853 op_bytes = *(p++)<<8;
2854 op_bytes += *(p++);
2855
2856 init_get_bits(&gb, p, op_bytes);
2857 p += op_bytes;
2858
2859 ptype = get_bits(&gb, 8);
2860 debug_vp3("Theora header packet type: %x\n", ptype);
2861
2862 if (!(ptype & 0x80))
2863 {
2864 av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
2865 return -1;
2866 }
2867
2868 // FIXME: check for this as well
2869 skip_bits(&gb, 6*8); /* "theora" */
2870
2871 switch(ptype)
2872 {
2873 case 0x80:
2874 theora_decode_header(avctx, gb);
2875 break;
2876 case 0x81:
2877// FIXME: is this needed? it breaks sometimes
2878// theora_decode_comments(avctx, gb);
2879 break;
2880 case 0x82:
2881 theora_decode_tables(avctx, gb);
2882 break;
2883 default:
2884 av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
2885 break;
2886 }
2887 }
2888
2889 vp3_decode_init(avctx);
2890 return 0;
2891}
2892
2893AVCodec vp3_decoder = {
2894 "vp3",
2895 CODEC_TYPE_VIDEO,
2896 CODEC_ID_VP3,
2897 sizeof(Vp3DecodeContext),
2898 vp3_decode_init,
2899 NULL,
2900 vp3_decode_end,
2901 vp3_decode_frame,
2902 0,
2903 NULL
2904};
2905
2906#ifndef CONFIG_LIBTHEORA
2907AVCodec theora_decoder = {
2908 "theora",
2909 CODEC_TYPE_VIDEO,
2910 CODEC_ID_THEORA,
2911 sizeof(Vp3DecodeContext),
2912 theora_decode_init,
2913 NULL,
2914 vp3_decode_end,
2915 vp3_decode_frame,
2916 0,
2917 NULL
2918};
2919#endif