4 * Copyright (c) 1999 Mark Taylor
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Library General Public License for more details.
16 * You should have received a copy of the GNU Library General Public
17 * License along with this library; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA.
22 /* $Id: vbrquantize.c,v 1.1 2002/04/28 17:30:30 kramm Exp $ */
24 #include "config_static.h"
29 #include "reservoir.h"
30 #include "quantize_pvt.h"
31 #include "vbrquantize.h"
44 #define MAGIC_FLOAT (65536*(128))
45 #define MAGIC_INT 0x4b000000
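/*
 * MAGIC_FLOAT is 2^23 and MAGIC_INT is the IEEE-754 bit pattern of the
 * float 2^23.  Adding MAGIC_FLOAT to a small non-negative float x pushes
 * the integer value of x into the low mantissa bits, so the raw bit
 * pattern minus MAGIC_INT is x rounded to an integer by the FPU.
 * A minimal sketch of the trick, using the fi_union type used throughout
 * this file:
 *
 *     fi_union fi;
 *     fi.f = x + MAGIC_FLOAT;     (x assumed to lie in [0, 2^23))
 *     i    = fi.i - MAGIC_INT;    (integer value of x)
 *
 * The TAKEHIRO_IEEE754_HACK paths below use this pattern; note how they
 * keep the MAGIC_INT offset in fi[].i while indexing the correction
 * table, hence the (adj43asm - MAGIC_INT)[fi[].i] reads.
 */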
47 #ifdef TAKEHIRO_IEEE754_HACK
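/*
 * DUFFBLOCKMQ and DUFFBLOCK quantize one spectral line at three
 * neighbouring scalefactors at once: sfpow*_m1, sfpow*_eq and sfpow*_p1
 * are the step-size factors for sf-1, sf and sf+1 (set up in
 * calc_sfb_noise_ave below).  The MQ variant keeps the largest error
 * seen for each of the three steps, the plain variant accumulates
 * squared errors; both are meant to be expanded inside a Duff's-device
 * style unrolled loop, one spectral line per expansion.
 */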
49 #define DUFFBLOCKMQ() do { \
50 xp = xr34[0] * sfpow34_p1; \
51 xe = xr34[0] * sfpow34_eq; \
52 xm = xr34[0] * sfpow34_m1; \
61 fi[0].f = xp + (adj43asm - MAGIC_INT)[fi[0].i]; \
62 fi[1].f = xe + (adj43asm - MAGIC_INT)[fi[1].i]; \
63 fi[2].f = xm + (adj43asm - MAGIC_INT)[fi[2].i]; \
64 fi[0].i -= MAGIC_INT; \
65 fi[1].i -= MAGIC_INT; \
66 fi[2].i -= MAGIC_INT; \
68 xp = x0 - pow43[fi[0].i] * sfpow_p1; \
69 xe = x0 - pow43[fi[1].i] * sfpow_eq; \
70 xm = x0 - pow43[fi[2].i] * sfpow_m1; \
74 xfsf_eq = Max(xfsf_eq, xe); \
75 xfsf_p1 = Max(xfsf_p1, xp); \
76 xfsf_m1 = Max(xfsf_m1, xm); \
81 #define DUFFBLOCK() do { \
82 xp = xr34[0] * sfpow34_p1; \
83 xe = xr34[0] * sfpow34_eq; \
84 xm = xr34[0] * sfpow34_m1; \
93 fi[0].f = xp + (adj43asm - MAGIC_INT)[fi[0].i]; \
94 fi[1].f = xe + (adj43asm - MAGIC_INT)[fi[1].i]; \
95 fi[2].f = xm + (adj43asm - MAGIC_INT)[fi[2].i]; \
96 fi[0].i -= MAGIC_INT; \
97 fi[1].i -= MAGIC_INT; \
98 fi[2].i -= MAGIC_INT; \
100 xp = x0 - pow43[fi[0].i] * sfpow_p1; \
101 xe = x0 - pow43[fi[1].i] * sfpow_eq; \
102 xm = x0 - pow43[fi[2].i] * sfpow_m1; \
103 xfsf_p1 += xp * xp; \
104 xfsf_eq += xe * xe; \
105 xfsf_m1 += xm * xm; \
112 /*********************************************************************
113 * XRPOW_FTOI is a macro to convert floats to ints.
114 * if XRPOW_FTOI(x) = nearest_int(x), then QUANTFAC(x)=adj43asm[x]
117 * if XRPOW_FTOI(x) = floor(x), then QUANTFAC(x)=adj43[x]
119 *********************************************************************/
120 # define QUANTFAC(rx) adj43[rx]
121 # define ROUNDFAC 0.4054
122 # define XRPOW_FTOI(src,dest) ((dest) = (int)(src))
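/*
 * Note: with XRPOW_FTOI being a plain (int) cast the conversion
 * truncates (floor for the non-negative xr34 values used here), which is
 * why QUANTFAC maps to adj43[] as described above.  ROUNDFAC 0.4054 is
 * the usual constant of the ISO reference quantizer: rounding with
 * nint(x - 0.0946) amounts to (int)(x + 0.4054) for x >= 0.
 */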
127 /* caution: a[] will be resorted!!
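 * select_kth() below is a Hoare-partition quickselect: it returns the
 * k-th smallest element of a[0..N-1] in (typically) linear time and
 * partially reorders a[] while doing so, hence the warning above.  It is
 * used with k = bw*13/16 in calc_sfb_noise_mq(), and an integer variant
 * (select_kth_int) picks the median scalefactor in the smoothing code of
 * short_block_sf()/long_block_sf().
 */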
129 FLOAT8 select_kth(FLOAT8 a[], int N, int k)
141 while (a[++i] < v) /*empty*/;
142 while (a[--j] > v) /*empty*/;
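/*
 * calc_sfb_noise: quantize one scalefactor band (bw lines of xr34) at
 * scalefactor sf and accumulate the squared quantization error against
 * the original spectrum xr.  sf is biased by +210 so POW20()/IPOW20()
 * can be indexed like global_gain (0..255); -1 is returned when some
 * quantized value would exceed IXMAX_VAL at this step size.
 */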
166 calc_sfb_noise(const FLOAT8 *xr, const FLOAT8 *xr34, int bw, int sf)
172 FLOAT8 sfpow, sfpow34;
174 sfpow = POW20(sf+210); /*pow(2.0,sf/4.0); */
175 sfpow34 = IPOW20(sf+210); /*pow(sfpow,-3.0/4.0);*/
177 for ( j = 0; j < bw ; ++j ) {
178 if ( xr34[j]*sfpow34 > IXMAX_VAL ) return -1;
180 #ifdef TAKEHIRO_IEEE754_HACK
181 temp = sfpow34*xr34[j];
184 fi.f = temp + (adj43asm - MAGIC_INT)[fi.i];
187 temp = xr34[j]*sfpow34;
188 XRPOW_FTOI(temp, fi.i);
189 XRPOW_FTOI(temp + QUANTFAC(fi.i), fi.i);
192 temp = fabs(xr[j])- pow43[fi.i]*sfpow;
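/*
 * calc_sfb_noise_mq: same per-line error as above, but collected in
 * scratch[] and condensed into a "max noise" style figure: for mq == 1
 * the result is bw times the 13/16-quantile of the per-line errors
 * (via select_kth); for other mq settings the code below appears to
 * combine the largest errors instead (xfsfm tracks the maximum).
 */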
202 calc_sfb_noise_mq( const FLOAT8 *xr, const FLOAT8 *xr34, int bw, int sf,
203 int mq, FLOAT8 *scratch )
208 FLOAT8 sfpow, sfpow34, xfsfm = 0, xfsf = 0;
210 sfpow = POW20(sf+210); /*pow(2.0,sf/4.0); */
211 sfpow34 = IPOW20(sf+210); /*pow(sfpow,-3.0/4.0);*/
213 for ( j = 0; j < bw; ++j ) {
214 if ( xr34[j]*sfpow34 > IXMAX_VAL ) return -1;
216 #ifdef TAKEHIRO_IEEE754_HACK
217 temp = sfpow34*xr34[j];
220 fi.f = temp + (adj43asm - MAGIC_INT)[fi.i];
223 temp = xr34[j]*sfpow34;
224 XRPOW_FTOI(temp, fi.i);
225 XRPOW_FTOI(temp + QUANTFAC(fi.i), fi.i);
228 temp = fabs(xr[j])- pow43[fi.i]*sfpow;
232 if ( xfsfm < temp ) xfsfm = temp;
235 if ( mq == 1 ) return bw*select_kth(scratch,bw,bw*13/16);
238 for ( k = 1, j = 0; j < bw; ++j ) {
239 if ( scratch[j] > xfsf ) {
251 calc_sfb_noise_ave(const FLOAT8 *xr, const FLOAT8 *xr34, int bw, int sf)
256 #ifdef TAKEHIRO_IEEE754_HACK
260 fi_union *fi = (fi_union *)xx;
261 FLOAT8 sfpow34_eq, sfpow34_p1, sfpow34_m1;
262 FLOAT8 sfpow_eq, sfpow_p1, sfpow_m1;
263 FLOAT8 xfsf_eq = 0, xfsf_p1 = 0, xfsf_m1 = 0;
265 sfpow_eq = POW20(sf + 210); /*pow(2.0,sf/4.0); */
266 sfpow_m1 = sfpow_eq * .8408964153; /* pow(2,(sf-1)/4.0) */
267 sfpow_p1 = sfpow_eq * 1.189207115;
269 sfpow34_eq = IPOW20(sf + 210); /*pow(sfpow,-3.0/4.0);*/
270 sfpow34_m1 = sfpow34_eq * 1.13878863476; /* .84089 ^ -3/4 */
271 sfpow34_p1 = sfpow34_eq * 0.878126080187;
273 #ifdef TAKEHIRO_IEEE754_HACK
275 * loop unrolled into "Duff's Device". Robert Hegemann
280 case 0: do{ DUFFBLOCK();
283 case 1: DUFFBLOCK(); } while (--j);
286 for (j = 0; j < bw; ++j) {
288 if (xr34[j]*sfpow34_m1 > IXMAX_VAL) return -1;
290 xe = xr34[j]*sfpow34_eq;
291 XRPOW_FTOI(xe, fi[0].i);
292 XRPOW_FTOI(xe + QUANTFAC(fi[0].i), fi[0].i);
293 xe = fabs(xr[j])- pow43[fi[0].i]*sfpow_eq;
296 xp = xr34[j]*sfpow34_p1;
297 XRPOW_FTOI(xp, fi[0].i);
298 XRPOW_FTOI(xp + QUANTFAC(fi[0].i), fi[0].i);
299 xp = fabs(xr[j])- pow43[fi[0].i]*sfpow_p1;
302 xm = xr34[j]*sfpow34_m1;
303 XRPOW_FTOI(xm, fi[0].i);
304 XRPOW_FTOI(xm + QUANTFAC(fi[0].i), fi[0].i);
305 xm = fabs(xr[j])- pow43[fi[0].i]*sfpow_m1;
314 if ( xfsf_eq < xfsf_p1 ) xfsf_eq = xfsf_p1;
315 if ( xfsf_eq < xfsf_m1 ) xfsf_eq = xfsf_m1;
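/*
 * find_scalefac / find_scalefac_mq / find_scalefac_ave: binary search
 * for the largest scalefactor whose band noise, as measured by the
 * matching calc_sfb_noise_* routine, stays below the allowed distortion
 * l3_xmin.  Seven probes (presumably halving delsf each time) narrow the
 * -209..45 range; sf_ok remembers the best distortion-free value, and
 * the final return falls back to the last probe when no distortion-free
 * scalefactor <= 45 was found.
 */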
322 find_scalefac( const FLOAT8 *xr, const FLOAT8 *xr34, FLOAT8 l3_xmin, int bw )
325 int i, sf, sf_ok, delsf;
327 /* search will range from sf: -209 -> 45 */
332 for (i = 0; i < 7; ++i) {
334 xfsf = calc_sfb_noise( xr, xr34, bw, sf );
337 /* scalefactors too small */
341 if (xfsf > l3_xmin) {
342 /* distortion. try a smaller scalefactor */
352 /* returning a scalefac without distortion, if possible
354 return sf_ok > 45 ? sf : sf_ok;
358 find_scalefac_mq( const FLOAT8 *xr, const FLOAT8 *xr34, FLOAT8 l3_xmin,
359 int bw, int mq, FLOAT8 *scratch )
362 int i, sf, sf_ok, delsf;
364 /* search will range from sf: -209 -> 45 */
369 for (i = 0; i < 7; ++i) {
371 xfsf = calc_sfb_noise_mq( xr, xr34, bw, sf, mq, scratch );
374 /* scalefactors too small */
378 if (xfsf > l3_xmin) {
379 /* distortion. try a smaller scalefactor */
389 /* returning a scalefac without distortion, if possible
391 return sf_ok > 45 ? sf : sf_ok;
395 find_scalefac_ave( const FLOAT8 *xr, const FLOAT8 *xr34, FLOAT8 l3_xmin, int bw )
398 int i, sf, sf_ok, delsf;
400 /* search will range from sf: -209 -> 45 */
405 for (i = 0; i < 7; ++i) {
407 xfsf = calc_sfb_noise_ave( xr, xr34, bw, sf );
410 /* scalefactors too small */
414 if (xfsf > l3_xmin) {
415 /* distortion. try a smaller scalefactor */
425 /* returning a scalefac without distortion, if possible
427 return sf_ok > 45 ? sf : sf_ok;
432 * Robert Hegemann 2001-05-01
433 * calculates quantization step size determined by allowed masking
436 calc_scalefac( FLOAT8 l3_xmin, int bw, FLOAT8 preset_tune )
438 FLOAT8 const c = (preset_tune > 0 ? preset_tune
439 : 5.799142446); // 10 * 10^(2/3) * log10(4/3)
440 return (int)(c*log10(l3_xmin/bw)-.5);
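/*
 * Unlike the iterative find_scalefac_* searches above, this is a
 * closed-form guess: the allowed distortion per line (l3_xmin/bw) is
 * mapped straight to a scalefactor via c*log10(), with c taken from the
 * preset tuning when available.  It is one of the selectable
 * scalefac_criteria modes in short_block_sf()/long_block_sf() below.
 */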
445 static const int max_range_short[SBMAX_s] =
446 {15, 15, 15, 15, 15, 15, 7, 7, 7, 7, 7, 7, 0 };
448 static const int max_range_long[SBMAX_l] =
449 {15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0};
451 static const int max_range_long_lsf_pretab[SBMAX_l] =
452 { 7,7,7,7,7,7, 3,3,3,3,3, 0,0,0,0, 0,0,0, 0,0,0, 0 };
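/*
 * These limits mirror the scalefactor field widths in the side info:
 * the lower bands get 4-bit scalefactors (max 15), the upper bands 3-bit
 * ones (max 7), and the top band (sfb21 resp. sfb12) carries no
 * scalefactor at all, hence the trailing 0.  The *_lsf_pretab variant is
 * tighter, presumably to leave room for the pretab values that
 * compute_scalefacs_long_lsf() folds into sf[] when preflag is set.
 */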
457 sfb=0..5 scalefac < 16
460 ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
461 ol_sf = (cod_info->global_gain-210.0);
462 ol_sf -= 8*cod_info->subblock_gain[i];
463 ol_sf -= ifqstep*scalefac[gr][ch].s[sfb][i];
467 compute_scalefacs_short(
468 int sf[][3], const gr_info *cod_info, int scalefac[][3], int *sbg )
470 const int maxrange1 = 15, maxrange2 = 7;
471 int maxrange, maxover = 0;
473 int ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
475 for (i = 0; i < 3; ++i) {
476 int maxsf1 = 0, maxsf2 = 0, minsf = 1000;
477 /* see if we should use subblock gain */
478 for (sfb = 0; sfb < 6; ++sfb) { /* part 1 */
479 if (maxsf1 < -sf[sfb][i]) maxsf1 = -sf[sfb][i];
480 if (minsf > -sf[sfb][i]) minsf = -sf[sfb][i];
482 for (; sfb < SBPSY_s; ++sfb) { /* part 2 */
483 if (maxsf2 < -sf[sfb][i]) maxsf2 = -sf[sfb][i];
484 if (minsf > -sf[sfb][i]) minsf = -sf[sfb][i];
487 /* boost subblock gain as little as possible so we can
488 * reach maxsf1 with scalefactors
491 maxsf1 = Max (maxsf1-maxrange1*ifqstep, maxsf2-maxrange2*ifqstep);
493 if (minsf > 0) sbg[i] = floor (.125*minsf + .001);
494 if (maxsf1 > 0) sbg[i] = Max (sbg[i], (maxsf1/8 + (maxsf1 % 8 != 0)));
495 if (sbg[i] > 7) sbg[i] = 7;
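        /* note: subblock_gain is a 3 bit field in the side info, hence the clamp to 7 */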
497 for (sfb = 0; sfb < SBPSY_s; ++sfb) {
498 sf[sfb][i] += 8*sbg[i];
500 if (sf[sfb][i] < 0) {
501 maxrange = sfb < 6 ? maxrange1 : maxrange2;
504 = -sf[sfb][i]/ifqstep + (-sf[sfb][i]%ifqstep != 0);
506 if (scalefac[sfb][i] > maxrange)
507 scalefac[sfb][i] = maxrange;
509 if (maxover < -(sf[sfb][i] + scalefac[sfb][i]*ifqstep))
510 maxover = -(sf[sfb][i] + scalefac[sfb][i]*ifqstep);
513 scalefac[sfb][i] = 0;
522 ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
523 ol_sf = (cod_info->global_gain-210.0);
524 ol_sf -= ifqstep*scalefac[gr][ch].l[sfb];
525 if (cod_info->preflag && sfb>=11)
526 ol_sf -= ifqstep*pretab[sfb];
529 compute_scalefacs_long_lsf( int *sf, const gr_info * cod_info, int *scalefac )
531 const int * max_range = max_range_long;
532 int ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
536 if (cod_info->preflag) {
537 max_range = max_range_long_lsf_pretab;
538 for (sfb = 11; sfb < SBPSY_l; ++sfb)
539 sf[sfb] += pretab[sfb] * ifqstep;
543 for (sfb = 0; sfb < SBPSY_l; ++sfb) {
546 /* ifqstep*scalefac >= -sf[sfb], so round UP */
547 scalefac[sfb] = -sf[sfb]/ifqstep + (-sf[sfb] % ifqstep != 0);
548 if (scalefac[sfb] > max_range[sfb])
549 scalefac[sfb] = max_range[sfb];
551 /* sf[sfb] should now be positive: */
552 if (-(sf[sfb] + scalefac[sfb]*ifqstep) > maxover) {
553 maxover = -(sf[sfb] + scalefac[sfb]*ifqstep);
557 scalefac[sfb] = 0; /* sfb21 */
567 ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
568 ol_sf = (cod_info->global_gain-210.0);
569 ol_sf -= ifqstep*scalefac[gr][ch].l[sfb];
570 if (cod_info->preflag && sfb>=11)
571 ol_sf -= ifqstep*pretab[sfb];
574 compute_scalefacs_long( int *sf, const gr_info * cod_info, int *scalefac )
576 int ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
580 if (cod_info->preflag) {
581 for (sfb = 11; sfb < SBPSY_l; ++sfb)
582 sf[sfb] += pretab[sfb] * ifqstep;
586 for (sfb = 0; sfb < SBPSY_l; ++sfb) {
589 /* ifqstep*scalefac >= -sf[sfb], so round UP */
590 scalefac[sfb] = -sf[sfb]/ifqstep + (-sf[sfb] % ifqstep != 0);
591 if (scalefac[sfb] > max_range_long[sfb])
592 scalefac[sfb] = max_range_long[sfb];
594 /* sf[sfb] should now be positive: */
595 if (-(sf[sfb] + scalefac[sfb]*ifqstep) > maxover) {
596 maxover = -(sf[sfb] + scalefac[sfb]*ifqstep);
600 scalefac[sfb] = 0; /* sfb21 */
612 /************************************************************************
614 * quantize and encode with the given scalefacs and global gain
616 * compute scalefactors, l3_enc, and return number of bits needed to encode
619 ************************************************************************/
622 VBR_quantize_granule(
623 lame_internal_flags * gfc,
626 III_scalefac_t * scalefac,
630 III_side_info_t * l3_side = & gfc->l3_side;
631 gr_info * cod_info = & l3_side->gr[gr].ch[ch].tt;
633 /* encode scalefacs */
635 status = scale_bitcount( scalefac, cod_info );
637 status = scale_bitcount_lsf( gfc, scalefac, cod_info );
644 cod_info->part2_3_length = count_bits(gfc,l3_enc,xr34,cod_info);
645 if (cod_info->part2_3_length >= LARGE_BITS) return -2;
646 cod_info->part2_3_length += cod_info->part2_length;
649 if (gfc->use_best_huffman == 1) {
650 best_huffman_divide(gfc, cod_info, l3_enc);
657 /***********************************************************************
659 * calc_short_block_vbr_sf()
660 * calc_long_block_vbr_sf()
662 * Mark Taylor 2000-??-??
663 * Robert Hegemann 2000-10-25 made functions of it
665 ***********************************************************************/
666 static const int MAX_SF_DELTA = 4;
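/*
 * short_block_sf() / long_block_sf() first compute one scalefactor per
 * band with the selected scalefac_criteria, then optionally smooth the
 * result (gfc->VBR->smooth): outliers are clamped to the median plus the
 * median-to-minimum distance, and a band may not exceed either neighbour
 * by more than MAX_SF_DELTA.  The overall min/max are returned through
 * vbrmin/vbrmax for the later split into global_gain and scalefacs.
 */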
670 const lame_internal_flags * gfc,
671 const III_psy_xmin * l3_xmin,
672 const FLOAT8 * xr34_orig,
674 III_scalefac_t * vbrsf,
680 int sf_cache[SBMAX_s];
681 int scalefac_criteria;
683 if (gfc->presetTune.use) {
684 /* map experimentalX settings to internal selections */
685 static char const map[] = {2,1,0,3,6};
686 scalefac_criteria = map[gfc->presetTune.quantcomp_current];
689 scalefac_criteria = gfc->VBR->quality;
692 for (j = 0, sfb = 0; sfb < SBMAX_s; ++sfb) {
693 for (b = 0; b < 3; ++b) {
694 const int start = gfc->scalefac_band.s[ sfb ];
695 const int end = gfc->scalefac_band.s[ sfb+1 ];
696 const int width = end - start;
698 switch( scalefac_criteria ) {
702 vbrsf->s[sfb][b] = calc_scalefac( l3_xmin->s[sfb][b], width,
703 gfc->presetTune.quantcomp_adjust_mtrh );
708 /* the faster and sloppier mode to use at lower quality
710 vbrsf->s[sfb][b] = find_scalefac (&xr34[j], &xr34_orig[j],
711 l3_xmin->s[sfb][b], width);
714 /* the slower and better mode to use at higher quality
716 vbrsf->s[sfb][b] = find_scalefac_ave (&xr34[j], &xr34_orig[j],
717 l3_xmin->s[sfb][b], width);
720 /* maxnoise mode to use at higher quality
722 vbrsf->s[sfb][b] = find_scalefac_mq (&xr34[j], &xr34_orig[j],
723 l3_xmin->s[sfb][b], width,
724 1, gfc->VBR->scratch);
727 /* maxnoise mode to use at higher quality
729 vbrsf->s[sfb][b] = find_scalefac_mq (&xr34[j], &xr34_orig[j],
730 l3_xmin->s[sfb][b], width,
731 0, gfc->VBR->scratch);
741 for (b = 0; b < 3; ++b) {
745 switch( gfc->VBR->smooth ) {
751 /* make working copy, get min value, select_kth_int will reorder!
753 for (vbrmn = +10000, sfb = 0; sfb < SBMAX_s; ++sfb) {
754 sf_cache[sfb] = vbrsf->s[sfb][b];
755 if (vbrmn > vbrsf->s[sfb][b])
756 vbrmn = vbrsf->s[sfb][b];
759 /* find median value, take it as mean
761 vbrmean = select_kth_int (sf_cache, SBMAX_s, (SBMAX_s+1)/2);
765 for (sfb = 0; sfb < SBMAX_s; ++sfb) {
766 if (vbrsf->s[sfb][b] > vbrmean+(vbrmean-vbrmn))
767 vbrsf->s[sfb][b] = vbrmean+(vbrmean-vbrmn);
772 for (sfb = 0; sfb < SBMAX_s; ++sfb) {
774 if (vbrsf->s[sfb][b] > vbrsf->s[sfb-1][b]+MAX_SF_DELTA)
775 vbrsf->s[sfb][b] = vbrsf->s[sfb-1][b]+MAX_SF_DELTA;
777 if (vbrsf->s[sfb][b] > vbrsf->s[sfb+1][b]+MAX_SF_DELTA)
778 vbrsf->s[sfb][b] = vbrsf->s[sfb+1][b]+MAX_SF_DELTA;
785 for (sfb = 0; sfb < SBMAX_s; ++sfb) {
786 if (*vbrmax < vbrsf->s[sfb][b])
787 *vbrmax = vbrsf->s[sfb][b];
788 if (*vbrmin > vbrsf->s[sfb][b])
789 *vbrmin = vbrsf->s[sfb][b];
795 /* a variation for vbr-mtrh */
798 const lame_internal_flags * gfc,
799 const III_psy_xmin * l3_xmin,
800 const FLOAT8 * xr34_orig,
802 III_scalefac_t * vbrsf,
808 int sf_cache[SBMAX_l];
809 int scalefac_criteria;
811 if (gfc->presetTune.use) {
812 /* map experimentalX settings to internal selections */
813 static char const map[] = {2,1,0,3,6};
814 scalefac_criteria = map[gfc->presetTune.quantcomp_current];
817 scalefac_criteria = gfc->VBR->quality;
820 for (sfb = 0; sfb < SBMAX_l; ++sfb) {
821 const int start = gfc->scalefac_band.l[ sfb ];
822 const int end = gfc->scalefac_band.l[ sfb+1 ];
823 const int width = end - start;
825 switch( scalefac_criteria ) {
829 vbrsf->l[sfb] = calc_scalefac( l3_xmin->l[sfb], width,
830 gfc->presetTune.quantcomp_adjust_mtrh );
835 /* the faster and sloppier mode to use at lower quality
837 vbrsf->l[sfb] = find_scalefac (&xr34[start], &xr34_orig[start],
838 l3_xmin->l[sfb], width);
841 /* the slower and better mode to use at higher quality
843 vbrsf->l[sfb] = find_scalefac_ave (&xr34[start], &xr34_orig[start],
844 l3_xmin->l[sfb], width);
847 /* maxnoise mode to use at higher quality
849 vbrsf->l[sfb] = find_scalefac_mq (&xr34[start], &xr34_orig[start],
850 l3_xmin->l[sfb], width,
851 1, gfc->VBR->scratch);
854 /* maxnoise mode to use at higher quality
856 vbrsf->l[sfb] = find_scalefac_mq (&xr34[start], &xr34_orig[start],
857 l3_xmin->l[sfb], width,
858 0, gfc->VBR->scratch);
863 switch( gfc->VBR->smooth ) {
869 /* make working copy, get min value, select_kth_int will reorder!
871 for (vbrmn = +10000, sfb = 0; sfb < SBMAX_l; ++sfb) {
872 sf_cache[sfb] = vbrsf->l[sfb];
873 if (vbrmn > vbrsf->l[sfb])
874 vbrmn = vbrsf->l[sfb];
876 /* find median value, take it as mean
878 vbrmean = select_kth_int (sf_cache, SBMAX_l, (SBMAX_l+1)/2);
882 for (sfb = 0; sfb < SBMAX_l; ++sfb) {
883 if (vbrsf->l[sfb] > vbrmean+(vbrmean-vbrmn))
884 vbrsf->l[sfb] = vbrmean+(vbrmean-vbrmn);
889 for (sfb = 0; sfb < SBMAX_l; ++sfb) {
891 if (vbrsf->l[sfb] > vbrsf->l[sfb-1]+MAX_SF_DELTA)
892 vbrsf->l[sfb] = vbrsf->l[sfb-1]+MAX_SF_DELTA;
894 if (vbrsf->l[sfb] > vbrsf->l[sfb+1]+MAX_SF_DELTA)
895 vbrsf->l[sfb] = vbrsf->l[sfb+1]+MAX_SF_DELTA;
902 for (*vbrmin = +10000, *vbrmax = -10000, sfb = 0; sfb < SBMAX_l; ++sfb) {
903 if (*vbrmax < vbrsf->l[sfb])
904 *vbrmax = vbrsf->l[sfb];
905 if (*vbrmin > vbrsf->l[sfb])
906 *vbrmin = vbrsf->l[sfb];
912 /******************************************************************
914 * short block scalefacs
916 ******************************************************************/
919 short_block_scalefacs (
920 const lame_internal_flags * gfc,
922 III_scalefac_t * scalefac,
923 III_scalefac_t * vbrsf,
927 int maxover, maxover0, maxover1, mover;
930 int vbrmax = *VBRmax;
934 maxsfb = gfc->sfb21_extra ? SBMAX_s : SBPSY_s;
935 for (sfb = 0; sfb < maxsfb; ++sfb) {
936 for (b = 0; b < 3; ++b) {
937 v0 = (vbrmax - vbrsf->s[sfb][b]) - (4*14 + 2*max_range_short[sfb]);
938 v1 = (vbrmax - vbrsf->s[sfb][b]) - (4*14 + 4*max_range_short[sfb]);
946 if ((gfc->noise_shaping == 2) && (gfc->presetTune.use && !(gfc->presetTune.athadjust_safe_noiseshaping || gfc->ATH->adjust < 1.0)))
947 /* allow scalefac_scale=1 */
948 mover = Min (maxover0, maxover1);
957 cod_info->scalefac_scale = 0;
958 else if (maxover1 == 0)
959 cod_info->scalefac_scale = 1;
961 /* sf = (cod_info->global_gain-210.0) */
962 cod_info->global_gain = vbrmax + 210;
963 assert(cod_info->global_gain < 256);
965 if (cod_info->global_gain < 0) {
966 cod_info->global_gain = 0;
969 if (cod_info->global_gain > 255) {
970 cod_info->global_gain = 255;
972 for (sfb = 0; sfb < SBMAX_s; ++sfb) {
973 for (b = 0; b < 3; ++b) {
974 vbrsf->s[sfb][b] -= vbrmax;
977 maxover = compute_scalefacs_short (vbrsf->s, cod_info, scalefac->s,
978 cod_info->subblock_gain);
980 assert (maxover <= 0);
982 /* adjust global_gain so at least 1 subblock gain = 0 */
983 minsfb = 999; /* prepare for minimum search */
984 for (b = 0; b < 3; ++b)
985 if (minsfb > cod_info->subblock_gain[b])
986 minsfb = cod_info->subblock_gain[b];
988 if (minsfb > cod_info->global_gain/8)
989 minsfb = cod_info->global_gain/8;
992 cod_info->global_gain -= 8*minsfb;
994 for (b = 0; b < 3; ++b)
995 cod_info->subblock_gain[b] -= minsfb;
1002 /******************************************************************
1004 * long block scalefacs
1006 ******************************************************************/
1009 long_block_scalefacs (
1010 const lame_internal_flags * gfc,
1012 III_scalefac_t * scalefac,
1013 III_scalefac_t * vbrsf,
1016 const int * max_rangep;
1018 int maxover, maxover0, maxover1, maxover0p, maxover1p, mover;
1019 int v0, v1, v0p, v1p;
1020 int vbrmax = *VBRmax;
1022 max_rangep = gfc->is_mpeg1 ? max_range_long : max_range_long_lsf_pretab;
1026 maxover0p = 0; /* pretab */
1027 maxover1p = 0; /* pretab */
1029 maxsfb = gfc->sfb21_extra ? SBMAX_l : SBPSY_l;
1030 for ( sfb = 0; sfb < maxsfb; ++sfb ) {
1031 v0 = (vbrmax - vbrsf->l[sfb]) - 2*max_range_long[sfb];
1032 v1 = (vbrmax - vbrsf->l[sfb]) - 4*max_range_long[sfb];
1033 v0p = (vbrmax - vbrsf->l[sfb]) - 2*(max_rangep[sfb]+pretab[sfb]);
1034 v1p = (vbrmax - vbrsf->l[sfb]) - 4*(max_rangep[sfb]+pretab[sfb]);
1039 if (maxover0p < v0p)
1041 if (maxover1p < v1p)
1045 mover = Min (maxover0, maxover0p);
1046 if ((gfc->noise_shaping == 2) && (gfc->presetTune.use && !(gfc->presetTune.athadjust_safe_noiseshaping || gfc->ATH->adjust < 1.0))) {
1047 /* allow scalefac_scale=1 */
1048 mover = Min (mover, maxover1);
1049 mover = Min (mover, maxover1p);
1058 if (maxover0 <= 0) {
1059 cod_info->scalefac_scale = 0;
1060 cod_info->preflag = 0;
1062 } else if (maxover0p <= 0) {
1063 cod_info->scalefac_scale = 0;
1064 cod_info->preflag = 1;
1065 vbrmax -= maxover0p;
1066 } else if (maxover1 == 0) {
1067 cod_info->scalefac_scale = 1;
1068 cod_info->preflag = 0;
1069 } else if (maxover1p == 0) {
1070 cod_info->scalefac_scale = 1;
1071 cod_info->preflag = 1;
1073 assert(0); /* this should not happen */
1076 /* sf = (cod_info->global_gain-210.0) */
1077 cod_info->global_gain = vbrmax + 210;
1078 assert (cod_info->global_gain < 256);
1080 if (cod_info->global_gain < 0) {
1081 cod_info->global_gain = 0;
1084 if (cod_info->global_gain > 255)
1085 cod_info->global_gain = 255;
1087 for (sfb = 0; sfb < SBMAX_l; ++sfb)
1088 vbrsf->l[sfb] -= vbrmax;
1090 if ( gfc->is_mpeg1 == 1 )
1091 maxover = compute_scalefacs_long (vbrsf->l, cod_info, scalefac->l);
1093 maxover = compute_scalefacs_long_lsf (vbrsf->l, cod_info, scalefac->l);
1095 assert (maxover <= 0);
1102 /***********************************************************************
1104 * quantize xr34 based on scalefactors
1106 * calc_short_block_xr34
1107 * calc_long_block_xr34
1109 * Mark Taylor 2000-??-??
1110 * Robert Hegemann 2000-10-20 made functions of them
1112 ***********************************************************************/
1116 const lame_internal_flags * gfc,
1117 const gr_info * cod_info,
1118 const III_scalefac_t * scalefac,
1119 const FLOAT8 * xr34_orig,
1123 int ifac, ifqstep, start, end;
1126 /* even though there is no scalefactor for sfb12,
1127 * subblock gain affects the upper frequencies too; that's why
1128 * we have to go up to SBMAX_s
1130 ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
1131 for ( j = 0, sfb = 0; sfb < SBMAX_s; ++sfb ) {
1132 start = gfc->scalefac_band.s[ sfb ];
1133 end = gfc->scalefac_band.s[ sfb+1 ];
1134 for (b = 0; b < 3; ++b) {
1135 ifac = 8*cod_info->subblock_gain[b]+ifqstep*scalefac->s[sfb][b];
1137 if ( ifac == 0 ) { /* just copy */
1138 l = (end-start+7) / 8;
1139 switch ((end-start) % 8) {
1141 case 0: do{ xr34[j] = xr34_orig[j]; ++j;
1142 case 7: xr34[j] = xr34_orig[j]; ++j;
1143 case 6: xr34[j] = xr34_orig[j]; ++j;
1144 case 5: xr34[j] = xr34_orig[j]; ++j;
1145 case 4: xr34[j] = xr34_orig[j]; ++j;
1146 case 3: xr34[j] = xr34_orig[j]; ++j;
1147 case 2: xr34[j] = xr34_orig[j]; ++j;
1148 case 1: xr34[j] = xr34_orig[j]; ++j; } while (--l);
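            /* A non-zero ifac rescales the band in the 3/4-power domain:
             * lowering the band's reconstruction gain by 2^(ifac/4) means
             * the quantizer input |xr|^(3/4) has to grow by 2^(3*ifac/16),
             * i.e. pow(2.0, 0.1875*ifac); IIPOW20_ is presumably just a
             * precomputed table of the same factor.  The global_gain part
             * of the scaling is applied later, during quantization. */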
1152 if (ifac < Q_MAX-210)
1153 fac = IIPOW20_(ifac);
1155 fac = pow (2.0, 0.1875*ifac);
1158 * loop unrolled into "Duff's Device". Robert Hegemann
1160 l = (end-start+7) / 8;
1161 switch ((end-start) % 8) {
1163 case 0: do{ xr34[j] = xr34_orig[j]*fac; ++j;
1164 case 7: xr34[j] = xr34_orig[j]*fac; ++j;
1165 case 6: xr34[j] = xr34_orig[j]*fac; ++j;
1166 case 5: xr34[j] = xr34_orig[j]*fac; ++j;
1167 case 4: xr34[j] = xr34_orig[j]*fac; ++j;
1168 case 3: xr34[j] = xr34_orig[j]*fac; ++j;
1169 case 2: xr34[j] = xr34_orig[j]*fac; ++j;
1170 case 1: xr34[j] = xr34_orig[j]*fac; ++j; } while (--l);
1180 const lame_internal_flags * gfc,
1181 const gr_info * cod_info,
1182 const III_scalefac_t * scalefac,
1183 const FLOAT8 * xr34_orig,
1187 int ifac, ifqstep, start, end;
1190 ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
1191 for ( sfb = 0; sfb < SBMAX_l; ++sfb ) {
1193 ifac = ifqstep*scalefac->l[sfb];
1194 if (cod_info->preflag)
1195 ifac += ifqstep*pretab[sfb];
1197 start = gfc->scalefac_band.l[ sfb ];
1198 end = gfc->scalefac_band.l[ sfb+1 ];
1200 if ( ifac == 0 ) { /* just copy */
1202 l = (end-start+7) / 8;
1203 switch ((end-start) % 8) {
1205 case 0: do{ xr34[j] = xr34_orig[j]; ++j;
1206 case 7: xr34[j] = xr34_orig[j]; ++j;
1207 case 6: xr34[j] = xr34_orig[j]; ++j;
1208 case 5: xr34[j] = xr34_orig[j]; ++j;
1209 case 4: xr34[j] = xr34_orig[j]; ++j;
1210 case 3: xr34[j] = xr34_orig[j]; ++j;
1211 case 2: xr34[j] = xr34_orig[j]; ++j;
1212 case 1: xr34[j] = xr34_orig[j]; ++j; } while (--l);
1216 if (ifac < Q_MAX-210)
1217 fac = IIPOW20_(ifac);
1219 fac = pow (2.0, 0.1875*ifac);
1222 * loop unrolled into "Duff's Device". Robert Hegemann
1225 l = (end-start+7) / 8;
1226 switch ((end-start) % 8) {
1228 case 0: do{ xr34[j] = xr34_orig[j]*fac; ++j;
1229 case 7: xr34[j] = xr34_orig[j]*fac; ++j;
1230 case 6: xr34[j] = xr34_orig[j]*fac; ++j;
1231 case 5: xr34[j] = xr34_orig[j]*fac; ++j;
1232 case 4: xr34[j] = xr34_orig[j]*fac; ++j;
1233 case 3: xr34[j] = xr34_orig[j]*fac; ++j;
1234 case 2: xr34[j] = xr34_orig[j]*fac; ++j;
1235 case 1: xr34[j] = xr34_orig[j]*fac; ++j; } while (--l);
1246 /************************************************************************
1248 * VBR_noise_shaping2()
1250 * may result in needing too many bits; if so, fall back to a CBR-like search
1252 * Robert Hegemann 2000-10-25
1254 ***********************************************************************/
1257 VBR_noise_shaping2 (
1258 lame_global_flags * gfp,
1264 III_scalefac_t * scalefac,
1265 III_psy_xmin * l3_xmin,
1269 lame_internal_flags *gfc = gfp->internal_flags;
1270 III_scalefac_t vbrsf;
1271 III_scalefac_t vbrsf2;
1274 int shortblock, ret, bits, huffbits;
1275 int vbrmin, vbrmax, vbrmin2, vbrmax2;
1276 int best_huffman = gfc->use_best_huffman;
1279 gfc->use_best_huffman = 0; /* we will do it later */
1281 cod_info = &gfc->l3_side.gr[gr].ch[ch].tt;
1282 shortblock = (cod_info->block_type == SHORT_TYPE);
1285 short_block_sf (gfc, l3_xmin, xr34orig, xr, &vbrsf2, &vbrmin2, &vbrmax2);
1287 long_block_sf (gfc, l3_xmin, xr34orig, xr, &vbrsf2, &vbrmin2, &vbrmax2);
1297 short_block_scalefacs (gfc, cod_info, scalefac, &vbrsf, &vbrmax);
1298 short_block_xr34 (gfc, cod_info, scalefac, xr34orig, xr34);
1300 long_block_scalefacs (gfc, cod_info, scalefac, &vbrsf, &vbrmax);
1301 long_block_xr34 (gfc, cod_info, scalefac, xr34orig, xr34);
1304 ret = VBR_quantize_granule (gfc, xr34, l3_enc, scalefac, gr, ch);
1306 if (vbrmin == vbrmax) break;
1307 else if (cod_info->part2_3_length < minbits) {
1309 vbrmax = vbrmin2 + (vbrmax2-vbrmin2) * count/6;
1312 for (i = 0; i < SBMAX_s; ++i) {
1313 //vbrsf.s[i][0] = vbrmin2 + (vbrsf2.s[i][0]-vbrmin2) * count/6;
1314 //vbrsf.s[i][1] = vbrmin2 + (vbrsf2.s[i][1]-vbrmin2) * count/6;
1315 //vbrsf.s[i][2] = vbrmin2 + (vbrsf2.s[i][2]-vbrmin2) * count/6;
1316 vbrsf.s[i][0] = Min(vbrsf2.s[i][0], vbrmax);
1317 vbrsf.s[i][1] = Min(vbrsf2.s[i][1], vbrmax);
1318 vbrsf.s[i][2] = Min(vbrsf2.s[i][2], vbrmax);
1322 for (i = 0; i < SBMAX_l; ++i) {
1323 //vbrsf.l[i] = vbrmin2 + (vbrsf2.l[i]-vbrmin2) * count/6;
1324 vbrsf.l[i] = Min(vbrsf2.l[i], vbrmax);
1328 else if (cod_info->part2_3_length > maxbits) {
1331 vbrmin = vbrmax2 + (vbrmin2-vbrmax2) * count/6;
1333 for (i = 0; i < SBMAX_s; ++i) {
1334 //vbrsf.s[i][0] = vbrmax2 + (vbrsf2.s[i][0]-vbrmax2) * count/6;
1335 //vbrsf.s[i][1] = vbrmax2 + (vbrsf2.s[i][1]-vbrmax2) * count/6;
1336 //vbrsf.s[i][2] = vbrmax2 + (vbrsf2.s[i][2]-vbrmax2) * count/6;
1337 vbrsf.s[i][0] = Max(vbrsf2.s[i][0], vbrmin);
1338 vbrsf.s[i][1] = Max(vbrsf2.s[i][1], vbrmin);
1339 vbrsf.s[i][2] = Max(vbrsf2.s[i][2], vbrmin);
1343 for (i = 0; i < SBMAX_l; ++i) {
1344 //vbrsf.l[i] = vbrmax2 + (vbrsf2.l[i]-vbrmax2) * count/6;
1345 vbrsf.l[i] = Max(vbrsf2.l[i], vbrmin);
1350 } while(1 && ret != -1);
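    /* The loop above steers the bit count by clamping the per-band
     * scalefactors: when the granule used fewer than minbits the ceiling
     * vbrmax is pulled down (finer quantization, more bits), when it
     * exceeded maxbits the floor vbrmin is pulled up (coarser, fewer
     * bits).  If the [minbits, maxbits] window still cannot be hit, the
     * code below falls back to a CBR-style bin_search_StepSize() /
     * inner_loop() adjustment of global_gain. */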
1353 gfc->use_best_huffman = best_huffman;
1355 if (ret == -1) /* Houston, we have a problem */
1358 if (cod_info->part2_3_length < minbits) {
1359 huffbits = minbits - cod_info->part2_length;
1360 bits = bin_search_StepSize (gfc, cod_info, huffbits,
1361 gfc->OldValue[ch], xr34, l3_enc);
1362 gfc->OldValue[ch] = cod_info->global_gain;
1363 cod_info->part2_3_length = bits + cod_info->part2_length;
1365 if (cod_info->part2_3_length > maxbits) {
1366 huffbits = maxbits - cod_info->part2_length;
1367 if (huffbits < 0) huffbits = 0;
1368 bits = bin_search_StepSize (gfc, cod_info, huffbits,
1369 gfc->OldValue[ch], xr34, l3_enc);
1370 gfc->OldValue[ch] = cod_info->global_gain;
1371 cod_info->part2_3_length = bits;
1372 if (bits > huffbits) {
1373 bits = inner_loop (gfc, cod_info, huffbits, xr34, l3_enc);
1374 cod_info->part2_3_length = bits;
1376 if (bits >= LARGE_BITS) /* Houston, we have a problem */
1378 cod_info->part2_3_length += cod_info->part2_length;
1381 if (cod_info->part2_length >= LARGE_BITS) /* Houston, we have a problem */
1384 assert (cod_info->global_gain < 256);
1394 /************************************************************************
1396 * VBR_noise_shaping()
1398 * compute scalefactors, l3_enc, and return number of bits needed to encode
1400 * return code: 0 scalefactors were found with all noise < masking
1402 * n>0 scalefactors required too many bits. global gain
1403 * was decreased by n
1404 * If n is large, we should probably recompute scalefacs
1405 * with a lower quality.
1407 * n<0 scalefactors used less than minbits.
1408 * global gain was increased by n.
1409 * If n is large, might want to recompute scalefacs
1410 * with a higher quality setting?
1412 ************************************************************************/
1415 lame_global_flags *gfp,
1417 FLOAT8 xr34orig [576],
1419 int digital_silence,
1422 III_scalefac_t *scalefac,
1423 III_psy_xmin *l3_xmin,
1427 lame_internal_flags *gfc=gfp->internal_flags;
1428 III_scalefac_t save_sf;
1429 III_scalefac_t vbrsf;
1434 int global_gain_adjust = 0;
1436 cod_info = &gfc->l3_side.gr[gr].ch[ch].tt;
1437 shortblock = (cod_info->block_type == SHORT_TYPE);
1440 short_block_sf (gfc, l3_xmin, xr34orig, xr, &vbrsf, &vbrmin, &vbrmax);
1442 long_block_sf (gfc, l3_xmin, xr34orig, xr, &vbrsf, &vbrmin, &vbrmax);
1444 /* save a copy of vbrsf, in case we have to recompute scalefacs */
1445 memcpy (&save_sf, &vbrsf, sizeof(III_scalefac_t));
1448 memset (scalefac, 0, sizeof(III_scalefac_t));
1451 short_block_scalefacs (gfc, cod_info, scalefac, &vbrsf, &vbrmax);
1452 short_block_xr34 (gfc, cod_info, scalefac, xr34orig, xr34);
1454 long_block_scalefacs (gfc, cod_info, scalefac, &vbrsf, &vbrmax);
1455 long_block_xr34 (gfc, cod_info, scalefac, xr34orig, xr34);
1457 VBR_quantize_granule (gfc, xr34, l3_enc, scalefac, gr, ch);
1460 /* decrease noise until we use at least minbits
1462 if (cod_info->part2_3_length < minbits) {
1463 if (digital_silence) break;
1464 //if (cod_info->part2_3_length == cod_info->part2_length) break;
1465 if (vbrmax+210 == 0) break;
1467 /* decrease global gain, recompute scale factors */
1469 --global_gain_adjust;
1470 memcpy (&vbrsf, &save_sf, sizeof(III_scalefac_t));
1473 } while (cod_info->part2_3_length < minbits);
1475 /* inject noise until we meet our bit limit
1477 while (cod_info->part2_3_length > Min (maxbits, MAX_BITS)) {
1478 /* increase global gain, keep existing scale factors */
1479 ++cod_info->global_gain;
1480 if (cod_info->global_gain > 255)
1481 ERRORF (gfc,"%ld impossible to encode ??? frame! bits=%d\n",
1482 // gfp->frameNum, cod_info->part2_3_length);
1483 -1, cod_info->part2_3_length);
1484 VBR_quantize_granule (gfc, xr34, l3_enc, scalefac, gr, ch);
1486 ++global_gain_adjust;
1489 return global_gain_adjust;
1495 VBR_quantize(lame_global_flags *gfp,
1496 FLOAT8 pe[2][2], FLOAT8 ms_ener_ratio[2],
1497 FLOAT8 xr[2][2][576], III_psy_ratio ratio[2][2],
1498 int l3_enc[2][2][576],
1499 III_scalefac_t scalefac[2][2])
1501 lame_internal_flags *gfc=gfp->internal_flags;
1502 III_psy_xmin l3_xmin[2][2];
1503 int minbits,maxbits,max_frame_bits,totbits,gr,ch,i,bits_ok;
1504 int bitsPerFrame,mean_bits;
1507 III_side_info_t * l3_side;
1509 int digital_silence[2][2];
1510 FLOAT8 masking_lower_db=0;
1511 FLOAT8 xr34[2][2][576];
1513 qadjust=0; /* start with -1 db quality improvement over quantize.c VBR */
1515 l3_side = &gfc->l3_side;
1517 /* now find out: if the frame can be considered analog silent
1518 * if each granule can be considered digital silent
1519 * and calculate l3_xmin and the fresh xr34 array
1522 assert( gfp->VBR_q <= 9 );
1523 assert( gfp->VBR_q >= 0 );
1525 for (gr = 0; gr < gfc->mode_gr; ++gr) {
1526 /* copy data to be quantized into xr */
1527 if (gfc->mode_ext==MPG_MD_MS_LR) {
1528 ms_convert(xr[gr],xr[gr]);
1530 for (ch = 0; ch < gfc->channels_out; ++ch) {
1531 /* if in the following sections the quality would not be adjusted
1532 * then we would only have to call calc_xmin once here and
1533 * could drop the subsequent calls (rh 2000/07/17)
1536 cod_info = &l3_side->gr[gr].ch[ch].tt;
1537 cod_info->part2_3_length=LARGE_BITS;
1539 if (cod_info->block_type == SHORT_TYPE) {
1540 cod_info->sfb_lmax = 0; /* No sb*/
1541 cod_info->sfb_smin = 0;
1543 /* MPEG 1 doesn't use the last scalefactor band */
1544 cod_info->sfb_lmax = SBPSY_l;
1545 cod_info->sfb_smin = SBPSY_s; /* No sb */
1546 if (cod_info->mixed_block_flag) {
1547 cod_info->sfb_lmax = gfc->is_mpeg1 ? 8 : 6;
1548 cod_info->sfb_smin = 3;
1552 /* quality setting */
1553 masking_lower_db = gfc->VBR->mask_adjust;
1554 if (pe[gr][ch]>750) {
1555 masking_lower_db -= Min(10,4*(pe[gr][ch]-750.)/750.);
1557 gfc->masking_lower = pow(10.0,masking_lower_db/10);
1559 /* masking thresholds */
1560 over_ath = calc_xmin(gfp,xr[gr][ch],&ratio[gr][ch],cod_info,&l3_xmin[gr][ch]);
1562 /* if there are bands with more energy than the ATH
1563 * then we say the frame is not analog silent */
1568 /* if there is no line with more energy than 1e-20
1569 * then this granule is considered to be digital silent
1570 * plus calculation of xr34 */
1571 digital_silence[gr][ch] = 1;
1572 for(i=0;i<576;++i) {
1573 FLOAT8 temp=fabs(xr[gr][ch][i]);
1574 xr34[gr][ch][i]=sqrt(sqrt(temp)*temp);
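                /* sqrt(sqrt(x)*x) == pow(x, 3.0/4.0): xr34 is the spectrum in the
                 * 3/4-power domain the MP3 quantizer works in */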
1575 digital_silence[gr][ch] &= temp < 1E-20;
1581 /* compute minimum allowed bits from minimum allowed bitrate */
1582 if (analog_silence) {
1583 gfc->bitrate_index=1;
1585 gfc->bitrate_index=gfc->VBR_min_bitrate;
1587 getframebits(gfp, &bitsPerFrame, &mean_bits);
1588 minbits = (mean_bits/gfc->channels_out);
1590 /* compute maximum allowed bits from max allowed bitrate */
1591 gfc->bitrate_index=gfc->VBR_max_bitrate;
1592 getframebits(gfp, &bitsPerFrame, &mean_bits);
1593 max_frame_bits = ResvFrameBegin(gfp, l3_side, mean_bits, bitsPerFrame);
1594 maxbits=2.5*(mean_bits/gfc->channels_out);
1597 /* compute a target mean_bits based on compression ratio
1598 * which was set based on VBR_q
1600 int bit_rate = gfp->out_samplerate*16*gfc->channels_out/(1000.0*gfp->compression_ratio);
1601 bitsPerFrame = (bit_rate*gfp->framesize*1000)/gfp->out_samplerate;
1602 mean_bits = (bitsPerFrame - 8*gfc->sideinfo_len) / gfc->mode_gr;
1606 minbits = Max(minbits,125);
1607 minbits=Max(minbits,.40*(mean_bits/gfc->channels_out));
1608 maxbits=Min(maxbits,2.5*(mean_bits/gfc->channels_out));
1617 * loop over all ch,gr, encoding anything with bits > .5*(max_frame_bits/4)
1619 * If a particular granule uses way too many bits, it will be re-encoded
1620 * on the next iteration of the loop (with a lower quality setting).
1621 * But granules which don't use
1622 * too many bits will not be re-encoded.
1624 * minbits: minimum allowed bits for 1 granule 1 channel
1625 * maxbits: maximum allowed bits for 1 granule 1 channel
1626 * max_frame_bits: maximum allowed bits for entire frame
1627 * (max_frame_bits/4) estimate of average bits per granule per channel
1634 for (gr = 0; gr < gfc->mode_gr; ++gr) {
1636 minbits_lr[0]=minbits;
1637 minbits_lr[1]=minbits;
1640 if (gfc->mode_ext==MPG_MD_MS_LR) {
1642 fac = .33*(.5-ms_ener_ratio[gr])/.5;
1645 minbits_lr[0] = (1+fac)*minbits;
1646 minbits_lr[1] = Max(125,(1-fac)*minbits);
1651 for (ch = 0; ch < gfc->channels_out; ++ch) {
1652 int adjusted,shortblock;
1653 cod_info = &l3_side->gr[gr].ch[ch].tt;
1655 /* ENCODE this data first pass, and on future passes unless it uses
1656 * a very small percentage of the max_frame_bits */
1657 if (cod_info->part2_3_length > (max_frame_bits/(2*gfc->channels_out*gfc->mode_gr))) {
1659 shortblock = (cod_info->block_type == SHORT_TYPE);
1661 /* Adjust allowed masking based on quality setting */
1662 if (qadjust!=0 /*|| shortblock*/) {
1663 masking_lower_db = gfc->VBR->mask_adjust + qadjust;
1666 if (shortblock) masking_lower_db -= 4;
1670 masking_lower_db -= Min(10,4*(pe[gr][ch]-750.)/750.);
1671 gfc->masking_lower = pow(10.0,masking_lower_db/10);
1672 calc_xmin( gfp, xr[gr][ch], ratio[gr]+ch, cod_info, l3_xmin[gr]+ch);
1675 /* digital silent granules do not need the full round trip,
1676 * but this can be optimized later on
1678 adjusted = VBR_noise_shaping (gfp,xr[gr][ch],xr34[gr][ch],
1680 digital_silence[gr][ch],
1682 maxbits,scalefac[gr]+ch,
1683 l3_xmin[gr]+ch,gr,ch);
1685 /* global_gain was changed by a large amount to get bits < maxbits */
1686 /* quality is set to high. we could set bits = LARGE_BITS
1687 * to force re-encoding. But most likely the other channels/granules
1688 * will also use too many bits, and the entire frame will
1689 * be > max_frame_bits, forcing re-encoding below.
1691 // cod_info->part2_3_bits = LARGE_BITS;
1694 totbits += cod_info->part2_3_length;
1698 if (totbits>max_frame_bits) {
1700 qadjust += Max(.125,Min(1,(totbits-max_frame_bits)/300.0));
1701 /* adjusting minbits and maxbits is necessary too,
1702 * because lowering quality alone is not enough in rare cases:
1703 * when each granule still needs almost maxbits, it won't fit */
1704 minbits = Max(125,minbits*0.975);
1705 maxbits = Max(minbits,maxbits*0.975);
1706 // DEBUGF("%i totbits>max_frame_bits totbits=%i maxbits=%i \n",gfp->frameNum,totbits,max_frame_bits);
1707 // DEBUGF("next masking_lower_db = %f \n",masking_lower_db + qadjust);
1715 /* find optimal scalefac storage. Can't be done above because it
1716 * might enable scfsi, which breaks the iteration loops */
1718 for (gr = 0; gr < gfc->mode_gr; ++gr) {
1719 for (ch = 0; ch < gfc->channels_out; ++ch) {
1720 best_scalefac_store(gfc, gr, ch, l3_enc, l3_side, scalefac);
1721 totbits += l3_side->gr[gr].ch[ch].tt.part2_3_length;
1728 if (analog_silence && !gfp->VBR_hard_min) {
1729 gfc->bitrate_index = 1;
1731 gfc->bitrate_index = gfc->VBR_min_bitrate;
1733 for( ; gfc->bitrate_index < gfc->VBR_max_bitrate; ++gfc->bitrate_index ) {
1735 getframebits (gfp, &bitsPerFrame, &mean_bits);
1736 maxbits = ResvFrameBegin(gfp, l3_side, mean_bits, bitsPerFrame);
1737 if (totbits <= maxbits) break;
1739 if (gfc->bitrate_index == gfc->VBR_max_bitrate) {
1740 getframebits (gfp, &bitsPerFrame, &mean_bits);
1741 maxbits = ResvFrameBegin(gfp, l3_side, mean_bits, bitsPerFrame);
1744 // DEBUGF("%i total_bits=%i max_frame_bits=%i index=%i \n",gfp->frameNum,totbits,max_frame_bits,gfc->bitrate_index);
1746 for (gr = 0; gr < gfc->mode_gr; ++gr) {
1747 for (ch = 0; ch < gfc->channels_out; ++ch) {
1748 cod_info = &l3_side->gr[gr].ch[ch].tt;
1751 ResvAdjust (gfc, cod_info, l3_side, mean_bits);
1753 /*******************************************************************
1754 * set the sign of l3_enc from the sign of xr
1755 *******************************************************************/
1756 for ( i = 0; i < 576; ++i) {
1757 if (xr[gr][ch][i] < 0) l3_enc[gr][ch][i] *= -1;
1761 ResvFrameEnd (gfc, l3_side, mean_bits);