/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting)        ---*/
/*---                                            compress.c ---*/
/*-------------------------------------------------------------*/

/* ------------------------------------------------------------------
   This file is part of bzip2/libbzip2, a program and library for
   lossless, block-sorting data compression.

   bzip2/libbzip2 version 1.0.5 of 10 December 2007
   Copyright (C) 1996-2007 Julian Seward

   Please read the WARNING, DISCLAIMER and PATENTS sections in the
   README file.

   This program is released under the terms of the license contained
   in the file LICENSE.
   ------------------------------------------------------------------ */

/* CHANGES
    0.9.0    -- original version.
    0.9.0a/b -- no changes in this file.
    0.9.0c   -- changed setting of nGroups in sendMTFValues()
                so as to do a bit better on small files
*/

#include "bzlib_private.h"

ABC_NAMESPACE_IMPL_START

/*---------------------------------------------------*/
/*--- Bit stream I/O                              ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
void BZ2_bsInitWrite ( EState* s )
{
   s->bsLive = 0;
   s->bsBuff = 0;
}

/*---------------------------------------------------*/
static
void bsFinishWrite ( EState* s )
{
   while (s->bsLive > 0) {
      s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24);
      s->numZ++;
      s->bsBuff <<= 8;
      s->bsLive -= 8;
   }
}

/*---------------------------------------------------*/
#define bsNEEDW(nz)                           \
{                                             \
   while (s->bsLive >= 8) {                   \
      s->zbits[s->numZ]                       \
         = (UChar)(s->bsBuff >> 24);          \
      s->numZ++;                              \
      s->bsBuff <<= 8;                        \
      s->bsLive -= 8;                         \
   }                                          \
}

/*---------------------------------------------------*/
static
__inline__
void bsW ( EState* s, Int32 n, UInt32 v )
{
   bsNEEDW ( n );
   s->bsBuff |= (v << (32 - s->bsLive - n));
   s->bsLive += n;
}

/*---------------------------------------------------*/
static
void bsPutUInt32 ( EState* s, UInt32 u )
{
   bsW ( s, 8, (u >> 24) & 0xffL );
   bsW ( s, 8, (u >> 16) & 0xffL );
   bsW ( s, 8, (u >>  8) & 0xffL );
   bsW ( s, 8,  u        & 0xffL );
}

/*---------------------------------------------------*/
static
void bsPutUChar ( EState* s, UChar c )
{
   bsW( s, 8, (UInt32)c );
}

/*---------------------------------------------------*/
/*--- The back end proper                         ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void makeMaps_e ( EState* s )
{
   Int32 i;
   s->nInUse = 0;
   for (i = 0; i < 256; i++)
      if (s->inUse[i]) {
         s->unseqToSeq[i] = s->nInUse;
         s->nInUse++;
      }
}

/*---------------------------------------------------*/
static
void generateMTFValues ( EState* s )
{
   UChar   yy[256];
   Int32   i, j;
   Int32   zPend;
   Int32   wr;
   Int32   EOB;

   /*
      After sorting (eg, here),
         s->arr1 [ 0 .. s->nblock-1 ] holds sorted order,
         and
         ((UChar*)s->arr2) [ 0 .. s->nblock-1 ]
         holds the original block data.

      The first thing to do is generate the MTF values,
      and put them in
         ((UInt16*)s->arr1) [ 0 .. s->nblock-1 ].
      Because there are strictly fewer or equal MTF values
      than block values, ptr values in this area are overwritten
      with MTF values only when they are no longer needed.

      The final compressed bitstream is generated into the
      area starting at
         (UChar*) (&((UChar*)s->arr2)[s->nblock])

      These storage aliases are set up in bzCompressInit(),
      except for the last one, which is arranged in
      compressBlock().
   */
   UInt32* ptr   = s->ptr;
   UChar* block  = s->block;
   UInt16* mtfv  = s->mtfv;

   makeMaps_e ( s );
   EOB = s->nInUse+1;

   for (i = 0; i <= EOB; i++) s->mtfFreq[i] = 0;

   wr = 0;
   zPend = 0;
   for (i = 0; i < s->nInUse; i++) yy[i] = (UChar) i;

   for (i = 0; i < s->nblock; i++) {
      UChar ll_i;
      AssertD ( wr <= i, "generateMTFValues(1)" );
      j = ptr[i]-1; if (j < 0) j += s->nblock;
      ll_i = s->unseqToSeq[block[j]];
      AssertD ( ll_i < s->nInUse, "generateMTFValues(2a)" );

      if (yy[0] == ll_i) {
         zPend++;
      } else {

         if (zPend > 0) {
            zPend--;
            while (True) {
               if (zPend & 1) {
                  mtfv[wr] = BZ_RUNB; wr++;
                  s->mtfFreq[BZ_RUNB]++;
               } else {
                  mtfv[wr] = BZ_RUNA; wr++;
                  s->mtfFreq[BZ_RUNA]++;
               }
               if (zPend < 2) break;
               zPend = (zPend - 2) / 2;
            };
            zPend = 0;
         }
         {
            register UChar  rtmp;
            register UChar* ryy_j;
            register UChar  rll_i;
            rtmp  = yy[1];
            yy[1] = yy[0];
            ryy_j = &(yy[1]);
            rll_i = ll_i;
            while ( rll_i != rtmp ) {
               register UChar rtmp2;
               ryy_j++;
               rtmp2  = rtmp;
               rtmp   = *ryy_j;
               *ryy_j = rtmp2;
            };
            yy[0] = rtmp;
            j = ryy_j - &(yy[0]);
            mtfv[wr] = j+1; wr++; s->mtfFreq[j+1]++;
         }

      }
   }

   if (zPend > 0) {
      zPend--;
      while (True) {
         if (zPend & 1) {
            mtfv[wr] = BZ_RUNB; wr++;
            s->mtfFreq[BZ_RUNB]++;
         } else {
            mtfv[wr] = BZ_RUNA; wr++;
            s->mtfFreq[BZ_RUNA]++;
         }
         if (zPend < 2) break;
         zPend = (zPend - 2) / 2;
      };
      zPend = 0;
   }

   mtfv[wr] = EOB; wr++; s->mtfFreq[EOB]++;

   s->nMTF = wr;
}

/*---------------------------------------------------*/
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15

static
void sendMTFValues ( EState* s )
{
   Int32 v, t, i, j, gs, ge, totc, bt, bc, iter;
   Int32 nSelectors, alphaSize, minLen, maxLen, selCtr;
   Int32 nGroups, nBytes;

   /*--
   UChar  len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   is a global since the decoder also needs it.

   Int32  code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   Int32  rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   are also globals only used in this proc.
   Made global to keep stack frame size small.
   --*/

   UInt16 cost[BZ_N_GROUPS];
   Int32  fave[BZ_N_GROUPS];

   UInt16* mtfv = s->mtfv;

   if (s->verbosity >= 3)
      VPrintf3( "      %d in block, %d after MTF & 1-2 coding, "
                "%d+2 syms in use\n",
                s->nblock, s->nMTF, s->nInUse );

   alphaSize = s->nInUse+2;
   for (t = 0; t < BZ_N_GROUPS; t++)
      for (v = 0; v < alphaSize; v++)
         s->len[t][v] = BZ_GREATER_ICOST;

   /*--- Decide how many coding tables to use ---*/
   AssertH ( s->nMTF > 0, 3001 );
   if (s->nMTF < 200)  nGroups = 2; else
   if (s->nMTF < 600)  nGroups = 3; else
   if (s->nMTF < 1200) nGroups = 4; else
   if (s->nMTF < 2400) nGroups = 5; else
                       nGroups = 6;

   /*--- Generate an initial set of coding tables ---*/
   {
      Int32 nPart, remF, tFreq, aFreq;

      nPart = nGroups;
      remF  = s->nMTF;
      gs = 0;
      while (nPart > 0) {
         tFreq = remF / nPart;
         ge = gs-1;
         aFreq = 0;
         while (aFreq < tFreq && ge < alphaSize-1) {
            ge++;
            aFreq += s->mtfFreq[ge];
         }

         if (ge > gs
             && nPart != nGroups && nPart != 1
             && ((nGroups-nPart) % 2 == 1)) {
            aFreq -= s->mtfFreq[ge];
            ge--;
         }

         if (s->verbosity >= 3)
            VPrintf5( "      initial group %d, [%d .. %d], "
                      "has %d syms (%4.1f%%)\n",
                      nPart, gs, ge, aFreq,
                      (100.0 * (float)aFreq) / (float)(s->nMTF) );

         for (v = 0; v < alphaSize; v++)
            if (v >= gs && v <= ge)
               s->len[nPart-1][v] = BZ_LESSER_ICOST; else
               s->len[nPart-1][v] = BZ_GREATER_ICOST;

         nPart--;
         gs = ge+1;
         remF -= aFreq;
      }
   }

   /*---
      Iterate up to BZ_N_ITERS times to improve the tables.
   ---*/
   for (iter = 0; iter < BZ_N_ITERS; iter++) {

      for (t = 0; t < nGroups; t++) fave[t] = 0;

      for (t = 0; t < nGroups; t++)
         for (v = 0; v < alphaSize; v++)
            s->rfreq[t][v] = 0;

      /*---
        Set up an auxiliary length table which is used
        to fast-track the common case (nGroups == 6).
      ---*/
      if (nGroups == 6) {
         for (v = 0; v < alphaSize; v++) {
            s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
            s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
            s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
         }
      }

      nSelectors = 0;
      totc = 0;
      gs = 0;
      while (True) {

         /*--- Set group start & end marks. --*/
         if (gs >= s->nMTF) break;
         ge = gs + BZ_G_SIZE - 1;
         if (ge >= s->nMTF) ge = s->nMTF-1;

         /*--
            Calculate the cost of this group as coded
            by each of the coding tables.
         --*/
         for (t = 0; t < nGroups; t++) cost[t] = 0;

         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            register UInt32 cost01, cost23, cost45;
            register UInt16 icv;
            cost01 = cost23 = cost45 = 0;

#           define BZ_ITER(nn)                \
               icv = mtfv[gs+(nn)];           \
               cost01 += s->len_pack[icv][0]; \
               cost23 += s->len_pack[icv][1]; \
               cost45 += s->len_pack[icv][2]; \

            BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
            BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
            BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
            BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
            BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
            BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
            BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
            BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
            BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
            BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);

#           undef BZ_ITER

            cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
            cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
            cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++) {
               UInt16 icv = mtfv[i];
               for (t = 0; t < nGroups; t++) cost[t] += s->len[t][icv];
            }
         }

         /*--
            Find the coding table which is best for this group,
            and record its identity in the selector table.
         --*/
         bc = 999999999; bt = -1;
         for (t = 0; t < nGroups; t++)
            if (cost[t] < bc) { bc = cost[t]; bt = t; };
         totc += bc;
         fave[bt]++;
         s->selector[nSelectors] = bt;
         nSelectors++;

         /*--
            Increment the symbol frequencies for the selected table.
          --*/
         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/

#           define BZ_ITUR(nn) s->rfreq[bt][ mtfv[gs+(nn)] ]++

            BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
            BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
            BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
            BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
            BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
            BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
            BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
            BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
            BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
            BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);

#           undef BZ_ITUR

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++)
               s->rfreq[bt][ mtfv[i] ]++;
         }

         gs = ge+1;
      }
      if (s->verbosity >= 3) {
         VPrintf2 ( "      pass %d: size is %d, grp uses are ",
                   iter+1, totc/8 );
         for (t = 0; t < nGroups; t++)
            VPrintf1 ( "%d ", fave[t] );
         VPrintf0 ( "\n" );
      }

      /*--
        Recompute the tables based on the accumulated frequencies.
      --*/
      /* maxLen was changed from 20 to 17 in bzip2-1.0.3.  See
         comment in huffman.c for details. */
      for (t = 0; t < nGroups; t++)
         BZ2_hbMakeCodeLengths ( &(s->len[t][0]), &(s->rfreq[t][0]),
                                 alphaSize, 17 /*20*/ );
   }

   AssertH( nGroups < 8, 3002 );
   AssertH( nSelectors < 32768 &&
            nSelectors <= (2 + (900000 / BZ_G_SIZE)),
            3003 );

   /*--- Compute MTF values for the selectors. ---*/
   {
      UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
      for (i = 0; i < nGroups; i++) pos[i] = i;
      for (i = 0; i < nSelectors; i++) {
         ll_i = s->selector[i];
         j = 0;
         tmp = pos[j];
         while ( ll_i != tmp ) {
            j++;
            tmp2 = tmp;
            tmp = pos[j];
            pos[j] = tmp2;
         };
         pos[0] = tmp;
         s->selectorMtf[i] = j;
      }
   };

   /*--- Assign actual codes for the tables. --*/
   for (t = 0; t < nGroups; t++) {
      minLen = 32;
      maxLen = 0;
      for (i = 0; i < alphaSize; i++) {
         if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
         if (s->len[t][i] < minLen) minLen = s->len[t][i];
      }
      AssertH ( !(maxLen > 17 /*20*/ ), 3004 );
      AssertH ( !(minLen < 1),  3005 );
      BZ2_hbAssignCodes ( &(s->code[t][0]), &(s->len[t][0]),
                          minLen, maxLen, alphaSize );
   }

   /*--- Transmit the mapping table. ---*/
   {
      Bool inUse16[16];
      for (i = 0; i < 16; i++) {
          inUse16[i] = False;
          for (j = 0; j < 16; j++)
             if (s->inUse[i * 16 + j]) inUse16[i] = True;
      }

      nBytes = s->numZ;
      for (i = 0; i < 16; i++)
         if (inUse16[i]) bsW(s,1,1); else bsW(s,1,0);

      for (i = 0; i < 16; i++)
         if (inUse16[i])
            for (j = 0; j < 16; j++) {
               if (s->inUse[i * 16 + j]) bsW(s,1,1); else bsW(s,1,0);
            }

      if (s->verbosity >= 3)
         VPrintf1( "      bytes: mapping %d, ", s->numZ-nBytes );
   }

   /*--- Now the selectors. ---*/
   nBytes = s->numZ;
   bsW ( s, 3, nGroups );
   bsW ( s, 15, nSelectors );
   for (i = 0; i < nSelectors; i++) {
      for (j = 0; j < s->selectorMtf[i]; j++) bsW(s,1,1);
      bsW(s,1,0);
   }
   if (s->verbosity >= 3)
      VPrintf1( "selectors %d, ", s->numZ-nBytes );

   /*--- Now the coding tables. ---*/
   nBytes = s->numZ;

   for (t = 0; t < nGroups; t++) {
      Int32 curr = s->len[t][0];
      bsW ( s, 5, curr );
      for (i = 0; i < alphaSize; i++) {
         while (curr < s->len[t][i]) { bsW(s,2,2); curr++; /* 10 */ };
         while (curr > s->len[t][i]) { bsW(s,2,3); curr--; /* 11 */ };
         bsW ( s, 1, 0 );
      }
   }

   if (s->verbosity >= 3)
      VPrintf1 ( "code lengths %d, ", s->numZ-nBytes );

   /*--- And finally, the block data proper ---*/
   nBytes = s->numZ;
   selCtr = 0;
   gs = 0;
   while (True) {
      if (gs >= s->nMTF) break;
      ge = gs + BZ_G_SIZE - 1;
      if (ge >= s->nMTF) ge = s->nMTF-1;
      AssertH ( s->selector[selCtr] < nGroups, 3006 );

      if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            UInt16 mtfv_i;
            UChar* s_len_sel_selCtr
               = &(s->len[s->selector[selCtr]][0]);
            Int32* s_code_sel_selCtr
               = &(s->code[s->selector[selCtr]][0]);

#           define BZ_ITAH(nn)                      \
               mtfv_i = mtfv[gs+(nn)];              \
               bsW ( s,                             \
                     s_len_sel_selCtr[mtfv_i],      \
                     s_code_sel_selCtr[mtfv_i] )

            BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
            BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
            BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
            BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
            BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
            BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
            BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
            BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
            BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
            BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);

#           undef BZ_ITAH

      } else {
         /*--- slow version which correctly handles all situations ---*/
         for (i = gs; i <= ge; i++) {
            bsW ( s,
                  s->len  [s->selector[selCtr]] [mtfv[i]],
                  s->code [s->selector[selCtr]] [mtfv[i]] );
         }
      }

      gs = ge+1;
      selCtr++;
   }
   AssertH( selCtr == nSelectors, 3007 );

   if (s->verbosity >= 3)
      VPrintf1( "codes %d\n", s->numZ-nBytes );
}

/*---------------------------------------------------*/
void BZ2_compressBlock ( EState* s, Bool is_last_block )
{
   if (s->nblock > 0) {

      BZ_FINALISE_CRC ( s->blockCRC );
      s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
      s->combinedCRC ^= s->blockCRC;
      if (s->blockNo > 1) s->numZ = 0;

      if (s->verbosity >= 2)
         VPrintf4( "    block %d: crc = 0x%08x, "
                   "combined CRC = 0x%08x, size = %d\n",
                   s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );

      BZ2_blockSort ( s );
   }

   s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);

   /*-- If this is the first block, create the stream header. --*/
   if (s->blockNo == 1) {
      BZ2_bsInitWrite ( s );
      bsPutUChar ( s, BZ_HDR_B );
      bsPutUChar ( s, BZ_HDR_Z );
      bsPutUChar ( s, BZ_HDR_h );
      bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) );
   }

   if (s->nblock > 0) {

      bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 );
      bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 );
      bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 );

      /*-- Now the block's CRC, so it is in a known place. --*/
      bsPutUInt32 ( s, s->blockCRC );

      /*--
         Now a single bit indicating (non-)randomisation.
         As of version 0.9.5, we use a better sorting algorithm
         which makes randomisation unnecessary.  So always set
         the randomised bit to 'no'.  Of course, the decoder
         still needs to be able to handle randomised blocks
         so as to maintain backwards compatibility with
         older versions of bzip2.
      --*/
      bsW(s,1,0);

      bsW ( s, 24, s->origPtr );
      generateMTFValues ( s );
      sendMTFValues ( s );
   }

   /*-- If this is the last block, add the stream trailer. --*/
   if (is_last_block) {

      bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 );
      bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 );
      bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 );
      bsPutUInt32 ( s, s->combinedCRC );
      if (s->verbosity >= 2)
         VPrintf1( "    final combined CRC = 0x%08x\n   ", s->combinedCRC );
      bsFinishWrite ( s );
   }
}

/*-------------------------------------------------------------*/
/*--- end                                        compress.c ---*/
/*-------------------------------------------------------------*/

ABC_NAMESPACE_IMPL_END
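
/* Illustrative note (added for exposition; not part of the original
   bzip2 sources): the RUNA/RUNB loop in generateMTFValues() encodes a
   run of N zeroes in bijective base 2, least significant digit first,
   where a RUNA digit at position k contributes 1*2^k and a RUNB digit
   contributes 2*2^k.  Worked example: N = 5 emits RUNA then RUNB
   (1*1 + 2*2 = 5); N = 1 emits a single RUNA. */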
/*
 *  utility.c - misc functions for the cpufreq driver and Px statistics
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *    Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *    Fix handling for CPU hotplug -- affected CPUs
 *  Feb 2008 - Liu Jinsong <jinsong.liu@intel.com>
 *    1. Merged cpufreq.c and freq_table.c of Linux 2.6.23
 *       and ported them to the Xen hypervisor
 *    2. Added some Px statistic interface functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <xen/errno.h>
#include <xen/cpumask.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <xen/percpu.h>
#include <xen/sched.h>
#include <xen/timer.h>
#include <xen/trace.h>
#include <asm/config.h>
#include <acpi/cpufreq/cpufreq.h>
#include <public/sysctl.h>

struct cpufreq_driver   *cpufreq_driver;
struct processor_pminfo *__read_mostly processor_pminfo[NR_CPUS];
struct cpufreq_policy   *__read_mostly cpufreq_cpu_policy[NR_CPUS];

/* Per-CPU Px statistic data, allocated lazily by
 * cpufreq_statistic_init() and consumed by the statistic
 * routines below. */
struct pm_px *__read_mostly cpufreq_statistic_data[NR_CPUS];

DEFINE_PER_CPU(spinlock_t, cpufreq_statistic_lock) = SPIN_LOCK_UNLOCKED;

/*********************************************************************
 *                    Px STATISTIC INFO                              *
 *********************************************************************/
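
/* Residency bookkeeping: the time charged to a Px state is busy
   wall-clock time, i.e. the wall-clock delta minus the idle-time
   delta accumulated since the previous update.  Illustrative example:
   if 10ms of wall time and 3ms of idle time have elapsed since
   prev_state_wall/prev_idle_wall were last sampled,
   cpufreq_residency_update() below adds 7ms to pt[state].residency. */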

void cpufreq_residency_update(unsigned int cpu, uint8_t state)
{
    uint64_t now, total_idle_ns;
    int64_t delta;
    struct pm_px *pxpt = cpufreq_statistic_data[cpu];

    total_idle_ns = get_cpu_idle_time(cpu);
    now = NOW();

    delta = (now - pxpt->prev_state_wall) - 
            (total_idle_ns - pxpt->prev_idle_wall);

    if ( likely(delta >= 0) )
        pxpt->u.pt[state].residency += delta;

    pxpt->prev_state_wall = now;
    pxpt->prev_idle_wall = total_idle_ns;
}

void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to)
{
    struct pm_px *pxpt = cpufreq_statistic_data[cpu];
    struct processor_pminfo *pmpt = processor_pminfo[cpu];
    spinlock_t *cpufreq_statistic_lock = 
               &per_cpu(cpufreq_statistic_lock, cpu);

    spin_lock(cpufreq_statistic_lock);

    if ( !pxpt || !pmpt ) {
        spin_unlock(cpufreq_statistic_lock);
        return;
    }

    pxpt->u.last = from;
    pxpt->u.cur = to;
    pxpt->u.pt[to].count++;

    cpufreq_residency_update(cpu, from);

    /* trans_pt is a flattened state_count x state_count matrix of
       transition counts; entry [from][to] lives at index
       from * state_count + to. */
    (*(pxpt->u.trans_pt + from * pmpt->perf.state_count + to))++;

    spin_unlock(cpufreq_statistic_lock);
}

/* Lazily allocate the per-CPU statistic block: a state_count x
 * state_count transition matrix (trans_pt) plus one pm_px_val per Px
 * state (pt).  Returns 0 if the block already exists. */
int cpufreq_statistic_init(unsigned int cpuid)
{
    uint32_t i, count;
    struct pm_px *pxpt = cpufreq_statistic_data[cpuid];
    const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
    spinlock_t *cpufreq_statistic_lock = 
                          &per_cpu(cpufreq_statistic_lock, cpuid);

    if ( !pmpt )
        return -EINVAL;

    spin_lock(cpufreq_statistic_lock);

    if ( pxpt ) {
        spin_unlock(cpufreq_statistic_lock);
        return 0;
    }

    count = pmpt->perf.state_count;

    pxpt = xmalloc(struct pm_px);
    if ( !pxpt ) {
        spin_unlock(cpufreq_statistic_lock);
        return -ENOMEM;
    }
    memset(pxpt, 0, sizeof(*pxpt));
    cpufreq_statistic_data[cpuid] = pxpt;

    pxpt->u.trans_pt = xmalloc_array(uint64_t, count * count);
    if (!pxpt->u.trans_pt) {
        xfree(pxpt);
        spin_unlock(cpufreq_statistic_lock);
        return -ENOMEM;
    }

    pxpt->u.pt = xmalloc_array(struct pm_px_val, count);
    if (!pxpt->u.pt) {
        xfree(pxpt->u.trans_pt);
        xfree(pxpt);
        spin_unlock(cpufreq_statistic_lock);
        return -ENOMEM;
    }

    memset(pxpt->u.trans_pt, 0, count * count * (sizeof(uint64_t)));
    memset(pxpt->u.pt, 0, count * (sizeof(struct pm_px_val)));

    pxpt->u.total = pmpt->perf.state_count;
    pxpt->u.usable = pmpt->perf.state_count - pmpt->perf.platform_limit;

    for (i=0; i < pmpt->perf.state_count; i++)
        pxpt->u.pt[i].freq = pmpt->perf.states[i].core_frequency;

    pxpt->prev_state_wall = NOW();
    pxpt->prev_idle_wall = get_cpu_idle_time(cpuid);

    spin_unlock(cpufreq_statistic_lock);

    return 0;
}

void cpufreq_statistic_exit(unsigned int cpuid)
{
    struct pm_px *pxpt = cpufreq_statistic_data[cpuid];
    spinlock_t *cpufreq_statistic_lock = 
               &per_cpu(cpufreq_statistic_lock, cpuid);

    spin_lock(cpufreq_statistic_lock);

    if (!pxpt) {
        spin_unlock(cpufreq_statistic_lock);
        return;
    }

    xfree(pxpt->u.trans_pt);
    xfree(pxpt->u.pt);
    xfree(pxpt);
    cpufreq_statistic_data[cpuid] = NULL;

    spin_unlock(cpufreq_statistic_lock);
}

void cpufreq_statistic_reset(unsigned int cpuid)
{
    uint32_t i, j, count;
    struct pm_px *pxpt = cpufreq_statistic_data[cpuid];
    const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
    spinlock_t *cpufreq_statistic_lock = 
               &per_cpu(cpufreq_statistic_lock, cpuid);

    spin_lock(cpufreq_statistic_lock);

    if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) {
        spin_unlock(cpufreq_statistic_lock);
        return;
    }

    count = pmpt->perf.state_count;

    for (i=0; i < count; i++) {
        pxpt->u.pt[i].residency = 0;
        pxpt->u.pt[i].count = 0;

        for (j=0; j < count; j++)
            *(pxpt->u.trans_pt + i*count + j) = 0;
    }

    pxpt->prev_state_wall = NOW();
    pxpt->prev_idle_wall = get_cpu_idle_time(cpuid);

    spin_unlock(cpufreq_statistic_lock);
}


/*********************************************************************
 *                   FREQUENCY TABLE HELPERS                         *
 *********************************************************************/
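
/* These helpers walk driver-provided tables shaped as in this
   illustrative sketch (the table name and values are hypothetical;
   frequencies are in kHz, entries may appear in any order, holes are
   marked CPUFREQ_ENTRY_INVALID, and the list is terminated by
   CPUFREQ_TABLE_END):

       static struct cpufreq_frequency_table example_table[] = {
           { .index = 0, .frequency = 2000000 },
           { .index = 1, .frequency = CPUFREQ_ENTRY_INVALID },
           { .index = 2, .frequency = 1000000 },
           { .index = 3, .frequency = CPUFREQ_TABLE_END },
       };
*/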

/* Derive policy->cpuinfo.{min,max}_freq (and the initial
 * policy->{min,max}) from the table; returns -EINVAL if every entry
 * is invalid. */
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
                                    struct cpufreq_frequency_table *table)
{
    unsigned int min_freq = ~0;
    unsigned int max_freq = 0;
    unsigned int i;

    for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID)
            continue;
        if (freq < min_freq)
            min_freq = freq;
        if (freq > max_freq)
            max_freq = freq;
    }

    policy->min = policy->cpuinfo.min_freq = min_freq;
    policy->max = policy->cpuinfo.max_freq = max_freq;

    if (policy->min == ~0)
        return -EINVAL;
    else
        return 0;
}

/* Clamp policy->{min,max} to the cpuinfo limits; if no valid table
 * frequency falls inside the resulting range, raise policy->max to
 * the next larger table frequency. */
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table)
{
    unsigned int next_larger = ~0;
    unsigned int i;
    unsigned int count = 0;

    if (!cpu_online(policy->cpu))
        return -EINVAL;

    cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                 policy->cpuinfo.max_freq);

    for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID)
            continue;
        if ((freq >= policy->min) && (freq <= policy->max))
            count++;
        else if ((next_larger > freq) && (freq > policy->max))
            next_larger = freq;
    }

    if (!count)
        policy->max = next_larger;

    cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                 policy->cpuinfo.max_freq);

    return 0;
}
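
/* Relation semantics: CPUFREQ_RELATION_H selects the highest table
   frequency at or below target_freq, CPUFREQ_RELATION_L the lowest
   at or above it.  Illustrative example: with valid entries
   { 800000, 1000000, 2000000 } kHz and target_freq = 1500000,
   RELATION_H yields 1000000 and RELATION_L yields 2000000.  When no
   entry satisfies the relation, the "suboptimal" candidate from the
   other side of target_freq is used instead. */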

int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table,
                                   unsigned int target_freq,
                                   unsigned int relation,
                                   unsigned int *index)
{
    struct cpufreq_frequency_table optimal = {
        .index = ~0,
        .frequency = 0,
    };
    struct cpufreq_frequency_table suboptimal = {
        .index = ~0,
        .frequency = 0,
    };
    unsigned int i;

    switch (relation) {
    case CPUFREQ_RELATION_H:
        suboptimal.frequency = ~0;
        break;
    case CPUFREQ_RELATION_L:
        optimal.frequency = ~0;
        break;
    }

    if (!cpu_online(policy->cpu))
        return -EINVAL;

    for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID)
            continue;
        if ((freq < policy->min) || (freq > policy->max))
            continue;
        switch(relation) {
        case CPUFREQ_RELATION_H:
            if (freq <= target_freq) {
                if (freq >= optimal.frequency) {
                    optimal.frequency = freq;
                    optimal.index = i;
                }
            } else {
                if (freq <= suboptimal.frequency) {
                    suboptimal.frequency = freq;
                    suboptimal.index = i;
                }
            }
            break;
        case CPUFREQ_RELATION_L:
            if (freq >= target_freq) {
                if (freq <= optimal.frequency) {
                    optimal.frequency = freq;
                    optimal.index = i;
                }
            } else {
                if (freq >= suboptimal.frequency) {
                    suboptimal.frequency = freq;
                    suboptimal.index = i;
                }
            }
            break;
        }
    }
    if (optimal.index > i) {
        if (suboptimal.index > i)
            return -EINVAL;
        *index = suboptimal.index;
    } else
        *index = optimal.index;

    return 0;
}


/*********************************************************************
 *               GOVERNORS                                           *
 *********************************************************************/

int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
    int retval = -EINVAL;

    if (cpu_online(policy->cpu) && cpufreq_driver->target)
    {
        unsigned int prev_freq = policy->cur;

        retval = cpufreq_driver->target(policy, target_freq, relation);
        if ( retval == 0 )
            TRACE_2D(TRC_PM_FREQ_CHANGE, prev_freq/1000, policy->cur/1000);
    }

    return retval;
}

int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
{
    int ret = 0;

    if (!policy)
        return -EINVAL;

    if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
        ret = cpufreq_driver->getavg(policy->cpu);

    return ret;
}


/*********************************************************************
 *                 POLICY                                            *
 *********************************************************************/

/*
 * data   : current policy.
 * policy : policy to be set.
 */
int __cpufreq_set_policy(struct cpufreq_policy *data,
                                struct cpufreq_policy *policy)
{
    int ret = 0;

    memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo));

    if (policy->min > data->min && policy->min > policy->max)
        return -EINVAL;

    /* verify the cpu speed can be set within this limit */
    ret = cpufreq_driver->verify(policy);
    if (ret)
        return ret;

    data->min = policy->min;
    data->max = policy->max;

    if (policy->governor != data->governor) {
        /* save old, working values */
        struct cpufreq_governor *old_gov = data->governor;

        /* end old governor */
        if (data->governor)
            __cpufreq_governor(data, CPUFREQ_GOV_STOP);

        /* start new governor */
        data->governor = policy->governor;
        if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
            printk(KERN_WARNING "Failed to change to %s governor\n",
                                 data->governor->name);

            /* new governor failed, so re-start old one */
            if (old_gov) {
                data->governor = old_gov;
                __cpufreq_governor(data, CPUFREQ_GOV_START);
                printk(KERN_WARNING "Staying with %s governor\n",
                                     data->governor->name);
            }
            return -EINVAL;
        }
        /* might be a policy change, too, so fall through */
    }

    return __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
}
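
/* Illustrative call sequence (editorial sketch; 'cpu' and 'request'
   are hypothetical caller state, and cpufreq_gov_userspace stands in
   for whichever governor the caller selects; bounds are in kHz):

       struct cpufreq_policy new_policy = *cpufreq_cpu_policy[cpu];

       new_policy.min      = request.min_freq;
       new_policy.max      = request.max_freq;
       new_policy.governor = &cpufreq_gov_userspace;

       ret = __cpufreq_set_policy(cpufreq_cpu_policy[cpu], &new_policy);

   On success the live policy takes the new limits, the old governor
   is stopped, the new one started, and CPUFREQ_GOV_LIMITS lets it
   re-clamp the current frequency. */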