chenlc626
BAN USERHere is my thought: all we need is an API which will find the kth element in the tree, such as tree.find(int k). For the kth element, we can achieve O(n) time complexity using normal binary search tree itself. If we want better, we can store some counts of child elements in the node, and so the delete and insert operation would need to update the count of all its Ancestors. The final performance of search would depend on the snap shot of tree at the time. If it is more like balanced binary search tree, you got the O(log(n)). If it is more like a list, it is O(n). But it will all guarantee the uniform probability distribution at any snapshot of the tree.
- chenlc626 March 07, 2013Here is my code:
#include <stdio.h>
/*
 * Naive substring search (strstr-like): return a pointer to the first
 * occurrence of pSub inside pSrc, NULL when absent or on NULL arguments.
 * An empty pSub matches at the start of pSrc.
 */
char* strFind(char*pSrc, char*pSub)
{
    size_t pos, k;

    if (pSrc == NULL || pSub == NULL)
        return NULL;
    if (pSub[0] == '\0')
        return pSrc;

    /* Try every starting offset; compare until the pattern ends or a
     * character differs (the NUL of pSrc safely stops the inner scan). */
    for (pos = 0; pSrc[pos] != '\0'; pos++) {
        for (k = 0; pSub[k] != '\0' && pSrc[pos + k] == pSub[k]; k++)
            ;
        if (pSub[k] == '\0')
            return pSrc + pos;
    }
    return NULL;
}
/* Test driver for strFind(). */
int main()
{
    char src[]="why should i love programming? i love programming because it lets me not feel boring";
    char sub[]=" love p";
    /* NOTE(review): if strFind returned NULL this %s would be undefined
     * behavior; with this fixed input a match is always found. */
    printf("find str(%s) from str (%s) at (%s)\n", sub, src, strFind(src, sub));
    return 0; /* explicit success return added */
}
Here is my thought: For the permutation of a string, its simplified representation is the number of times each character exists in the string. If you want to check whether A's permutation is a substring of the B string, you only need to be sure that the number of each kind of character in A is no more than the number of the same character in the string B. So you can build a histogram of characters in A by going through A, then go through B, decreasing the count in A's character histogram when you find the same character in B. Return true when all histogram entries reach zero during the pass over B, otherwise return false.
- chenlc626 March 03, 2013Test Code:
/* Test driver for the random-walk death-probability code. */
int main()
{
    squareProb_t cb;
    double p1, p2, p3;
    squareProbInit(&cb, 2,2, 5);
    /* NOTE(review): squareProbInit gives no failure indication; cb.pProb
     * should be checked before use, and it is never freed here (process
     * exit reclaims it). */
    p1=squareProbDead(&cb, 0, 0, 1);
    p2=squareProbDead(&cb, 0, 0, 2);
    p3=squareProbDead(&cb, 0, 0, 5);
    printf("p1 is %f\n", p1);
    printf("p2 is %f\n", p2);
    printf("p3 is %f\n", p3);
    return 0; /* explicit success return added */
}
My Idea :
1> For area outside the NXM, the death Prob is 1, what ever the step is. (step>=0)
2> For area inside the NxM and step=0, death Prob is zero.
3> For other x, y, and step, probDeath(x,y,step)=1/4*(probDeath(x-1,y, step-1) + probDeath(x+1,y, step-1)+probDeath(x,y-1, step-1) +probDeath(x,y+1, step-1)). Assume the person runs randomly.
4> The logic above also added a array to store the shared calculated result (as does dynamic programming) to save the computation time during the recursive call.
5> ProbLive(x,y, step)=1-ProbDeath(x,y,step).
My Code:
typedef struct{
double *pProb;
int w;
int h;
int step;
} squareProb_t;
/*
 * Initialize the memo table for squareProbDead().
 * w, h    : board dimensions (must be >= 1)
 * maxStep : largest step count that will be queried (must be >= 1)
 * On bad arguments or allocation failure the function returns silently;
 * callers should check pCb->pProb before use.
 */
void squareProbInit(squareProb_t *pCb, int w, int h, int maxStep)
{
    int count;
    /* BUG FIX: also reject maxStep < 1 — a zero/negative value would be
     * fed straight into the malloc size computation. */
    if (NULL==pCb || 1>w || 1>h || 1>maxStep)
        return;
    pCb->w = w;
    pCb->h = h;
    pCb->step = maxStep;
    pCb->pProb = malloc(sizeof(double)*w*h*(maxStep));
    if (NULL==pCb->pProb)
        return;
    /* -1.0 marks every entry as "not computed yet". */
    for (count=0; count<w*h*(maxStep); count++)
        pCb->pProb[count] = -1.0;
    return;
}
/*
 * Probability that a random walker starting at (x, y) has walked off the
 * w x h board within `step` moves (each move picks one of 4 directions
 * with probability 1/4).  Memoized in pCb->pProb (-1.0 == uncomputed).
 * Off-board positions are dead with probability 1; any on-board position
 * with step == 0 has probability 0.  The printf calls are debug traces.
 */
double squareProbDead(squareProb_t *pCb, int x, int y, int step)
{
int count;
if(NULL==pCb)
return 0;
/* reject steps outside the memoized range [0, pCb->step] */
if(0>step||pCb->step<step)
return 0;
if(x<0||x>=pCb->w||y<0||y>=pCb->h)
{
printf("(x=%d, y=%d, step=%d) ==> %f\n", x, y, step, 1.0);
return 1;
}
if(step==0)
{
printf("(x=%d, y=%d, step=%d) ==> %f\n", x, y, step, 0.0);
return 0;
}
/* memo index: each cell (x, y) owns pCb->step slots, one per step 1..max */
count=(y*pCb->w+x)*pCb->step+step-1;
if(-1.0==pCb->pProb[count])
{
step--;
/* recurrence: average of the four neighbours one step earlier */
pCb->pProb[count]=(squareProbDead(pCb, x-1,y, step)+
squareProbDead(pCb, x+1,y, step)+
squareProbDead(pCb, x,y-1, step)+
squareProbDead(pCb, x,y+1, step))/4;
printf(" ==>");
}
/* NOTE(review): on a memo hit `step` was not decremented above, so this
 * trace prints step+1 one higher than the step actually queried. */
printf("(x=%d, y=%d, step=%d) ==> %f\n", x, y, step+1, pCb->pProb[count]);
return pCb->pProb[count];
}
Test Results:
amb-sw2 a5m 107% ./a.out
finished the counting
finsihed the accumlating
finished sorting
src aadfadfiagaflagagafaggsdfsdfsdipoqpeidkdjhbcdefghijklmnopqrstuvwxyzAiafadfBDDGBADDSEFHCDEFGHIJKLMNOPQRSTUVWXYZ dict ZYXWVUTSRQPONMLKJIHGFEDCBAabcdefghijklmnopqrstuvwxyz ==> dst ZYXWVUTSSRQPONMLKJIHHGGFFEEDDDDDCBBAAaaaaaaaaaaabcdddddddddeefffffffffgggggghhiiiiijjkkllmnoopppqqrsssstuvwxyz
~
Here is the test code:
/* Test driver for dictsort(): sort pSrc by the character order of pDict. */
int main()
{
    char pSrc[]="aadfadfiagaflagagafaggsdfsdfsdipoqpeidkdjhbcdefghijklmnopqrstuvwxyzAiafadfBDDGBADDSEFHCDEFGHIJKLMNOPQRSTUVWXYZ";
    // char pSrc[]="abcABC";
    char pDst[256];
    char pDict[]="ZYXWVUTSRQPONMLKJIHGFEDCBAabcdefghijklmnopqrstuvwxyz";
    // char pDict[]="cbaABDC";
    dictsort(pSrc, pDst, pDict);
    printf("src %s dict %s ==> dst %s\n", pSrc, pDict, pDst);
    return 0; /* explicit success return added */
}
My idea is that we just use modified count sorting algorithm: Just change the accumulating processing according to the order of dictionary, not by the alphabet order. The following code would assume the string is 'a'-'z' and 'A'-'Z', and the Dict should contains all the alphabet char existing in the input strings.
Here is the code:
#include <stdio.h>
/*
 * Map a letter to a dense index: 'a'-'z' -> 0..25, 'A'-'Z' -> 26..51.
 * Any other character yields -1 so the caller can reject bad input.
 */
static int getCharIndex(char c)
{
    if ('a' <= c && c <= 'z')
        return c - 'a';
    if ('A' <= c && c <= 'Z')
        return 26 + (c - 'A');
    /* not a letter: signal the error to the caller */
    return -1;
}
/*
 * Counting sort of pStr into pDst, ordered by the character order given
 * in pDict instead of ASCII order.  Assumes pStr contains only 'a'-'z'
 * and 'A'-'Z', and that pDict lists (once each) every character that can
 * occur in pStr.  The printf calls are progress traces.
 */
void dictsort(char*pStr, char*pDst, char*pDict)
{
int charCnt[52];
int temp, temp2, len;
if(NULL==pStr||0==*pStr||NULL==pDict||0==*pDict||NULL==pDst)
return;
memset((void*)charCnt, 0, sizeof(charCnt));
/*Counting the number */
/* Pass 1: histogram of pStr; pStr ends up on its terminator and len is
 * the string length — both are reused by the backward pass below. */
for(len=0;*pStr;pStr++, len++)
{
temp2=getCharIndex(*pStr);
if(0>temp2)
return;
charCnt[temp2]++;
}
printf("finished the counting\n");
/*Accumulating now*/
/* Pass 2: prefix sums taken in *dictionary* order; starting from -1
 * turns each entry into the LAST output index for that character. */
for(temp=-1;*pDict;pDict++)
{
temp2=getCharIndex(*pDict);
if(0>temp2)
return;
charCnt[temp2]+=temp;
temp=charCnt[temp2];
}
printf("finsihed the accumlating\n");
/*Now we are going to sorting*/
/* Pass 3: walk pStr backwards, placing each character at its slot and
 * decrementing — the standard stable counting-sort placement. */
pDst[len]=0;
for(; len; len--)
{
--pStr;
temp2=getCharIndex(*pStr);
if(0>temp2)
return;
pDst[charCnt[temp2]]=*pStr;
charCnt[temp2]--;
}
printf("finished sorting\n");
return;
}
How about you have two read points, one for each file. You just move each read point on, ignoring the space or any extra character you want to ignore, comparing the remaining characters one by one; return fail if you get a mismatch or one file reaches EOF when the other does not, and return OK when both reach EOF at the same time.
- chenlc626 February 28, 2013If the elements of the data structure only support comparative sorting, then such a data structure cannot exist. Because according to the feature list, people could achieve a sorting algorithm with O(n) time complexity:
1> You put elements in to the data structure one by one
2> Then you get max or min from it one by one.
So time complexity O(2n).
But it is told that comparative sorting could only achieve O(nlogn). So it is impossible for existence of such data structure in this case.
The badness of this code right now is that it will change the dictionary.
You can change the function
unsigned int charCountString() not to change the content of it.
Try to add some explanation, although i would think the logic and function is quite clear already:
1> charCountString() will try to count the number of c in the string and remove all of the c in the string.
2> For each char of the searching char list, longestword() will go through each words in the dictionary, accumulates the counts of char in the array pDstCnt. At the same time tracing max count of chars in these words.
3> Here is one trick in 2>, when the maxCount already exceeds the len of the word in the dict, you do not need to call charCountString() anymore.
Here is test code:
/* Test driver for longestword().
 * NOTE(review): relies on strlen from <string.h>, not included in this paste. */
int main() /* BUG FIX: was "void main" with no parameter list — not valid C */
{
    char word1[]="god"; int wordlen1=strlen(word1);
    char word2[]="love"; int wordlen2=strlen(word2);
    char word3[]="America"; int wordlen3=strlen(word3);
    char word4[]="and"; int wordlen4=strlen(word4);
    char word5[]="other"; int wordlen5=strlen(word5);
    char word6[]="people"; int wordlen6=strlen(word6);
    char word7[]="in"; int wordlen7=strlen(word7);
    char word8[]="world"; int wordlen8=strlen(word8);
    char* pDict[]={word1, word2, word3,word4, word5,word6,word7, word8};
    int pWordLen[]={wordlen1, wordlen2,wordlen3, wordlen4, wordlen5, wordlen6, wordlen7, wordlen8};
    char dstChars[]="aoep";
    longestword(pDict, pWordLen, 8, dstChars, 4);
    return 0; /* explicit success return added */
}
Here is my Code:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
 * Count how many times `c` occurs in pStr AND remove every occurrence
 * in place (classic read/write two-pointer compaction).
 * Returns the number of removed characters; 0 for a NULL string.
 */
unsigned int charCountString(char*pStr, char c)
{
    unsigned int removed = 0;
    char *rd, *wr;

    if (pStr == NULL)
        return 0;

    wr = pStr;
    for (rd = pStr; *rd != '\0'; rd++) {
        if (*rd == c)
            removed++;          /* matched: count it, don't copy it */
        else
            *wr++ = *rd;        /* kept: compact leftwards */
    }
    *wr = '\0';                 /* re-terminate the shortened string */
    return removed;
}
/*
 * Find the dictionary word containing the most characters drawn from
 * pDstChars.  ppDict[i] is word i and pDictWordLen[i] its length.
 * Prints the (1-based) index of the best word and its match count.
 * NOTE: charCountString() removes matched characters, so the dictionary
 * words are modified in place (acknowledged in the original write-up).
 */
void longestword(char**ppDict,
                 int *pDictWordLen,
                 int dictLen,
                 char* pDstChars,
                 int dstCharLen)
{
    int count1, count2, maxCnt, maxIndex;
    int *pDstCnt;
    if (NULL==ppDict || NULL==pDictWordLen || 1>dictLen || NULL==pDstChars || 1>dstCharLen)
        return;
    pDstCnt = malloc(sizeof(int)*dictLen);
    if (NULL==pDstCnt)
        return;
    /* BUG FIX: the original memset used sizeof(*pDstCnt), which zeroes
     * only the FIRST element; zero the whole per-word counter array. */
    memset((void*)pDstCnt, 0, sizeof(int)*dictLen);
    for (count1=0, maxCnt=0, maxIndex=-1; count1<dstCharLen; count1++)
    {
        char temp1 = pDstChars[count1];
        for (count2=0; count2<dictLen; count2++)
        {
            char *pTemp2 = ppDict[count2];
            /* BUG FIX: was `char len` — would overflow for words longer
             * than 127 characters; match charCountString's return type. */
            unsigned int len;
            printf("%s with Len %d\n", pTemp2, pDictWordLen[count2]);
            /* Pruning trick from the write-up: a word shorter than the
             * current best count can never win, skip the scan. */
            if (maxCnt >= pDictWordLen[count2])
                continue;
            len = charCountString(pTemp2, temp1);
            pDstCnt[count2] += len;
            if (maxCnt < pDstCnt[count2])
            {
                maxCnt = pDstCnt[count2];
                maxIndex = count2;
            }
        }
    }
    if (0 <= maxIndex)
    {
        printf("MaxIndex is %d and value is %d\n", maxIndex+1, maxCnt);
    }
    free(pDstCnt);
}
The idea is as bellow:
1> Use wsum to contain the accumulated weight from start to end.
2> Use asum to contain the accumulated weight*distance from start to the end.
3> For each sub pieces in the string, n to m,
its subweight=wsum[m]-wsum[n-1];
subwegith*length=asum[m]-asum[n-1]-n*subweight.
4> For each pair of same-length substrings, check the four combinations to see whether they could be the central weight. Using some algebra you can avoid the division (division may cause floating-point issues), and you can understand the logic.
5> Finally, the search will go from longest string to shortest one to get the goal as soon as possible. There could be some alternative same length combination, but we terminated here immediately.
6> The int is used for computation, i guess would be good enough for 500. If not, we can use long long.
Here is the test code:
/* Test driver for centerweight(): digit strings up to 500 characters. */
int main()
{
    char p1[]="41111921111119";
    char p2[]="131251141231";
    char p3[]="11";
    char p4[]="123232111119232333277777999";
    char p5[]="751283918273129483751265369875938721253256384982385781251985354664939832887525615625665211639491598528185935839473825642193794184375895489172359871654785647324524354639289887198715265623845821451815818815252738638451823475832516531656348728374628574593847652354612753472165281273645987465847536642387615238749187265876321827635482776859871628376457165263745196283764872687654782635987162983654786253476179834691827567647382964865167234698172658761946256162516256152738427348274823748273482734827482";
    centerweight(p1);
    centerweight(p2);
    centerweight(p3);
    centerweight(p4);
    centerweight(p5);
    return 0; /* explicit success return added */
}
Here is my code:
#include <stdio.h>
#include <stdlib.h>
/*
 * Core search for two disjoint equal-length substrings that can balance,
 * per the accumulated-weight algebra in the notes above the test code.
 * wsum/asum are 1-shifted prefix-sum arrays (the caller passes base+1),
 * so reading index -1 is valid and yields the zero prefix.
 * count1 = substring length (longest first, so the first hit wins),
 * count2 / count3 = start indices of the left / right substring.
 * Every balance comparison is doubled (<< 1) to stay in exact integer
 * arithmetic instead of dividing.  Prints the first solution found, or
 * "found non" when none exists.
 */
static void centerweight2(unsigned int *wsum, unsigned int *asum, int len)
{
int count1,count2,count3;
int found;
if(2>len)
return;
count1=len>> 1;
for(found=0;count1;count1--)
{
for(count2=0;count2+(count1<< 1)<=len;count2++)
{
/* weight and position-weighted sums of the left candidate */
int leftwsum=wsum[count2+count1-1]-wsum[count2-1];
int leftasum1=(asum[count2+count1-1]-asum[count2-1])-leftwsum*count2;
int leftasum2=(count1+1)*leftwsum-leftasum1;
for(count3=count2+count1;count3<=len-count1;count3++)
{
/* weight and position-weighted sums of the right candidate */
int rightwsum=wsum[count3+count1-1]-wsum[count3-1];
int rightasum1=(asum[count3+count1-1]-asum[count3-1])-rightwsum*count3;
int rightasum2=(count1+1)*rightwsum-rightasum1;
int temp=(leftwsum+rightwsum)*(1+(count1<< 1))-(count1<< 1)*rightwsum;
/* test all four orientation combinations of the two pieces */
if(((leftasum1+rightasum1)<< 1)==temp||
((leftasum1+rightasum2)<< 1)==temp||
((leftasum2+rightasum1)<< 1)==temp||
((leftasum2+rightasum2)<< 1)==temp)
{
found=1;
break;
}
}
if(found) break;
}
if(found) break;
}
if(count1)
printf("%d %d %d \n", count2, count3, count1);
else
printf("found non\n");
}
/*
 * Driver for centerweight2().  p is a digit string (each char '0'-'9' is
 * a weight); at most 500 digits are supported.  Builds 1-based prefix
 * sums of weight (wsum) and weight*position (asum), then passes base+1
 * pointers so the callee can safely read index -1 (the zero prefix).
 * Silently returns on bad input or allocation failure.
 */
void centerweight(char*p)
{
unsigned int *wsum, *asum;
int count;
int count2;
if(NULL==p)
return;
if(!*p)
return;
wsum=malloc(sizeof(unsigned int)*501);
if(NULL==wsum)
return;
asum=malloc(sizeof(unsigned int)*501);
if(NULL==asum)
{free(wsum); return;}
asum[0]=0;
wsum[0]=0;
for(count=1;*p;count++, p++)
{
int temp;
temp=*p-'0';
/* reject non-digit characters and strings beyond the 500-digit capacity */
if(temp>9||temp<0||count> 500)
{
free(asum);
free(wsum);
return;
}
asum[count]=asum[count-1]+temp*(count);
wsum[count]=wsum[count-1]+temp;
}
centerweight2(wsum+1, asum+1, count-1);
free(wsum);
free(asum);
}
Here is my code:
#include <stdio.h>
/*
 * Count intersecting pairs of discs, where disc i is centered at i and
 * covers [i - a[i], i + a[i]].  Sweep-line technique: every endpoint is
 * encoded into one int — left endpoints as 2*x - 1 (always odd), right
 * endpoints as 2*x (always even) — so after sorting, a left endpoint
 * equal to a right endpoint still sorts first (touching counts as
 * intersecting), and parity distinguishes left from right.
 * Returns the pair count, or -1 on bad input / allocation failure.
 * NOTE(review): relies on an external mergesort(int*, int) defined
 * elsewhere, and on <stdlib.h> for malloc/free — neither is in this paste.
 */
int number_of_disc_intersections(int a[], int n)
{
int *pNode;
int count, count2, count3;
if(2>n)
return -1;
pNode=malloc(sizeof(int)*(n<< 1));
if(NULL==pNode)
return -1;
for(count=0;count<n;count++)
{
count2=count<<1;
pNode[count2]=((count-a[count])<< 1)-1; //this is make sure that
//Same value Left will be put in the before of same value of right.
count2++;
pNode[count2]=((count+a[count])<< 1);
}
mergesort(pNode, n<< 1);
{
/* count2 = discs currently open; every new left endpoint pairs with
 * all of them, accumulated into count3. */
for(count=0,count2=0, count3=0;count<(n<< 1);count++)
{
if(pNode[count]%2)
{
//means it is left node
count3+=count2;
count2++;
}else
{
count2--;
}
}
}
free(pNode);
return count3;
}
My idea:
a> Build an array containing the left point and right point of each disc. Each left point or right point consumes one item in the array. Here is an extra trick: shift the original (left, right) ==> (left*2-1, right*2). This extra logic makes sure we can sort easily for the case where the left value of some point is the same as the right value of another point (or even the same point, in which case its range is zero); we need to be sure the left value is put before the right value (single-point contact is still contact). It also lets us tell easily whether an entry is a left/right value from the value itself.
b> merge sort the array. We need to use merge sort because worse case of quicksort is bigger than O(nlogn). The array's len is 2N, (left +right), so the merge sort will have O(2NLOG(2N)), still O(NLOGN)
c> Go through the sorted array. If an entry is a left value, it means we found the start of a new disc, so we add the number of pre-existing discs to the total number of overlapped pairs and then increase the pre-existing disc count. If it is a right value, it means we found the end of a disc, so we decrease the pre-existing disc count.
d> This logic bellow does not care the number limiting issue. (such 10,000,000). Some extra logic could be added for that.
e> Time complexity is O(NLOGN)+2N , still O(NLOGN). Space complexity, 2N for array, N for mergeSort, so still O(N).
I did not see the benefit of 1> yet. I can see that if the countable sort could be done in A and B separately, we would choose 3>, which may have O((N+Ka) + (N+Kb)) time efficiency, but you got extra space for maximum(Ka, Kb). If Ka or Kb is too big, or the A or B only support comparison sort, 2> is better, which has complexity O(NlogN), may support in space sorting, no extra space requirement.
- chenlc626 February 25, 2013Really most wonderful one, i guess i need to refresh my math memory now, :(. Just spend some time proving its correctness:
(x>>1)^x=(y>>1)^y <==> (x==y)
(x>>1)^x and ((x+1)>>1)^(x+1) only has one bit change.
binary representation of x and y could easily give out the result.
My code:
/*
 * Recursively print all 2^n binary words of length n in reflected
 * (Gray-code) order: each printed word differs from the previous one in
 * exactly one position.  p is the working buffer of n digits; k is the
 * position currently being filled; flag is the first value to place at
 * position k.  Top-level call: singlebitchange(n, 0, 0, p).
 */
void singlebitchange(int n, int k, int flag, int *p)
{
    int i;

    if (p == NULL)
        return;

    /* Base case: a complete word has been built — print it. */
    if (k == n) {
        for (i = 0; i < n; i++)
            printf("%d ", p[i]);
        printf("\n");
        return;
    }

    /* Reflected enumeration: emit the incoming bit first, then its
     * complement, recursing with opposite starting flags underneath. */
    p[k] = flag;
    singlebitchange(n, k + 1, 0, p);
    p[k] = !flag;
    singlebitchange(n, k + 1, 1, p);
}
Test Result:
Odo Print :
0 0 0 0
Odo Print :
0 0 0 1
Odo Print :
0 0 1 0
Odo Print :
0 0 1 1
Odo Print :
0 0 2 0
Odo Print :
0 0 2 1
Odo Print :
0 0 3 0
Odo Print :
0 0 3 1
Odo Print :
0 1 0 0
Odo Print :
0 1 0 1
Odo Print :
0 1 1 0
Odo Print :
0 1 1 1
Odo Print :
0 1 2 0
Odo Print :
0 1 2 1
Odo Print :
0 1 3 0
Odo Print :
0 1 3 1
Odo Print :
0 2 0 0
Odo Print :
0 2 0 1
Odo Print :
0 2 1 0
Odo Print :
0 2 1 1
Odo Print :
0 2 2 0
Odo Print :
0 2 2 1
Odo Print :
0 2 3 0
Odo Print :
0 2 3 1
Odo Print :
1 0 0 0
Odo Print :
1 0 0 1
Odo Print :
1 0 1 0
Odo Print :
1 0 1 1
Odo Print :
1 0 2 0
Odo Print :
1 0 2 1
Odo Print :
1 0 3 0
Odo Print :
1 0 3 1
Odo Print :
1 1 0 0
Odo Print :
1 1 0 1
Odo Print :
1 1 1 0
Odo Print :
1 1 1 1
Odo Print :
1 1 2 0
Odo Print :
1 1 2 1
Odo Print :
1 1 3 0
Odo Print :
1 1 3 1
Odo Print :
1 2 0 0
Odo Print :
1 2 0 1
Odo Print :
1 2 1 0
Odo Print :
1 2 1 1
Odo Print :
1 2 2 0
Odo Print :
1 2 2 1
Odo Print :
1 2 3 0
Odo Print :
1 2 3 1
Here is my code for the odoMeter. You only need to map the odoValue to the cartesian product you want.
/*
 * Advance an odometer by one position.  pOdoMax[i] is the radix of digit
 * i (digit i counts 0 .. pOdoMax[i]-1); pOdoValue holds the current
 * reading, most significant digit first.
 * Returns 1 when the odometer wrapped all the way back to zero, 0 on a
 * normal increment, and -1 on bad arguments.
 */
int odoNext(int *pOdoMax, int len, int* pOdoValue)
{
    int carry = 1;
    int pos;

    if (pOdoMax == NULL || pOdoValue == NULL || len < 1)
        return -1;

    /* Ripple the carry from the least significant digit upwards. */
    for (pos = len - 1; pos >= 0 && carry; pos--) {
        pOdoValue[pos] = (pOdoValue[pos] + carry) % pOdoMax[pos];
        carry = (pOdoValue[pos] == 0);
    }
    /* A carry that escaped past the most significant digit == full wrap. */
    return (carry && pos < 0);
}
/*
 * Reset every odometer digit to zero.
 * BUG FIX: guard against NULL / non-positive len — the original passed
 * NULL straight to memset, which is undefined behavior.
 */
void odoInit(int *pOdoValue, int len)
{
    if (pOdoValue == NULL || len < 1)
        return;
    memset((void*)pOdoValue, 0, sizeof(int)*len);
}
/* Debug helper: print the current odometer reading on one line. */
void odoPrint(int* pOdoValue, int len)
{
    int i;

    printf("Odo Print :\n");
    for (i = 0; i < len; i++)
        printf("%d ", pOdoValue[i]);
    printf("\n");
}
Here is the test results:
Push consumes :
1 1 2 1 2 3 1 1 1 3 3 2 1 2 1 1
3 2 2 1 2 1 1 2 1 2 1 7 2 1 1 2
5 1 1 1 1 1 6 1 1 1 1 1 2 2 5 1
2 1 1 1 6 1 2 2 2 2 1 2 1 1 6 1
1 1 1 2 5 1 3 1 2 2 1 3 1 3 4 1
2 1 2 2 1 2 2 1 1 4 1 5 1 2 1 6
1 1 3 1 1 2 3 2 1 1 4 1 2 1 1 1
5 2 1 1 1 2 2 1 4 1 7 1 2 1 2 1
1 1 4 1 3 3 1 1 4 1 3 1 2 1 2 1
4 5 1 1 1 1 4 3 1 2 2 2 1 2 1 2
3 1 1 5 1 2 3 1 4 1 2 1 1 3 2 2
2 1 1 1 3 1 2 4 1 1 1 7 1 2 4 1
2 1 1 3 3 5 1 2 1 3 1 1 2 2 1 1
1 3 6 1 2 1 1 1 5 1 2 3 1 1 1 2
2 2 2 5 1 2 2 4 1 2 2 1 2 1 3 1
1 1 5 1 1 2 3 1 3 1 2 1 2 1 1 6
push Consumes 502 counts of min list op in total for 256 number queue addRear: Average is 1.960938
Here is the test code:
include "minqueue/minqueue.c"
/* Test driver for the min-queue: pushes 256 random values and reports the
 * amortized number of min-list operations per push.
 * NOTE(review): the `include "minqueue/minqueue.c"` line above this main
 * is missing its leading '#'. */
int main()
{
    int len, value, minValue; /* minValue is used by the commented verification loop */
    minQueueList minList;
    srandom(1);
    initQueue(&minList, 256);
    printf("Push consumes :");
    for (len=0; len<256; len++)
    {
        value=minList.counts;
        pushRearQueue(&minList, random()%(1000));
        if (!(len%16))
            printf("\n");
        printf("%d ", minList.counts-value);
    }
    /*
    for(len=0;len<256;len++)
    {
    getMinQueue(&minList, &value);
    getMinQueueDummy(&minList, &minValue);
    if(value!=minValue)
    printf("ERROR dummy MIn is %d while quick min is %d\n",
    minValue, value);
    printf("Min is %d now ", value);
    popFrontQueue(&minList, &value);
    printf(": Pop value is %d\n", value);
    }
    */
    printf("\nPush Consumes %d counts of min list op in total for %d number queue addRear: Average is %f\n", minList.counts, 256, minList.counts/256.0);
    return 0; /* explicit success return added */
}
My Analyze is that the addRear operation will cost O(2) in average. Because you can find the number of min list operation + the left over in the min list after each addRear operation will not exceed the twice number of total addRear operations. Although the worse case would be the queue depth.
- chenlc626 February 24, 2013Here is my Code
include <string.h>
/* One slot of the queue's backing array.  minNext/minPrev are array
 * indices (not pointers) forming the doubly linked "candidate minimum"
 * list; an index equal to minQueueList.len means "no link".
 * NOTE(review): the `include <string.h>` line above is missing its '#'. */
struct node_s {
int minNext; /* index of next min-list candidate, or len */
int minPrev; /* index of previous min-list candidate, or len */
int value; /* stored queue element */
};
typedef struct{
struct node_s *pBase;
unsigned int rd;
unsigned int wr;
unsigned int len;
unsigned int minHdr;
unsigned int minTail;
unsigned int counts; /*Debug Usage*/
} minQueueList;
/*
 * Initialize a min-queue with room for `depth` elements (depth+1 slots
 * are allocated so full and empty states are distinguishable).
 * minHdr == minTail == len is the "min list empty" sentinel.
 * NOTE(review): on malloc failure the struct stays zeroed with len == 0
 * and the caller gets no error indication; a later pushRearQueue would
 * then compute `% pList->len` — a division by zero.  Callers should
 * check pList->pBase before using the queue.
 */
void initQueue(minQueueList *pList, unsigned int depth)
{
if(NULL==pList||1>depth)
return;
memset((void*)pList, 0, sizeof(minQueueList));
pList->pBase=malloc(sizeof(struct node_s)*(depth+1));
if(NULL==pList->pBase)
return;
pList->len=depth+1;
/*Following means that the list is empty*/
pList->rd=0;
pList->wr=0;
pList->minHdr=depth+1;
pList->minTail=pList->minHdr;
}
/*
 * Append `value` at the rear of the ring buffer and splice it into the
 * auxiliary min list: walking back from the min-list tail, every entry
 * larger than `value` is bypassed (it can never be the minimum again
 * while `value` remains in the queue); the new element is linked after
 * the first smaller-or-equal entry, or becomes the new head.
 * pList->counts tallies min-list operations for the amortized-cost
 * experiment in the test code.
 * Returns 0 on success, -1 when pList is NULL or the queue is full.
 */
int pushRearQueue(minQueueList *pList, int value)
{
unsigned int nextWr;
unsigned int curWr;
unsigned int count;
if(NULL==pList)
return -1;
curWr=pList->wr;
nextWr=(pList->wr+1)%pList->len;
if(nextWr==pList->rd)
{
// the queue is FULL
return -1;
}else
{
memset(pList->pBase+curWr, 0, sizeof(pList->pBase[curWr]));
pList->pBase[curWr].value=value;
pList->pBase[curWr].minNext=pList->len;/*NO NEXT FOR LATEST ONE ITEM*/
if(pList->minHdr==pList->len)
{
//no hdr at all before
pList->minHdr=curWr;
pList->minTail=curWr;
pList->pBase[curWr].minPrev=pList->len;/*NO PREV FOR FIRST ITEM*/
pList->counts++;
}else
{
/*here we need to add new element*/
/* Walk backwards from the tail; stop at the first entry not larger
 * than the new value, or take over the head if all are larger. */
for(count=pList->minTail;1;)
{
pList->counts++;
if(pList->pBase[count].value>value)
{
if(count==pList->minHdr)
{
pList->minHdr=curWr;
pList->pBase[curWr].minPrev=pList->len;
break;
}else
{
count=pList->pBase[count].minPrev;
}
}else
{
pList->pBase[count].minNext=curWr;
pList->pBase[curWr].minPrev=count;
break;
}
}
pList->minTail=curWr;
}
pList->wr=nextWr;
}
return 0;
}
/*
 * Remove the front element of the queue into *pValue.
 * Returns 0 on success, -1 on NULL arguments or an empty queue.
 */
int popFrontQueue(minQueueList* pList, int *pValue)
{
    if (NULL == pList || NULL == pValue)
        return -1;
    if (pList->wr == pList->rd)
        return -1; /* queue is empty */
    *pValue = pList->pBase[pList->rd].value;
    /* If the departing element heads the min list, advance the min head
     * (and clear the tail sentinel when the list becomes empty). */
    if (pList->rd == pList->minHdr)
    {
        pList->minHdr = pList->pBase[pList->minHdr].minNext;
        if (pList->minHdr == pList->len)
            pList->minTail = pList->len; /*That is the case it is empty now*/
    }
    pList->rd = (pList->rd+1) % pList->len;
    /* BUG FIX: function is declared int but had no success return —
     * reading the result was undefined behavior. */
    return 0;
}
/*
 * Read (without removing) the current minimum value into *pValue — O(1)
 * via the min-list head.  Returns 0 on success, -1 on NULL arguments or
 * when the min list is empty.
 */
int getMinQueue(minQueueList *pList, int *pValue)
{
    if (NULL == pList || NULL == pValue || pList->minHdr == pList->len)
        return -1;
    *pValue = pList->pBase[pList->minHdr].value;
    /* BUG FIX: declared int but returned nothing on success (UB if read). */
    return 0;
}
/* O(n) reference implementation of getMinQueue — linear scan of the ring
 * buffer from rd to wr — used only to verify the fast min list in tests.
 * Returns 0 on success, -1 on NULL arguments or an empty queue. */
int getMinQueueDummy(minQueueList *pList, int *pValue)
{
    int minValue;
    int temp;
    if (NULL == pList || NULL == pValue || (pList->rd == pList->wr))
        return -1;
    minValue = pList->pBase[pList->rd].value;
    temp = pList->rd;
    while (temp != pList->wr)
    {
        if (minValue > pList->pBase[temp].value)
            minValue = pList->pBase[temp].value;
        temp = (temp+1) % pList->len;
    }
    *pValue = minValue;
    /* BUG FIX: declared int but returned nothing on success (UB if read). */
    return 0;
}
My implementation:
#include <stdio.h>
/*
 * Count the words in p that consist of exactly one character.
 * Words are separated by single spaces (' '); every other character is
 * treated as part of a word.  Returns 0 for a NULL string.
 */
unsigned int countsingleword(char*p)
{
    unsigned int singles = 0;   /* one-character words seen so far */
    unsigned int runlen = 0;    /* length of the word currently being scanned */

    if (p == NULL)
        return 0;

    for (; *p; p++) {
        if (*p == ' ') {
            /* word boundary: was the finished word exactly one char? */
            if (runlen == 1)
                singles++;
            runlen = 0;
        } else {
            runlen++;
        }
    }
    /* account for a final word not followed by a space */
    if (runlen == 1)
        singles++;
    return singles;
}
/* Test driver for countsingleword(). */
int main()
{
    char p[]="I am a humble human! I am ";
    /* NOTE(review): countsingleword returns unsigned int; %u would be the
     * strictly matching conversion, though values here are small. */
    printf("string %s has %d single word\n", p, countsingleword(p));
    return 0; /* explicit success return added */
}
#include <stdio.h>
/*
 * In-place run-length decode of `len` pairs "<char><digit>" (e.g. "a3"
 * -> "aaa").  Stage 1 (second == 0): expand left-to-right with a write
 * pointer trailing the read pointer, which works while each pair shrinks
 * or breaks even.  When an expansion would overrun the read pointer, the
 * function recurses with second == 1 and `prefix` = the space deficit.
 * Stage 2 (second != 0): scan right until the accumulated deficit drops
 * to zero, expand that region right-to-left, then continue stage 1 on
 * whatever remains.  The printf calls are debug traces.
 * NOTE(review): assumes counts are single digits '1'-'9' and that the
 * caller's buffer can hold the fully decoded text (see p4 in the test).
 */
void chardecode(char* pInput,
unsigned int len,
unsigned int prefix,
unsigned int second)
{
char *pTemp, *pTemp2;
unsigned int shrinked;
int count,count2, count3;
if(NULL==pInput)
return;
//First round shrink here
printf("prefix is %d second is %d len %d for %s\n", prefix, second, len, pInput);
if(second)
{
//that means we are in the second state of the algorithm
//we try to find the first zero expand area
pTemp=pInput;
pTemp2=pTemp;
/* count starts at -prefix (the outstanding deficit); each pair adds
 * digit-2 (its net growth when expanded in place). */
for(count=-prefix, count2=0;count2<len;count2++)
{
pTemp++;
if(*pTemp>'0' && *pTemp<='9')
{
count+=*pTemp-'2';
if(count==0)
{
count2++;
break;
}
}else
{
return;
}
pTemp++;
}
//Here we are doing the normal expand
printf("count is %d count2 is %d\n", count, count2);
/* expand count2 pairs right-to-left so reads stay ahead of writes */
pTemp=pInput+(count2<<1)-2;
pTemp2=pTemp+count+1;
pInput=pTemp2+1;
len=len-count2;
for(;count2;count2--)
{
for(count=*(pTemp+1)-'0';count;count--)
{
*pTemp2=*pTemp;
pTemp2--;
}
pTemp-=2;
}
//now we are going to perform the left part if existed;
printf("len is %d\n", len);
if(!len)
{
*pInput='\0';
return;
}
}
/* Stage 1: left-to-right expansion while the output fits behind the
 * read pointer (count tracks net growth; <= 0 means it still fits). */
pTemp=pInput;
pTemp2=pTemp;
for(count=0, count2=0, shrinked=0;count2<len;count2++)
{
pTemp++;
if(*pTemp>'0' && *pTemp<='9')
{
count+=*pTemp-'2';
if(count<=0)
{
shrinked=1;
for(count3=*pTemp-'0';count3;count3--)
{
*pTemp2++=*(pTemp-1);
}
}else
{
//in this case we enter the state machine second state:
{
//we found that we are in the middle of the array
//and we can not shrink anymore
chardecode(pTemp-1, len-count2, *pTemp-'2'-count, 1);
return;
}
}
}else
{
return;
}
pTemp++;
}
*pTemp2='\0';
}
/* Test driver for chardecode(). */
int main()
{
    char p[]="a1b1c1";
    char p2[]="a2b2c2";
    char p3[]="a3b4 ";
    char p4[256]="a1b4c4d1e5f4"; /* oversized buffer: this input expands */
    chardecode(p, 3, 0,0);
    printf("p is %s\n", p);
    chardecode(p2,3, 0, 0);
    chardecode(p3, 2,0,0);
    printf("p2 is %s\n", p2);
    printf("p3 is %s\n", p3);
    printf("p4 is %s\n", p4); /* prints p4 BEFORE decoding (initial-state trace) */
    chardecode(p4,6, 0,0);
    printf("p4 is %s\n", p4);
    return 0; /* explicit success return added */
}
#include <stdio.h>
/*
 * NOTE(review): byte-for-byte duplicate of the chardecode() defined
 * earlier in this paste — compiling both in one translation unit would
 * be a redefinition error.  See the earlier copy for the algorithm
 * description (in-place two-stage run-length decode).
 */
void chardecode(char* pInput,
unsigned int len,
unsigned int prefix,
unsigned int second)
{
char *pTemp, *pTemp2;
unsigned int shrinked;
int count,count2, count3;
if(NULL==pInput)
return;
//First round shrink here
printf("prefix is %d second is %d len %d for %s\n", prefix, second, len, pInput);
if(second)
{
//that means we are in the second state of the algorithm
//we try to find the first zero expand area
pTemp=pInput;
pTemp2=pTemp;
for(count=-prefix, count2=0;count2<len;count2++)
{
pTemp++;
if(*pTemp>'0' && *pTemp<='9')
{
count+=*pTemp-'2';
if(count==0)
{
count2++;
break;
}
}else
{
return;
}
pTemp++;
}
//Here we are doing the normal expand
printf("count is %d count2 is %d\n", count, count2);
pTemp=pInput+(count2<<1)-2;
pTemp2=pTemp+count+1;
pInput=pTemp2+1;
len=len-count2;
for(;count2;count2--)
{
for(count=*(pTemp+1)-'0';count;count--)
{
*pTemp2=*pTemp;
pTemp2--;
}
pTemp-=2;
}
//now we are going to perform the left part if existed;
printf("len is %d\n", len);
if(!len)
{
*pInput='\0';
return;
}
}
pTemp=pInput;
pTemp2=pTemp;
for(count=0, count2=0, shrinked=0;count2<len;count2++)
{
pTemp++;
if(*pTemp>'0' && *pTemp<='9')
{
count+=*pTemp-'2';
if(count<=0)
{
shrinked=1;
for(count3=*pTemp-'0';count3;count3--)
{
*pTemp2++=*(pTemp-1);
}
}else
{
//in this case we enter the state machine second state:
{
//we found that we are in the middle of the array
//and we can not shrink anymore
chardecode(pTemp-1, len-count2, *pTemp-'2'-count, 1);
return;
}
}
}else
{
return;
}
pTemp++;
}
*pTemp2='\0';
}
/* NOTE(review): duplicate of the earlier chardecode() test main(); two
 * main definitions cannot coexist in one translation unit. */
int main()
{
char p[]="a1b1c1";
char p2[]="a2b2c2";
char p3[]="a3b4 ";
char p4[256]="a1b4c4d1e5f4";
chardecode(p, 3, 0,0);
printf("p is %s\n", p);
chardecode(p2,3, 0, 0);
chardecode(p3, 2,0,0);
printf("p2 is %s\n", p2);
printf("p3 is %s\n", p3);
printf("p4 is %s\n", p4);
chardecode(p4,6, 0,0);
printf("p4 is %s\n", p4);
}
It makes quite a time to find out the algorithm.
1> In the beginning of the processing, if the decode will result in shrinking or the space on the left is enough to contain the expansion, do it using two pointers, one write and one read.
2> Once you get a word pair you cannot expand to the left — meaning your write pointer would collide with your read pointer — the algorithm changes to a second stage: it starts counting the amount of extra space needed on the right until it reaches the end of the array or the extra space needed drops to zero. After that, you decode your char array using another two pointers, this time with the write pointer ahead of the read pointer.
3> Goto 1> if there are still some bytes left.
write the sort compare criteria of two strings (str1, str2) as bellow
- chenlc626 March 07, 20131> build two arrays p1[256], p2[256], set to zero in the beginning.
2> for each character c in str1, let p1[a]++
3> for each character d in str2, let p2[d]++;
4> check count from 1 to 255,
4.1> if p1[count]>p2[count],
4.1.1> If there exist one count2 >count that p2[count2]>0 , then str1 < str2,
for case str1="aaaacdefdg", str2="aaab"
4.1.2> if for all count2 >count, p2[count2]=0 then str1>str2
for case str1="aaaacdefdg", str2="aaa"
4.2> if p1[count]<p2[count]:
4.2.1> if there is count2 >count, p1[count2]>0, then str1>str2
symmetric as 4.1.1.
4.2.2> if for all count2> count, p1[count2]=0, then str1<str2
symmetric as 4.1.2.
5> if all p[count]=0, then using strcmp to find which str is bigger.
then people could use the quick sort to perform the sorting, by copying the str address but not the whole string while doing the swapping.