# Smallest k numbers: heap-sort the input, then take the first k elements.
def getinput(tinput, k):
    size = len(tinput)
    if size == 0 or k > size:
        return []
    array = sort(tinput, size)
    return array[:k]

# Heap sort: build a max-heap, then repeatedly move the root to the tail.
def sort(array, size):
    for i in range(size // 2 - 1, -1, -1):  # heapify from the last non-leaf node
        array = adjust(array, i, size - 1)
    for j in range(size - 1, -1, -1):  # swap the current maximum to position j
        array[0], array[j] = array[j], array[0]
        array = adjust(array, 0, j - 1)
    return array

# Sift array[start] down until the subtree rooted at start is a max-heap again.
def adjust(array, start, end):
    temp = array[start]
    i = start * 2 + 1
    while i <= end:
        if i + 1 <= end and array[i + 1] > array[i]:
            i += 1  # take the larger child
        if array[i] <= temp:
            break
        array[start] = array[i]  # move the larger child up
        start = i
        i = start * 2 + 1
    array[start] = temp
    return array

# Selection sort
def choose_sort(r):
    length = len(r)
    for i in range(length):
        minimum = i
        for j in range(i + 1, length):
            if r[minimum] > r[j]:
                minimum = j
        if i != minimum:
            r[i], r[minimum] = r[minimum], r[i]
    return r

# Quicksort
def quicksort(r, low, high):
    if low < high:
        div = partition(r, low, high)
        quicksort(r, low, div - 1)
        quicksort(r, div + 1, high)

# Partition around key = r[low]: smaller elements end up on its left, larger on its right.
def partition(r, low, high):
    key = r[low]
    while low < high:
        while low < high and r[high] >= key:
            high -= 1
        r[low], r[high] = r[high], r[low]
        while low < high and r[low] <= key:
            low += 1
        r[low], r[high] = r[high], r[low]
    return low

# Merge sort; temp is a scratch buffer of len(arr) that the caller allocates once.
def merge_sort(arr, temp, s, end):
    if s < end:
        mid = (s + end) // 2
        merge_sort(arr, temp, s, mid)
        merge_sort(arr, temp, mid + 1, end)
        merge(arr, temp, s, mid, end)

# Merge the sorted runs arr[s..m] and arr[m+1..end] back into arr via temp.
def merge(arr, temp, s, m, end):
    i = s
    j = m + 1
    t = 0
    while i <= m and j <= end:
        if arr[i] <= arr[j]:
            temp[t] = arr[i]
            i += 1
        else:
            temp[t] = arr[j]
            j += 1
        t += 1
    while i <= m:
        temp[t] = arr[i]
        t += 1
        i += 1
    while j <= end:
        temp[t] = arr[j]
        t += 1
        j += 1
    t = 0
    while s <= end:
        arr[s] = temp[t]
        s += 1
        t += 1
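# A quick usage sketch (not from the original notes; the sample data is
# illustrative) exercising the sorting helpers above.
data = [4, 5, 1, 6, 2, 7, 3, 8]
print(getinput(list(data), 4))    # k smallest via heap sort -> [1, 2, 3, 4]
print(choose_sort(list(data)))    # selection sort -> [1, 2, 3, 4, 5, 6, 7, 8]
lst = list(data)
quicksort(lst, 0, len(lst) - 1)   # in-place quicksort
print(lst)
lst = list(data)
merge_sort(lst, [None] * len(lst), 0, len(lst) - 1)  # caller allocates the scratch buffer
print(lst)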
import tensorflow as tf


class TextCnn:
    def __init__(self, sequence_length, num_classes, embedding_size, filter_sizes,
                 num_filters, l2_reg_lambda=0.0, attention_dim=100, use_attention=True):
        self.embedded_chars = tf.placeholder(tf.float32, [None, sequence_length, embedding_size], name='embedded_chars')
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name='input_y')
        self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
        self.sequence_length = sequence_length
        self.embedding_size = embedding_size
        l2_loss = tf.constant(0.0)

        if use_attention:
            self.attention_hidden_dim = attention_dim
            self.attention_W = tf.Variable(tf.random_uniform([self.embedding_size, self.attention_hidden_dim], 0.0, 1.0), name='attention_W')
            self.attention_U = tf.Variable(tf.random_uniform([self.embedding_size, self.attention_hidden_dim], 0.0, 1.0), name='attention_U')
            self.attention_V = tf.Variable(tf.random_uniform([self.attention_hidden_dim, 1], 0.0, 1.0), name='attention_V')
            self.output_att = list()
            with tf.name_scope('attention'):
                input_att = tf.split(self.embedded_chars, self.sequence_length, axis=1)
                for index, x_i in enumerate(input_att):
                    x_i = tf.reshape(x_i, [-1, self.embedding_size])
                    c_i = self.attention(x_i, input_att, index)
                    inp = tf.concat([x_i, c_i], axis=1)  # position embedding + its attention context
                    self.output_att.append(inp)
                input_conv = tf.reshape(tf.concat(self.output_att, axis=1),
                                        [-1, self.sequence_length, self.embedding_size * 2],
                                        name='input_convolution')
            self.input_conv_expanded = tf.expand_dims(input_conv, -1)
        else:
            self.input_conv_expanded = tf.expand_dims(self.embedded_chars, -1)

        dim_input_conv = self.input_conv_expanded.shape[-2].value
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope('conv-maxpool-%s' % filter_size):
                filter_shape = [filter_size, dim_input_conv, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='W')
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name='b')
                conv = tf.nn.conv2d(self.input_conv_expanded, W, strides=[1, 1, 1, 1],
                                    padding='VALID', name='convolution')
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')
                pooled = tf.nn.max_pool(h, ksize=[1, sequence_length - filter_size + 1, 1, 1],
                                        strides=[1, 1, 1, 1], padding='VALID', name='pool')
                pooled_outputs.append(pooled)

        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        with tf.name_scope('dropout'):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        with tf.name_scope('output'):
            W = tf.get_variable('W', shape=[num_filters_total, num_classes],
                                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name='b')
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name='scores')
            self.predictions = tf.argmax(self.scores, 1, name='predictions')
            self.probabilities = tf.nn.sigmoid(self.scores, name='probabilities')

        with tf.name_scope('loss'):
            losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        with tf.name_scope('accuracy'):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'), name='accuracy')

    # Additive attention: score every position against x_i, then build the context
    # vector c_i as the softmax-weighted sum of the other positions' embeddings.
    def attention(self, x_i, x, index):
        e_i = []
        c_i = []
        for output in x:
            output = tf.reshape(output, [-1, self.embedding_size])
            atten_hidden = tf.tanh(tf.add(tf.matmul(x_i, self.attention_W), tf.matmul(output, self.attention_U)))
            e_i_j = tf.matmul(atten_hidden, self.attention_V)
            e_i.append(e_i_j)
        e_i = tf.concat(e_i, axis=1)
        alpha_i = tf.nn.softmax(e_i)
        alpha_i = tf.split(alpha_i, self.sequence_length, 1)
        for j, (alpha_i_j, output) in enumerate(zip(alpha_i, x)):
            if j == index:
                continue  # skip the position itself
            output = tf.reshape(output, [-1, self.embedding_size])
            c_i_j = tf.multiply(alpha_i_j, output)
            c_i.append(c_i_j)
        c_i = tf.reshape(tf.concat(c_i, axis=1), [-1, self.sequence_length - 1, self.embedding_size])
        c_i = tf.reduce_sum(c_i, 1)
        return c_i

# KMP
# Build the failure (next) array for pattern t.
def getnextarray(t):
    next = [-1, 0]
    for i in range(2, len(t)):
        next.append(0)
    for j in range(2, len(t)):
        k = next[j - 1]
        while k != -1:
            if t[j - 1] == t[k]:
                next[j] = k + 1
                break
            else:
                k = next[k]
            next[j] = 0
    return next

# KMP matching: return the first index where pattern t occurs in s, or -1.
def kmpalg(s, t):
    next = getnextarray(t)
    i = 0
    j = 0
    while i < len(s) and j < len(t):
        if j == -1 or s[i] == t[j]:
            i += 1
            j += 1
        else:
            j = next[j]
    if j == len(t):
        return i - j
    else:
        return -1

# Compute base raised to the power exponent.
def Power(self, base, exponent):
    # write code here
    if base == 0:  # a base of 0 returns 0
        return 0
    result = 1  # accumulate into a separate variable; multiplying into base would change base itself
    if exponent > 0:  # positive exponent: multiply directly
        for i in range(exponent):
            result = result * base
        return result
    elif exponent == 0:  # an exponent of 0 returns 1
        return 1
    else:  # negative exponent: negate it, then return the reciprocal of the product
        exponent = -exponent
        for i in range(exponent):
            result = result * base
        return 1.0 / result

# Reorder the array so that all odd numbers come before the even numbers.
def reOrderArray(self, array):
    # write code here
    # Alternative: collect into two lists, one for odds and one for evens:
    # odd, even = [], []
    # for a in array:
    #     if a % 2 == 0:
    #         even.append(a)
    #     else:
    #         odd.append(a)
    # return odd + even
    # In-place insertion-style version: shift evens right past each odd element.
    for i in range(1, len(array)):
        if array[i] % 2 != 0:
            temp = array[i]
            j = i - 1
            while j >= 0 and array[j] % 2 == 0:
                array[j + 1] = array[j]
                j -= 1
            array[j + 1] = temp
    return array
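# A quick usage sketch (illustrative inputs, not from the original notes) for the
# KMP matcher and the two routines above; `self` is passed as None because
# Power and reOrderArray are written as unbound methods.
print(kmpalg('abxabcabcaby', 'abcaby'))      # -> 6
print(Power(None, 2, -3))                    # -> 0.125
print(reOrderArray(None, [1, 2, 3, 4, 5]))   # -> [1, 3, 5, 2, 4]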
# The k-th node from the end of a linked list.
def FindKthToTail(self, head, k):
    # write code here
    # Method 1: traverse once to get the length n, then walk to node n-k+1,
    # which is the k-th node from the end.
    # Method 2: use two pointers; move the first one k-1 steps ahead, then move
    # both together, always k-1 apart, until the leading pointer reaches the
    # last node; the trailing pointer then points at the k-th node from the end.
    if head is None or k == 0:
        return None
    bef = head
    for i in range(k - 1):  # advance k-1 steps first; range starts at 0, hence k-1!
        if bef.next is not None:
            bef = bef.next
        else:
            return None  # the list has fewer than k nodes
    after = head
    while bef.next is not None:
        bef = bef.next
        after = after.next
    return after

# Merge two sorted linked lists, iteratively, using a dummy head node.
def Merge(pHead1, pHead2):
    dummy = ListNode(0)
    pHead = dummy
    while pHead1 and pHead2:
        if pHead1.val >= pHead2.val:
            dummy.next = pHead2
            pHead2 = pHead2.next
        else:
            dummy.next = pHead1
            pHead1 = pHead1.next
        dummy = dummy.next
    if pHead1:
        dummy.next = pHead1
    if pHead2:
        dummy.next = pHead2
    return pHead.next

# Variant without a dummy node: track the merged head and the current tail.
def Merge(self, pHead1, pHead2):
    # write code here
    mergenode = None
    current = None
    while pHead1 is not None and pHead2 is not None:
        if pHead1.val <= pHead2.val:
            if mergenode is None:
                mergenode = current = pHead1
            else:
                current.next = pHead1
                current = current.next
            pHead1 = pHead1.next
        else:
            if mergenode is None:
                mergenode = current = pHead2
            else:
                current.next = pHead2
                current = current.next
            pHead2 = pHead2.next
    if current is not None:
        current.next = pHead1 if pHead1 is not None else pHead2
    return mergenode

# Recursive variant.
def Merge(list1, list2):
    if list1 is None:
        return list2
    if list2 is None:
        return list1
    if list1.val <= list2.val:
        res = list1
        res.next = Merge(list1.next, list2)
    else:
        res = list2
        res.next = Merge(list1, list2.next)
    return res

# Does tree proot2 occur as a substructure of tree proot1?
def HasSubtree(proot1, proot2):
    result = False
    if proot1 is not None and proot2 is not None:
        if proot1.val == proot2.val:
            result = doestree(proot1, proot2)
        if not result:
            result = HasSubtree(proot1.left, proot2)
        if not result:
            result = HasSubtree(proot1.right, proot2)
    return result

# With the roots matching, check whether proot2's structure matches proot1's.
def doestree(proot1, proot2):
    if proot2 is None:
        return True
    if proot1 is None:
        return False
    if proot1.val != proot2.val:
        return False
    return doestree(proot1.left, proot2.left) and doestree(proot1.right, proot2.right)

char *strcpy(char *strDes, const char *string)
{
    if (string == NULL || strDes == NULL)  /* bail out if either pointer is NULL */
    {
        return NULL;
    }
    char *address = strDes;
    while (*string != '\0')
    {
        *strDes++ = *string++;
    }
    *strDes = '\0';  /* terminate the copy */
    return address;
}
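# A minimal usage sketch (not from the original notes). It assumes this
# bare-bones ListNode class, which the list snippets above use but never define,
# plus a hypothetical build_list helper for constructing test input.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

def build_list(values):
    # Hypothetical helper: turn a Python list into a singly linked list.
    dummy = ListNode(0)
    cur = dummy
    for v in values:
        cur.next = ListNode(v)
        cur = cur.next
    return dummy.next

merged = Merge(build_list([1, 3, 5]), build_list([2, 4, 6]))  # the recursive version, i.e. the last Merge defined
print(FindKthToTail(None, merged, 2).val)  # 2nd node from the end of 1->2->3->4->5->6 -> 5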
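# A similar sketch for HasSubtree, assuming this minimal TreeNode definition
# (illustrative; not part of the original notes).
class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

big = TreeNode(8, TreeNode(8, TreeNode(9), TreeNode(2)), TreeNode(7))
small = TreeNode(8, TreeNode(9), TreeNode(2))
print(HasSubtree(big, small))         # -> True
print(HasSubtree(big, TreeNode(5)))   # -> False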