
cM API Documentation

module.py

#
# Collective Mind
#
# See cM LICENSE.txt for licensing details.
# See cM Copyright.txt for copyright details.
#
# Developer(s): (C) Grigori Fursin, started on 2011.09
#

# Should always be here
ini={}
cm_kernel=None

# Local settings
import copy
import time
import json
import random

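# Note (an assumption based on how they are used below): 'ini' and 'cm_kernel'
# are expected to be populated by the cM framework when this module is loaded,
# before any of the functions below are called.
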
# ============================================================================
def init(i):
    return {'cm_return':0}

# ============================================================================
def record(i):
    """
    Process and record semi-raw data during benchmarking

    Input:  {
              cm_array    - semi-raw benchmarking info that will be converted
                            to cM format and recorded to cM repository
              cm_repo_uoa - working repository
            }

    Output: {
              cm_return   - if =0, success
            }
    """

    cm_kernel.print_for_con('')
    cm_kernel.print_for_con('Processing and recording semi-raw data during benchmarking ...')

    repo=i.get('cm_repo_uoa','')

    entry={} # This entry will be prepared and saved in the repository
    entry['points']=[] # Universal multi-dimensional points in space (for visualization and analysis)

    ca=i.get('cm_array',{})
    if len(ca)>0:
       cbs=ca['common_benchmark_setup']
       be=ca['benchmark_execution']

       system_uoa='' # Initialized here so that it is defined even if no system string is provided

       # Checking board *********************************************************************
       ss=cbs.get('system_string','')
       if ss!='':
          cm_kernel.print_for_con('')
          cm_kernel.print_for_con('Searching for system "'+ss+'" in the repository ...')

          ii={'cm_run_module_uoa':ini['cfg']['cm_modules']['cm-core'],
              'cm_action':'search',
              'key_0':'##cm_properties#system_string',
              'value_0':ss,
              'use_flat_array':'yes',
              'module_selection':[ini['cfg']['cm_modules']['ctuning.system']],
             }
          if repo!='': ii['repo_selection']=[repo]
          r=cm_kernel.access(ii)
          if r['cm_return']>0: return r

          all=r['all']

          if len(all)==0:
             cm_kernel.print_for_con('  System not found in the repository, adding new one ...')

             # Prepare to add
             jj={'cm_run_module_uoa':ini['cfg']['cm_modules']['ctuning.system'],
                 'cm_action':'add',
                 'cm_array':{'cm_properties':{'system_string':ss}},
                 'cm_display_as_alias':ss
                }
             if repo!='': jj['cm_repo_uoa']=repo

             r=cm_kernel.access(jj)
             if r['cm_return']>0: return r

             system_uoa=r['cm_uid']

             cm_kernel.print_for_con('  System entry added: '+system_uoa)
          else:
             system_uoa=all[0]['cm_data_uoa']

             cm_kernel.print_for_con('  System entry found: '+system_uoa)

       # Prepare common info in the entry *********************************************************************
       entry['scenario_module_uoa']=ini['cm_module_uid']
       entry['pipeline_module_uoa']=ini['cfg']['cm_modules']['ctuning.pipeline.build_and_run_program']

       # Will be used to find exactly the right entry during new explorations/search
       entry['summary_point']={}
       entry['summary_point']['target_processor_uoa']=cbs['target_processor_uoa']
       entry['summary_point']['tuning_objective']=cbs['tuning_objective']
       entry['summary_point']['notes']=cbs['notes']
       entry['summary_point']['ctuning_system_uoa']=system_uoa
       entry['summary_point']['benchmark_execution_internal_id']=be['internal_id']
       entry['summary_point']['benchmark_execution_reason']=be['reason']
       entry['summary_point']['common_benchmark_setup_svn_repo_revision']=cbs['svn_repo_revision']

       # Will be used to visualize and prune coarse-grain results in the cM
       entry['summary_point_relaxed']={}
       entry['summary_point_relaxed'].update(entry['summary_point'])

       entry['summary_point_relaxed']['system_string']=cbs['system_string']

       # Check if already added
       cm_kernel.print_for_con('')
       cm_kernel.print_for_con('Checking if entry already added ...')

       ii={'cm_run_module_uoa':ini['cfg']['cm_modules']['ctuning.pipeline.build_and_run_program'],
           'cm_action':'find_aggregated_entry',
           'record_module_uoa':ini['cm_module_uid'],
           'summary_point':entry['summary_point']}
       if repo!='': ii['work_repo_uoa']=repo
       r=cm_kernel.access(ii)
       if r['cm_return']>0: return r

       ae=r['aggregated_entries']

       if len(ae)>0:
          # Go through results *********************************************************************
          cm_kernel.print_for_con('')
          cm_kernel.print_for_con('Entry already exists ...')

          r={}
          r.update(ae[0])
          r['cm_uid']=r['cm_data_uid']
          r['cm_return']=0
          return r
       else:
          # Go through results *********************************************************************
          cm_kernel.print_for_con('')
          cm_kernel.print_for_con('Processing individual benchmarks ...')

          results=ca.get('results',{})
          if len(results)>0:
             for result in results:
                 bench=result['benchmark_name']

                 bench_uoa=''

                 cm_kernel.print_for_con('')
                 cm_kernel.print_for_con('Searching for benchmark "'+bench+'" in the repository ...')

                 ii={'cm_run_module_uoa':ini['cfg']['cm_modules']['cm-core'],
                     'cm_action':'search',
                     'key_0':'##cm_properties#benchmark_string',
                     'value_0':bench,
                     'use_flat_array':'yes',
                     'module_selection':[ini['cfg']['cm_modules']['code.source']],
                     'timeout':'1000'
                    }
                 if repo!='': ii['repo_selection']=[repo]
                 r=cm_kernel.access(ii)
                 if r['cm_return']>0: return r

                 all=r['all']

                 if len(all)==0:
                    cm_kernel.print_for_con('  Benchmark not found in the repository, adding new one ...')

                    # Prepare to add
                    jj={'cm_run_module_uoa':ini['cfg']['cm_modules']['code.source'],
                        'cm_action':'add',
                        'cm_array':{'cm_properties':{'benchmark_string':bench}},
                        'cm_display_as_alias':bench
                       }
                    if repo!='': jj['cm_repo_uoa']=repo

                    r=cm_kernel.access(jj)
                    if r['cm_return']>0: return r

                    bench_uoa=r['cm_uid']

                    cm_kernel.print_for_con('  Benchmark entry added: '+bench_uoa)
                 else:
                    bench_uoa=all[0]['cm_data_uoa']

                    cm_kernel.print_for_con('  Benchmark entry found: '+bench_uoa)

                 # Adding points to entry ************************************************************
                 point={}

                 # Raw results from benchmarks
                 point['raw']=result

                 # Results and other data converted to cTuning pipeline (already has description and connections)

                 # Load pipeline description
                 r=cm_kernel.get_data_description({'cm_module_uoa':ini['cfg']['cm_modules']['ctuning.pipeline.build_and_run_program']})
                 if r['cm_return']>0: return r
                 pu=r['cm_data_obj']['cfg']['cm_p_uids']

#                 point['pipeline_input']={}
#                 point['pipeline_input']['state']={}
#                 point['pipeline_input']['state'][pu['init']]={'input':entry['summary_point']}

                 point['pipeline_output']={}
                 point['pipeline_output']['cm_return']=0
                 point['pipeline_output']['fail']=False
                 point['pipeline_output']['state']={}
                 point['pipeline_output']['state'][pu['init']]={'input':entry['summary_point']}

                 point['pipeline_output']['state'][pu['init_params']]={'output':{'global_source_code_name':result['benchmark_name'],
                                                                                 'program_uoa':bench_uoa}}

                 point['pipeline_output']['state'][pu['build_program']]={'output':{'exit_code':result['build_exit_code'],
                                                                                   'run_time_by_module':result['compilation_time']}}

                 point['pipeline_output']['state'][pu['run_program']]={'output':{'exit_code':result['run_exit_code'],
                                                                                 'benchmark_score':result['benchmark_score']}}

                 point['pipeline_output']['state'][pu['check_output']]={'output':{'output_correct':result['output_correct']}}

                 entry['points'].append(point)

          # Record in the repository
          cm_kernel.print_for_con('')
          cm_kernel.print_for_con('Entry prepared! Recording in the repository ...')

          ii={'cm_run_module_uoa':ini['cm_module_uid'],
              'cm_action':'add',
              'cm_array':entry
             }
          if repo!='': ii['cm_repo_uoa']=repo
          return cm_kernel.access(ii)

    return {'cm_return':0}

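# A minimal, hypothetical usage sketch for record(); the key names follow the
# code above, all values are illustrative, and in practice this action would
# normally be reached through cm_kernel.access rather than called directly:
#
#   r=record({'cm_repo_uoa':'my-repo',
#             'cm_array':{'common_benchmark_setup':{'system_string':'my-board',
#                                                   'target_processor_uoa':'',
#                                                   'tuning_objective':'performance',
#                                                   'notes':'',
#                                                   'svn_repo_revision':'1234'},
#                         'benchmark_execution':{'internal_id':'run-001',
#                                                'reason':'nightly test'},
#                         'results':[{'benchmark_name':'susan',
#                                     'build_exit_code':0,
#                                     'compilation_time':2.1,
#                                     'run_exit_code':0,
#                                     'benchmark_score':153.2,
#                                     'output_correct':1}]}})
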
# ============================================================================
def analyze_variation(i):
    """
    Analyze variation in characteristics

    Input:  {
              (data)                           - list of data entries to analyze
              (prune_by_summary_point_relaxed) - if data is not provided, search will be performed for all entries;
                                                 returned entries can be pruned by this dictionary
                                                 using 'summary_point_relaxed' in entries
              characteristic_key               - key to get characteristic (will be converted to float)
              percent_threshold                - percent threshold to detect benchmarks where characteristic has high variability
              (cm_repo_uoa)                    - working repository
            }

    Output: {
              cm_return   - if =0, success
            }
    """

    cm_kernel.print_for_con('')
    cm_kernel.print_for_con('Analyzing variation in characteristics ...')

    repo=i.get('cm_repo_uoa','')

    ck=i.get('characteristic_key','')
    if ck=='':
       return {'cm_return':1,'cm_error':'characteristic_key is not defined'}

    # Get benchmarks and their variation
    ii={'data':i.get('data',[]),
        'prune_by_summary_point_relaxed':i.get('prune_by_summary_point_relaxed',{}),
        'characteristic_key':ck}
    if repo!='': ii['cm_repo_uoa']=repo
    r=get_stats_for_benchmarks(ii)
    if r['cm_return']>0: return r

    variation=r['stats']
    if len(variation)>0:
       # Find benchmarks with high variation
       hbench=[] #benchmarks with high variability
       pt=float(i.get('percent_threshold','0.10'))
       for bench in variation:
           var=variation[bench]
           v1=float(var['percent_delta_to_min'])

           if v1>pt:
              hbench.append(bench)

       # Print results
       cm_kernel.print_for_con('')
       cm_kernel.print_for_con('Results:')

       for t in ('high', 'normal'):
           cm_kernel.print_for_con('')
           if t=='high':
              cm_kernel.print_for_con('  Benchmarks with high variability:')
           else:
              cm_kernel.print_for_con('  Normal benchmarks (percent < '+str(pt*100)+'%) :')

           cm_kernel.print_for_con('')
           for bench in variation:
               var=variation[bench]
               if (t=='high' and bench in hbench) or \
                  (t=='normal' and bench not in hbench):
                  smin=str(float(var['min']))
                  smax=str(float(var['max']))
                  sper=str(float(var['percent_delta_to_min'])*100)
                  cm_kernel.print_for_con('     '+bench+'   min='+smin+'; max='+smax+'; percent_variation='+sper)

    return {'cm_return':0}

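# A hypothetical usage sketch for analyze_variation(); pruning keys follow the
# 'summary_point_relaxed' dictionary written by record(), while the exact
# flattened characteristic_key (including the pipeline stage UID) is an
# assumption and only illustrates the notation:
#
#   r=analyze_variation({'prune_by_summary_point_relaxed':{'system_string':'my-board'},
#                        'characteristic_key':'##pipeline_output#state#<run_program_stage_uid>#output#benchmark_score',
#                        'percent_threshold':'0.05',
#                        'cm_repo_uoa':'my-repo'})
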
# ============================================================================
def compare_systems(i):
    """
    Compare systems through benchmarking

    Input:  {
              (data1)                           - list of data entries for the first system
              (data2)                           - list of data entries for the second system
              (prune_by_summary_point_relaxed1) - if data1 is not provided, prune searched entries for the first system
                                                  by this dictionary (see analyze_variation)
              (prune_by_summary_point_relaxed2) - same for the second system
              characteristic_key                - key to get characteristic (will be converted to float)
              (cm_repo_uoa)                     - working repository
            }

    Output: {
              cm_return   - if =0, success
            }
    """

    cm_kernel.print_for_con('')
    cm_kernel.print_for_con('Comparing systems through benchmarking ...')

    repo=i.get('cm_repo_uoa','')

    ck=i.get('characteristic_key','')
    if ck=='':
       return {'cm_return':1,'cm_error':'characteristic_key is not defined'}

    # Get benchmarks (1) and their variation
    ii={'data':i.get('data1',[]),
        'prune_by_summary_point_relaxed':i.get('prune_by_summary_point_relaxed1',{}),
        'characteristic_key':ck}
    if repo!='': ii['cm_repo_uoa']=repo
    r=get_stats_for_benchmarks(ii)
    if r['cm_return']>0: return r
    variation1=r['stats']

    # Get benchmarks (2) and their variation
    ii={'data':i.get('data2',[]),
        'prune_by_summary_point_relaxed':i.get('prune_by_summary_point_relaxed2',{}),
        'characteristic_key':ck}
    if repo!='': ii['cm_repo_uoa']=repo
    r=get_stats_for_benchmarks(ii)
    if r['cm_return']>0: return r
    variation2=r['stats']

    if len(variation1)>0 and len(variation2)>0:
       # Compare systems

       cm_kernel.print_for_con('  Testing            min1      <= min2      <= max2      <= max1')
       cm_kernel.print_for_con('')

       fail=False
       for bench in variation1:
           if bench not in variation2: continue # Skip benchmarks that were not measured on the second system

           var1=variation1[bench]
           var2=variation2[bench]

           smin1=str(float(var1['min']))
           smax1=str(float(var1['max']))

           smin2=str(float(var2['min']))
           smax2=str(float(var2['max']))

           stest=''
           if float(var1['min'])>float(var2['min']) or float(var2['max'])>float(var1['max']):
              stest=' - test failed'
              fail=True

           cm_kernel.print_for_con('     '+bench+'   '+smin1+' <= '+smin2+' <= '+smax2+' <= '+smax1+'   '+stest)

       cm_kernel.print_for_con('')
       if fail:
           cm_kernel.print_for_con('  Test failed - systems are not similar')
       else:
           cm_kernel.print_for_con('  Systems are similar')

    return {'cm_return':0}

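# A hypothetical usage sketch for compare_systems(); data1/data2 are lists of
# entry UOAs recorded by record() for the two systems being compared (all
# names below are illustrative):
#
#   r=compare_systems({'data1':['uoa-of-entry-for-system-a'],
#                      'data2':['uoa-of-entry-for-system-b'],
#                      'characteristic_key':'##pipeline_output#state#<run_program_stage_uid>#output#benchmark_score',
#                      'cm_repo_uoa':'my-repo'})
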
# ============================================================================
def get_stats_for_benchmarks(i):
    """
    Get statistics (variation of a characteristic) per benchmark

    Input:  {
              (data)                           - list of data entries to analyze
              (prune_by_summary_point_relaxed) - if data is not provided, search will be performed for all entries;
                                                 returned entries can be pruned by this dictionary
                                                 using 'summary_point_relaxed' in entries
              characteristic_key               - key to get characteristic (will be converted to float)
              (cm_repo_uoa)                    - working repository
            }

    Output: {
              cm_return   - if =0, success
              stats       - dictionary of format
                            {
                              "benchmark1":{
                                            "min"                  - min value
                                            "max"                  - max value
                                            "delta"                - max-min
                                            "percent_delta_to_min" - delta/min
                                            "mean"                 - mean
                                            "std"                  - standard deviation
                                            "percent_std_to_min"   - standard deviation/min
                                           }
                              ...
                            }

            }
    """

    repo=i.get('cm_repo_uoa','')

    ck=i.get('characteristic_key','')
    if ck=='':
       return {'cm_return':1,'cm_error':'characteristic_key is not defined'}

    benchmarks={} # collect stats per benchmark

    stats={}

    # Get data entries either directly from the input array or via repository search
    data=i.get('data',[])
    if len(data)==0:
       cm_kernel.print_for_con('')
       cm_kernel.print_for_con('Selecting/searching entries ...')
       cm_kernel.print_for_con('')

       ii={'cm_run_module_uoa':ini['cfg']['cm_modules']['cm-core'],
           'cm_action':'search',
           'module_selection':[ini['cm_module_uid']]
          }
       if repo!='': ii['repo_selection']=[repo]

       ii['use_flat_array']='yes'
       ii['show_search_progress']='yes'
       ii['timeout']='900'

       # Check if pruning is needed
       sum1=i.get('prune_by_summary_point_relaxed',{})
       if len(sum1)>0:
          j=0
          for k in sum1:
              ii['key_'+str(j)]='##summary_point_relaxed#'+k
              ii['value_'+str(j)]=sum1[k]
              j+=1
          ii['strict_match']='yes'

       r=cm_kernel.access(ii)
       if r['cm_return']>0: return r

       # Adding entries to data
       for q in r['all']:
           data.append(q['cm_data_uoa'])

    # If data entries are selected
    if len(data)>0:
       # Collect statistics per benchmark in an array for further analysis
       num=0
       for q in data:
           num+=1

           # Load data entry
           cm_kernel.print_for_con('')
           cm_kernel.print_for_con('Loading entry '+q+' ('+str(num)+' of '+str(len(data))+') ...')

           ii={'cm_run_module_uoa':ini['cm_module_uid'],
               'cm_action':'load',
               'cm_data_uoa':q}
           if repo!='': ii['cm_repo_uoa']=repo
           r=cm_kernel.access(ii)
           if r['cm_return']>0: return r

           d=r['cm_data_obj']['cfg']

           points=d.get('points',[])

           # Go through points; each point is an individual benchmark execution in an entry
           cm_kernel.print_for_con('')
           for p in points:
               # 'f53e1f0898493e87' is presumably the UID of the 'init_params' pipeline stage
               # (its output holds 'global_source_code_name'; see record() above)
               bench=p.get('pipeline_output',{}).get('state',{}).get('f53e1f0898493e87',{}).get('output',{}).get('global_source_code_name','')

               rg=cm_kernel.get_value_by_flattened_key({'cm_array':p, 'cm_key':ck})
               if rg['cm_return']>0: return rg
               v=rg['cm_value']
               if v==None or v=='': continue # Skip points that do not have this characteristic

               fv=float(v) # Characteristic is analyzed as a float

               cm_kernel.print_for_con(bench+' - '+str(v))

               # If first time, add list to the stat array
               if bench not in benchmarks:
                  benchmarks[bench]=[]

               benchmarks[bench].append(fv)

       # Process statistically
#       print json.dumps(benchmarks, indent=2)

       cm_kernel.print_for_con('')
       cm_kernel.print_for_con('Checking variation ...')
       ii={'cm_run_module_uoa':ini['cfg']['cm_modules']['math.variation'],
           'cm_action':'process',
           'array':benchmarks}
       r=cm_kernel.access(ii)
       if r['cm_return']>0: return r

       stats=r['array']

    return {'cm_return':0, 'stats':stats}

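# A hypothetical usage sketch for get_stats_for_benchmarks(); when 'data' is
# omitted, entries are found via repository search and pruned by the relaxed
# summary point (key names follow the code above, values are illustrative):
#
#   r=get_stats_for_benchmarks({'prune_by_summary_point_relaxed':{'system_string':'my-board'},
#                               'characteristic_key':'##pipeline_output#state#<run_program_stage_uid>#output#benchmark_score',
#                               'cm_repo_uoa':'my-repo'})
#   if r['cm_return']==0:
#      stats=r['stats'] # e.g. stats['susan']['percent_delta_to_min']
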
# ============================================================================
def scenario(i):
    """
    Build and run program, save to repository

    Input:  {
              caller_module_uid    - caller module UID
              state                - pipeline state
              exploration_scenario - exploration scenario parameters
              summary_point        - summary dimensions describing aggregated point
              aggregated_entry     - all aggregated points
              space                - if aggregated, current space
              original_input       - to get other additional forms per scenario
              cm_data_desc         - data description (from the original form)
              cm_form_explore      - form explore (from the original form)
              number_of_iterations - max number of iterations
            }

    Output: {
              cm_return   - if =0, success
            }
    """

    cm_kernel.print_for_con('')
    cm_kernel.print_for_con('Not implemented ...')

    return {'cm_return':0}

# ============================================================================
def prepare_input_params(i):
    """
    Prepare input parameters for this scenario

    Input:  {
              cm_form_array - array with forms
              form_exists   - yes or no
              cm_array1     - array from form 1 (coarse-grain points)
              cm_array2     - array from form 2 (additional parameters) just in case
              caller_cfg    - caller module cfg
            }

    Output: {
              cm_return   - if =0, success
            }
    """

    return {'cm_return':0}

# ============================================================================
def web_view_header(i):
    """
    Prepare header

    Input:  {
              header_desc - header description
            }

    Output: {
              cm_return   - if =0, success
              cm_html     - header html
              cm_length   - header length
            }
    """

    # Get web style
    if 'cfg' in cm_kernel.ini['web_style']: web=cm_kernel.ini['web_style']['cfg']
    else:
       return {'cm_return':1, 'cm_error':'web style is not defined'}

    local_pipeline_desc=ini['cfg'].get('pipeline_point_viewer_desc',{})

    x1=''
    if 'table_bgcolor_line5' in web: x1=' bgcolor="'+web['table_bgcolor_line5']+'" '
    x2=''
    if 'table_bgcolor_line6' in web: x2=' bgcolor="'+web['table_bgcolor_line6']+'" '

    vsx=ini['cfg']['pipeline_point_viewer']
    vs=vsx.get('main',[])
    ks=vsx.get('key_prefix','')

    desc=i.get('header_desc',{})

    h=''

    col=True
    for kx in vs:
        k=ks+kx
        ky='#'+kx
        v=desc.get(ky,{}).get('desc_text','')
        if v=='': v=local_pipeline_desc.get(ky,{}).get('desc_text','')

        if col: col=False ; x=x1
        else:   col=True  ; x=x2

        h+='<td align="center"'+x+'><small><b>'+v+':</b></small></td>'

    return {'cm_return':0, 'cm_html':h, 'cm_length':str(len(vs))}

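# A hypothetical sketch of the 'header_desc' dictionary consumed by
# web_view_header() and web_view_data(); keys are the viewer keys prefixed
# with '#' as in the code above, and the key names/texts are illustrative:
#
#   header_desc={'#benchmark_score':{'desc_text':'Benchmark score'},
#                '#output_correct':{'desc_text':'Output correct'}}
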
# ============================================================================
def web_view_data(i):
    """
    Prepare data (one row of the point viewer table for a given point)

    Input:  {
              cm_data        - data object from pipeline
              header_desc    - header description
              cur_line_style - {
                                 cur    - cur (style1 or style2)
                                 style1
                                 style2
                               }
              point          - point to visualize
            }

    Output: {
              cm_return   - if =0, success
              cm_html     - data row html
            }
    """

    # Get web style
    if 'cfg' in cm_kernel.ini['web_style']: web=cm_kernel.ini['web_style']['cfg']
    else:
       return {'cm_return':1, 'cm_error':'web style is not defined'}

    desc=i.get('header_desc',{})

    # Current style
    cls=i.get('cur_line_style',{})
    y=cls.get('cur','')
    y1=cls.get('style1','')
    y2=cls.get('style2','')

    # Prepare some vars to change colors for each second line
    x1=''
    if 'table_bgcolor_line3' in web: x1=' bgcolor="'+web['table_bgcolor_line3']+'" '
    x2=''
    if 'table_bgcolor_line4' in web: x2=' bgcolor="'+web['table_bgcolor_line4']+'" '
    x3=''
    if 'table_bgcolor_line5' in web: x3=' bgcolor="'+web['table_bgcolor_line5']+'" '
    x4=''
    if 'table_bgcolor_line6' in web: x4=' bgcolor="'+web['table_bgcolor_line6']+'" '

    vsx=ini['cfg']['pipeline_point_viewer']
    vs=vsx.get('main',[])
    vsf=vsx.get('fail',[])
    vss=vsx.get('strange',[])
    vsff=vsx.get('full_fail',[])
    vnt=vsx.get('normality_test',[])

    ks=vsx.get('key_prefix','')

    d=i.get('cm_data',{})
    points=d.get('cm_data_obj',{}).get('cfg',{}).get('points',[])

    h=''

    col=True
    for kx in vs:

        k=ks+kx

        if col:
           col=False
           if y==y1: x=x1
           else: x=x3
        else:
           col=True
           if y==y1: x=x2
           else: x=x4

        p=points[int(i.get('point','0'))]
        rg=cm_kernel.get_value_by_flattened_key({'cm_array':p, 'cm_key':k})
        if rg['cm_return']>0: return rg
        v=rg['cm_value']
        if v==None: v=''

        d1=desc.get('#'+kx,{})
        d1['cm_view_as_text']='yes'
        ii={'cm_run_module_uoa':ini['cfg']['cm_modules']['cm-web'],
            'cm_action':'convert_field_to_html',
            'cm_value':str(v),
            'cm_key':'',
            'cm_mode':'view',
            'cm_data_desc':d1,
            'cm_disable_coloring':'yes'}
        r1=cm_kernel.access(ii)
        if r1['cm_return']>0: return r1
        vh=r1['cm_value_html']
        vt=r1['cm_value_text']

        h+='<td '+x+' align="right" valign="top"><small>'
        if kx in vsf:
           if v=='' or int(v)!=0:
              h+='<span style="color:#9F0000"><B>Fail ('+str(v)+')</B></span>'
           else:
              h+=str(v)
        elif kx in vss:
           if v=='' or int(v)!=0:
              h+='<span style="color:#9F0000">Strange ('+str(v)+')</span>'
           else:
              h+=str(v)
        elif kx in vsff:
           if v=='yes':
              h+='<span style="color:#9F0000"><b>Fail</b></span>'
           else:
              h+='no'
        elif kx in vnt:
           if v!='' and float(v)<0.05:
              h+='<span style="color:#9F0000"><B>'+str(v)+'</B></span>'
           else:
              h+=str(v)
        else:
           h+=vh
        h+='<BR>\n'
        h+='</small></td>'

    return {'cm_return':0, 'cm_html':h}
