#!/usr/bin/env python3
"""
cace_launch.py
A simple script that reads a JSON file and uses its hash key to find the
directory of SPICE simulation netlists associated with that file, then runs
them. The output of all files in a category <SKU>_<METHOD>_<PIN>_* is
analyzed, and the results are written back to the data structure. The
annotated structure is then passed back to the marketplace.
"""

# NOTE: This file is only a local stand-in for the script that launches
# and manages jobs in parallel and communicates job status with the
# front-end. This stand-alone version should not be used for any significant
# project, as all simulations run sequentially and will tie up resources.

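# The input JSON handed to this script is expected to contain at least a
# 'request-hash' entry and a 'data-sheet' record with 'electrical-params'
# and/or 'physical-params' lists; see the __main__ block at the bottom of
# this file.  A minimal skeleton (field values are hypothetical):
#
#   {"request-hash": "ab12cd34", "UID": "user123",
#    "data-sheet": {"ip-name": "my_ip", "electrical-params": [ ... ]}}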
import os
import sys
import shutil
import tarfile
import json
import re
import math
import signal
import datetime
import requests
import subprocess
import faulthandler
from spiceunits import spice_unit_unconvert
from spiceunits import spice_unit_convert

import file_compressor
import cace_makeplot

import config

# Values imported from config:
#
mktp_server_url = config.mktp_server_url
# og_server_url is marked obsolete in config, but send_status() still posts
# to it; fall back to None if config no longer defines it.
og_server_url = getattr(config, 'og_server_url', None)
simulation_path = config.simulation_path

# Variables that need to be global until this file is properly made into a class
simfiles_path = []
layout_path = []
netlist_path = []
root_path = []
hashname = ""
spiceproc = None
localmode = False
bypassmode = False
statdoc = {}

# Send the simulation status to the remote Open Galaxy host
def send_status(doc):
    if og_server_url is None:
        # No Open Galaxy server configured; skip status reporting.
        print('send_status_cace skipped (no og_server_url configured)')
        return
    result = requests.post(og_server_url + '/opengalaxy/send_status_cace', json=doc)
    print('send_status_cace ' + str(result.status_code))

# Make a request to the marketplace server, sending the annotated JSON back
def send_doc(doc):
    result = requests.post(mktp_server_url + '/cace/save_result', json=doc)
    print('send_doc ' + str(result.status_code))

# Plain HTTP POST:  add the file contents to the files object and the
# hash/filename to the data params.  "file" is an in-memory buffer
# (anything providing getvalue(), e.g. io.BytesIO).
def send_file(hash, file, file_name):
    files = {'file': file.getvalue()}
    data = {'request-hash': hash, 'file-name': file_name}
    result = requests.post(mktp_server_url + '/cace/save_result_files', files=files, data=data)
    print('send_file ' + str(result.status_code))

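# Minimal usage sketch for send_file() (illustrative only; assumes the
# results tarball has already been built, e.g. by file_compressor, and
# that "io" is imported wherever this pattern is used):
#
#   import io
#   with open('results.tar.gz', 'rb') as f:
#       buf = io.BytesIO(f.read())
#   send_file(hashname, buf, 'results.tar.gz')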
# Clean up and exit on a termination signal
def cleanup_exit(signum, frame):
    global root_path
    global simfiles_path
    global simulation_path
    global spiceproc
    global statdoc
    global localmode
    if spiceproc:
        print("CACE launch: Termination signal received.")
        spiceproc.terminate()
        spiceproc.wait()

    # Remove simulation files
    print("CACE launch: Simulations have been terminated.")
    if not localmode:
        test = os.path.split(root_path)[0]
        if test != simulation_path:
            print('Error: Root path is not in the system simulation path. Not deleting.')
            print('Root path is ' + root_path + '; simulation path is ' + simulation_path)
        else:
            subprocess.run(['rm', '-r', root_path])
    else:
        # Remove all .spi, .data, and .raw files, cosimulation artifacts, and
        # the local copy of the datasheet.
        os.chdir(simfiles_path)
        if os.path.exists('datasheet.json'):
            os.remove('datasheet.json')
        files = os.listdir(simfiles_path)
        for filename in files:
            fileext = os.path.splitext(filename)[1]
            if fileext in ('.spi', '.data', '.raw', '.tv', '.tvo', '.lxt', '.vcd'):
                os.remove(filename)

    # Post exit status back to Open Galaxy.  statdoc['status'] refers to the
    # same dictionary as the 'status' record created in __main__.
    if statdoc and not localmode:
        statdoc['status']['message'] = 'canceled'
        send_status(statdoc)

    # Exit
    sys.exit(0)

# Handling of 2's complement values in calculations (e.g., "1000" is -8, not +8).
# If a value should be unsigned, then the units for the value should be one bit
# larger than represented:  if unit = "4'b" and value = "1000" then the value
# is -8, but if unit = "5'b" and value = "1000" then the value is +8.

def twos_complement(val, bits):
    """Compute the 2's complement of the integer value val."""
    if (val & (1 << (bits - 1))) != 0:  # if the sign bit is set, e.g., 8-bit: 128-255
        val = val - (1 << bits)         # compute the negative value
    return val                          # otherwise return the value unchanged
# Calculation of results from collected data for an output record,
# given the type of calculation to perform in 'calcrec'.  Known
# calculations include minimum, maximum, average, standard deviation,
# and difference (others can be added as needed).

def calculate(record, rawdata, conditions, calcrec, score, units, param):
    # Calculate the result from rawdata based on calctype; place the
    # result in record['value'].

    # "calcrec" is parsed as "calctype"-"limittype", where:
    # "calctype" is one of: avg, min, max
    # "limittype" is one of: above, below, exact

    # "min" alone implies "min-above"
    # "max" alone implies "max-below"
    # "avg" alone implies "avg-exact"

    # Future development:
    # Add "minimax", "maximin", and "typ" to calctypes (needs extra record(s))
    # Add "range" to limittypes (needs extra record or tuple for target)

    binrex = re.compile(r'([0-9]*)\'([bodh])', re.IGNORECASE)

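    # Example of the kind of specification this function scores (field values
    # are hypothetical; the keys 'calc', 'target', and 'penalty' are the ones
    # read below):
    #
    #   {"min": {"calc": "min-above", "target": "55", "penalty": "fail"},
    #    "typ": {"calc": "avg",       "target": "60", "penalty": "0.1"}}
    #
    # e.g., "min-above" takes the minimum of the data and penalizes results
    # that fall below the target.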
| 152 | data = rawdata |
| 153 | if 'filter' in record: |
| 154 | # Filter data by condition range. |
| 155 | filtspec = record['filter'].split('=') |
| 156 | if len(filtspec) == 2: |
| 157 | condition = filtspec[0].upper() |
| 158 | valuerange = filtspec[1].split(':') |
| 159 | # Pick data according to filter, which specifies a condition and value, or condition |
| 160 | # and range of values in the form "a:b". Syntax is limited and needs to be expanded. |
| 161 | if condition in conditions: |
| 162 | condvec = conditions[condition] |
| 163 | if len(valuerange) == 2: |
| 164 | valuemin = int(valuerange[0]) |
| 165 | valuemax = int(valuerange[1]) |
| 166 | data = list(i for i, j in zip(rawdata, condvec) if j >= valuemin and j <= valuemax) |
| 167 | else: |
| 168 | try: |
| 169 | valueonly = float(valuerange[0]) |
| 170 | except ValueError: |
| 171 | valueonly = valuerange[0] |
| 172 | vtype = type(valueonly) |
| 173 | if vtype == type('str') or vtype == type('int'): |
| 174 | data = list(i for i, j in zip(rawdata, condvec) if j == valueonly) |
| 175 | if not data: |
| 176 | print('Error: no data match ' + condition + ' = ' + str(valueonly)) |
| 177 | data = rawdata |
| 178 | else: |
| 179 | # Avoid round-off problems from floating-point values |
| 180 | d = valueonly * 0.001 |
| 181 | data = list(i for i, j in zip(rawdata, condvec) if j - d < valueonly and j + d > valueonly) |
| 182 | if not data: |
| 183 | print('Error: no data match ' + condition + ' ~= ' + str(valueonly)) |
| 184 | data = rawdata |
| 185 | |
| 186 | # For "filter: typical", limit data to those taken for any condition value |
| 187 | # which is marked as typical for that condition. |
| 188 | |
| 189 | elif record['filter'] == 'typ' or record['filter'] == 'typical': |
| 190 | |
| 191 | # Create a boolean vector to track which results are under typical conditions |
| 192 | typvec = [True] * len(rawdata) |
| 193 | for condition in conditions: |
| 194 | # Pull record of the condition (this must exist by definition) |
| 195 | condrec = next(item for item in param['conditions'] if item['condition'] == condition) |
| 196 | if 'typ' not in condrec: |
| 197 | continue |
| 198 | try: |
| 199 | valueonly = float(condrec['typ']) |
| 200 | except ValueError: |
| 201 | valueonly = condrec['typ'] |
| 202 | condvec = conditions[condition] |
| 203 | typloc = list(i == valueonly for i in condvec) |
| 204 | typvec = list(i and j for i, j in zip(typloc, typvec)) |
| 205 | # Limit data to marked entries |
| 206 | data = list(i for i, j in zip(rawdata, typvec) if j) |
| 207 | try: |
| 208 | calctype, limittype = calcrec.split('-') |
| 209 | except ValueError: |
| 210 | calctype = calcrec |
| 211 | if calctype == 'min': |
| 212 | limittype = 'above' |
| 213 | elif calctype == 'max': |
| 214 | limittype = 'below' |
| 215 | elif calctype == 'avg': |
| 216 | limittype = 'exact' |
| 217 | elif calctype == 'diffmin': |
| 218 | limittype = 'above' |
| 219 | elif calctype == 'diffmax': |
| 220 | limittype = 'below' |
| 221 | else: |
| 222 | return 0 |
| 223 | |
| 224 | # Quick format sanity check---may need binary or hex conversion |
| 225 | # using the new method of letting units be 'b or 'h, etc. |
| 226 | # (to be done: signed conversion, see cace_makeplot.py) |
| 227 | if type(data[0]) == type('str'): |
| 228 | bmatch = binrex.match(units) |
| 229 | if (bmatch): |
| 230 | digits = bmatch.group(1) |
| 231 | if digits == '': |
| 232 | digits = len(data[0]) |
| 233 | else: |
| 234 | digits = int(digits) |
| 235 | base = bmatch.group(2) |
| 236 | if base == 'b': |
| 237 | a = list(int(x, 2) for x in data) |
| 238 | elif base == 'o': |
| 239 | a = list(int(x, 8) for x in data) |
| 240 | elif base == 'd': |
| 241 | a = list(int(x, 10) for x in data) |
| 242 | else: |
| 243 | a = list(int(x, 16) for x in data) |
| 244 | data = list(twos_complement(x, digits) for x in a) |
| 245 | else: |
| 246 | print("Warning: result data do not correspond to specified units.") |
| 247 | print("Data = " + str(data)) |
| 248 | return 0 |
| 249 | |
| 250 | # The target and result should both match the specified units, so convert |
| 251 | # the target if it is a binary, hex, etc., value. |
| 252 | if 'target' in record: |
| 253 | targval = record['target'] |
| 254 | bmatch = binrex.match(units) |
| 255 | if (bmatch): |
| 256 | digits = bmatch.group(1) |
| 257 | base = bmatch.group(2) |
| 258 | if digits == '': |
| 259 | digits = len(targval) |
| 260 | else: |
| 261 | digits = int(digits) |
| 262 | try: |
| 263 | if base == 'b': |
| 264 | a = int(targval, 2) |
| 265 | elif base == 'o': |
| 266 | a = int(targval, 8) |
| 267 | elif base == 'd': |
| 268 | a = int(targval, 10) |
| 269 | else: |
| 270 | a = int(targval, 16) |
| 271 | targval = twos_complement(a, digits) |
| 272 | except: |
| 273 | print("Warning: target data do not correspond to units; assuming integer.") |
| 274 | |
| 275 | # First run the calculation to get the single result value |
| 276 | |
| 277 | if calctype == 'min': |
| 278 | # Result is the minimum of the data |
| 279 | value = min(data) |
| 280 | elif calctype == 'max': |
| 281 | # Result is the maximum of the data |
| 282 | value = max(data) |
| 283 | elif calctype == 'avg': |
| 284 | # Result is the average of the data |
| 285 | value = sum(data) / len(data) |
| 286 | elif calctype[0:3] == 'std': |
| 287 | # Result is the standard deviation of the data |
| 288 | mean = sum(data) / len(data) |
| 289 | value = pow(sum([((i - mean) * (i - mean)) for i in data]) / len(data), 0.5) |
| 290 | # For "stdX", where "X" is an integer, multiply the standard deviation by X |
| 291 | if len(calctype) > 3: |
| 292 | value *= int(calctype[3]) |
| 293 | |
| 294 | if len(calctype) > 4: |
| 295 | # For "stdXn", subtract X times the standard deviation from the mean |
| 296 | if calctype[4] == 'n': |
| 297 | value = mean - value |
| 298 | # For "stdXp", add X times the standard deviation to the mean |
| 299 | elif calctype[4] == 'p': |
| 300 | value = mean + value |
| 301 | elif calctype == 'diffmax': |
| 302 | value = max(data) - min(data) |
| 303 | elif calctype == 'diffmin': |
| 304 | value = min(data) - max(data) |
| 305 | else: |
| 306 | return 0 |
| 307 | |
    try:
        record['value'] = '{0:.4g}'.format(value)
    except ValueError:
        print('Warning: Min/Typ/Max value is not numeric; value is ' + str(value))
        return 0
| 313 | |
| 314 | # Next calculate the score based on the limit type |
| 315 | |
| 316 | if limittype == 'above': |
| 317 | # Score a penalty if value is below the target |
| 318 | if 'target' in record: |
| 319 | targval = float(targval) |
| 320 | dopassfail = False |
| 321 | if 'penalty' in record: |
| 322 | if record['penalty'] == 'fail': |
| 323 | dopassfail = True |
| 324 | else: |
| 325 | penalty = float(record['penalty']) |
| 326 | else: |
| 327 | penalty = 0 |
| 328 | print('min = ' + str(value)) |
| 329 | # NOTE: 0.0005 value corresponds to formatting above, so the |
| 330 | # value is not marked in error unless it would show a different |
| 331 | # value in the display. |
| 332 | if value < targval - 0.0005: |
| 333 | if dopassfail: |
| 334 | locscore = 'fail' |
| 335 | score = 'fail' |
| 336 | print('fail: target = ' + str(record['target']) + '\n') |
| 337 | else: |
| 338 | locscore = (targval - value) * penalty |
| 339 | print('fail: target = ' + str(record['target']) |
| 340 | + ' penalty = ' + str(locscore)) |
| 341 | if score != 'fail': |
| 342 | score += locscore |
| 343 | elif math.isnan(value): |
| 344 | locscore = 'fail' |
| 345 | score = 'fail' |
| 346 | else: |
| 347 | if dopassfail: |
| 348 | locscore = 'pass' |
| 349 | else: |
| 350 | locscore = 0 |
| 351 | print('pass') |
| 352 | if dopassfail: |
| 353 | record['score'] = locscore |
| 354 | else: |
| 355 | record['score'] = '{0:.4g}'.format(locscore) |
| 356 | |
| 357 | elif limittype == 'below': |
| 358 | # Score a penalty if value is above the target |
| 359 | if 'target' in record: |
| 360 | targval = float(targval) |
| 361 | dopassfail = False |
| 362 | if 'penalty' in record: |
| 363 | if record['penalty'] == 'fail': |
| 364 | dopassfail = True |
| 365 | else: |
| 366 | penalty = float(record['penalty']) |
| 367 | else: |
| 368 | penalty = 0 |
| 369 | print('max = ' + str(value)) |
| 370 | # NOTE: 0.0005 value corresponds to formatting above, so the |
| 371 | # value is not marked in error unless it would show a different |
| 372 | # value in the display. |
| 373 | if value > targval + 0.0005: |
| 374 | if dopassfail: |
| 375 | locscore = 'fail' |
| 376 | score = 'fail' |
| 377 | print('fail: target = ' + str(record['target']) + '\n') |
| 378 | else: |
| 379 | locscore = (value - targval) * penalty |
| 380 | print('fail: target = ' + str(record['target']) |
| 381 | + ' penalty = ' + str(locscore)) |
| 382 | if score != 'fail': |
| 383 | score += locscore |
| 384 | elif math.isnan(value): |
| 385 | locscore = 'fail' |
| 386 | score = 'fail' |
| 387 | else: |
| 388 | if dopassfail: |
| 389 | locscore = 'pass' |
| 390 | else: |
| 391 | locscore = 0 |
| 392 | print('pass') |
| 393 | if dopassfail: |
| 394 | record['score'] = locscore |
| 395 | else: |
| 396 | record['score'] = '{0:.4g}'.format(locscore) |
| 397 | |
| 398 | elif limittype == 'exact': |
| 399 | # Score a penalty if value is not equal to the target |
| 400 | if 'target' in record: |
| 401 | targval = float(targval) |
| 402 | dopassfail = False |
| 403 | if 'penalty' in record: |
| 404 | if record['penalty'] == 'fail': |
| 405 | dopassfail = True |
| 406 | else: |
| 407 | penalty = float(record['penalty']) |
| 408 | else: |
| 409 | penalty = 0 |
| 410 | |
| 411 | if value != targval: |
| 412 | if dopassfail: |
| 413 | locscore = 'fail' |
| 414 | score = 'fail' |
| 415 | print('off-target failure') |
| 416 | else: |
| 417 | locscore = abs(targval - value) * penalty |
| 418 | print('off-target: target = ' + str(record['target']) |
| 419 | + ' penalty = ' + str(locscore)) |
| 420 | if score != 'fail': |
| 421 | score += locscore |
| 422 | elif math.isnan(value): |
| 423 | locscore = 'fail' |
| 424 | score = 'fail' |
| 425 | else: |
| 426 | print('on-target') |
| 427 | if dopassfail: |
| 428 | locscore = 'pass' |
| 429 | else: |
| 430 | locscore = 0 |
| 431 | |
| 432 | if dopassfail: |
| 433 | record['score'] = locscore |
| 434 | else: |
| 435 | record['score'] = '{0:.4g}'.format(locscore) |
| 436 | |
| 437 | elif limittype == 'legacy': |
| 438 | # Score a penalty if the value is not equal to the target, except |
| 439 | # that a lack of a minimum record implies no penalty below the |
| 440 | # target, and lack of a maximum record implies no penalty above |
| 441 | # the target. This is legacy behavior for "typ" records, and is |
| 442 | # used if no "calc" key appears in the "typ" record. "legacy" may |
| 443 | # also be explicitly stated, although it is considered deprecated |
| 444 | # in favor of "avg-max" and "avg-min". |
| 445 | |
| 446 | if 'target' in record: |
| 447 | targval = float(targval) |
| 448 | if record['penalty'] == 'fail': |
| 449 | # "typical" should never be pass-fail |
| 450 | penalty = 0 |
| 451 | else: |
| 452 | penalty = float(record['penalty']) |
| 453 | print('typ = ' + str(value)) |
| 454 | if value != targval: |
| 455 | if 'max' in param and value > targval: |
| 456 | # max specified, so values below 'typ' are not costed |
| 457 | # this method deprecated, use 'calc' = 'avg-max' instead. |
| 458 | locscore = (value - targval) * penalty |
| 459 | print('above-target: target = ' + str(record['target']) |
| 460 | + ' penalty = ' + str(locscore)) |
| 461 | elif 'min' in param and value < targval: |
| 462 | # min specified, so values above 'typ' are not costed |
| 463 | # this method deprecated, use 'calc' = 'avg-min' instead. |
| 464 | locscore = (targval - value) * penalty |
| 465 | print('below-target: target = ' + str(record['target']) |
| 466 | + ' penalty = ' + str(locscore)) |
| 467 | elif 'max' not in param and 'min' not in param: |
| 468 | # Neither min and max specified, so value is costed on |
| 469 | # both sides of the target. |
| 470 | locscore = abs(targval - value) * penalty |
| 471 | print('off-target: target = ' + str(record['target']) |
| 472 | + ' penalty = ' + str(locscore)) |
| 473 | else: |
| 474 | locscore = 0 |
| 475 | if score != 'fail': |
| 476 | score += locscore |
| 477 | else: |
| 478 | locscore = 0 |
| 479 | print('on-target') |
| 480 | record['score'] = '{0:.4g}'.format(locscore) |
| 481 | |
| 482 | # Note: Calctype 'none' performs no calculation. Record is unchanged, |
| 483 | # and "score" is returned unchanged. |
| 484 | |
| 485 | return score |
| 486 | |
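# Worked scoring example (numbers are hypothetical):  for a "min-above"
# record with target = 1.0 and penalty = 2.0, a measured minimum of 0.8
# scores (1.0 - 0.8) * 2.0 = 0.4, which is added to the running score;
# with penalty = "fail" the same miss sets the score to 'fail' instead.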
| 487 | def run_and_analyze_lvs(dsheet): |
| 488 | ipname = dsheet['ip-name'] |
| 489 | node = dsheet['node'] |
| 490 | # Hack---node XH035 should have been specified as EFXH035A; allow |
| 491 | # the original one for backwards compatibility. |
| 492 | if node == 'XH035': |
| 493 | node = 'EFXH035A' |
| 494 | mag_path = netlist_path + '/lvs/' + ipname + '.spi' |
| 495 | schem_path = netlist_path + '/stub/' + ipname + '.spi' |
| 496 | |
| 497 | if not os.path.exists(schem_path): |
| 498 | schem_path = netlist_path + '/' + ipname + '.spi' |
| 499 | if not os.path.exists(schem_path): |
| 500 | if os.path.exists(root_path + '/verilog'): |
| 501 | schem_path = root_path + '/verilog/' + ipname + '.v' |
| 502 | |
    # Check the netlist to see if the cell to match is a subcircuit.  If
    # not, then assume it is the top level.

    is_subckt = False
    subrex = re.compile(r'^[^\*]*[ \t]*\.subckt[ \t]+([^ \t]+).*$', re.IGNORECASE)
    with open(mag_path) as ifile:
        spitext = ifile.read()

    # Unwrap SPICE continuation lines ("+" at the start of a line) before scanning
    dutlines = spitext.replace('\n+', ' ').splitlines()
    for line in dutlines:
        lmatch = subrex.match(line)
        if lmatch:
            subname = lmatch.group(1)
            if subname.lower() == ipname.lower():
                is_subckt = True
                break
| 519 | |
| 520 | if is_subckt: |
| 521 | layout_arg = mag_path + ' ' + ipname |
| 522 | else: |
| 523 | layout_arg = mag_path |
| 524 | |
| 525 | # Get PDK name for finding the netgen setup file |
| 526 | if os.path.exists(root_path + '/.ef-config'): |
| 527 | pdkdir = os.path.realpath(root_path + '/.ef-config/techdir') |
| 528 | else: |
| 529 | foundry = dsheet['foundry'] |
| 530 | pdkdir = '/ef/tech/' + foundry + '/' + node |
| 531 | lvs_setup = pdkdir + '/libs.tech/netgen/' + node + '_setup.tcl' |
| 532 | |
| 533 | # Run LVS as a subprocess and wait for it to finish. Use the -json |
| 534 | # switch to get a file that is easy to parse. |
| 535 | |
| 536 | print('cace_launch.py: running /ef/apps/bin/netgen -batch lvs ') |
| 537 | print(layout_arg + ' ' + schem_path + ' ' + ipname + ' ' + lvs_setup + ' comp.out -json -blackbox') |
| 538 | |
| 539 | lvsproc = subprocess.run(['/ef/apps/bin/netgen', '-batch', 'lvs', |
| 540 | layout_arg, schem_path + ' ' + ipname, |
| 541 | lvs_setup, 'comp.out', '-json', '-blackbox'], cwd=layout_path, |
| 542 | stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0) |
| 543 | |
| 544 | with open(layout_path + '/comp.json', 'r') as cfile: |
| 545 | lvsdata = json.load(cfile) |
| 546 | |
| 547 | # Count errors in the JSON file |
| 548 | failures = 0 |
| 549 | ncells = len(lvsdata) |
| 550 | for c in range(0, ncells): |
| 551 | cellrec = lvsdata[c] |
| 552 | if c == ncells - 1: |
| 553 | topcell = True |
| 554 | else: |
| 555 | topcell = False |
| 556 | |
        # Most errors must be counted only for the top cell, because individual
        # failing cells are flattened and matching is attempted again on the
        # flattened netlist.
| 560 | |
| 561 | if topcell: |
| 562 | if 'devices' in cellrec: |
| 563 | devices = cellrec['devices'] |
| 564 | devlist = [val for pair in zip(devices[0], devices[1]) for val in pair] |
| 565 | devpair = list(devlist[p:p + 2] for p in range(0, len(devlist), 2)) |
| 566 | for dev in devpair: |
| 567 | c1dev = dev[0] |
| 568 | c2dev = dev[1] |
| 569 | diffdevs = abs(c1dev[1] - c2dev[1]) |
| 570 | failures += diffdevs |
| 571 | |
| 572 | if 'nets' in cellrec: |
| 573 | nets = cellrec['nets'] |
| 574 | diffnets = abs(nets[0] - nets[1]) |
| 575 | failures += diffnets |
| 576 | |
| 577 | if 'badnets' in cellrec: |
| 578 | badnets = cellrec['badnets'] |
| 579 | failures += len(badnets) |
| 580 | |
| 581 | if 'badelements' in cellrec: |
| 582 | badelements = cellrec['badelements'] |
| 583 | failures += len(badelements) |
| 584 | |
| 585 | if 'pins' in cellrec: |
| 586 | pins = cellrec['pins'] |
| 587 | pinlist = [val for pair in zip(pins[0], pins[1]) for val in pair] |
| 588 | pinpair = list(pinlist[p:p + 2] for p in range(0, len(pinlist), 2)) |
| 589 | for pin in pinpair: |
| 590 | if pin[0].lower() != pin[1].lower(): |
| 591 | failures += 1 |
| 592 | |
| 593 | # Property errors must be counted for every cell |
| 594 | if 'properties' in cellrec: |
| 595 | properties = cellrec['properties'] |
| 596 | failures += len(properties) |
| 597 | |
| 598 | return failures |
| 599 | |
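# For reference, each entry of the netgen comp.json list parsed above is a
# dictionary that may contain 'devices', 'nets', 'badnets', 'badelements',
# 'pins', and 'properties' fields.  A minimal, hypothetical top-cell entry
# with one device-count mismatch might look like:
#
#   {"devices": [[["pmos", 12]], [["pmos", 11]]], "nets": [34, 34],
#    "badnets": [], "badelements": [], "properties": []}
#
# which the loop above would count as a single failure.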
def apply_measure(varresult, measure, variables):
    # Apply a measurement (record "measure") using the vectors found in
    # "varresult", producing new vectors that overwrite the original
    # ones.  Operations may reduce the "varresult" vectors to a single value.

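    # Example measure record (values are hypothetical; the keys 'calc',
    # 'condition', 'from', 'to', 'limit', and 'keep' are the ones
    # interpreted below):
    #
    #   {"calc": "STABLETIME", "condition": "Vout", "from": "0", "to": "10u",
    #    "limit": "0.05", "keep": "AFTER"}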
| 605 | # 'condition' defaults to TIME; but this only applies to transient analysis data! |
| 606 | if 'condition' in measure: |
| 607 | condition = measure['condition'] |
| 608 | if condition == 'RESULT': |
| 609 | # 'RESULT' can either be a specified name (not recommended), or else it is |
| 610 | # taken to be the variable set as the result variable. |
| 611 | try: |
| 612 | activevar = next(item for item in variables if item['condition'] == condition) |
| 613 | except StopIteration: |
| 614 | try: |
| 615 | activevar = next(item for item in variables if 'result' in item) |
| 616 | except StopIteration: |
| 617 | print('Error: measurement condition ' + condition + ' does not exist!') |
| 618 | return 0 |
| 619 | else: |
| 620 | condition = activevar['condition'] |
| 621 | |
| 622 | else: |
| 623 | condition = 'TIME' |
| 624 | |
| 625 | # Convert old-style separate condition, pin to new style combined |
| 626 | if 'pin' in measure: |
| 627 | if ':' not in measure['condition']: |
| 628 | measure['condition'] += ':' + measure['pin'] |
| 629 | measure.pop('pin', 0) |
| 630 | |
| 631 | try: |
| 632 | activevar = next(item for item in variables if item['condition'] == condition) |
| 633 | except: |
| 634 | activeunit = '' |
| 635 | else: |
| 636 | if 'unit' in activevar: |
| 637 | activeunit = activevar['unit'] |
| 638 | else: |
| 639 | activeunit = '' |
| 640 | |
| 641 | try: |
| 642 | activetrace = varresult[condition] |
| 643 | except KeyError: |
| 644 | print("Measurement error: Condition " + condition + " does not exist in results.") |
| 645 | # No active trace; cannot continue. |
| 646 | return |
| 647 | |
| 648 | rsize = len(activetrace) |
| 649 | |
| 650 | if 'TIME' in varresult: |
| 651 | timevector = varresult['TIME'] |
| 652 | try: |
| 653 | timevar = next(item for item in variables if item['condition'] == 'TIME') |
| 654 | except: |
| 655 | timeunit = 's' |
| 656 | else: |
| 657 | if 'unit' in timevar: |
| 658 | timeunit = timevar['unit'] |
| 659 | else: |
| 660 | timeunit = 's' |
| 661 | else: |
| 662 | timevector = [] |
| 663 | timeunit = '' |
| 664 | |
| 665 | calctype = measure['calc'] |
| 666 | # Diagnostic |
| 667 | # print("Measure calctype = " + calctype) |
| 668 | |
| 669 | if calctype == 'RESULT': |
| 670 | # Change the 'result' marker to the indicated condition. |
| 671 | for var in variables: |
| 672 | if 'result' in var: |
| 673 | var.pop('result') |
| 674 | |
| 675 | activevar['result'] = True |
| 676 | |
| 677 | elif calctype == 'REMOVE': |
| 678 | # Remove the indicated condition vector. |
| 679 | varresult.pop(condition) |
| 680 | |
| 681 | elif calctype == 'REBASE': |
| 682 | # Rebase specified vector (subtract minimum value from all components) |
| 683 | base = min(activetrace) |
| 684 | varresult[condition] = [i - base for i in activetrace] |
| 685 | |
| 686 | elif calctype == 'ABS': |
| 687 | # Take absolute value of activetrace. |
| 688 | varresult[condition] = [abs(i) for i in activetrace] |
| 689 | |
| 690 | elif calctype == 'NEGATE': |
| 691 | # Negate the specified vector |
| 692 | varresult[condition] = [-i for i in activetrace] |
| 693 | |
| 694 | elif calctype == 'ADD': |
| 695 | if 'value' in measure: |
| 696 | v = float(measure['value']) |
| 697 | varresult[condition] = [i + v for i in activetrace] |
| 698 | else: |
| 699 | # Add the specified vector to the result and replace the result |
| 700 | varresult[condition] = [i + j for i, j in zip(activetrace, paramresult)] |
| 701 | |
| 702 | elif calctype == 'SUBTRACT': |
| 703 | if 'value' in measure: |
| 704 | v = float(measure['value']) |
| 705 | varresult[condition] = [i - v for i in activetrace] |
| 706 | else: |
| 707 | # Subtract the specified vector from the result |
| 708 | varresult[condition] = [j - i for i, j in zip(activetrace, paramresult)] |
| 709 | |
| 710 | elif calctype == 'MULTIPLY': |
| 711 | if 'value' in measure: |
| 712 | v = float(measure['value']) |
| 713 | varresult[condition] = [i * v for i in activetrace] |
| 714 | else: |
| 715 | # Multiply the specified vector by the result (e.g., to get power) |
| 716 | varresult[condition] = [j * i for i, j in zip(activetrace, paramresult)] |
| 717 | |
| 718 | elif calctype == 'CLIP': |
| 719 | if timevector == []: |
| 720 | return |
| 721 | # Clip specified vector to the indicated times |
| 722 | if 'from' in measure: |
| 723 | fromtime = float(spice_unit_convert([timeunit, measure['from'], 'time'])) |
| 724 | else: |
| 725 | fromtime = timevector[0] |
| 726 | if 'to' in measure: |
| 727 | totime = float(spice_unit_convert([timeunit, measure['to'], 'time'])) |
| 728 | else: |
| 729 | totime = timevector[-1] |
| 730 | |
| 731 | try: |
| 732 | fromidx = next(i for i, j in enumerate(timevector) if j >= fromtime) |
| 733 | except StopIteration: |
| 734 | fromidx = len(timevector) - 1 |
| 735 | try: |
| 736 | toidx = next(i for i, j in enumerate(timevector) if j >= totime) |
| 737 | toidx += 1 |
| 738 | except StopIteration: |
| 739 | toidx = len(timevector) |
| 740 | |
| 741 | for key in varresult: |
| 742 | vector = varresult[key] |
| 743 | varresult[key] = vector[fromidx:toidx] |
| 744 | |
| 745 | rsize = toidx - fromidx |
| 746 | |
| 747 | elif calctype == 'MEAN': |
| 748 | if timevector == []: |
| 749 | return |
| 750 | |
| 751 | # Get the mean value of all traces in the indicated range. Results are |
| 752 | # collapsed to the single mean value. |
| 753 | if 'from' in measure: |
| 754 | fromtime = float(spice_unit_convert([timeunit, measure['from'], 'time'])) |
| 755 | else: |
| 756 | fromtime = timevector[0] |
| 757 | if 'to' in measure: |
| 758 | totime = float(spice_unit_convert([timeunit, measure['to'], 'time'])) |
| 759 | else: |
| 760 | totime = timevector[-1] |
| 761 | |
| 762 | try: |
| 763 | fromidx = next(i for i, j in enumerate(timevector) if j >= fromtime) |
| 764 | except StopIteration: |
| 765 | fromidx = len(timevector) - 1 |
| 766 | try: |
| 767 | toidx = next(i for i, j in enumerate(timevector) if j >= totime) |
| 768 | toidx += 1 |
| 769 | except StopIteration: |
| 770 | toidx = len(timevector) |
| 771 | |
| 772 | # Correct time average requires weighting according to the size of the |
| 773 | # time slice. |
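        # i.e., a trapezoidal time average over the clipped window:
        #     mean = sum_i ((v[i] + v[i-1]) / 2) * (t[i] - t[i-1]) / (t[toidx-1] - t[fromidx])
        # which is what the loop below computes, with tsum as the denominator.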
| 774 | tsum = timevector[toidx - 1] - timevector[fromidx] |
| 775 | |
| 776 | for key in varresult: |
| 777 | vector = varresult[key] |
| 778 | try: |
| 779 | # Test if condition is a numeric value |
| 780 | varresult[key] = vector[fromidx] + 1 |
| 781 | except TypeError: |
| 782 | # Some conditions like 'corner' cannot be averaged, so just take the |
| 783 | # first entry (may want to consider different handling) |
| 784 | varresult[key] = [vector[fromidx]] |
| 785 | else: |
| 786 | vtot = 0.0 |
| 787 | for i in range(fromidx + 1, toidx): |
| 788 | # Note: This expression can and should be optimized! |
| 789 | vtot += ((vector[i] + vector[i - 1]) / 2) * (timevector[i] - timevector[i - 1]) |
| 790 | varresult[key] = [vtot / tsum] |
| 791 | |
| 792 | rsize = 1 |
| 793 | |
| 794 | elif calctype == 'RISINGEDGE': |
| 795 | if timevector == []: |
| 796 | return |
| 797 | |
| 798 | # RISINGEDGE finds the time of a signal rising edge. |
| 799 | # parameters used are: |
| 800 | # 'from': start time of search (default zero) |
| 801 | # 'to': end time of search (default end) |
| 802 | # 'number': edge number (default first edge, or zero) (to be done) |
| 803 | # 'cross': measure time when signal crosses this value |
| 804 | # 'keep': determines what part of the vectors to keep |
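        # Example (hypothetical values):  {"calc": "RISINGEDGE", "condition":
        # "Vcomp", "cross": "1.65", "keep": "AFTER"} trims every vector in
        # varresult to the samples at and after the first time the "Vcomp"
        # trace rises through 1.65.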
| 805 | if 'from' in measure: |
| 806 | fromtime = float(spice_unit_convert([timeunit, measure['from'], 'time'])) |
| 807 | else: |
| 808 | fromtime = timevector[0] |
| 809 | if 'to' in measure: |
| 810 | totime = float(spice_unit_convert([timeunit, measure['to'], 'time'])) |
| 811 | else: |
| 812 | totime = timevector[-1] |
| 813 | if 'cross' in measure: |
| 814 | crossval = float(measure['cross']) |
| 815 | else: |
| 816 | crossval = (max(activetrace) + min(activetrace)) / 2; |
| 817 | try: |
| 818 | fromidx = next(i for i, j in enumerate(timevector) if j >= fromtime) |
| 819 | except StopIteration: |
| 820 | fromidx = len(timevector) - 1 |
| 821 | try: |
| 822 | toidx = next(i for i, j in enumerate(timevector) if j >= totime) |
| 823 | toidx += 1 |
| 824 | except StopIteration: |
| 825 | toidx = len(timevector) |
| 826 | try: |
| 827 | startidx = next(i for i, j in enumerate(activetrace[fromidx:toidx]) if j < crossval) |
| 828 | except StopIteration: |
| 829 | startidx = 0 |
| 830 | startidx += fromidx |
| 831 | try: |
| 832 | riseidx = next(i for i, j in enumerate(activetrace[startidx:toidx]) if j >= crossval) |
| 833 | except StopIteration: |
| 834 | riseidx = toidx - startidx - 1 |
| 835 | riseidx += startidx |
| 836 | |
| 837 | # If not specified, 'keep' defaults to 'INSTANT'. |
| 838 | if 'keep' in measure: |
| 839 | keeptype = measure['keep'] |
| 840 | if keeptype == 'BEFORE': |
| 841 | istart = 0 |
| 842 | istop = riseidx |
| 843 | elif keeptype == 'AFTER': |
| 844 | istart = riseidx |
| 845 | istop = len(timevector) |
| 846 | else: |
| 847 | istart = riseidx |
| 848 | istop = riseidx + 1 |
| 849 | else: |
| 850 | istart = riseidx |
| 851 | istop = riseidx + 1 |
| 852 | |
| 853 | for key in varresult: |
| 854 | vector = varresult[key] |
| 855 | varresult[key] = vector[istart:istop] |
| 856 | |
| 857 | rsize = istop - istart |
| 858 | |
| 859 | elif calctype == 'FALLINGEDGE': |
| 860 | if timevector == []: |
| 861 | return |
| 862 | |
        # FALLINGEDGE finds the time of a signal falling edge.
        # Parameters used are:
        # 'from': start time of search (default zero)
        # 'to': end time of search (default end)
        # 'number': edge number (default first edge, or zero) (to be done)
        # 'cross': measure time when the signal crosses this value
        # 'keep': determines what part of the vectors to keep
| 870 | if 'from' in measure: |
| 871 | fromtime = float(spice_unit_convert([timeunit, measure['from'], 'time'])) |
| 872 | else: |
| 873 | fromtime = timevector[0] |
| 874 | if 'to' in measure: |
| 875 | totime = float(spice_unit_convert([timeunit, measure['to'], 'time'])) |
| 876 | else: |
| 877 | totime = timevector[-1] |
        if 'cross' in measure:
            # Convert to float, as in the RISINGEDGE case above
            crossval = float(measure['cross'])
        else:
            crossval = (max(activetrace) + min(activetrace)) / 2
| 882 | try: |
| 883 | fromidx = next(i for i, j in enumerate(timevector) if j >= fromtime) |
| 884 | except StopIteration: |
| 885 | fromidx = len(timevector) - 1 |
| 886 | try: |
| 887 | toidx = next(i for i, j in enumerate(timevector) if j >= totime) |
| 888 | toidx += 1 |
| 889 | except StopIteration: |
| 890 | toidx = len(timevector) |
| 891 | try: |
| 892 | startidx = next(i for i, j in enumerate(activetrace[fromidx:toidx]) if j > crossval) |
| 893 | except StopIteration: |
| 894 | startidx = 0 |
| 895 | startidx += fromidx |
| 896 | try: |
| 897 | fallidx = next(i for i, j in enumerate(activetrace[startidx:toidx]) if j <= crossval) |
| 898 | except StopIteration: |
| 899 | fallidx = toidx - startidx - 1 |
| 900 | fallidx += startidx |
| 901 | |
| 902 | # If not specified, 'keep' defaults to 'INSTANT'. |
| 903 | if 'keep' in measure: |
| 904 | keeptype = measure['keep'] |
| 905 | if keeptype == 'BEFORE': |
| 906 | istart = 0 |
| 907 | istop = fallidx |
| 908 | elif keeptype == 'AFTER': |
| 909 | istart = fallidx |
| 910 | istop = len(timevector) |
| 911 | else: |
| 912 | istart = fallidx |
| 913 | istop = fallidx + 1 |
| 914 | else: |
| 915 | istart = fallidx |
| 916 | istop = fallidx + 1 |
| 917 | |
| 918 | for key in varresult: |
| 919 | vector = varresult[key] |
| 920 | varresult[key] = vector[istart:istop] |
| 921 | |
| 922 | rsize = istop - istart |
| 923 | |
| 924 | elif calctype == 'STABLETIME': |
| 925 | if timevector == []: |
| 926 | return |
| 927 | |
        # STABLETIME finds the time at which the signal stabilizes.
        # Parameters used are:
        # 'from': start time of search (default zero)
        # 'to': end time of search (works backwards from here) (default end)
        # 'limit': fractional band around the final value within which the
        #          signal is considered stable (default 0.05, i.e., 5%)
        # 'keep': determines what part of the vectors to keep
| 934 | if 'from' in measure: |
| 935 | fromtime = float(spice_unit_convert([timeunit, measure['from'], 'time'])) |
| 936 | else: |
| 937 | fromtime = timevector[0] |
| 938 | if 'to' in measure: |
| 939 | totime = float(spice_unit_convert([timeunit, measure['to'], 'time'])) |
| 940 | else: |
| 941 | totime = timevector[-1] |
| 942 | if 'limit' in measure: |
| 943 | limit = float(measure['limit']) |
| 944 | else: |
| 945 | # Default is 5% higher or lower than final value |
| 946 | limit = 0.05 |
| 947 | try: |
| 948 | fromidx = next(i for i, j in enumerate(timevector) if j >= fromtime) |
| 949 | except StopIteration: |
| 950 | fromidx = len(timevector) - 1 |
| 951 | try: |
| 952 | toidx = next(i for i, j in enumerate(timevector) if j >= totime) |
| 953 | except StopIteration: |
| 954 | toidx = len(timevector) - 1 |
| 955 | finalval = activetrace[toidx] |
| 956 | toidx += 1 |
| 957 | highval = finalval * (1.0 + limit) |
| 958 | lowval = finalval * (1.0 - limit) |
| 959 | try: |
| 960 | breakidx = next(i for i, j in reversed(list(enumerate(activetrace[fromidx:toidx]))) if j >= highval or j <= lowval) |
| 961 | except StopIteration: |
| 962 | breakidx = 0 |
| 963 | breakidx += fromidx |
| 964 | |
| 965 | # If not specified, 'keep' defaults to 'INSTANT'. |
| 966 | if 'keep' in measure: |
| 967 | keeptype = measure['keep'] |
| 968 | if keeptype == 'BEFORE': |
| 969 | istart = 0 |
| 970 | istop = breakidx |
| 971 | elif keeptype == 'AFTER': |
| 972 | istart = breakidx |
| 973 | istop = len(timevector) |
| 974 | else: |
| 975 | istart = breakidx |
| 976 | istop = breakidx + 1 |
| 977 | else: |
| 978 | istart = breakidx |
| 979 | istop = breakidx + 1 |
| 980 | |
| 981 | for key in varresult: |
| 982 | vector = varresult[key] |
| 983 | varresult[key] = vector[istart:istop] |
| 984 | |
| 985 | rsize = istop - istart |
| 986 | |
| 987 | elif calctype == 'INSIDE': |
| 988 | if timevector == []: |
| 989 | return |
| 990 | |
        # INSIDE retains only values which are inside the indicated limits
        # 'from': start time of the window (default start)
        # 'to': end time of the window (default end)
        # 'min': minimum value limit to keep results
        # 'max': maximum value limit to keep results
| 994 | if 'from' in measure: |
| 995 | fromtime = float(spice_unit_convert([timeunit, measure['from'], 'time'])) |
| 996 | else: |
| 997 | fromtime = timevector[0] |
| 998 | if 'to' in measure: |
| 999 | totime = float(spice_unit_convert([timeunit, measure['to'], 'time'])) |
| 1000 | else: |
| 1001 | totime = timevector[-1] |
| 1002 | if 'min' in measure: |
| 1003 | minval = float(spice_unit_convert([activeunit, measure['min']])) |
| 1004 | else: |
| 1005 | minval = min(activetrace) |
| 1006 | if 'max' in measure: |
| 1007 | maxval = float(spice_unit_convert([activeunit, measure['max']])) |
| 1008 | else: |
| 1009 | maxval = max(activetrace) |
| 1010 | |
| 1011 | try: |
| 1012 | fromidx = next(i for i, j in enumerate(timevector) if j >= fromtime) |
| 1013 | except StopIteration: |
| 1014 | fromidx = len(timevector) - 1 |
| 1015 | try: |
| 1016 | toidx = next(i for i, j in enumerate(timevector) if j >= totime) |
| 1017 | toidx += 1 |
| 1018 | except StopIteration: |
| 1019 | toidx = len(timevector) |
| 1020 | goodidx = list(i for i, j in enumerate(activetrace[fromidx:toidx]) if j >= minval and j <= maxval) |
| 1021 | # Diagnostic |
| 1022 | if goodidx == []: |
| 1023 | print('All vector components failed bounds test. max = ' + str(max(activetrace[fromidx:toidx])) + '; min = ' + str(min(activetrace[fromidx:toidx]))) |
| 1024 | |
| 1025 | goodidx = [i + fromidx for i in goodidx] |
| 1026 | for key in varresult: |
| 1027 | vector = varresult[key] |
| 1028 | varresult[key] = [vector[i] for i in goodidx] |
| 1029 | |
| 1030 | rsize = len(goodidx) |
| 1031 | |
| 1032 | return rsize |
| 1033 | |
def read_ascii_datafile(file, *args):
    # Read a file of data produced by the 'wrdata' command in ngspice
    # (simple ASCII data in columnar format).
    # No unit conversions occur at this time.
    #
    # Arguments always include the analysis variable vector.  If additional
    # arguments are present in "args", they are value vectors representing
    # additional columns in the data file and are treated in the same way
    # as the analysis variable.  Note, however, that the wrdata format
    # redundantly puts the analysis variable in every other column.

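    # Illustrative wrdata layout (values hypothetical) for a transient run
    # with two outputs; columns are TIME, result, TIME, second variable:
    #
    #   0.000e+00  1.62e+00  0.000e+00  3.30e-03
    #   1.000e-09  1.63e+00  1.000e-09  3.28e-03
    #
    # The first column is read once; after that only every other column is
    # read, skipping the repeated analysis-variable columns.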
    if not args:
        print('Error: testbench does not specify contents of data file!')
        return

    dmatrix = []
    filepath = simfiles_path + '/' + file
    if not os.path.isfile(filepath):
        # Work around ngspice's inconsistent handling of file extensions for
        # the argument passed to the 'wrdata' command, which sometimes adds
        # the .data extension and sometimes doesn't, regardless of whether
        # the argument already has an extension.  The method here is to always
        # include the extension in the argument, then also look for possible
        # ".data.data" files.
        if os.path.isfile(filepath + '.data'):
            filepath = filepath + '.data'
        else:
            return 0
| 1061 | |
| 1062 | with open(filepath, 'r') as afile: |
| 1063 | for line in afile.readlines(): |
| 1064 | ldata = line.split() |
| 1065 | if ldata: |
| 1066 | # Note: dependent variable (e.g., TIME) is repeated |
| 1067 | # every other column, so record this only once, then |
| 1068 | # read the remainder while skipping every other column. |
| 1069 | dvec = [] |
| 1070 | dvec.append(float(ldata[0])) |
| 1071 | dvec.extend(list(map(float, ldata[1::2]))) |
| 1072 | dmatrix.append(dvec) |
| 1073 | |
| 1074 | # Transpose dmatrix |
| 1075 | try: |
| 1076 | dmatrix = list(map(list, zip(*dmatrix))) |
| 1077 | except TypeError: |
| 1078 | print("last line data are " + str(ldata)) |
| 1079 | print("dmatrix is " + str(dmatrix)) |
| 1080 | |
| 1081 | for dvalues, dvec in zip(dmatrix, args): |
| 1082 | dvec.extend(dvalues) |
| 1083 | |
| 1084 | try: |
| 1085 | rval = len(ldata[0]) |
| 1086 | except TypeError: |
| 1087 | rval = 1 |
| 1088 | return rval |
| 1089 | |
| 1090 | if __name__ == '__main__': |
| 1091 | |
| 1092 | # Exit in response to terminate signal by terminating ngspice processes |
| 1093 | faulthandler.register(signal.SIGUSR2) |
| 1094 | signal.signal(signal.SIGINT, cleanup_exit) |
| 1095 | signal.signal(signal.SIGTERM, cleanup_exit) |
| 1096 | options = [] |
| 1097 | arguments = [] |
| 1098 | for item in sys.argv[1:]: |
| 1099 | if item.find('-', 0) == 0: |
| 1100 | options.append(item) |
| 1101 | else: |
| 1102 | arguments.append(item) |
| 1103 | |
| 1104 | # track the circuit score (for simulation; layout handled separately) |
| 1105 | # (initial score may be overridden by passing -score=value to cace_launch.py) |
| 1106 | score = 0.0 |
| 1107 | |
| 1108 | # read the JSON file |
| 1109 | keepmode = False |
| 1110 | plotmode = False |
| 1111 | postmode = True |
| 1112 | if len(arguments) > 0: |
| 1113 | inputfile = arguments[0] |
| 1114 | else: |
| 1115 | raise SyntaxError('Usage: ' + sys.argv[0] + ' json_file [-options]\n') |
| 1116 | |
| 1117 | if os.path.splitext(inputfile)[1] != '.json': |
| 1118 | raise SyntaxError('Usage: ' + sys.argv[0] + ' json_file [-options]\n') |
| 1119 | |
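    # Example invocation (file names and paths are hypothetical):
    #
    #   python3 cace_launch.py ab12cd34.json -simdir=/tmp/sims/ab12cd34 -keep -plot
    #
    # Options are parsed below as either bare flags (-keep) or -name=value pairs.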
| 1120 | for item in options: |
| 1121 | result = item.split('=') |
| 1122 | if result[0] == '-keep': |
| 1123 | keepmode = True |
| 1124 | elif result[0] == '-plot': |
| 1125 | plotmode = True |
| 1126 | elif result[0] == '-nosim': |
| 1127 | # Diagnostic |
| 1128 | print('No simulations specified. . . cace_launch exiting.\n') |
| 1129 | sys.exit(0) |
| 1130 | elif result[0] == '-nopost': |
| 1131 | postmode = False |
| 1132 | keepmode = True |
| 1133 | elif result[0] == '-simdir': |
| 1134 | simfiles_path = result[1] |
| 1135 | elif result[0] == '-layoutdir': |
| 1136 | layout_path = result[1] |
| 1137 | elif result[0] == '-netlistdir': |
| 1138 | netlist_path = result[1] |
| 1139 | elif result[0] == '-rootdir': |
| 1140 | root_path = result[1] |
| 1141 | elif result[0] == '-local': |
| 1142 | localmode = True |
| 1143 | bypassmode = False |
| 1144 | postmode = False |
| 1145 | keepmode = False |
| 1146 | elif result[0] == '-bypass': |
| 1147 | bypassmode = True |
| 1148 | localmode = True |
| 1149 | postmode = True |
| 1150 | keepmode = False |
        elif result[0] == '-score':
            # The score accumulates numerically below, so convert it here.
            score = float(result[1])
        else:
            raise SyntaxError('Bad option ' + item + ', options are -keep, -plot, -nosim, '
                    '-nopost, -local, -bypass, -score=, -simdir=, -layoutdir=, '
                    '-netlistdir=, and -rootdir=\n')
| 1155 | |
| 1156 | # Various information could be obtained from the input JSON file |
| 1157 | # name, but it will be assumed that all information should be |
| 1158 | # obtained from the contents of the JSON file itself. |
| 1159 | |
| 1160 | with open(inputfile) as ifile: |
| 1161 | datatop = json.load(ifile) |
| 1162 | |
| 1163 | # Option passing through the JSON: use "nopost" or "keep" defined at the top level. |
| 1164 | if 'nopost' in datatop: |
| 1165 | postmode = False |
| 1166 | datatop.pop('nopost') |
| 1167 | if 'keep' in datatop: |
| 1168 | keepmode = True |
| 1169 | datatop.pop('keep') |
| 1170 | if 'local' in datatop: |
| 1171 | localmode = True |
| 1172 | datatop.pop('local') |
| 1173 | |
| 1174 | if 'request-hash' in datatop: |
| 1175 | hashname = datatop['request-hash'] |
| 1176 | else: |
| 1177 | print("Document JSON missing request-hash.") |
| 1178 | sys.exit(1) |
| 1179 | |
| 1180 | # Simfiles should be put in path specified by -simdir, or else |
| 1181 | # put them in the working directory. Normally "-simdir" will |
| 1182 | # be given on the command line. |
| 1183 | |
| 1184 | if not simfiles_path: |
| 1185 | if root_path: |
| 1186 | simfiles_path = root_path + '/' + hashname |
| 1187 | else: |
emayecs | b2487ae | 2021-08-05 10:30:13 -0400 | [diff] [blame] | 1188 | simfiles_path = config.simulation_path + '/' + hashname |
emayecs | 5966a53 | 2021-07-29 10:07:02 -0400 | [diff] [blame] | 1189 | |
| 1190 | if not os.path.isdir(simfiles_path): |
| 1191 | print('Error: Simulation folder ' + simfiles_path + ' does not exist.') |
| 1192 | sys.exit(1) |
| 1193 | |
| 1194 | if not layout_path: |
| 1195 | if root_path: |
| 1196 | layout_path = root_path + '/mag' |
| 1197 | |
| 1198 | if not netlist_path: |
| 1199 | if root_path: |
| 1200 | netlist_path = root_path + '/spi' |
| 1201 | |
| 1202 | # Change location to the simulation directory |
| 1203 | os.chdir(simfiles_path) |
| 1204 | |
| 1205 | # pull out the relevant part of the JSON file, which is "data-sheet" |
| 1206 | dsheet = datatop['data-sheet'] |
| 1207 | |
| 1208 | # Prepare a dictionary for the status and pass critical values from datatop. |
| 1209 | try: |
| 1210 | statdoc['UID'] = datatop['UID'] |
| 1211 | statdoc['request-hash'] = datatop['request-hash'] |
| 1212 | if 'project-folder' in datatop: |
| 1213 | statdoc['project'] = datatop['project-folder'] |
| 1214 | else: |
| 1215 | statdoc['project'] = dsheet['ip-name'] |
| 1216 | status = {} |
| 1217 | status['message'] = 'initializing' |
| 1218 | status['completed'] = '0' |
| 1219 | status['total'] = 'unknown' |
| 1220 | status['hash'] = datatop['request-hash'] |
| 1221 | statdoc['status'] = status |
| 1222 | if not localmode: |
| 1223 | send_status(statdoc) |
| 1224 | except KeyError: |
| 1225 | if not localmode: |
| 1226 | print("Failed to generate status record.") |
| 1227 | else: |
| 1228 | pass |
| 1229 | |
    # Find the electrical and physical parameter lists.  If neither exists,
    # then the datasheet has no characterization template and there is
    # nothing to simulate, so exit below.
| 1233 | |
| 1234 | if 'electrical-params' in dsheet: |
| 1235 | eparamlist = dsheet['electrical-params'] |
| 1236 | else: |
| 1237 | eparamlist = [] |
| 1238 | if 'physical-params' in dsheet: |
| 1239 | pparamlist = dsheet['physical-params'] |
| 1240 | else: |
| 1241 | pparamlist = [] |
| 1242 | |
| 1243 | if eparamlist == [] and pparamlist == []: |
| 1244 | print('Circuit JSON file does not have a characterization template!') |
| 1245 | sys.exit(0) |
| 1246 | |
| 1247 | simulations = 0 |
| 1248 | has_aux_files = False |
| 1249 | |
| 1250 | # Diagnostic: find and print the number of files to be simulated |
| 1251 | # Names are methodname, pinname, and simulation number. |
| 1252 | totalsims = 0 |
| 1253 | filessimmed = [] |
| 1254 | for param in eparamlist: |
| 1255 | if 'testbenches' in param: |
| 1256 | totalsims += len(param['testbenches']) |
| 1257 | print('Total files to simulate: ' + str(totalsims)) |
| 1258 | |
| 1259 | # Status |
| 1260 | if statdoc and not localmode: |
| 1261 | status['message'] = 'starting' |
| 1262 | status['completed'] = '0' |
| 1263 | status['total'] = str(totalsims) |
| 1264 | send_status(statdoc) |
| 1265 | |
| 1266 | for param in eparamlist: |
| 1267 | # Process only entries in JSON that have 'testbenches' record |
| 1268 | if 'testbenches' not in param: |
| 1269 | continue |
| 1270 | |
| 1271 | # Information needed to construct the filenames |
| 1272 | simtype = param['method'] |
| 1273 | |
| 1274 | # For methods with ":", the filename is the part before the colon. |
| 1275 | methodname = simtype.split(":") |
| 1276 | if len(methodname) > 1: |
| 1277 | testbench = methodname[0] |
| 1278 | submethod = ":" + methodname[1] |
| 1279 | else: |
| 1280 | testbench = simtype |
| 1281 | submethod = "" |
| 1282 | |
| 1283 | # Simple outputs are followed by a single value |
| 1284 | outrex = re.compile("[ \t]*\"?([^ \t\"]+)\"?(.*)$", re.IGNORECASE) |
| 1285 | # conditions always follow as key=value pairs |
| 1286 | dictrex = re.compile("[ \t]*([^ \t=]+)=([^ \t=]+)(.*)$", re.IGNORECASE) |
| 1287 | # conditions specified as min:step:max match a result vector. |
| 1288 | steprex = re.compile("[ \t]*([^:]+):([^:]+):([^:]+)$", re.IGNORECASE) |
| 1289 | # specification of units as a binary, hex, etc., string in verilog format |
| 1290 | binrex = re.compile(r'([0-9]*)\'([bodh])', re.IGNORECASE) |
| 1291 | |
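        # For reference, a typical echoed result line matched by these
        # patterns looks like (names and numbers hypothetical):
        #
        #   <prefix> = 5.72e+01 Vdd=3.3 temp=27
        #
        # where the leading value(s) are results and the key=value pairs are
        # the conditions under which they were measured.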
| 1292 | paramresult = [] # List of results |
| 1293 | paramname = 'RESULT' # Name of the result parameter (default 'RESULT') |
| 1294 | condresult = {} # Dictionary of condition names and values for each result |
| 1295 | simfailures = 0 # Track simulations that don't generate results |
| 1296 | |
        # Run ngspice on each prepared simulation file.
        # Note: ngspice writes its progress display directly to the TTY,
        # bypassing stdout and stderr, so that it can update the simulation
        # time at the bottom of the screen without scrolling.  This should be
        # suppressed in ngspice if possible, since writing to the TTY is poor
        # behavior for a batch-mode run.
| 1302 | |
| 1303 | testbenches = param['testbenches'] |
| 1304 | print('Files to simulate method ' + testbenches[0]['prefix'] + ': ' + str(len(testbenches))) |
| 1305 | |
| 1306 | for testbench in testbenches: |
| 1307 | filename = testbench['filename'] |
| 1308 | filessimmed.append(filename) |
| 1309 | fileprefix = testbench['prefix'] |
| 1310 | # All output lines start with prefix |
| 1311 | outrexall = re.compile(fileprefix + submethod + "[ \t]+=?[ \t]*(.+)$", re.IGNORECASE) |
| 1312 | # "measure" statements act on results of individual simulations, |
| 1313 | # so keep the results separate until after measurements have been made |
| 1314 | locparamresult = [] |
| 1315 | loccondresult = {} |
| 1316 | locvarresult = {} |
| 1317 | |
| 1318 | # Cosimulation: If there is a '.tv' file in the simulation directory |
| 1319 | # with the same root name as the netlist file, then run iverilog and |
| 1320 | # vvp. vvp will call ngspice from the verilog. |
| 1321 | verilog = os.path.splitext(filename)[0] + '.tv' |
| 1322 | my_env = os.environ.copy() |
| 1323 | if os.path.exists(verilog): |
| 1324 | cosim = True |
| 1325 | simulator = '/ef/apps/bin/vvp' |
| 1326 | simargs = ['-M.', '-md_hdl_vpi'] |
| 1327 | filename = verilog + 'o' |
| 1328 | # Copy the d_hdl object file into the simulation directory |
| 1329 | shutil.copy('/ef/efabless/lib/iverilog/d_hdl_vpi.vpi', simfiles_path) |
| 1330 | # Generate the output executable (.tvo) file for vvp. |
| 1331 | subprocess.call(['/ef/apps/bin/iverilog', '-o' + filename, verilog]) |
| 1332 | # Specific version of ngspice must be used for cosimulation |
| 1333 | # (Deprecated; default version of ngspice now supports cosimulation) |
| 1334 | # my_env['NGSPICE_VERSION'] = 'cosim1' |
| 1335 | |
| 1336 | # There must not be a file 'simulator_pipe' in the directory or vvp will fail. |
| 1337 | if os.path.exists('simulator_pipe'): |
| 1338 | os.remove('simulator_pipe') |
| 1339 | else: |
| 1340 | cosim = False |
| 1341 | simulator = '/ef/apps/bin/ngspice' |
| 1342 | simargs = ['-b'] |
| 1343 | # Do not generate LXT files, as CACE does not have any methods to handle |
| 1344 | # the data in them anyway. |
| 1345 | my_env['NGSPICE_LXT2NO'] = '1' |
| 1346 | |
| 1347 | # ngspice writes to both stdout and stderr; capture all |
| 1348 | # output equally. Print each line in real-time, flush the |
| 1349 | # output buffer, and then accumulate the lines for processing. |
| 1350 | |
| 1351 | # Note: bufsize = 1 and universal_newlines = True sets line-buffered output |
| 1352 | |
| 1353 | print('Running: ' + simulator + ' ' + ' '.join(simargs) + ' ' + filename) |
| 1354 | |
| 1355 | with subprocess.Popen([simulator, *simargs, filename], |
| 1356 | stdout=subprocess.PIPE, |
| 1357 | bufsize=1, universal_newlines=True, env=my_env) as spiceproc: |
| 1358 | for line in spiceproc.stdout: |
| 1359 | print(line, end='') |
| 1360 | sys.stdout.flush() |
| 1361 | |
| 1362 | # Each netlist can have as many results as there are in |
| 1363 | # the "measurements" list for the electrical parameter, |
| 1364 | # grouped according to common testbench netlist file and |
| 1365 | # common set of conditions. |
| 1366 | |
| 1367 | matchline = outrexall.match(line) |
| 1368 | if matchline: |
| 1369 | # Divide result into tokens. Space-separated values in quotes |
| 1370 | # become a result vector; all other entries should be in the |
| 1371 | # form <key>=<value>. Result value becomes "result":[<vector>] |
| 1372 | # dictionary entry. |
| 1373 | rest = matchline.group(1) |
| 1374 | |
| 1375 | # ASCII file format handling: Data are in the indicated |
| 1376 | # file in pairs of analysis variable (e.g., TIME for transients) |
| 1377 | # and 'result'. Note that the analysis variable is |
| 1378 | # always the first and every other column of the data file. |
| 1379 | # The primary result is implicit. All other columns |
| 1380 | # must be explicitly called out on the echo line. |
| 1381 | if '.data' in rest: |
| 1382 | print('Reading data from ASCII file.') |
| 1383 | |
| 1384 | # "variables" are similar to conditions but describe what is |
| 1385 | # being output from ngspice. There should be one entry for |
| 1386 | # each (unique) column in the data file, matching the names |
| 1387 | # given in the testbench file. |
| 1388 | |
| 1389 | if 'variables' in param: |
| 1390 | pvars = param['variables'] |
| 1391 | # Convert any old-style condition, pin |
| 1392 | for var in pvars: |
| 1393 | if 'pin' in var: |
| 1394 | if not ':' in var['condition']: |
| 1395 | var['condition'] += ':' + var['pin'] |
| 1396 | var.pop('pin') |
| 1397 | else: |
| 1398 | pvars = [] |
| 1399 | |
| 1400 | # Parse all additional variables. At least one (the |
| 1401 | # analysis variable) must be specified. |
| 1402 | data_args = [] |
| 1403 | extra = rest.split() |
| 1404 | |
| 1405 | if len(extra) == 1: |
| 1406 | # If the testbench specifies no vectors, then they |
| 1407 | # must all be specified in order in 'variables' in |
| 1408 | # the datasheet entry for the electrical parameters. |
| 1409 | for var in pvars: |
| 1410 | extra.append(var['condition']) |
| 1411 | if not pvars: |
| 1412 | print('Error: No variables specified in testbench or datasheet.') |
| 1413 | rest = '' |
| 1414 | |
| 1415 | if len(extra) > 1: |
| 1416 | for varname in extra[1:]: |
| 1417 | if varname not in locvarresult: |
| 1418 | locvarresult[varname] = [] |
| 1419 | data_args.append(locvarresult[varname]) |
| 1420 | |
| 1421 | rsize = read_ascii_datafile(extra[0], *data_args) |
| 1422 | |
| 1423 | # All values in extra[1:] should be param['variables']. If not, add |
| 1424 | # an entry and flag a warning because information may be incomplete. |
| 1425 | |
| 1426 | for varname in extra[1:]: |
| 1427 | try: |
| 1428 | var = next(item for item in pvars if item['condition'] == varname) |
| 1429 | except StopIteration: |
| 1430 | print('Variable ' + varname + ' not specified; ', end='') |
| 1431 | print('information may be incomplete.') |
| 1432 | var = {} |
| 1433 | var['condition'] = varname |
| 1434 | pvars.append(var) |
| 1435 | |
| 1436 | # By default, the 2nd variable (the one after the analysis variable) is the result |
| 1437 | if len(extra) > 2: |
| 1438 | varname = extra[2] |
| 1439 | varrec = next(item for item in pvars if item['condition'] == varname) |
| 1440 | varrec['result'] = True |
| 1441 | print('Setting condition ' + varname + ' as the result vector.') |
| 1442 | |
| 1443 | # "measure" records are applied to individual simulation outputs, |
| 1444 | # usually to reduce a time-based vector to a single value by |
| 1445 | # measuring a steady-state value, peak-peak, frequency, etc. |
| 1446 | |
| 1447 | if 'measure' in param: |
| 1448 | # Diagnostic |
| 1449 | # print('Applying measurements.') |
| 1450 | |
| 1451 | for measure in param['measure']: |
| 1452 | # Convert any old-style 'condition' + 'pin' pair into condition:pin form |
| 1453 | if 'pin' in measure: |
| 1454 | if not ':' in measure['condition']: |
| 1455 | measure['condition'] += ':' + measure['pin'] |
| 1456 | measure.pop('pin') |
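|      | # apply_measure() presumably reduces the vectors in locvarresult |
|      | # according to this measurement record and returns the new vector length. |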
| 1457 | rsize = apply_measure(locvarresult, measure, pvars) |
| 1458 | # Diagnostic |
| 1459 | # print("after measure, rsize = " + str(rsize)) |
| 1460 | # print("locvarresult = " + str(locvarresult)) |
| 1461 | |
| 1462 | # Now recast locvarresult back into loccondresult. |
| 1463 | for varname in locvarresult: |
| 1464 | varrec = next(item for item in pvars if item['condition'] == varname) |
| 1465 | if 'result' in varrec: |
| 1466 | # print('Result for ' + varname + ' = ' + str(locvarresult[varname])) |
| 1467 | locparamresult = locvarresult[varname] |
| 1468 | paramname = varname |
| 1469 | else: |
| 1470 | # print('Condition ' + varname + ' = ' + str(locvarresult[varname])) |
| 1471 | loccondresult[varname] = locvarresult[varname] |
| 1472 | # Diagnostic |
| 1473 | # print("Variable " + varname + " length = " + str(len(locvarresult[varname]))) |
| 1474 | rest = '' |
| 1475 | |
| 1476 | else: |
| 1477 | # For plots, there are not necessarily any measurements. Just |
| 1478 | # copy the values into locparamresult and loccondresult. |
| 1479 | for varname in locvarresult: |
| 1480 | varrec = next(item for item in pvars if item['condition'] == varname) |
| 1481 | if 'result' in varrec: |
| 1482 | # print('Result for ' + varname + ' = ' + str(locvarresult[varname])) |
| 1483 | locparamresult = locvarresult[varname] |
| 1484 | rsize = len(locparamresult) |
| 1485 | paramname = varname |
| 1486 | else: |
| 1487 | # print('Condition ' + varname + ' = ' + str(locvarresult[varname])) |
| 1488 | loccondresult[varname] = locvarresult[varname] |
| 1489 | rest = '' |
| 1490 | else: |
| 1491 | rsize = 0 |
| 1492 | |
| 1493 | # To-do: Handle raw files in a similar manner to ASCII files. |
| 1494 | |
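|      | # dictrex and outrex are assumed to be compiled earlier: dictrex |
|      | # matches '<name>=<value>' condition tokens at the head of the |
|      | # remaining text, and outrex matches a bare result value. |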
| 1495 | while rest: |
| 1496 | # This code depends on values coming first, followed by conditions. |
| 1497 | matchtext = dictrex.match(rest) |
| 1498 | if matchtext: |
| 1499 | # Diagnostic! |
| 1500 | condname = matchtext.group(1) |
| 1501 | # Append to the condition list |
| 1502 | if condname not in loccondresult: |
| 1503 | loccondresult[condname] = [] |
| 1504 | |
| 1505 | # Find the condition name in the condition list, so values can |
| 1506 | # be converted back to the expected units. |
| 1507 | try: |
| 1508 | condrec = next(item for item in param['conditions'] if item['condition'] == condname) |
| 1509 | except StopIteration: |
| 1510 | condunit = '' |
| 1511 | else: |
| 1512 | condunit = condrec['unit'] |
| 1513 | |
| 1514 | rest = matchtext.group(3) |
| 1515 | matchstep = steprex.match(matchtext.group(2)) |
| 1516 | if matchstep: |
| 1517 | # condition is in form min:step:max, and the |
| 1518 | # number of values must match rsize. |
| 1519 | cmin = float(matchstep.group(1)) |
| 1520 | cstep = float(matchstep.group(2)) |
| 1521 | cmax = float(matchstep.group(3)) |
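|      | # Number of sweep points from cmin to cmax inclusive at step cstep: |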
| 1522 | cnum = int(round((cmax + cstep - cmin) / cstep)) |
| 1523 | if cnum != rsize: |
| 1524 | print("Warning: Number of conditions (" + str(cnum) + ") is not") |
| 1525 | print("equal to the number of results (" + str(rsize) + ")") |
| 1526 | # Back-calculate the correct step size. Usually this |
| 1527 | # means that the testbench did not add margin to the |
| 1528 | # DC or AC stop condition, and the steps fell 1 short of |
| 1529 | # the max. |
| 1530 | if rsize > 1: |
| 1531 | cstep = (float(cmax) - float(cmin)) / float(rsize - 1) |
| 1532 | |
| 1533 | condvec = [] |
| 1534 | for r in range(rsize): |
| 1535 | condvec.append(cmin) |
| 1536 | cmin += cstep |
| 1537 | |
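|      | # spice_unit_unconvert() presumably maps the swept values from base |
|      | # SPICE units back into the units declared for this condition. |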
| 1538 | cresult = spice_unit_unconvert([condunit, condvec]) |
| 1539 | condval = loccondresult[condname] |
| 1540 | for cr in cresult: |
| 1541 | condval.append(str(cr)) |
| 1542 | |
| 1543 | else: |
| 1544 | # If there is a vector of results but only one condition value, copy |
| 1545 | # the condition value for each result. Note that the value may not be numeric. |
| 1546 | |
| 1547 | # (To do: Apply 'measure' records here) |
| 1548 | condval = loccondresult[condname] |
| 1549 | try: |
| 1550 | test = float(matchtext.group(2)) |
| 1551 | except ValueError: |
| 1552 | cval = matchtext.group(2) |
| 1553 | else: |
| 1554 | cval = str(spice_unit_unconvert([condunit, test])) |
| 1555 | for r in range(rsize): |
| 1556 | condval.append(cval) |
| 1557 | else: |
| 1558 | # Not a key=value pair, so must be a result value |
| 1559 | matchtext = outrex.match(rest) |
| 1560 | if matchtext: |
| 1561 | rest = matchtext.group(2) |
| 1562 | rsize += 1 |
| 1563 | # Result value units come directly from the param record. |
| 1564 | if 'unit' in param: |
| 1565 | condunit = param['unit'] |
| 1566 | else: |
| 1567 | condunit = '' |
| 1568 | if binrex.match(condunit): |
| 1569 | # Digital results with units 'b, 'h, etc. are kept as strings. |
| 1570 | locparamresult.append(matchtext.group(1)) |
| 1571 | else: |
| 1572 | locparamresult.append(float(matchtext.group(1))) |
| 1573 | else: |
| 1574 | print('Error: Result line cannot be parsed.') |
| 1575 | print('Bad part of line is: ' + rest) |
| 1576 | print('Full line is: ' + line) |
| 1577 | break |
| 1578 | |
| 1579 | # Values passed in testbench['conditions'] are common to each result |
| 1580 | # value. From one line there are rsize values, so append each known |
| 1581 | # condition to loccondresult rsize times. |
| 1582 | for condrec in testbench['conditions']: |
| 1583 | condname = condrec[0] |
| 1584 | if condname in locvarresult: |
| 1585 | print('Error: name ' + condname + ' is both a variable and a condition!') |
| 1586 | print('Ignoring the condition.') |
| 1587 | continue |
| 1588 | if condname not in loccondresult: |
| 1589 | loccondresult[condname] = [] |
| 1590 | condval = loccondresult[condname] |
| 1591 | if 'unit' in condrec: |
| 1592 | condunit = condrec['unit'] |
| 1593 | else: |
| 1594 | condunit = '' |
| 1595 | for r in range(rsize): |
| 1596 | if condname.split(':')[0] == 'DIGITAL' or condname == 'CORNER': |
| 1597 | # Values that are known to be strings |
| 1598 | condval.append(condrec[2]) |
| 1599 | elif binrex.match(condunit): |
| 1600 | # Alternate digital specification using units 'b, 'h, etc. |
| 1601 | condval.append(condrec[2]) |
| 1602 | elif condname == 'ITERATIONS': |
| 1603 | # Values that are known to be integers |
| 1604 | condval.append(int(float(condrec[2]))) |
| 1605 | else: |
| 1606 | # All other values are treated as floats unless |
| 1607 | # they are non-numeric, in which case they are |
| 1608 | # treated as strings and copied as-is. |
| 1609 | try: |
| 1610 | condval.append(float(condrec[2])) |
| 1611 | except ValueError: |
| 1612 | # Values that are not numeric just get copied |
| 1613 | condval.append(condrec[2]) |
| 1614 | |
| 1615 | spiceproc.stdout.close() |
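|      | # Wait for the simulator to exit; a nonzero exit status raises |
|      | # CalledProcessError for this netlist. |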
| 1616 | return_code = spiceproc.wait() |
| 1617 | if return_code != 0: |
| 1618 | raise subprocess.CalledProcessError(return_code, 'ngspice') |
| 1619 | |
| 1620 | if len(locparamresult) > 0: |
| 1621 | # Fold local results into total results |
| 1622 | paramresult.extend(locparamresult) |
| 1623 | for key in loccondresult: |
| 1624 | if not key in condresult: |
| 1625 | condresult[key] = loccondresult[key] |
| 1626 | else: |
| 1627 | condresult[key].extend(loccondresult[key]) |
| 1628 | |
| 1629 | else: |
| 1630 | # Catch simulation failures |
| 1631 | simfailures += 1 |
| 1632 | |
| 1633 | simulations += 1 |
| 1634 | |
| 1635 | # Clean up the pipe file after cosimulation, along with the .lxt and .tvo files |
| 1636 | if cosim: |
| 1637 | if os.path.exists('simulator_pipe'): |
| 1638 | os.remove('simulator_pipe') |
| 1639 | # Remove all '.tvo', '.lxt', '.vcd', and '.vpi' files from the work area. |
| 1640 | if keepmode == False: |
| 1641 | files = os.listdir(simfiles_path) |
| 1642 | for filename in files: |
| 1643 | try: |
| 1644 | fileext = os.path.splitext(filename)[1] |
| 1645 | except: |
| 1646 | pass |
| 1647 | else: |
| 1648 | if fileext == '.lxt' or fileext == '.vcd' or fileext == '.tvo' or fileext == '.vpi': |
| 1649 | os.remove(filename) |
| 1650 | |
| 1651 | |
| 1652 | # Other files to clean up |
| 1653 | if os.path.exists('b3v32check.log'): |
| 1654 | os.remove('b3v32check.log') |
| 1655 | |
| 1656 | # Status |
| 1657 | if statdoc and not localmode: |
| 1658 | if simulations < totalsims: |
| 1659 | status['message'] = 'in progress' |
| 1660 | else: |
| 1661 | status['message'] = 'completed' |
| 1662 | status['completed'] = str(simulations) |
| 1663 | status['total'] = str(totalsims) |
| 1664 | send_status(statdoc) |
| 1665 | |
| 1666 | # Evaluate concatenated results after all files for this electrical parameter |
| 1667 | # have been run through simulation. |
| 1668 | |
| 1669 | if paramresult: |
| 1670 | print(simtype + ':') |
| 1671 | |
| 1672 | # Diagnostic |
| 1673 | # print("paramresult length " + str(len(paramresult))) |
| 1674 | # for key in condresult: |
| 1675 | # print("condresult length " + str(len(condresult[key]))) |
| 1676 | |
| 1677 | # Write out all results into the JSON file. |
| 1678 | # Results are a list of lists; the first list holds the output |
| 1679 | # names, the second the units, and the remaining lists are rows of |
| 1680 | # values corresponding to unique conditions. The first item in each |
| 1681 | # row is the result value for that set of conditions. |
| 1682 | |
| 1683 | # Always keep results, even for remote CACE. |
| 1684 | |
| 1685 | outnames = [paramname] |
| 1686 | outunits = [] |
| 1687 | |
| 1688 | if 'unit' in param: |
| 1689 | outunits.append(param['unit']) |
| 1690 | else: |
| 1691 | outunits.append('') |
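|      | # Append a name and unit for each condition or variable, taking the |
|      | # unit from 'conditions' first and then from 'variables'. |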
| 1692 | for key in condresult: |
| 1693 | outnames.append(key) |
| 1694 | try: |
| 1695 | condrec = next(item for item in param['conditions'] if item['condition'] == key) |
| 1696 | except: |
| 1697 | try: |
| 1698 | condrec = next(item for item in param['variables'] if item['condition'] == key) |
| 1699 | except: |
| 1700 | outunits.append('') |
| 1701 | else: |
| 1702 | if 'unit' in condrec: |
| 1703 | outunits.append(condrec['unit']) |
| 1704 | # 'variable' entries need to be unconverted |
| 1705 | cconv = spice_unit_unconvert([condrec['unit'], condresult[key]]) |
| 1706 | condresult[key] = cconv |
| 1707 | else: |
| 1708 | outunits.append('') |
| 1709 | else: |
| 1710 | if 'unit' in condrec: |
| 1711 | outunits.append(condrec['unit']) |
| 1712 | else: |
| 1713 | outunits.append('') |
| 1714 | |
| 1715 | # Evaluate a script to transform the output, if there is an 'evaluate' |
| 1716 | # record in the electrical parameter. |
| 1717 | |
| 1718 | if 'evaluate' in param: |
| 1719 | |
| 1720 | evalrec = param['evaluate'] |
| 1721 | try: |
| 1722 | tool = evalrec['tool'] |
| 1723 | except: |
| 1724 | print("Error: Evaluate record does not indicate a tool to run.") |
| 1725 | break |
| 1726 | else: |
| 1727 | if tool != 'octave' and tool != 'matlab': |
| 1728 | print("Error: CACE does not know how to use tool '" + tool + "'") |
| 1729 | break |
| 1730 | |
| 1731 | try: |
| 1732 | script = evalrec['script'] |
| 1733 | except: |
| 1734 | print("Error: Evaluate record does not indicate a script to run.") |
| 1735 | break |
| 1736 | else: |
| 1737 | if os.path.isdir(root_path + '/testbench'): |
| 1738 | tb_path = root_path + '/testbench/' + script |
| 1739 | if not os.path.exists(tb_path): |
| 1740 | if os.path.exists(tb_path + '.m'): |
| 1741 | tb_path += '.m' |
| 1742 | else: |
| 1743 | print("Error: No script '" + script + "' found in testbench path.") |
| 1744 | break |
| 1745 | else: |
| 1746 | print("Error: testbench directory not found in root path.") |
| 1747 | break |
| 1748 | |
| 1749 | # General-purpose tool-based evaluation. For complex operations of |
| 1750 | # any kind, dump the simulation results to a file "results.dat" and |
| 1751 | # invoke the specified tool, which should read the results and |
| 1752 | # generate output in the form of a modified 'paramresult'. |
| 1753 | # e.g., the input is an array of transient vectors and the output is |
| 1754 | # an FFT analysis, or the input is a voltage and the output is an INL |
| 1755 | # value. Note that 'unit' is the unit produced by the script; the script |
| 1756 | # is expected to know what units it receives and what it produces. |
| 1757 | |
| 1758 | # Create octave-compatible output with structures for the condition |
| 1759 | # names, units, and data. |
| 1760 | with open('results.dat', 'w') as ofile: |
| 1761 | print('# Created by cace_launch.py', file=ofile) |
| 1762 | print('# name: results', file=ofile) |
| 1763 | print('# type: scalar struct', file=ofile) |
| 1764 | print('# ndims: 2', file=ofile) |
| 1765 | print('# 1 1', file=ofile) |
| 1766 | numentries = len(outnames) |
| 1767 | print('# length: ' + str(2 + numentries), file=ofile) |
| 1768 | print('# name: NAMES', file=ofile) |
| 1769 | print('# type: cell', file=ofile) |
| 1770 | print('# rows: ' + str(numentries), file=ofile) |
| 1771 | print('# columns: 1', file=ofile) |
| 1772 | for name in outnames: |
| 1773 | print('# name: <cell-element>', file=ofile) |
| 1774 | print('# type: sq_string', file=ofile) |
| 1775 | print('# elements: 1', file=ofile) |
| 1776 | print('# length: ' + str(len(name)), file=ofile) |
| 1777 | print(name, file=ofile) |
| 1778 | print('', file=ofile) |
| 1779 | print('', file=ofile) |
| 1780 | |
| 1781 | print('', file=ofile) |
| 1782 | print('', file=ofile) |
| 1783 | print('# name: UNITS', file=ofile) |
| 1784 | print('# type: cell', file=ofile) |
| 1785 | print('# rows: ' + str(len(outunits)), file=ofile) |
| 1786 | print('# columns: 1', file=ofile) |
| 1787 | for unit in outunits: |
| 1788 | print('# name: <cell-element>', file=ofile) |
| 1789 | print('# type: sq_string', file=ofile) |
| 1790 | print('# elements: 1', file=ofile) |
| 1791 | print('# length: ' + str(len(unit)), file=ofile) |
| 1792 | print(unit, file=ofile) |
| 1793 | print('', file=ofile) |
| 1794 | print('', file=ofile) |
| 1795 | print('', file=ofile) |
| 1796 | print('', file=ofile) |
| 1797 | |
| 1798 | # Each condition is output as a 1D array entry in the structure |
| 1799 | # (see below for how the entries are named). If the unit |
| 1800 | # is empty then the array is written as strings. Otherwise, the |
| 1801 | # array is numeric (as far as octave is concerned). |
| 1802 | |
| 1803 | # First entry is the result (paramresult). This should never |
| 1804 | # be a string (at least not in this version of CACE) |
| 1805 | |
| 1806 | idx = 0 |
| 1807 | print('# name: ' + outnames[idx], file=ofile) |
| 1808 | units = outunits[idx] |
| 1809 | print('# type: matrix', file=ofile) |
| 1810 | print('# rows: ' + str(len(paramresult)), file=ofile) |
| 1811 | print('# columns: 1', file=ofile) |
| 1812 | for value in paramresult: |
| 1813 | print(' ' + str(value), file=ofile) |
| 1814 | print('', file=ofile) |
| 1815 | print('', file=ofile) |
| 1816 | |
| 1817 | idx += 1 |
| 1818 | # The rest of the entries are the conditions. Note that the |
| 1819 | # name must be a valid octave variable (letters, numbers, |
| 1820 | # underscores) and so cannot use the condition name. However, |
| 1821 | # each condition name is held in the names list, so it can be |
| 1822 | # recovered. Each condition is called CONDITION2, CONDITION3, |
| 1823 | # etc. |
| 1824 | |
| 1825 | for key, entry in condresult.items(): |
| 1826 | |
| 1827 | print('# name: CONDITION' + str(idx + 1), file=ofile) |
| 1828 | units = outunits[idx] |
| 1829 | if units == '': |
| 1830 | # Use cell array for strings |
| 1831 | print('# type: cell', file=ofile) |
| 1832 | print('# rows: ' + str(len(entry)), file=ofile) |
| 1833 | print('# columns: 1', file=ofile) |
| 1834 | for value in entry: |
| 1835 | print('# name: <cell-element>', file=ofile) |
| 1836 | print('# type: sq_string', file=ofile) |
| 1837 | print('# elements: 1', file=ofile) |
| 1838 | print('# length: ' + str(len(str(value))), file=ofile) |
| 1839 | print(str(value), file=ofile) |
| 1840 | print('', file=ofile) |
| 1841 | print('', file=ofile) |
| 1842 | else: |
| 1843 | print('# type: matrix', file=ofile) |
| 1844 | print('# rows: ' + str(len(entry)), file=ofile) |
| 1845 | print('# columns: 1', file=ofile) |
| 1846 | for value in entry: |
| 1847 | print(' ' + str(value), file=ofile) |
| 1848 | |
| 1849 | print('', file=ofile) |
| 1850 | print('', file=ofile) |
| 1851 | idx += 1 |
| 1852 | |
| 1853 | # Now run the specified octave script on the result. The script |
| 1854 | # writes its processed values to stdout, one value per line, and |
| 1855 | # they are read back below. May want to watch stderr for error |
| 1856 | # messages and/or handle the exit status. |
| 1857 | |
| 1858 | postproc = subprocess.Popen(['/ef/apps/bin/octave-cli', tb_path], |
| 1859 | stdout = subprocess.PIPE) |
| 1860 | rvalues = postproc.communicate()[0].decode('ascii').splitlines() |
| 1861 | |
| 1862 | # Replace paramresult with the numeric result |
| 1863 | paramresult = list(float(item) for item in rvalues) |
| 1864 | |
| 1865 | # pconv is paramresult scaled to the units used by param. |
| 1866 | if 'unit' in param: |
| 1867 | pconv = spice_unit_unconvert([param['unit'], paramresult]) |
| 1868 | else: |
| 1869 | pconv = paramresult |
| 1870 | |
| 1871 | outresult = [] |
| 1872 | outresult.append(outnames) |
| 1873 | outresult.append(outunits) |
| 1874 | |
| 1875 | for p in range(len(pconv)): |
| 1876 | outvalues = [] |
| 1877 | outvalues.append(str(pconv[p])) |
| 1878 | for key, value in condresult.items(): |
| 1879 | try: |
| 1880 | outvalues.append(str(value[p])) |
| 1881 | except IndexError: |
| 1882 | # Note: This should not happen. . . |
| 1883 | print("Error: number of values in result and conditions do not match!") |
| 1884 | print("Result: " + str(len(pconv))) |
| 1885 | print("Conditions: " + str(len(condresult))) |
| 1886 | break |
| 1887 | |
| 1888 | outresult.append(outvalues) |
| 1889 | |
| 1890 | param['results'] = outresult |
| 1891 | |
| 1892 | if 'unit' in param: |
| 1893 | units = param['unit'] |
| 1894 | else: |
| 1895 | units = '' |
| 1896 | |
| 1897 | # Catch simulation failures. |
| 1898 | if simfailures > 0: |
| 1899 | print('Simulation failures: ' + str(simfailures)) |
| 1900 | score = 'fail' |
| 1901 | |
| 1902 | if 'min' in param: |
| 1903 | minrec = param['min'] |
| 1904 | if 'calc' in minrec: |
| 1905 | calc = minrec['calc'] |
| 1906 | else: |
| 1907 | calc = 'min-above' |
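|      | # calculate() presumably compares the converted results against the |
|      | # target and returns either 'fail' or a numeric score; the running |
|      | # score is only updated if it has not already failed. |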
| 1908 | minscore = calculate(minrec, pconv, condresult, calc, score, units, param) |
| 1909 | if score != 'fail': |
| 1910 | score = minscore |
| 1911 | |
| 1912 | if 'max' in param: |
| 1913 | maxrec = param['max'] |
| 1914 | if 'calc' in maxrec: |
| 1915 | calc = maxrec['calc'] |
| 1916 | else: |
| 1917 | calc = 'max-below' |
| 1918 | maxscore = calculate(maxrec, pconv, condresult, calc, score, units, param) |
| 1919 | if score != 'fail': |
| 1920 | score = maxscore |
| 1921 | |
| 1922 | if 'typ' in param: |
| 1923 | typrec = param['typ'] |
| 1924 | if 'calc' in typrec: |
| 1925 | calc = typrec['calc'] |
| 1926 | else: |
| 1927 | calc = 'avg-legacy' |
| 1928 | typscore = calculate(typrec, pconv, condresult, calc, score, units, param) |
| 1929 | if score != 'fail': |
| 1930 | score = typscore |
| 1931 | |
| 1932 | if 'plot' in param: |
| 1933 | # If not in local mode, or if in bypass or plot mode, then create |
| 1934 | # a plot and save it to a file. |
| 1935 | plotrec = param['plot'] |
| 1936 | if localmode == False or bypassmode == True or plotmode == True: |
| 1937 | if 'variables' in param: |
| 1938 | variables = param['variables'] |
| 1939 | else: |
| 1940 | variables = [] |
| 1941 | result = cace_makeplot.makeplot(plotrec, param['results'], variables) |
| 1942 | # New behavior implemented 3/28/2017: Always keep results. |
| 1943 | # param.pop('results') |
| 1944 | if result: |
| 1945 | plotrec['status'] = 'done' |
| 1946 | has_aux_files = True |
| 1947 | else: |
| 1948 | print('Failure: No plot from file ' + filename + '\n') |
| 1949 | else: |
| 1950 | plotrec['status'] = 'done' |
| 1951 | else: |
| 1952 | try: |
| 1953 | print('Failure: No output from file ' + filename + '\n') |
| 1954 | except NameError: |
| 1955 | print('Failure: No simulation file, so no output\n') |
| 1956 | continue |
| 1957 | |
| 1958 | # Handle errors where simulation generated no output. |
| 1959 | # This is the one case where 'typ' can be treated as pass-fail. |
| 1960 | # "score" will be set to "fail" for any of "min", "max", and |
| 1961 | # "typ" that exists in the electrical parameters record and |
| 1962 | # which specifies a target value. "value" is set to "failure" |
| 1963 | # for display. |
| 1964 | score = 'fail' |
| 1965 | if 'typ' in param: |
| 1966 | typrec = param['typ'] |
| 1967 | if 'target' in typrec: |
| 1968 | typrec['score'] = 'fail' |
| 1969 | typrec['value'] = 'failure' |
| 1970 | if 'max' in param: |
| 1971 | maxrec = param['max'] |
| 1972 | if 'target' in maxrec: |
| 1973 | maxrec['score'] = 'fail' |
| 1974 | maxrec['value'] = 'failure' |
| 1975 | if 'min' in param: |
| 1976 | minrec = param['min'] |
| 1977 | if 'target' in minrec: |
| 1978 | minrec['score'] = 'fail' |
| 1979 | minrec['value'] = 'failure' |
| 1980 | |
| 1981 | # Pop the testbenches record, which has been replaced by the 'results' record. |
| 1982 | param.pop('testbenches') |
| 1983 | |
| 1984 | # Final cleanup step: Remove any remaining '.tv' files from the work area. |
| 1985 | if keepmode == False: |
| 1986 | files = os.listdir(simfiles_path) |
| 1987 | for filename in files: |
| 1988 | try: |
| 1989 | fileext = os.path.splitext(filename)[1] |
| 1990 | except: |
| 1991 | pass |
| 1992 | else: |
| 1993 | if fileext == '.tv': |
| 1994 | os.remove(filename) |
| 1995 | |
| 1996 | # Report the final score, and save it to the JSON data |
| 1997 | |
| 1998 | print('Completed ' + str(simulations) + ' of ' + str(totalsims) + ' simulations') |
| 1999 | print('Circuit pre-extraction simulation total score (lower is better) = ' |
| 2000 | + str(score)) |
| 2001 | |
| 2002 | if score == 'fail': |
| 2003 | dsheet['score'] = 'fail' |
| 2004 | else: |
| 2005 | dsheet['score'] = '{0:.4g}'.format(score) |
| 2006 | |
| 2007 | # Now handle physical parameters |
| 2008 | netlist_source = dsheet['netlist-source'] |
| 2009 | areaval = 0.0 |
| 2010 | |
| 2011 | totalchecks = 0 |
| 2012 | for param in pparamlist: |
| 2013 | if 'check' in param: |
| 2014 | totalchecks += 1 |
| 2015 | print('Total physical parameters to check: ' + str(totalchecks)) |
| 2016 | |
| 2017 | for param in pparamlist: |
| 2018 | # Process only entries in JSON that have the 'check' record |
| 2019 | if 'check' not in param: |
| 2020 | continue |
| 2021 | if param['check'] != 'true': |
| 2022 | continue |
| 2023 | |
| 2024 | cond = param['condition'] |
| 2025 | |
| 2026 | if cond == 'device_area': |
| 2027 | areaest = 0 |
| 2028 | ipname = dsheet['ip-name'] |
| 2029 | foundry = dsheet['foundry'] |
| 2030 | node = dsheet['node'] |
| 2031 | # Hack---node XH035 should have been specified as EFXH035A; allow |
| 2032 | # the original one for backwards compatibility. |
| 2033 | if node == 'XH035': |
| 2034 | node = 'EFXH035A' |
| 2035 | |
| 2036 | if layout_path and netlist_path: |
| 2037 | |
| 2038 | # Run the device area (area estimation) script |
| 2039 | if os.path.exists(netlist_path + '/' + ipname + '.spi'): |
| 2040 | estproc = subprocess.Popen(['/ef/efabless/bin/layout_estimate.py', |
| 2041 | netlist_path + '/' + ipname + '.spi', node.lower()], |
| 2042 | stdout=subprocess.PIPE, |
| 2043 | cwd = layout_path, universal_newlines = True) |
| 2044 | outlines = estproc.communicate()[0] |
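|      | # layout_estimate.py is expected to report the estimate on a line of |
|      | # the form '... = <area> um^2'; capture the integer value. |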
| 2045 | arealine = re.compile('.*=[ \t]*([0-9]+)[ \t]*um\^2') |
| 2046 | for line in outlines.splitlines(): |
| 2047 | lmatch = arealine.match(line) |
| 2048 | if lmatch: |
| 2049 | areaum2 = lmatch.group(1) |
| 2050 | areaest = int(areaum2) |
| 2051 | |
| 2052 | if areaest > 0: |
| 2053 | score = 'pass' |
| 2054 | maxrec = param['max'] |
| 2055 | targarea = float(maxrec['target']) |
| 2056 | maxrec['value'] = str(areaest) |
| 2057 | if 'penalty' in maxrec: |
| 2058 | if maxrec['penalty'] == 'fail': |
| 2059 | if areaest > targarea: |
| 2060 | score = 'fail' |
| 2061 | else: |
| 2062 | score = 'pass' |
| 2063 | else: |
| 2064 | try: |
| 2065 | if areaest > targarea: |
| 2066 | score = str((areaest - targarea) * float(maxrec['penalty'])) |
| 2067 | else: |
| 2068 | score = 'pass' |
| 2069 | except: |
| 2070 | if areaest > targarea: |
| 2071 | score = maxrec['penalty'] |
| 2072 | else: |
| 2073 | score = 'pass' |
| 2074 | else: |
| 2075 | score = 'pass' |
| 2076 | maxrec['score'] = score |
| 2077 | |
| 2078 | if cond == 'area' or cond == 'height' or cond == 'width': |
| 2079 | |
| 2080 | # The first time any of these conditions is seen, run the check and get the values |
| 2081 | |
| 2082 | if areaval == 0 and not netlist_source == 'schematic': |
| 2083 | |
| 2084 | ipname = dsheet['ip-name'] |
| 2085 | foundry = dsheet['foundry'] |
| 2086 | node = dsheet['node'] |
| 2087 | # Hack---node XH035 should have been specified as EFXH035A; allow |
| 2088 | # the original one for backwards compatibility. |
| 2089 | if node == 'XH035': |
| 2090 | node = 'EFXH035A' |
| 2091 | |
| 2092 | if layout_path: |
| 2093 | |
| 2094 | # Find the layout directory and check if there is a layout |
| 2095 | # for the cell there. If not, use the layout estimation |
| 2096 | # script. Result is either an actual area or an area estimate. |
| 2097 | |
| 2098 | if os.path.exists(layout_path + '/' + ipname + '.mag'): |
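|      | # Drive magic in batch mode: select the top cell, print its bounding |
|      | # box, and parse the 'microns:' line for width, height, and area. |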
| 2099 | areaproc = subprocess.Popen(['/ef/apps/bin/magic', |
| 2100 | '-dnull', '-noconsole', layout_path + '/' + ipname + '.mag'], |
| 2101 | stdin = subprocess.PIPE, stdout = subprocess.PIPE, |
| 2102 | cwd = layout_path, universal_newlines = True) |
| 2103 | areaproc.stdin.write("select top cell\n") |
| 2104 | areaproc.stdin.write("box\n") |
| 2105 | areaproc.stdin.write("quit -noprompt\n") |
| 2106 | outlines = areaproc.communicate()[0] |
| 2107 | magrex = re.compile('microns:[ \t]+([0-9.]+)[ \t]*x[ \t]*([0-9.]+)[ \t]+.*[ \t]+([0-9.]+)[ \t]*$') |
| 2108 | for line in outlines.splitlines(): |
| 2109 | lmatch = magrex.match(line) |
| 2110 | if lmatch: |
| 2111 | widthval = float(lmatch.group(1)) |
| 2112 | heightval = float(lmatch.group(2)) |
| 2113 | areaval = float(lmatch.group(3)) |
| 2114 | |
| 2115 | if areaval > 0: |
| 2116 | |
| 2117 | # Now work through the physical parameters --- pass 1 |
| 2118 | # If area was estimated, then find target width and height |
| 2119 | # for estimating actual width and height. |
| 2120 | |
| 2121 | for checkparam in dsheet['physical-params']: |
| 2122 | checkcond = checkparam['condition'] |
| 2123 | maxrec = checkparam['max'] |
| 2124 | if checkcond == 'area': |
| 2125 | targarea = float(maxrec['target']) |
| 2126 | elif checkcond == 'width': |
| 2127 | targwidth = float(maxrec['target']) |
| 2128 | elif checkcond == 'height': |
| 2129 | targheight = float(maxrec['target']) |
| 2130 | |
| 2131 | maxrec = param['max'] |
| 2132 | unit = param['unit'] |
| 2133 | |
| 2134 | if cond == 'area': |
| 2135 | if areaval > 0: |
| 2136 | maxrec['value'] = str(areaval) |
| 2137 | if areaval > targarea: |
| 2138 | score = 'fail' |
| 2139 | maxrec['score'] = 'fail' |
| 2140 | else: |
| 2141 | maxrec['score'] = 'pass' |
| 2142 | elif cond == 'width': |
| 2143 | if areaval > 0: |
| 2144 | maxrec['value'] = str(widthval) |
| 2145 | if widthval > targwidth: |
| 2146 | score = 'fail' |
| 2147 | maxrec['score'] = 'fail' |
| 2148 | else: |
| 2149 | maxrec['score'] = 'pass' |
| 2150 | |
| 2151 | elif cond == 'height': |
| 2152 | if areaval > 0: |
| 2153 | maxrec['value'] = str(heightval) |
| 2154 | if heightval > targheight: |
| 2155 | score = 'fail' |
| 2156 | maxrec['score'] = 'fail' |
| 2157 | else: |
| 2158 | maxrec['score'] = 'pass' |
| 2159 | |
| 2160 | elif cond == 'DRC_errors': |
| 2161 | |
| 2162 | ipname = dsheet['ip-name'] |
| 2163 | |
| 2164 | if layout_path and not netlist_source == 'schematic': |
| 2165 | if os.path.exists(layout_path + '/' + ipname + '.mag'): |
| 2166 | |
| 2167 | # Find the layout directory and check if there is a layout |
| 2168 | # for the cell there. |
| 2169 | |
| 2170 | areaproc = subprocess.Popen(['/ef/apps/bin/magic', |
| 2171 | '-dnull', '-noconsole', layout_path + '/' + ipname + '.mag'], |
| 2172 | stdin = subprocess.PIPE, stdout = subprocess.PIPE, |
| 2173 | cwd = layout_path, universal_newlines = True) |
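|      | # Batch-mode DRC: run a full check on the top cell and print the |
|      | # total error count as 'drc = <count>' for the regexp below. |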
| 2174 | areaproc.stdin.write("drc on\n") |
| 2175 | areaproc.stdin.write("select top cell\n") |
| 2176 | areaproc.stdin.write("drc check\n") |
| 2177 | areaproc.stdin.write("drc catchup\n") |
| 2178 | areaproc.stdin.write("set dcount [drc list count total]\n") |
| 2179 | areaproc.stdin.write("puts stdout \"drc = $dcount\"\n") |
| 2180 | outlines = areaproc.communicate()[0] |
| 2181 | magrex = re.compile('drc[ \t]+=[ \t]+([0-9.]+)[ \t]*$') |
| 2182 | for line in outlines.splitlines(): |
| 2183 | # Diagnostic |
| 2184 | print(line) |
| 2185 | lmatch = magrex.match(line) |
| 2186 | if lmatch: |
| 2187 | drccount = int(lmatch.group(1)) |
| 2188 | maxrec = param['max'] |
| 2189 | maxrec['value'] = str(drccount) |
| 2190 | if drccount > 0: |
| 2191 | maxrec['score'] = 'fail' |
| 2192 | else: |
| 2193 | maxrec['score'] = 'pass' |
| 2194 | |
| 2195 | # Check on LVS from comp.out file (must be more recent than both netlists) |
| 2196 | elif cond == 'LVS_errors': |
| 2197 | ipname = dsheet['ip-name'] |
| 2198 | foundry = dsheet['foundry'] |
| 2199 | node = dsheet['node'] |
| 2200 | # Hack---node XH035 should have been specified as EFXH035A; allow |
| 2201 | # the original one for backwards compatibility. |
| 2202 | if node == 'XH035': |
| 2203 | node = 'EFXH035A' |
| 2204 | |
| 2205 | # To do even a precheck, the layout path must exist and must be populated |
| 2206 | # with the .magicrc file. |
| 2207 | if not os.path.exists(layout_path): |
| 2208 | os.makedirs(layout_path) |
| 2209 | if not os.path.exists(layout_path + '/.magicrc'): |
| 2210 | pdkdir = '/ef/tech/' + foundry + '/' + node + '/libs.tech/magic/current' |
| 2211 | if os.path.exists(pdkdir + '/' + node + '.magicrc'): |
| 2212 | shutil.copy(pdkdir + '/' + node + '.magicrc', layout_path + '/.magicrc') |
| 2213 | |
| 2214 | # Netlists should have been generated by cace_gensim.py |
| 2215 | has_layout_nl = os.path.exists(netlist_path + '/lvs/' + ipname + '.spi') |
| 2216 | has_schem_nl = os.path.exists(netlist_path + '/' + ipname + '.spi') |
| 2217 | has_vlog_nl = os.path.exists(root_path + '/verilog/' + ipname + '.v') |
| 2218 | has_stub_nl = os.path.exists(netlist_path + '/stub/' + ipname + '.spi') |
| 2219 | if has_layout_nl and has_stub_nl and not netlist_source == 'schematic': |
| 2220 | failures = run_and_analyze_lvs(dsheet) |
| 2221 | elif has_layout_nl and has_vlog_nl and not netlist_source == 'schematic': |
| 2222 | failures = run_and_analyze_lvs(dsheet) |
| 2223 | elif netlist_path and has_schem_nl: |
| 2224 | if not has_layout_nl or not has_stub_nl: |
| 2225 | if not has_layout_nl: |
| 2226 | print("Did not find layout LVS netlist " + netlist_path + '/lvs/' + ipname + '.spi') |
| 2227 | if not has_stub_nl: |
| 2228 | print("Did not find stub LVS netlist " + netlist_path + '/stub/' + ipname + '.spi') |
| 2229 | print("Running layout device pre-check.") |
| 2230 | if localmode == True: |
| 2231 | if keepmode == True: |
| 2232 | precheck_opts = ['-log', '-debug'] |
| 2233 | else: |
| 2234 | precheck_opts = ['-log'] |
| 2235 | print('/ef/efabless/bin/layout_precheck.py ' + netlist_path + '/' + ipname + '.spi ' + node.lower() + ' ' + ' '.join(precheck_opts)) |
| 2236 | chkproc = subprocess.Popen(['/ef/efabless/bin/layout_precheck.py', |
| 2237 | netlist_path + '/' + ipname + '.spi', node.lower(), *precheck_opts], |
| 2238 | stdout=subprocess.PIPE, |
| 2239 | cwd = layout_path, universal_newlines = True) |
| 2240 | else: |
| 2241 | chkproc = subprocess.Popen(['/ef/efabless/bin/layout_precheck.py', |
| 2242 | netlist_path + '/' + ipname + '.spi', node.lower()], |
| 2243 | stdout=subprocess.PIPE, |
| 2244 | cwd = layout_path, universal_newlines = True) |
| 2245 | outlines = chkproc.communicate()[0] |
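|      | # layout_precheck.py is expected to report the failure count on a |
|      | # line of the form '... = <count>'. |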
| 2246 | failline = re.compile('.*=[ \t]*([0-9]+)[ \t]*') |
| 2247 | for line in outlines.splitlines(): |
| 2248 | lmatch = failline.match(line) |
| 2249 | if lmatch: |
| 2250 | failures = int(lmatch.group(1)) |
| 2251 | else: |
| 2252 | failures = -1 |
| 2253 | |
| 2254 | if failures >= 0: |
| 2255 | maxrec = param['max'] |
| 2256 | maxrec['value'] = str(failures) |
| 2257 | if failures > int(maxrec['target']): |
| 2258 | score = 'fail' |
| 2259 | maxrec['score'] = 'fail' |
| 2260 | else: |
| 2261 | maxrec['score'] = 'pass' |
| 2262 | |
| 2263 | # Pop the 'check' record, which has been replaced by the 'value' record. |
| 2264 | param.pop('check') |
| 2265 | |
| 2266 | # Remove 'project-folder' from document if it exists, as this document |
| 2267 | # is no longer related to an Open Galaxy account. |
| 2268 | if 'project-folder' in datatop: |
| 2269 | datatop.pop('project-folder') |
| 2270 | |
| 2271 | # Write the annotated JSON file. (NOTE: In the absence of further |
| 2272 | # processing on the CACE side, this file is just getting deleted |
| 2273 | # right after it is made. But the file appears to be correctly |
| 2274 | # pushed back to the marketplace server, so this step could be removed.) |
| 2275 | |
| 2276 | filem = os.path.splitext(inputfile) |
| 2277 | if filem[1]: |
| 2278 | outputfile = filem[0] + '_anno' + filem[1] |
| 2279 | else: |
| 2280 | outputfile = inputfile + '_anno.json' |
| 2281 | |
| 2282 | with open(outputfile, 'w') as ofile: |
| 2283 | json.dump(datatop, ofile, indent = 4) |
| 2284 | |
| 2285 | # Create tarball of auxiliary files and send them as well. |
| 2286 | # Note that the files themselves are tarballed, not the directory |
| 2287 | |
| 2288 | if has_aux_files: |
| 2289 | tar = file_compressor.tar_directory_contents(simfiles_path + '/simulation_files') |
| 2290 | if 'ip-name' in dsheet: |
| 2291 | tarballname = dsheet['ip-name'] + '_result_files.tar.gz' |
| 2292 | else: |
| 2293 | tarballname = 'result_files.tar.gz' |
| 2294 | |
| 2295 | # In addition to dumping the file locally, also send back to the |
| 2296 | # marketplace, along with the tarball of simulation-generated files. |
| 2297 | if postmode == True: |
| 2298 | send_doc(datatop) |
| 2299 | if has_aux_files: |
| 2300 | send_file(hashname, tar, tarballname) |
| 2301 | else: |
| 2302 | print('Posting to marketplace was disabled by -nopost\n') |
| 2303 | |
| 2304 | # Clean up by removing simulation directory |
| 2305 | if keepmode == False: |
| 2306 | if localmode == True: |
| 2307 | print('Simulation results retained per -local option\n') |
| 2308 | # If cace_gensim and cace_launch are run locally, keep the results |
| 2309 | # since they won't be posted, but remove all other generated files. |
| 2310 | os.chdir(simfiles_path) |
| 2311 | if os.path.exists('datasheet.json'): |
| 2312 | os.remove('datasheet.json') |
| 2313 | for filename in filessimmed: |
| 2314 | os.remove(filename) |
| 2315 | # Remove any generated ASCII data files |
| 2316 | dfile = os.path.splitext(filename)[0] + '.data' |
| 2317 | if os.path.exists(dfile): |
| 2318 | os.remove(dfile) |
| 2319 | # ngspice's wrdata command may append a second '.data' extension |
| 2320 | dfile = os.path.splitext(filename)[0] + '.data.data' |
| 2321 | if os.path.exists(dfile): |
| 2322 | os.remove(dfile) |
| 2323 | # Remove any generated raw files |
| 2324 | dfile = os.path.splitext(filename)[0] + '.raw' |
| 2325 | if os.path.exists(dfile): |
| 2326 | os.remove(dfile) |
| 2327 | # Remove any cosim verilog files |
| 2328 | verilog = os.path.splitext(filename)[0] + '.tv' |
| 2329 | if os.path.exists(verilog): |
| 2330 | os.remove(verilog) |
| 2331 | else: |
| 2332 | # Remove the entire simulation directory. To avoid the horrible |
| 2333 | # consequences of, e.g., "-rootdir /", insist that root_path sit |
| 2334 | # directly under the system simulation path before deleting it. |
| 2335 | test = os.path.split(root_path)[0] |
| 2336 | if test != simulation_path: |
| 2337 | print('Error: Root path is not in the system simulation path. Not deleting.') |
| 2338 | print('Root path is ' + root_path + '; simulation path is ' + simulation_path) |
| 2339 | else: |
| 2340 | subprocess.run(['rm', '-rf', root_path]) |
| 2341 | else: |
| 2342 | print('Simulation directory retained per -keep option\n') |
| 2343 | |
| 2344 | sys.exit(0) |