blob: bb0cd50a2eaa1e3905f922f631b0d7486c87f07c [file] [log] [blame] [edit]
import argparse
import re
from pathlib import Path
import json
from termcolor import colored
from collections import defaultdict, deque
from pprint import pprint as pp
import common
import os
from pathlib import Path
import shlex
# Apache-2.0 license header, as C-style block-comment body lines.  It is
# prepended verbatim to every generated SPICE output file; for Verilog-A
# files the '*' prefixes are replaced with '//' before writing (see
# cleanspicefile).  The trailing '' yields a blank line after the header
# when the list is '\n'-joined.
# NOTE(review): the name shadows the `copyright` builtin; renaming would
# touch every use site, so it is left as-is.
copyright = [
    '* Copyright 2019 The Skywater PDK Authors',
    '*',
    '* Licensed under the Apache License, Version 2.0 (the "License");',
    '* you may not use this file except in compliance with the License.',
    '* You may obtain a copy of the License at',
    '*',
    '* https://www.apache.org/licenses/LICENSE-2.0',
    '*',
    '* Unless required by applicable law or agreed to in writing, software',
    '* distributed under the License is distributed on an "AS IS" BASIS,',
    '* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.',
    '* See the License for the specific language governing permissions and',
    '* limitations under the License.',
    ''
]
def get_relative_path_to(path_from: Path, path_to: Path) -> str:
    """Return the relative path from *path_from*'s directory to *path_to*.

    Both arguments may be strings or ``Path`` objects.  The computation is
    anchored at ``path_from``'s parent directory because the result is used
    to rewrite ``include`` directives: an include inside a file is resolved
    relative to that file's directory, not to the file itself.
    """
    path_from = Path(path_from)
    path_to = Path(path_to)
    absolute_from = os.path.abspath(path_from.parent)
    absolute_to = os.path.abspath(path_to)
    # Fix: removed three leftover debug print() calls that flooded stdout
    # for every rewritten include; the return value is unchanged.
    return os.path.relpath(absolute_to, absolute_from)
def basic_cleaning(lines):
    """Strip comments and blank lines from a list of source lines.

    Removes C-style ``/* ... */`` block comments (which may span several
    lines), turns tabs into single spaces, drops empty and
    whitespace-only lines, cuts C++-style ``//`` line comments, and
    discards lines whose content is a ``*``-prefixed comment body.
    Returns the surviving lines, right-stripped.
    """
    text = '\n'.join(lines)
    # Block comments can span input lines, hence DOTALL on the joined text.
    text = re.sub(r'(?:\/\*(.*?)\*\/)', '', text, flags=re.DOTALL)
    text = text.replace('\t', ' ')
    cleaned = []
    for raw in text.split('\n'):
        if not raw.strip():
            continue
        stripped = re.sub(r'\/\/.*$', '', raw.rstrip())
        # Drop lines emptied by comment removal and '*'-comment lines.
        if not stripped or re.match(r'^\s*\*.*$', stripped):
            continue
        cleaned.append(stripped)
    return cleaned
def remap_path(old_path, ext, base="output/skywater-pdk/libraries", modname=None):
    """Map an original NDA source path to its cleaned output location.

    Parameters
    ----------
    old_path : str
        Path of the original source file (within ``skywater-src-nda``).
    ext : str
        File extension (with dot) for the generated file name.
    base : str or Path
        Root directory of the generated library tree.
    modname : str or None
        Overrides the module name extracted from the path; the special
        value ``'broken'`` routes the file into a ``broken/`` subtree.

    Returns the new path as a string, with legacy library names
    (``s8``, ``s8iom0s8``, ...) rewritten to their sky130 equivalents.
    """
    lib, mod = common.mod_extract_from_path(old_path)
    # Fix: version_extract_from_path was called twice for the same path;
    # call it once and reuse the result.
    version = common.version_extract_from_path(old_path)
    ver = "V" + ".".join(str(v) for v in version) if version else ''
    if lib is not None and lib != '???':
        if modname is not None:
            mod = modname
        elif mod is None:
            # lib is known non-None here (outer check); fall back to it.
            mod = lib
        rest, f_name = os.path.split(old_path)
        parts = rest.split('/')
        if f_name == "verilog.v":
            if parts[-2] != 'skywater-src-nda':
                d_name = f"{parts[-2]}_{parts[-1]}"
            else:
                d_name = parts[-1]
        else:
            if modname == 'broken':
                d_name = f"{parts[-3]}_{parts[-2]}"
                if lib == 's8phirs_10r':
                    d_name += '_10r'
            else:
                if parts[-2] != 'skywater-src-nda':
                    d_name = f"{parts[-2]}_{parts[-1]}"
                else:
                    d_name = parts[-1]
        # remove Models/SPECTRE names from final path
        brokendir = '' if modname != 'broken' else 'broken'
        finfilename = f'{d_name}_{Path(f_name).stem}_{mod}{ext}'
        finfilename = finfilename.replace('Models_', '')
        finfilename = finfilename.replace('SPECTRE_', '')
        # Trailing '_<digit>' on the module name is a variant suffix and is
        # dropped from the cells/ directory name.
        old_path = Path(base) / brokendir / lib / ver / 'cells' / re.sub(r'_[0-9]$', '', mod) / finfilename
    else:
        # Library could not be identified: keep the path tail after the
        # 'skywater-src-nda' marker, re-rooted under `base`.
        index = old_path.find('skywater-src-nda') + len('skywater-src-nda')
        old_path = Path(base) / ('./' + old_path[index:])
    old_path = str(old_path).replace("/s8iom0s8/", "/sky130_fd_io/")
    old_path = str(old_path).replace("/s8/", "/sky130_fd_pr/")
    old_path = old_path.replace("/VirtuosoOA/libs", "")
    new_lib = None if lib is None else common.convert_libname(lib)
    if lib is not None and new_lib is not None:
        return old_path.replace(lib, new_lib)
    return old_path
def _flush_column_group(columngroup, numcol):
    """Render one group of tokenized rows with columns padded to a shared width.

    Only rows that have exactly *numcol* columns take part in the width
    computation and receive padding; other rows (e.g. blank lines captured
    as ``[]``) render as empty strings.
    """
    widths = [max(len(row[i]) for row in columngroup if len(row) == numcol)
              for i in range(numcol)]
    return [' '.join(row[i].ljust(widths[i])
                     for i in range(numcol) if len(row) == numcol).rstrip()
            for row in columngroup]


def makefixedwidthcolumn(spicelines):
    """Align SPICE lines into fixed-width, space-separated columns.

    Consecutive lines with the same number of whitespace-separated tokens
    form a group; each group's columns are left-justified to the widest
    entry.  ``key = value`` pairs are first collapsed to ``key=value`` and
    continuation markers ``+   x`` to ``+x`` so each counts as one token.
    Returns the reformatted lines, with an empty line separating groups.

    Fix applied: the group-flush logic was duplicated verbatim (once on a
    column-count change, once at end of input); it now lives in the
    private helper ``_flush_column_group``.
    """
    lastnumcol = -1
    columngroup = []
    result = []
    mergeequalsign = re.compile(r'\s*=')
    startpluswithspace = re.compile(r'^\+\s*')
    # The trailing '' sentinel lands in the final group as an empty row,
    # preserving the original's trailing blank output line.
    for line in spicelines + ['']:
        line = mergeequalsign.sub('=', line)
        line = startpluswithspace.sub('+', line)
        # posix=False keeps quoted tokens intact, quotes included.
        columns = shlex.split(line, posix=False)
        columns = [' '.join(col.split()) for col in columns]
        if lastnumcol == -1 and columns:
            lastnumcol = len(columns)
            columngroup.append(columns)
        elif not columns or lastnumcol == len(columns):
            # Empty lines join the current group; they render as ''.
            columngroup.append(columns)
        else:
            # Column count changed: flush the finished group and start a
            # new one with the current (non-empty) row.
            result.extend(_flush_column_group(columngroup, lastnumcol))
            result.append('')
            columngroup = [columns]
            lastnumcol = len(columns)
    if columngroup:
        result.extend(_flush_column_group(columngroup, lastnumcol))
    return result
def cleanspicefile(spicelines, cellname, filepath, newfilepath, oldlibname, newlibname, includemaps):
    """Clean one SPICE file: strip comments, rewrite includes, rename libs.

    Parameters
    ----------
    spicelines : list of raw lines from the original file.
    cellname : cell the file belongs to (unused here; kept for interface
        compatibility with callers).
    filepath : original file path, used to resolve relative includes.
    newfilepath : destination path of the cleaned file.
    oldlibname / newlibname : library rename applied to non-include lines.
    includemaps : dict mapping original absolute paths to cleaned output
        paths; consulted for includes and updated in place for
        ``ahdl_include`` Verilog-A files copied alongside the output.

    Returns the cleaned, column-aligned lines, or ``None`` when the file
    includes a SPICE file that has not been processed yet (the caller
    re-queues this file).  Raises Exception when an included file is
    missing on disk.
    """
    cleanedspicelines = []
    filedir = Path(filepath).parent
    spicelines = basic_cleaning(spicelines)
    for line in spicelines:
        if 'include' in line:
            match = re.search(r'.*include\s*[\"\']?(?P<includename>[^\"\']+)[\"\']?', line)
            includecontent = match.group('includename')
            originalpath = os.path.abspath(filedir / includecontent)
            if not os.path.exists(originalpath):
                raise Exception(f'File not found: {originalpath}')
            if 'ahdl_include' in line:
                # Verilog-A sources are copied next to the cleaned SPICE
                # file, with the license header converted to '//' comments.
                with open(originalpath, 'r') as verilogafile:
                    verilogalines = verilogafile.readlines()
                verilogalines = basic_cleaning(verilogalines)
                newverilogapath = Path(newfilepath).parent / Path(originalpath).name
                verilogalines = [l.replace('*', '//') for l in copyright] + verilogalines
                with open(newverilogapath, 'w') as newverilogafile:
                    newverilogafile.write('\n'.join(verilogalines))
                includemaps[os.path.abspath(originalpath)] = os.path.abspath(newverilogapath)
            elif originalpath not in includemaps:
                # Included SPICE file not cleaned yet: tell the caller to
                # retry this file later.
                return None
            relpath = Path(get_relative_path_to(newfilepath, includemaps[originalpath]))
            if not os.path.exists(includemaps[originalpath]):
                # Fix: the original format string had two '{}' placeholders
                # but a single argument, so it raised IndexError instead of
                # the intended message.
                raise Exception('File not found: {}'.format(includemaps[originalpath]))
            if not re.search(r'[\"\']?{}[\"\']?'.format(re.escape(includecontent)), line):
                raise Exception('FAILED TO FIND {}'.format(r'[\"\']?{}[\"\']?'.format(re.escape(includecontent))))
            line = re.sub(r'[\"\']?{}[\"\']?'.format(re.escape(includecontent)), '"' + str(relpath) + '"', line)
            cleanedspicelines.append(line)
        else:
            # NOTE(review): oldlibname/newlibname may be None for
            # unrecognized libraries; str.replace then raises TypeError,
            # which the caller catches and counts as a failure — confirm
            # that is intended.
            line = line.replace(oldlibname, newlibname)
            cleanedspicelines.append(line)
    return makefixedwidthcolumn(cleanedspicelines)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'input',
        help='Input JSON file containing the product of spice2mods.py script',
        type=Path)
    parser.add_argument(
        'outputdir',
        help='Directory containing cleaned Spice sources',
        type=Path)
    parser.add_argument(
        "--path-prefix-to-remove",
        help="The substring that needs to be removed before generating subdirectories for Liberty files",
        type=Path)
    args = parser.parse_args()
    # Load the cell -> list-of-source-files mapping produced by spice2mods.py.
    with open(args.input, 'r') as infile:
        celltolibs = json.load(infile)
    # For each cell name, collect the other cell names that contain it as a
    # substring (and vice versa).
    # NOTE(review): containingcellnames is built but never read below —
    # confirm whether this is dead code.
    containingcellnames = defaultdict(list)
    # this is sorted so always the longest matching string will be taken
    cellnames = sorted(list(celltolibs.keys()), key=lambda i: (-len(i),i))
    for i, cell in enumerate(cellnames):
        for cell2 in cellnames[i + 1:]:
            if cell in cell2:
                containingcellnames[cell].append(cell2)
            elif cell2 in cell:
                containingcellnames[cell2].append(cell)
    # Build the work list: one (source, cell, target, oldlib, newlib, done)
    # tuple per input file.
    toprocess = []
    for cell, files in celltolibs.items():
        celldir = args.outputdir / cell
        # celldir.mkdir(parents=True, exist_ok=True)
        for f in files:
            try:
                oldlibname = common.lib_extract_from_path(str(f))
            except Exception as ex:
                # NOTE(review): if this raises, oldlibname keeps the value
                # from the previous iteration (or is unbound on the very
                # first file) — confirm this best-effort behavior.
                print(f)
                print(ex)
            if not oldlibname or oldlibname == '???':
                oldlibname = None
                newlibname = None
            else:
                newlibname = common.convert_libname(oldlibname)
            # newf = f.replace(oldlibname, newlibname) if oldlibname else f
            ext = Path(f).suffix
            newf = remap_path(f, ext, base=args.outputdir, modname=cell)
            # dirprefix = str(Path(f).parent)
            # if args.path_prefix_to_remove:
            #     dirprefix = dirprefix.replace(str(args.path_prefix_to_remove), '')
            # dirprefix = dirprefix.replace(oldlibname, newlibname) if oldlibname else f
            # target = findir / Path(newf).name
            toprocess.append((f, cell, newf, oldlibname, newlibname, False))
    # Sanity check: no two input files may map to the same output path;
    # abort (assert) after reporting every collision.
    newfiles = {}
    repeated = False
    for data in toprocess:
        if data[2] in newfiles:
            repeated = True
            print(f'This file will be overwritten: {(data[1], data[0])} = {newfiles[data[2]]}')
            print(f'{data[2]}')
        else:
            newfiles[data[2]] = (data[1], data[0])
    assert not repeated
    allfilescount = len(toprocess)
    numfailed = 0
    errortypes = set()
    # Work queue with retry: a file whose includes are not yet cleaned
    # (cleanspicefile returns None) is pushed back and retried once its
    # dependencies have been processed.
    # NOTE(review): if an include is never produced, its dependents are
    # requeued forever — confirm termination is guaranteed by the input.
    taskqueue = deque(toprocess)
    num = 0
    # Maps original absolute path -> cleaned absolute path, filled as files
    # complete; cleanspicefile uses it to rewrite include directives.
    includemaps = {}
    while taskqueue:
        data = taskqueue.popleft()
        try:
            filename, cell, newfilename, oldlibname, newlibname, done = data
            newfilename = Path(newfilename)
            with open(filename, 'r') as f:
                spicelines = f.readlines()
            if not os.path.isdir(newfilename.parent):
                newfilename.parent.mkdir(parents=True, exist_ok=True)
            cleanedfile = cleanspicefile(spicelines, cell, filename, newfilename, oldlibname, newlibname, includemaps)
            if cleanedfile:
                # Success: write header + cleaned lines and record the
                # mapping so dependents can resolve their includes.
                with open(newfilename, 'w') as outputspice:
                    fin = copyright + cleanedfile
                    outputspice.write('\n'.join(fin))
                includemaps[os.path.abspath(filename)] = os.path.abspath(newfilename)
                print(colored('[{:05d}/{:05d},failed={:05d}] {} : {} | {}DONE'.format(num + 1, allfilescount, numfailed, cell, filename, 'RESOLVED AND ' if done else ''), 'green'))
                num += 1
            else:
                # Unresolved includes: requeue with done=True so the final
                # success message shows it was retried.
                taskqueue.append((filename, cell, newfilename, oldlibname, newlibname, True))
                print(colored('[{:05d}/{:05d},failed={:05d}] {} : {} | INCLUDES NOT RESOLVED YET'.format(num + 1, allfilescount, numfailed, cell, filename), 'yellow'))
        except Exception as ex:
            # Best-effort batch: report, record the error type, keep going.
            print(colored('[{:05d}/{:05d},failed={:05d}] {} : {} | ERROR : {}'.format(num + 1, allfilescount, numfailed, cell, filename, str(ex)), 'red'))
            errortypes.add(type(ex).__name__)
            num += 1
            numfailed += 1
    print('{} out of {} failed'.format(numfailed, allfilescount))
    print('Error types:')
    print(errortypes)