# blob: 25814941d00a1aeec0f87b27569b3f48323d4f13 [file] [log] [blame]
#!/usr/bin/env python3
# Copyright 2020 The Skywater PDK Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
from pathlib import Path
import json
from termcolor import colored
from collections import defaultdict, deque
from pprint import pprint as pp
import common
import os
from pathlib import Path
import shlex
import sys
# Apache-2.0 license header in SPICE '*'-comment form; prepended to every
# generated output file.  The trailing '' yields a blank separator line when
# the list is joined with '\n'.
copyright = [
'* Copyright 2020 The Skywater PDK Authors',
'*',
'* Licensed under the Apache License, Version 2.0 (the "License");',
'* you may not use this file except in compliance with the License.',
'* You may obtain a copy of the License at',
'*',
'* https://www.apache.org/licenses/LICENSE-2.0',
'*',
'* Unless required by applicable law or agreed to in writing, software',
'* distributed under the License is distributed on an "AS IS" BASIS,',
'* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.',
'* See the License for the specific language governing permissions and',
'* limitations under the License.',
''
]
# Maps each source file path to the list of destination paths it was written to.
sourcetodests = defaultdict(list)
# Collects [filepath, include-line] pairs whose include target could not be found.
fileswithoutincludes = []
def get_relative_path_to(path_from: Path, path_to: Path):
    """Return the relative path from the directory containing ``path_from``
    to ``path_to``; both arguments are made absolute first."""
    source_dir = os.path.abspath(Path(path_from).parent)
    target = os.path.abspath(Path(path_to))
    return os.path.relpath(target, source_dir)
def basic_cleaning(lines):
    """Normalize raw Spice/Verilog-A source lines.

    Strips C-style block comments and '//' line comments, converts tabs to
    spaces, drops blank lines and '*' comment lines, and rewrites trailing
    '$' comments: known unit spellings are replaced by canonical unit names,
    comments containing proprietary markers are removed entirely, and any
    other comment is kept lower-cased.  '$VELOCE_PROJ_PATH' references are
    never treated as comments.

    Returns the cleaned list of lines.
    """
    # Exposed as function attributes (re-assigned on every call) so external
    # code can inspect the mappings used by the cleaner.
    basic_cleaning.unitmapping = {
        '1/c': '1/coulomb',
        '1/c^2': '1/coulomb^2',
        '1/oc': '1/celsius',
        '1/volt': 'volt',
        'a': 'amper',
        'a/m': 'amper/meter',
        'a/m^2': 'amper/meter^2',
        'angstrom': 'angstrom',
        'c': 'coulomb',
        'ev': 'electron-volt',
        'ev/c': 'electron-volt/coulomb',
        'ev/k': 'electron-volt/kelvin',
        'farads/m': 'farad/meter',
        'farads/m^2': 'farad/meter^2',
        'f/m': 'farad/meter',
        'f/m^2': 'farad/meter^2',
        'm': 'meter',
        'meter': 'meter',
        'ohms (ohms/m^2 if area defined in netlist)': 'ohm (ohm/meter^2 if area defined)',
        'temerature in oc passed from eldo *.temp statements': 'celsius',
        'units: F/um': 'farad/micrometer',
        'units: F/um^2': 'farad/micrometer^2',
        'v': 'volt',
        'v/c': 'volt/coulomb'
    }
    basic_cleaning.forbidden_contents = [
        'proplus',
        'cy_calbr_',
        'depot/icm/proj',
        's8d/s8t variation',
        's8q/s8p variation'
    ]
    # A '$' starts a trailing comment unless it introduces the
    # VELOCE_PROJ_PATH macro.  Compiled once here; previously this pattern
    # was duplicated inline four times below.
    dollar_comment = re.compile(r'\$(?!VELOCE_PROJ_PATH)(?P<content>.*)$')
    fullfile = '\n'.join(lines)
    # Remove C/C++ style block comments (may span multiple lines).
    fullfile = re.sub(r'(?:\/\*(.*?)\*\/)', '',
                      fullfile, flags=re.DOTALL)
    # Replace all tabs with a single space.
    fullfile = fullfile.replace('\t', ' ')
    lines = [line.rstrip() for line in fullfile.split('\n') if line.strip()]
    finlines = []
    for line in lines:
        # Drop '//' line comments.
        line = re.sub(r'\/\/.*$', '', line)
        if not line:
            continue
        v = dollar_comment.search(line)
        if v:
            comm = v.group('content').strip().lower()
            if comm in basic_cleaning.unitmapping:
                replacement = f'$ {basic_cleaning.unitmapping[comm]}'
            elif not any(forbid in comm for forbid in basic_cleaning.forbidden_contents):
                replacement = f'$ {comm}'
            else:
                # Proprietary content: strip the whole comment.
                replacement = ''
            line = dollar_comment.sub(replacement, line).rstrip()
            if not line:
                continue
        if re.match(r'^\s*\*.*$', line):
            # '*' comment line.
            continue
        finlines.append(line)
    return finlines
def makefixedwidthcolumn(spicelines):
    """Reformat Spice lines into aligned fixed-width columns.

    Consecutive lines with the same number of whitespace-separated tokens
    form a group; every column in a group is padded to the width of its
    widest entry.  Blank separator lines are emitted after ``endsection``
    rows and between groups (unless the flushed group's last line starts
    with ``section``).  Rows whose token count differs from the group's
    (e.g. blank lines) render as empty strings.
    """
    def tokenize(line):
        # Prefer non-POSIX splitting so quotes are preserved verbatim; fall
        # back to POSIX mode when that fails (e.g. unbalanced quotes).
        # Fixed: was a bare `except:`; shlex.split raises ValueError here.
        try:
            tokens = shlex.split(line, posix=False)
        except ValueError:
            tokens = shlex.split(line)
        # Collapse internal whitespace inside each token.
        return [' '.join(token.split()) for token in tokens]

    def aligned(group, numcol):
        # Yield each row padded to per-column widths; rows whose length
        # differs from numcol yield ''.
        widths = [max(len(row[i]) for row in group if len(row) == numcol)
                  for i in range(numcol)]
        for row in group:
            yield ' '.join(row[i].ljust(widths[i])
                           for i in range(numcol) if len(row) == numcol).rstrip()

    lastnumcol = -1
    columngroup = []
    result = []
    mergeequalsign = re.compile(r'\s*=')
    startpluswithspace = re.compile(r'^\+\s*')
    extrawhitespace = re.compile(r'\s+')
    # The trailing '' sentinel ensures the final group ends with a blank row.
    for line in spicelines + ['']:
        # Canonicalize spacing: 'a =b' -> 'a= b', '+  x' -> '+x', collapse runs.
        line = mergeequalsign.sub('= ', line)
        line = startpluswithspace.sub('+', line)
        line = extrawhitespace.sub(' ', line)
        columns = tokenize(line)
        if lastnumcol == -1 and columns:
            lastnumcol = len(columns)
            columngroup.append(columns)
        elif not columns or len(columns) == lastnumcol:
            columngroup.append(columns)
        else:
            # Column count changed: flush the current group, then start anew.
            for res in aligned(columngroup, lastnumcol):
                result.append(res)
                if res.startswith('endsection'):
                    result.append('')
            if not result[-1].startswith('section'):
                result.append('')
            columngroup = []
            lastnumcol = -1
            if columns:
                columngroup.append(columns)
                lastnumcol = len(columns)
    # Flush whatever remains (no inter-group blank-line logic here, matching
    # the original behavior).
    if columngroup:
        result.extend(aligned(columngroup, lastnumcol))
    return result
def cleanspicefile(spicelines, filepath, newfilepath, oldlibname, newlibname, includemaps, filestoprocess=None):
    """Clean one Spice file and rewrite its include directives.

    Runs basic_cleaning, strips the slow/fast/wafer corner sections from
    Spectre (.scs) files, copies referenced Verilog-A (ahdl_include) sources
    next to the output, and rewrites every include to point at the cleaned
    copy recorded in includemaps.  Non-include lines have oldlibname replaced
    with newlibname + '_' when both names are known.

    Returns:
        list[str]: cleaned, column-aligned lines on success;
        -1 when the file should be ignored (it references $VELOCE_PROJ_PATH
            or an include target that does not exist);
        None when an include target exists but has not been cleaned yet
            (the caller should retry this file later).
    """
    cleanedspicelines = []
    filedir = Path(filepath).parent
    spicelines = basic_cleaning(spicelines)
    removedssf = []
    if Path(filepath).suffix == '.scs':
        # Drop the slow_slow_f / fast_fast_s / wafer_L442 corner sections.
        clean = False
        for line in spicelines:
            if line in ['section slow_slow_f', 'section fast_fast_s', 'section wafer_L442_wafer_L442']:
                clean = True
            elif line in ['endsection slow_slow_f', 'endsection fast_fast_s', 'endsection wafer_L442_wafer_L442']:
                clean = False
            elif not clean:
                removedssf.append(line)
        spicelines = removedssf
    for line in spicelines:
        match = re.match(r'^\.?inc(lude)?\s*[\"\']?(?P<includename>[^\"\']+)[\"\']?', line.strip())
        if match:
            try:
                includecontent = match.group('includename')
            except Exception:
                print(line)
                raise
            originalpath = os.path.abspath(filedir / includecontent)
            if not os.path.exists(originalpath) or originalpath == os.path.abspath(str(filepath)):
                # XXX assuming here that s8phirs_10r needs it and s8x has it
                originalpath = str(originalpath).replace('s8phirs_10r', 's8x')
            if not os.path.exists(originalpath):
                if 'VELOCE_PROJ_PATH' in originalpath:
                    # VELOCE references are deliberately ignored.
                    return -1
                else:
                    # Record the unresolvable include and ignore the file.
                    fileswithoutincludes.append([filepath, line])
                    print(colored(str([filepath, originalpath, line]), 'red'))
                    return -1
            else:
                if 'ahdl_include' in line:
                    # Copy the referenced Verilog-A source next to the output,
                    # with the license header translated to '//' comments.
                    with open(originalpath, 'r') as verilogafile:
                        verilogalines = verilogafile.readlines()
                    verilogalines = basic_cleaning(verilogalines)
                    newverilogapath = Path(newfilepath).parent / Path(originalpath).name
                    verilogalines = [l.replace('*','//') for l in copyright] + verilogalines
                    with open(newverilogapath, 'w') as newverilogafile:
                        newverilogafile.write('\n'.join(verilogalines))
                    sourcetodests[str(originalpath)].append(str(newverilogapath))
                    includemaps[os.path.abspath(originalpath)] = os.path.abspath(newverilogapath)
                elif Path(originalpath).suffix == '.hed':
                    # we do not include hed files
                    continue
                elif filestoprocess is not None and originalpath not in filestoprocess:
                    # this takes care of removed one-includers
                    print(f'SWITCHING PATH {originalpath} to s8x')
                    originalpath = str(originalpath).replace('s8phirs_10r', 's8x')
                if not originalpath in includemaps:
                    # Target exists but is not cleaned yet: retry later.
                    print(f'MISSING INCLUDE: {line}')
                    return None
                relpath = Path(get_relative_path_to(newfilepath, includemaps[originalpath]))
                if not os.path.exists(includemaps[originalpath]):
                    # Fixed: the format string has two placeholders but was
                    # given one argument, so raising it produced an IndexError
                    # that masked the intended message.
                    raise Exception('File not found {} : {}'.format(originalpath, includemaps[originalpath]))
                if not re.search(r'[\"\']?{}[\"\']?'.format(re.escape(includecontent)), line):
                    # Fixed: the print was unreachable after the raise; emit
                    # the offending line first.
                    print(line)
                    raise Exception('FAILED TO FIND {}'.format(r'[\"\']?{}[\"\']?'.format(re.escape(includecontent))))
                line = f'.include {relpath}'
                cleanedspicelines.append(line)
        else:
            # Fixed: guard against None lib names (the main driver passes
            # None for unrecognized libraries), which previously raised
            # TypeError on every non-include line of such files.
            if oldlibname is not None and newlibname is not None:
                line = line.replace(oldlibname, newlibname + '_')
            cleanedspicelines.append(line)
    fixedcolumns = makefixedwidthcolumn(cleanedspicelines)
    # Indent the aligned lines by the current curly-brace nesting depth,
    # except continuation lines ('+') and 'N : type=' mode lines.
    bracecount = 0
    finlines = []
    RE_MODE_LINE = re.compile(r'(?P<modeid>\d+)\s*:\s*type=\s*(?P<modetype>.*)(\n|$)')
    for line in fixedcolumns:
        bracecount -= line.count('}')
        if not line.startswith('+') and not RE_MODE_LINE.match(line):
            finlines.append(' ' * bracecount + line)
        else:
            finlines.append(line)
        bracecount += line.count('{')
    return finlines
if __name__ == '__main__':
    # CLI: consumes the spice2mods.py JSON product and the unassigned-file
    # list, and emits cleaned Spice sources plus optional reports.
    cli = argparse.ArgumentParser()
    cli.add_argument(
        'input', type=Path,
        help='Input JSON file containing the product of spice2mods.py script')
    cli.add_argument(
        'outputdir', type=Path,
        help='Directory containing cleaned Spice sources')
    cli.add_argument(
        'unassigned_list', type=Path,
        help='Input file with files that are not assigned to any cell')
    cli.add_argument(
        '--sourcetodests', type=Path,
        help='Mapping from source files to destination files')
    cli.add_argument(
        '--fileswithoutincludes', type=Path,
        help='Contains files with includes that are not present in the source repository')
    cli.add_argument(
        '--path-prefix-to-remove', type=Path,
        help='The substring that needs to be removed before generating subdirectories for Liberty files')
    args = cli.parse_args()
    # Load the cell -> library-files mapping produced by spice2mods.py.
    with open(args.input, 'r') as infile:
        celltolibs = json.load(infile)
    # Map each cell name to the longer cell names containing it as a substring.
    # NOTE(review): containingcellnames does not appear to be used anywhere
    # later in this script -- possibly dead code; verify before removing.
    containingcellnames = defaultdict(list)
    # this is sorted so always the longest matching string will be taken
    cellnames = sorted(list(celltolibs.keys()), key=lambda i: (-len(i),i))
    for i, cell in enumerate(cellnames):
        for cell2 in cellnames[i + 1:]:
            if cell in cell2:
                containingcellnames[cell].append(cell2)
            elif cell2 in cell:
                containingcellnames[cell2].append(cell)
toprocess = []
for cell, files in celltolibs.items():
celldir = args.outputdir / cell
# celldir.mkdir(parents=True, exist_ok=True)
for f in files:
try:
oldlibname = common.lib_extract_from_path(str(f))
except Exception as ex:
print(f)
print(ex)
raise
if not oldlibname or oldlibname == '???':
oldlibname = None
newlibname = None
else:
newlibname = common.convert_libname(oldlibname)
# newf = f.replace(oldlibname, newlibname) if oldlibname else f
ext = Path(f).suffix
newf = remap_path(f, ext, base=args.outputdir, modname=cell)
# dirprefix = str(Path(f).parent)
# if args.path_prefix_to_remove:
# dirprefix = dirprefix.replace(str(args.path_prefix_to_remove), '')
# dirprefix = dirprefix.replace(oldlibname, newlibname) if oldlibname else f
# target = findir / Path(newf).name
toprocess.append((f, cell, newf, oldlibname, newlibname, False))
newfiles = {}
repeated = False
for data in toprocess:
if data[2] in newfiles:
repeated = True
# print(f'This file will be overwritten: {(data[1], data[0])} = {newfiles[data[2]]}')
# print(f'{data[2]}')
print(f'{data[2]} : {data[0]} {newfiles[data[2]][1]}')
else:
newfiles[data[2]] = (data[1], data[0])
assert not repeated
allfilescount = len(toprocess)
numfailed = 0
errortypes = set()
filestoprocess = set()
for data in toprocess:
filestoprocess.add(os.path.abspath(data[0]))
taskqueue = deque(toprocess)
num = 0
includemaps = {}
    # Process the queue; files whose includes are not yet cleaned are pushed
    # back and retried once their dependencies have been produced.
    # NOTE(review): unlike the unassigned-file loop below there is no retry
    # limit here, so a permanently unresolvable include would loop forever --
    # verify cleanspicefile always eventually returns a list/-1 or raises.
    while taskqueue:
        data = taskqueue.popleft()
        try:
            filename, cell, newfilename, oldlibname, newlibname, done = data
            newfilename = Path(newfilename)
            with open(filename, 'r') as f:
                spicelines = f.readlines()
            cleanedfile = cleanspicefile(spicelines, filename, newfilename, oldlibname, newlibname, includemaps)
            # NOTE(review): cleanspicefile may also return -1 (ignored file);
            # -1 is truthy, so it enters this branch and later fails on
            # `copyright + cleanedfile`, counting as an error -- confirm
            # whether that is intended in this first phase.
            if cleanedfile:
                if not os.path.isdir(newfilename.parent):
                    newfilename.parent.mkdir(parents=True, exist_ok=True)
                with open(newfilename, 'w') as outputspice:
                    # Prepend the Apache-2.0 header to the cleaned output.
                    fin = copyright + cleanedfile
                    outputspice.write('\n'.join(fin))
                # Record the mapping so later files can rewrite their includes
                # to point at this cleaned copy.
                sourcetodests[str(filename)].append(str(newfilename))
                includemaps[os.path.abspath(filename)] = os.path.abspath(newfilename)
                print(colored('[{:05d}/{:05d},failed={:05d}] {} : {} | {}DONE'.format(num + 1, allfilescount, numfailed, cell, filename, 'RESOLVED AND ' if done else ''), 'green'))
                num += 1
            else:
                # Includes not resolved yet: requeue with the retried flag set.
                taskqueue.append((filename, cell, newfilename, oldlibname, newlibname, True))
                print(colored('[{:05d}/{:05d},failed={:05d}] {} : {} | INCLUDES NOT RESOLVED YET'.format(num + 1, allfilescount, numfailed, cell, filename), 'yellow'))
        except Exception as ex:
            print(colored('[{:05d}/{:05d},failed={:05d}] {} : {} | ERROR : {}'.format(num + 1, allfilescount, numfailed, cell, filename, str(ex)), 'red'))
            errortypes.add(type(ex).__name__)
            num += 1
            numfailed += 1
    print('Error types:')
    print(errortypes)
    if numfailed > 0:
        print('Cleaning spice files: {} out of {} failed'.format(numfailed, allfilescount), file=sys.stderr)
        sys.exit(1)
    if len(taskqueue) > 0:
        print('Cleaning spice files: task queue is not cleared'.format(numfailed, allfilescount), file=sys.stderr)
        sys.exit(1)
print('***** Processing unassigned Spice files *****')
with open(args.unassigned_list, 'r') as unassignedfile:
unassignedfiles = unassignedfile.readlines()
unassignedfiles = [f.strip() for f in unassignedfiles]
includeforms = ['.include', '.inc', 'include']
toprocess = []
index = 0
for filename in unassignedfiles:
lib, mod = common.mod_extract_from_path(str(filename))
if lib is not None:
newlib = common.convert_libname(lib)
new_path = remap_path(filename, Path(filename).suffix, base=args.outputdir, isunassigned=True)
toprocess.append([new_path, filename, newlib, lib, mod, 0, index])
index += 1
    # Detect destination collisions among unassigned files.  A collision
    # between an s8x file and an s8phirs_10r file of the same name gets
    # special treatment; any other collision is a hard conflict.
    newfiles = {}
    repeated = False
    repcount = 0
    # Indices (into toprocess) of trivial single-include wrappers to drop.
    toremove = []
    for index, data in enumerate(toprocess):
        if data[0] in newfiles:
            # since s8phirs_10r and s8x have lots of files with the same name, they need to be specially treated
            prevsource = newfiles[data[0]]
            if ('s8x', 's8phirs_10r') in [(data[3], prevsource[0]), (prevsource[0], data[3])] and Path(data[1]).name == Path(prevsource[1]).name:
                torename = False
                numincludes = 0
                print('S8X-S8PHIRS_10R conflict')
                # includeobj is the s8phirs_10r side (expected to be a thin
                # wrapper), containobj the s8x side; each is
                # (libname, source path, toprocess index).
                includeobj = (data[3], data[1], data[6]) if data[3] == 's8phirs_10r' else prevsource
                containobj = (data[3], data[1], data[6]) if data[3] == 's8x' else prevsource
                includer = data[1] if data[3] == 's8phirs_10r' else prevsource[1]
                # verify if includer has only one include
                with open(includer, 'r') as inc:
                    lines = str(inc.read()).split('\n')
                for line in lines:
                    line = line.strip()
                    if not line:
                        continue
                    if line.startswith('*'):
                        continue
                    match = re.match(r'^\.?inc(lude)?\s*[\"\']?(?P<includename>[^\"\']+)[\"\']?', line)
                    if not match:
                        # Non-include content: not a trivial wrapper.
                        torename = True
                        break
                    try:
                        includecontent = match.group('includename')
                    except:
                        print(line)
                        raise
                    if numincludes > 0:
                        # More than one include: not a trivial wrapper.
                        torename = True
                        break
                    originalpath = os.path.abspath(Path(includer).parent / includecontent)
                    numincludes += 1
                    if Path(originalpath).name == Path(data[1]).name:
                        continue
                    # The single include points elsewhere: keep both files.
                    torename = True
                    break
                if torename:
                    # Keep both files: rename the s8x destination to *_base.
                    pathtofix = Path(toprocess[containobj[2]][0])
                    toprocess[containobj[2]][0] = f'{pathtofix.parent / pathtofix.stem}_base{pathtofix.suffix}'
                elif numincludes == 1:
                    # Trivial wrapper: schedule the s8phirs_10r item for removal.
                    toremove.append(includeobj[2])
            else:
                # Genuine conflict: dump both sources for inspection.
                repeated = True
                print(f'This file will be overwritten: {(data[3], data[1])} = {newfiles[data[0]]} => {data[0]}')
                print(f'{data[1]}')
                with open(data[1], 'r') as fil:
                    print(colored(''.join(fil.readlines()), 'cyan'))
                print(f'{newfiles[data[0]][1]}')
                with open(newfiles[data[0]][1], 'r') as fil:
                    print(colored(''.join(fil.readlines()), 'magenta'))
                repcount += 1
        else:
            # First claim on this destination: (lib, source, index).
            newfiles[data[0]] = (data[3], data[1], data[6])
if len(toremove) > 0:
newtoprocess = []
for index, data in enumerate(toprocess):
if index not in toremove:
newtoprocess.append(data)
else:
print(colored(f'Omitted single-include file {data[1]}', 'magenta'))
toprocess = newtoprocess
repeated = False
newfiles = {}
repcount = 0
for data in toprocess:
if data[0] in newfiles:
repeated = True
print(f'This file will be overwritten: {(data[3], data[1])} = {newfiles[data[0]]} => {data[0]}')
print(f'{data[1]}')
with open(data[1], 'r') as fil:
print(colored(''.join(fil.readlines()), 'cyan'))
print(f'{newfiles[data[0]][1]}')
with open(newfiles[data[0]][1], 'r') as fil:
print(colored(''.join(fil.readlines()), 'magenta'))
repcount += 1
else:
newfiles[data[0]] = (data[3], data[1], data[6])
assert not repeated, f'There are {repcount} conflicts'
    # Second-phase driver: like the first loop, but each item carries a miss
    # counter and is declared failed after more than 5 unresolved-include
    # retries; -1 results are counted as deliberately ignored.
    allfilescount = len(toprocess)
    numfailed = 0
    numignored = 0
    num = 0
    taskqueue = deque(toprocess)
    failedincludes = []
    for data in toprocess:
        filestoprocess.add(os.path.abspath(data[1]))
    while taskqueue:
        new_path, filename, newlib, lib, mod, misscount, index = taskqueue.popleft()
        print(filename)
        try:
            with open(filename, 'r') as f:
                spicelines = f.readlines()
            cleanedspicefile = cleanspicefile(spicelines, filename, new_path, lib, newlib, includemaps, filestoprocess)
            if cleanedspicefile == -1:
                # Deliberately ignored (VELOCE reference or include target
                # missing from the source tree).
                print(colored('[{:05d}/{:05d},failed={:05d},ignored={:05d}] {} | DONE'.format(num + 1, allfilescount, numfailed, numignored, filename), 'blue'))
                num += 1
                numignored += 1
            elif cleanedspicefile:
                if not os.path.isdir(Path(new_path).parent):
                    Path(new_path).parent.mkdir(parents=True, exist_ok=True)
                with open(new_path, 'w') as outputspice:
                    # Prepend the Apache-2.0 header to the cleaned output.
                    fin = copyright + cleanedspicefile
                    outputspice.write('\n'.join(fin))
                sourcetodests[str(filename)].append(str(new_path))
                includemaps[os.path.abspath(str(filename))] = os.path.abspath(str(new_path))
                print(colored('[{:05d}/{:05d},failed={:05d},ignored={:05d}] {} | DONE'.format(num + 1, allfilescount, numfailed, numignored, filename), 'green'))
                num += 1
            elif misscount > 5:
                # Give up after more than 5 passes with unresolved includes.
                print(colored('[{:05d}/{:05d},failed={:05d},ignored={:05d}] {} | INCLUDE FAILED'.format(num + 1, allfilescount, numfailed, numignored, filename), 'red'))
                num += 1
                numfailed += 1
                failedincludes.append((filename,new_path))
            else:
                # Requeue with an incremented miss counter.
                taskqueue.append((new_path, filename, newlib, lib, mod, misscount + 1, index))
                print(colored('[{:05d}/{:05d},failed={:05d},ignored={:05d}] {} | INCLUDES NOT RESOLVED YET'.format(num + 1, allfilescount, numfailed, numignored, filename), 'yellow'))
        except Exception as ex:
            print(colored('[{:05d}/{:05d},failed={:05d},ignored={:05d}] {} | ERROR : {}'.format(num + 1, allfilescount, numfailed, numignored, filename, str(ex)), 'red'))
            errortypes.add(type(ex).__name__)
            num += 1
            numfailed += 1
with open(args.sourcetodests, 'w') as srctodst:
json.dump(sourcetodests, srctodst, indent=2)
if len(failedincludes) > 0:
print('Cleaning spice files: {} out of {} failed'.format(numfailed, allfilescount), file=sys.stderr)
sys.exit(1)
with open(args.fileswithoutincludes, 'w') as fwi:
for line in fileswithoutincludes:
fwi.write(f'{line}\n')