#!/usr/bin/env python3
# Copyright 2020 The Skywater PDK Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

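"""Collapse per-drive-strength `*.full.v` netlists into one file per cell.

The script walks `input_dir` recursively and groups the `*.full.v` files whose
names differ only by a drive-strength suffix (a one- or two-digit number, `m`,
`lp` or `lp2`).  Each group is checked to be identical apart from that suffix
in the module name; if the check passes, one set of files is renamed to the
base cell name, references in the surrounding cell files are rewritten, and the
redundant per-strength copies are removed.  Pass `--dry_run` to only print the
planned renames and removals.
"""
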
import argparse
from pathlib import Path
from collections import defaultdict
from pprint import pprint as pp
from os import system
import re


def extract_core_strength(filename):
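    """Split a `*.full.v` file name into its base cell name and drive strength.

    The cell (core) name is taken from the file's parent directory.  When the
    file name without the `.full.v` extension does not end with that core name,
    the trailing `_<suffix>` is split off and returned as the drive strength;
    otherwise the strength is None.  For example (the library and path below
    are purely illustrative):

        >>> extract_core_strength(Path('cells/inv/sky130_fd_sc_hd__inv_2.full.v'))
        ('sky130_fd_sc_hd__inv', '2')
    """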
    core = filename.parent.name
    assert core in filename.name, (core, filename.name)
    filewithoutext = filename.name.replace('.full.v', '')
    if not filewithoutext.endswith(core):
        # Everything after the last underscore is the drive-strength suffix.
        filewithoutext, drivestrength = filewithoutext.rsplit('_', 1)
        return filewithoutext, drivestrength
    else:
        return filewithoutext, None

if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "input_dir",
        help="Path to the directory tree containing the cell *.full.v netlists",
        type=Path)

    parser.add_argument(
        "--dry_run",
        help="Do not perform actions, only print the required actions",
        action='store_true')

    args = parser.parse_args()

    files = sorted(args.input_dir.rglob('*.full.v'))

    newfiles = []

    # Keep only the netlists whose name ends in a drive-strength-like suffix
    # (a one- or two-digit number, or one of the m/lp/lp2 variants).
    for f in files:
        if re.match(r'.*_([0-9]{1,2}|m|lp|lp2)\.full\.v$', str(f)):
            newfiles.append(f)
    files = newfiles

    print(f'{len(files)} *.full.v files with a drive-strength suffix found')

    groupedbydir = defaultdict(lambda: defaultdict(list))
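    # Group the netlists by directory and base cell name; every entry is a
    # (drive strength, path) pair.  The result looks roughly like this (the
    # library and cell names are only illustrative):
    #   {Path('.../cells/inv'):
    #       {'sky130_fd_sc_hd__inv': [('1', Path(...)), ('2', Path(...))]}}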
    for i in files:
        filecore, drivestrength = extract_core_strength(i)
        if drivestrength is not None:
            groupedbydir[i.parent][filecore].append((drivestrength, i))

    pp(groupedbydir)

    for filesdir, filegroups in groupedbydir.items():
        for filecore, files in filegroups.items():
            allsame = True
            print(files)
            if len(files) >= 1:
                first_path = str(files[0][1])
                if '_u_mux_2' in first_path or '_u_mux_4_' in first_path:
                    # u_mux_* cells do not follow the naming convention: the
                    # trailing digit is the number of outputs, not a drive strength.
                    continue
                print(f'Comparing drive-strength variants of {filecore}')
                print(files[0][0])
            # Compare every pair of drive-strength variants; the netlists have
            # to be identical except for the strength suffix.
            for i, strengthfil1 in enumerate(files):
                for strengthfil2 in files[i + 1:]:
                    with open(strengthfil1[1], 'r') as f1:
                        lines1 = f1.readlines()
                    with open(strengthfil2[1], 'r') as f2:
                        lines2 = f2.readlines()

                    if len(lines1) != len(lines2):
                        allsame = False
                        print(f'Different number of lines: {len(lines1)} != {len(lines2)}')
                        print(f'Different number of lines: {strengthfil1[1]} != {strengthfil2[1]}')
                        break
                    for line1, line2 in zip(lines1, lines2):
                        line1 = line1.replace(' ', '').strip()
                        line2 = line2.replace(' ', '').strip()

                        if line1.startswith('module') and line2.startswith('module'):
                            # Swap the trailing strength in file1's module name
                            # for file2's strength; the result has to match the
                            # module name found in file2.
                            split1 = line1.split('(')[0].split('_')
                            converted = '_'.join(split1[:-1] + [strengthfil2[0]])
                            if converted != line2.split('(')[0]:
                                print(f'Invalid module {converted} {line2}')
                                allsame = False
                                break
                        elif line1 != line2:
                            if len(line1) != len(line2):
                                print(f'Length of lines differ:\n{line1}\n{line2}')
                                allsame = False
                                break
                            # Character-level check: the only difference allowed
                            # is the drive-strength character itself (this only
                            # works for single-character strengths).
                            for c1, c2 in zip(line1, line2):
                                if c1 != c2:
                                    if c1 != strengthfil1[0] and c2 != strengthfil2[0]:
                                        print(f'Differ: {line1} != {line2}')
                                        print(f'Differ: {strengthfil1[0]} != {c1}')
                                        print(f'Differ: {strengthfil2[0]} != {c2}')
                                        allsame = False
                                        break
                    if not allsame:
                        break

                if not allsame:
                    break
            if allsame:
                corewithstrengthregex = re.compile(
                    r'(?P<full>{}_(?P<strength>([0-9]{{1,2}}|m|lp|lp2)))'.format(
                        re.escape(filecore)))
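                # For an illustrative base name such as 'sky130_fd_sc_hd__inv'
                # this matches e.g. 'sky130_fd_sc_hd__inv_2' and captures '2'
                # as the strength.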
                filestomodify = filesdir.glob(f'{filecore}*')
                renamedtypes = set()
                for f in sorted(filestomodify):
                    if 'u_mux_2_1.v' in str(f):
                        continue
                    print(f)
                    # Treat the last two dot-separated name components as the
                    # file "type", e.g. 'full.v'.
                    ftype = '.'.join(str(f).split('.')[-2:])
                    m = corewithstrengthregex.search(str(f))
                    if m:
                        if ftype not in renamedtypes:
                            toreplace = m.group('full')
                            print(f'toreplace: {toreplace}')
                            newname = Path(str(f).replace(toreplace, filecore))
                            print(f'RENAME: {f} to {newname}')
                            if not args.dry_run:
                                if f.suffix == '.v':
                                    # Rewrite the Verilog so module names use the
                                    # base cell name, then drop the original file.
                                    with open(f, 'r') as old:
                                        with open(newname, 'w') as new:
                                            for line in old:
                                                rline = line.replace(toreplace, filecore)
                                                new.write(rline)
                                    f.unlink()
                                else:
                                    f.rename(newname)
                                # HACK, rewrite to python: replace all occurrences of the
                                # name-with-strength with the base name in the other cell files.
                                cells_dir = f.parents[1]
                                system(f" for x in `grep -r -l -E '{filecore}_[0-9]?' {cells_dir} --exclude=*wrap.lib --exclude=*wrap.json --exclude=*cell.lib --exclude=*cell.json`;do sed -i 's/{filecore}_[0-9] /{filecore} /g' $x; done;")
                            renamedtypes.add(ftype)
                        else:
                            print(f'REMOVE: {f}')
                            if not args.dry_run:
                                f.unlink()