path: root/vere/ext/nasm/asm/tokhash.pl
author    polwex <polwex@sortug.com>    2025-10-05 21:56:51 +0700
committer polwex <polwex@sortug.com>    2025-10-05 21:56:51 +0700
commit    fcedfddf00b3f994e4f4e40332ac7fc192c63244 (patch)
tree      51d38e62c7bdfcc5f9a5e9435fe820c93cfc9a3d /vere/ext/nasm/asm/tokhash.pl
claude is gud
Diffstat (limited to 'vere/ext/nasm/asm/tokhash.pl')
-rwxr-xr-x  vere/ext/nasm/asm/tokhash.pl  282
1 file changed, 282 insertions, 0 deletions
diff --git a/vere/ext/nasm/asm/tokhash.pl b/vere/ext/nasm/asm/tokhash.pl
new file mode 100755
index 0000000..1f19647
--- /dev/null
+++ b/vere/ext/nasm/asm/tokhash.pl
@@ -0,0 +1,282 @@
+#!/usr/bin/perl
+## --------------------------------------------------------------------------
+##
+## Copyright 1996-2020 The NASM Authors - All Rights Reserved
+## See the file AUTHORS included with the NASM distribution for
+## the specific copyright holders.
+##
+## Redistribution and use in source and binary forms, with or without
+## modification, are permitted provided that the following
+## conditions are met:
+##
+## * Redistributions of source code must retain the above copyright
+## notice, this list of conditions and the following disclaimer.
+## * Redistributions in binary form must reproduce the above
+## copyright notice, this list of conditions and the following
+## disclaimer in the documentation and/or other materials provided
+## with the distribution.
+##
+## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+## CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+## INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+## NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+## EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+##
+## --------------------------------------------------------------------------
+
+#
+# Generate a perfect hash for token parsing
+#
+# Usage: tokhash.pl {h|c} insnsn.c regs.dat tokens.dat
+#
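+# A sketch of how the script might be driven (the real rules live in NASM's
+# build files, not in this file): the first argument selects the output and
+# the generated text goes to stdout, so plausible invocations are
+#
+#   perl tokhash.pl h insnsn.c regs.dat tokens.dat > tokens.h
+#   perl tokhash.pl c insnsn.c regs.dat tokens.dat > tokhash.c
+#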
+
+require 'phash.ph';
+
+my($output, $insnsn_c, $regs_dat, $tokens_dat) = @ARGV;
+
+%tokens = ();
+@tokendata = ();
+
+#
+# List of condition codes
+#
+@conditions = ('a', 'ae', 'b', 'be', 'c', 'e', 'g', 'ge', 'l', 'le',
+ 'na', 'nae', 'nb', 'nbe', 'nc', 'ne', 'ng', 'nge', 'nl',
+ 'nle', 'no', 'np', 'ns', 'nz', 'o', 'p', 'pe', 'po', 's', 'z');
+
+#
+# Read insnsn.c
+#
+open(ID, '<', $insnsn_c) or die "$0: cannot open $insnsn_c: $!\n";
+while (defined($line = <ID>)) {
+ next unless ($line =~ /^\s*\"([\?\@a-z0-9_]+)\"/);
+
+ my $token = $1;
+ next if (defined($tokens{$token})); # This should never happen
+
+ $tokens{$token} = scalar @tokendata;
+ push(@tokendata, "\"${token}\", ".length($token).
+ ", TOKEN_INSN, 0, 0, I_\U${token}");
+}
+close(ID);
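+
+# For illustration (the insnsn.c line is an assumed shape; the file is a
+# generated list of instruction names as C string literals): a line such as
+#
+#   "adc",
+#
+# matches the pattern above and becomes the tokendata entry
+#
+#   "adc", 3, TOKEN_INSN, 0, 0, I_ADC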
+
+#
+# Read regs.dat
+#
+open(RD, '<', $regs_dat) or die "$0: cannot open $regs_dat: $!\n";
+while (defined($line = <RD>)) {
+ if ($line =~ /^([\?\@a-z0-9_-]+)\s*\S+\s*\S+\s*[0-9]+\s*(\S*)/) {
+ $reg = $1;
+ $reg_flag = $2;
+
+ if ($reg =~ /^(.*[^0-9])([0-9]+)\-([0-9]+)(|[^0-9].*)$/) {
+ $nregs = $3-$2+1;
+ $reg = $1.$2.$4;
+ $reg_nr = $2;
+ $reg_prefix = $1;
+ $reg_suffix = $4;
+ } else {
+ $nregs = 1;
+ undef $reg_prefix;
+ undef $reg_suffix;
+ }
+
+ while ($nregs--) {
+ if (defined($tokens{$reg})) {
+ die "Duplicate definition: $reg\n";
+ }
+ $tokens{$reg} = scalar @tokendata;
+ $reg_flag = '0' if ($reg_flag eq '');
+ push(@tokendata, "\"${reg}\", ".length($reg).", TOKEN_REG, 0, ${reg_flag}, R_\U${reg}\E");
+
+ if (defined($reg_prefix)) {
+ $reg_nr++;
+ $reg = sprintf("%s%u%s", $reg_prefix, $reg_nr, $reg_suffix);
+ } else {
+ # Not a dashed sequence
+ die if ($nregs);
+ }
+ }
+ }
+}
+close(RD);
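+
+# Worked example (the regs.dat line is illustrative; columns are shown only
+# schematically): an entry such as
+#
+#   xmm0-15     xmmreg      xmmreg      77
+#
+# matches the dashed-range pattern above with prefix "xmm", first index 0,
+# last index 15 and an empty suffix, so the loop registers sixteen tokens,
+# "xmm0" through "xmm15", mapped to R_XMM0 ... R_XMM15.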
+
+#
+# Read tokens.dat
+#
+open(TD, '<', $tokens_dat) or die "$0: cannot open $tokens_dat: $!\n";
+while (defined($line = <TD>)) {
+ $line =~ s/\s*(|\#.*)$//;
+ if ($line =~ /^\%\s+(.*)$/) {
+ $pattern = $1;
+ } elsif ($line =~ /^(\S+)/) {
+ $token = $1;
+
+ if (defined($tokens{$token})) {
+ die "Duplicate definition: $token\n";
+ }
+ $tokens{$token} = scalar @tokendata;
+
+ $data = $pattern;
+ if ($data =~ /^(.*)\{(.*)\}(.*)$/) {
+ my ($head, $px, $tail) = ($1, $2, $3);
+
+ $px =~ s/\?/\\?/g;
+ $px =~ s/\*/(.*)/g;
+ if ($token =~ /$px/i) {
+ $data = $head."\U$1".$tail;
+ } else {
+ die "$0: token $token doesn't match $px\n";
+ }
+ }
+
+ $data =~ s/\*/\U$token/g;
+ $data =~ s/\?//g;
+
+ push(@tokendata, "\"$token\", ".length($token).", $data");
+ }
+}
+close(TD);
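+
+# Worked example (illustrative lines, not quoted from tokens.dat): given the
+# pattern line
+#
+#   % TOKEN_PREFIX, 0, 0, P_*
+#
+# a following token line "a32" produces the tokendata entry
+#
+#   "a32", 3, TOKEN_PREFIX, 0, 0, P_A32
+#
+# because the "*" in the pattern is replaced by the upper-cased token name.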
+
+$max_len = 0;
+foreach $token (keys(%tokens)) {
+ if (length($token) > $max_len) {
+ $max_len = length($token);
+ }
+}
+
+if ($output eq 'h') {
+ #
+ # tokens.h
+ #
+
+ print "/*\n";
+ print " * This file is generated from insns.dat, regs.dat and token.dat\n";
+ print " * by tokhash.pl; do not edit.\n";
+ print " */\n";
+ print "\n";
+
+ print "#ifndef NASM_TOKENS_H\n";
+ print "#define NASM_TOKENS_H\n";
+ print "\n";
+ print "#define MAX_KEYWORD $max_len /* length of longest keyword */\n";
+ print "\n";
+ print "#endif /* NASM_TOKENS_H */\n";
+} elsif ($output eq 'c') {
+ #
+ # tokhash.c
+ #
+
+ @hashinfo = gen_perfect_hash(\%tokens);
+ if (!@hashinfo) {
+ die "$0: no hash found\n";
+ }
+
+ # Paranoia...
+ verify_hash_table(\%tokens, \@hashinfo);
+
+ ($n, $sv, $g) = @hashinfo;
+ die if ($n & ($n-1));
+ $n <<= 1;
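+
+ # How @hashinfo is used below (a summary of this script's use of it, not
+ # a spec of phash.ph): $n is a power of two, doubled here because
+ # hashdata[] interleaves the two half-tables (k1 selects even slots,
+ # k2 odd slots); $sv holds the two 32-bit halves of the CRC-64 seed;
+ # $g is the table itself, whose undefined holes are emitted as
+ # INVALID_HASH_ENTRY. The generated C computes
+ # ix = hashdata[k1] + hashdata[k2] and confirms tokendata[ix] with a
+ # length check and memcmp.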
+
+ print "/*\n";
+ print " * This file is generated from insns.dat, regs.dat and token.dat\n";
+ print " * by tokhash.pl; do not edit.\n";
+ print " */\n";
+ print "\n";
+
+ print "#include \"compiler.h\"\n";
+ print "#include \"nasm.h\"\n";
+ print "#include \"hashtbl.h\"\n";
+ print "#include \"insns.h\"\n";
+ print "#include \"stdscan.h\"\n";
+ print "\n";
+
+ # These somewhat odd sizes and their ordering are chosen for the
+ # relative ranges of the types; they keep the structure compact,
+ # 24 bytes on 64-bit machines and 16 bytes on 32-bit machines.
+ print "struct tokendata {\n";
+ print " const char *string;\n";
+ print " uint16_t len;\n";
+ print " int16_t tokentype;\n";
+ print " int16_t aux;\n";
+ print " uint16_t tokflag;\n";
+ print " int32_t num;\n";
+ print "};\n";
+ print "\n";
+
+ print "int nasm_token_hash(const char *token, struct tokenval *tv)\n";
+ print "{\n";
+
+ # Put a large value in unused slots. This makes it extremely unlikely
+ # that any combination that involves an unused slot will pass the range test.
+ # This speeds up rejection of unrecognized tokens, i.e. identifiers.
+ print "#define INVALID_HASH_ENTRY (65535/3)\n";
+
+ printf " static const int16_t hashdata[%d] = {\n", $n;
+ for ($i = 0; $i < $n; $i++) {
+ my $h = ${$g}[$i];
+ print " ", defined($h) ? $h : 'INVALID_HASH_ENTRY', ",\n";
+ }
+ print " };\n";
+
+ printf " static const struct tokendata tokendata[%d] = {\n",
+ scalar(@tokendata);
+ foreach $d (@tokendata) {
+ print " { ", $d, " },\n";
+ }
+ print " };\n";
+
+ print " uint32_t k1, k2;\n";
+ # For correct overflow behavior, "ix" should be unsigned of the same
+ # width as the hash arrays.
+ print " uint16_t ix;\n";
+ print " const struct tokendata *data;\n";
+ printf " char lcbuf[%d];\n", $max_len+1;
+ print " const char *p = token;\n";
+ print " char c, *q = lcbuf;\n";
+ print " size_t len = 0;\n";
+ printf " uint64_t crc = UINT64_C(0x%08x%08x);\n", $$sv[0], $$sv[1];
+ print "\n";
+ print " while ((c = *p++)) {\n";
+ printf " if (++len > %d)\n", $max_len;
+ print " goto notfound;\n";
+ print " *q++ = c = nasm_tolower(c);\n";
+ print " crc = crc64_byte(crc, c);\n";
+ print " };\n";
+ print "\n";
+ printf " k1 = ((uint32_t)crc & 0x%x) + 0;\n", $n-2;
+ printf " k2 = ((uint32_t)(crc >> 32) & 0x%x) + 1;\n", $n-2;
+ print "\n";
+ printf " ix = hashdata[k1] + hashdata[k2];\n",
+ $n-2, $n-2;
+ printf " if (ix >= %d)\n", scalar(@tokendata);
+ print " goto notfound;\n";
+ print "\n";
+ print " data = &tokendata[ix];\n";
+ print " if (data->len != len)\n";
+ print " goto notfound;\n";
+ print " if (memcmp(data->string, lcbuf, len))\n";
+ print " goto notfound;\n";
+ print "\n";
+ print " tv->t_integer = data->num;\n";
+ print " tv->t_inttwo = data->aux;\n";
+ print " tv->t_flag = data->tokflag;\n";
+ print " return tv->t_type = data->tokentype;\n";
+ print "\n";
+ print "notfound:\n";
+ print " tv->t_integer = 0;\n";
+ print " tv->t_inttwo = 0;\n";
+ print " tv->t_flag = 0;\n";
+ print " return tv->t_type = TOKEN_ID;\n";
+ print "}\n";
+}