X-Git-Url: https://jxself.org/git/?p=inform.git;a=blobdiff_plain;f=verbs.c;fp=verbs.c;h=0000000000000000000000000000000000000000;hp=1116e096f4c7cada59d97ef9c5e929882a0a4b31;hb=81ffe9a7de1db0b3a318a053b38882d1b7ab304c;hpb=d1090135a32de7b38b48c55d4e21f95da4c405bc diff --git a/verbs.c b/verbs.c deleted file mode 100644 index 1116e09..0000000 --- a/verbs.c +++ /dev/null @@ -1,944 +0,0 @@ -/* ------------------------------------------------------------------------- */ -/* "verbs" : Manages actions and grammar tables; parses the directives */ -/* Verb and Extend. */ -/* */ -/* Copyright (c) Graham Nelson 1993 - 2018 */ -/* */ -/* This file is part of Inform. */ -/* */ -/* Inform is free software: you can redistribute it and/or modify */ -/* it under the terms of the GNU General Public License as published by */ -/* the Free Software Foundation, either version 3 of the License, or */ -/* (at your option) any later version. */ -/* */ -/* Inform is distributed in the hope that it will be useful, */ -/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ -/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ -/* GNU General Public License for more details. */ -/* */ -/* You should have received a copy of the GNU General Public License */ -/* along with Inform. If not, see https://gnu.org/licenses/ */ -/* */ -/* ------------------------------------------------------------------------- */ - -#include "header.h" - -int grammar_version_number; /* 1 for pre-Inform 6.06 table format */ -int32 grammar_version_symbol; /* Index of "Grammar__Version" - within symbols table */ - -/* ------------------------------------------------------------------------- */ -/* Actions. */ -/* ------------------------------------------------------------------------- */ -/* Array defined below: */ -/* */ -/* int32 action_byte_offset[n] The (byte) offset in the Z-machine */ -/* code area of the ...Sub routine */ -/* for action n. (NB: This is left */ -/* blank until the end of the */ -/* compilation pass.) */ -/* int32 action_symbol[n] The symbol table index of the n-th */ -/* action's name. */ -/* ------------------------------------------------------------------------- */ - -int no_actions, /* Number of actions made so far */ - no_fake_actions; /* Number of fake actions made so far */ - -/* ------------------------------------------------------------------------- */ -/* Adjectives. (The term "adjective" is traditional; they are mainly */ -/* prepositions, such as "onto".) */ -/* ------------------------------------------------------------------------- */ -/* Arrays defined below: */ -/* */ -/* int32 adjectives[n] Byte address of dictionary entry */ -/* for the nth adjective */ -/* dict_word adjective_sort_code[n] Dictionary sort code of nth adj */ -/* ------------------------------------------------------------------------- */ - -int no_adjectives; /* Number of adjectives made so far */ - -/* ------------------------------------------------------------------------- */ -/* Verbs. Note that Inform-verbs are not quite the same as English verbs: */ -/* for example the English verbs "take" and "drop" both normally */ -/* correspond in a game's dictionary to the same Inform verb. An */ -/* Inform verb is essentially a list of grammar lines. 
*/ -/* ------------------------------------------------------------------------- */ -/* Arrays defined below: */ -/* */ -/* verbt Inform_verbs[n] The n-th grammar line sequence: */ -/* see "header.h" for the definition */ -/* of the typedef struct verbt */ -/* int32 grammar_token_routine[n] The byte offset from start of code */ -/* area of the n-th one */ -/* ------------------------------------------------------------------------- */ - -int no_Inform_verbs, /* Number of Inform-verbs made so far */ - no_grammar_token_routines; /* Number of routines given in tokens */ - -/* ------------------------------------------------------------------------- */ -/* We keep a list of English verb-words known (e.g. "take" or "eat") and */ -/* which Inform-verbs they correspond to. (This list is needed for some */ -/* of the grammar extension operations.) */ -/* The format of this list is a sequence of variable-length records: */ -/* */ -/* Byte offset to start of next record (1 byte) */ -/* Inform verb number this word corresponds to (1 byte) */ -/* The English verb-word (reduced to lower case), null-terminated */ -/* ------------------------------------------------------------------------- */ - -static char *English_verb_list, /* First byte of first record */ - *English_verb_list_top; /* Next byte free for new record */ - -static int English_verb_list_size; /* Size of the list in bytes - (redundant but convenient) */ - -/* ------------------------------------------------------------------------- */ -/* Arrays used by this file */ -/* ------------------------------------------------------------------------- */ - - verbt *Inform_verbs; - uchar *grammar_lines; - int32 grammar_lines_top; - int no_grammar_lines, no_grammar_tokens; - - int32 *action_byte_offset, - *action_symbol, - *grammar_token_routine, - *adjectives; - static uchar *adjective_sort_code; - -/* ------------------------------------------------------------------------- */ -/* Tracing for compiler maintenance */ -/* ------------------------------------------------------------------------- */ - -extern void list_verb_table(void) -{ int i; - for (i=0; i"); - debug_file_printf("##%s", token_text); - debug_file_printf("%d", svals[i]); - get_next_token(); - write_debug_locations - (get_token_location_end(beginning_debug_location)); - put_token_back(); - debug_file_printf(""); - } - - return; -} - -extern assembly_operand action_of_name(char *name) -{ - /* Returns the action number of the given name, creating it as a new - action name if it isn't already known as such. 
*/ - - char action_sub[MAX_IDENTIFIER_LENGTH+4]; - int j; - assembly_operand AO; - - snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", name); - j = symbol_index(action_sub, -1); - - if (stypes[j] == FAKE_ACTION_T) - { INITAO(&AO); - AO.value = svals[j]; - if (!glulx_mode) - AO.type = LONG_CONSTANT_OT; - else - set_constant_ot(&AO); - sflags[j] |= USED_SFLAG; - return AO; - } - - if (sflags[j] & UNKNOWN_SFLAG) - { - if (no_actions>=MAX_ACTIONS) memoryerror("MAX_ACTIONS",MAX_ACTIONS); - new_action(name, no_actions); - action_symbol[no_actions] = j; - assign_symbol(j, no_actions++, CONSTANT_T); - sflags[j] |= ACTION_SFLAG; - } - sflags[j] |= USED_SFLAG; - - INITAO(&AO); - AO.value = svals[j]; - AO.marker = ACTION_MV; - if (!glulx_mode) { - AO.type = (module_switch)?LONG_CONSTANT_OT:SHORT_CONSTANT_OT; - if (svals[j] >= 256) AO.type = LONG_CONSTANT_OT; - } - else { - AO.type = CONSTANT_OT; - } - return AO; -} - -extern void find_the_actions(void) -{ int i; int32 j; - char action_name[MAX_IDENTIFIER_LENGTH+4]; - char action_sub[MAX_IDENTIFIER_LENGTH+4]; - - if (module_switch) - for (i=0; i= MAX_ADJECTIVES) - memoryerror("MAX_ADJECTIVES", MAX_ADJECTIVES); - - dictionary_prepare(English_word, new_sort_code); - for (i=0; i MAX_VERB_WORD_SIZE+4) - error_numbered("Verb word is too long -- max length is", MAX_VERB_WORD_SIZE); - English_verb_list_size += entrysize; - if (English_verb_list_size >= MAX_VERBSPACE) - memoryerror("MAX_VERBSPACE", MAX_VERBSPACE); - - English_verb_list_top[0] = entrysize; - English_verb_list_top[1] = number/256; - English_verb_list_top[2] = number%256; - strcpy(English_verb_list_top+3, English_verb); - English_verb_list_top += entrysize; -} - -static int get_verb(void) -{ - /* Look at the last-read token: if it's the name of an English verb - understood by Inform, in double-quotes, then return the Inform-verb - that word refers to: otherwise give an error and return -1. */ - - int j; - - if ((token_type == DQ_TT) || (token_type == SQ_TT)) - { j = find_or_renumber_verb(token_text, NULL); - if (j==-1) - error_named("There is no previous grammar for the verb", - token_text); - return j; - } - - ebf_error("an English verb in quotes", token_text); - - return -1; -} - -/* ------------------------------------------------------------------------- */ -/* Grammar lines for Verb/Extend directives. */ -/* ------------------------------------------------------------------------- */ - -static int grammar_line(int verbnum, int line) -{ - /* Parse a grammar line, to be written into grammar_lines[mark] onward. - - Syntax: * ... -> - - is compiled to a table in the form: - - - ... - - where is the byte 15, and each is 3 bytes long. - - If grammar_version_number is 1, the token holds - - 00 00 - - and otherwise a GV2 token. - - Return TRUE if grammar continues after the line, FALSE if the - directive comes to an end. */ - - int j, bytecode, mark; int32 wordcode; - int grammar_token, slash_mode, last_was_slash; - int reverse_action, TOKEN_SIZE; - debug_location_beginning beginning_debug_location = - get_token_location_beginning(); - - get_next_token(); - if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP)) - { discard_token_location(beginning_debug_location); - return FALSE; - } - if (!((token_type == SEP_TT) && (token_value == TIMES_SEP))) - { discard_token_location(beginning_debug_location); - ebf_error("'*' divider", token_text); - panic_mode_error_recovery(); - return FALSE; - } - - /* Have we run out of lines or token space? 
*/ - - if (line >= MAX_LINES_PER_VERB) - { discard_token_location(beginning_debug_location); - error("Too many lines of grammar for verb. This maximum is built \ -into Inform, so suggest rewriting grammar using general parsing routines"); - return(FALSE); - } - - /* Internally, a line can be up to 3*32 + 1 + 2 = 99 bytes long */ - /* In Glulx, that's 5*32 + 4 = 164 bytes */ - - mark = grammar_lines_top; - if (!glulx_mode) { - if (mark + 100 >= MAX_LINESPACE) - { discard_token_location(beginning_debug_location); - memoryerror("MAX_LINESPACE", MAX_LINESPACE); - } - } - else { - if (mark + 165 >= MAX_LINESPACE) - { discard_token_location(beginning_debug_location); - memoryerror("MAX_LINESPACE", MAX_LINESPACE); - } - } - - Inform_verbs[verbnum].l[line] = mark; - - if (!glulx_mode) { - mark = mark + 2; - TOKEN_SIZE = 3; - } - else { - mark = mark + 3; - TOKEN_SIZE = 5; - } - - grammar_token = 0; last_was_slash = TRUE; slash_mode = FALSE; - no_grammar_lines++; - - do - { get_next_token(); - bytecode = 0; wordcode = 0; - if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP)) - { discard_token_location(beginning_debug_location); - ebf_error("'->' clause", token_text); - return FALSE; - } - if ((token_type == SEP_TT) && (token_value == ARROW_SEP)) - { if (last_was_slash && (grammar_token>0)) - ebf_error("grammar token", token_text); - break; - } - - if (!last_was_slash) slash_mode = FALSE; - if ((token_type == SEP_TT) && (token_value == DIVIDE_SEP)) - { if (grammar_version_number == 1) - error("'/' can only be used with Library 6/3 or later"); - if (last_was_slash) - ebf_error("grammar token or '->'", token_text); - else - { last_was_slash = TRUE; - slash_mode = TRUE; - if (((grammar_lines[mark-TOKEN_SIZE]) & 0x0f) != 2) - error("'/' can only be applied to prepositions"); - grammar_lines[mark-TOKEN_SIZE] |= 0x20; - continue; - } - } - else last_was_slash = FALSE; - - if ((token_type == DQ_TT) || (token_type == SQ_TT)) - { if (grammar_version_number == 1) - bytecode = make_adjective(token_text); - else - { bytecode = 0x42; - wordcode = dictionary_add(token_text, 8, 0, 0); - } - } - else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NOUN_DK)) - { get_next_token(); - if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP)) - { - /* noun = */ - - get_next_token(); - if ((token_type != SYMBOL_TT) - || (stypes[token_value] != ROUTINE_T)) - { discard_token_location(beginning_debug_location); - ebf_error("routine name after 'noun='", token_text); - panic_mode_error_recovery(); - return FALSE; - } - if (grammar_version_number == 1) - bytecode - = 16 + make_parsing_routine(svals[token_value]); - else - { bytecode = 0x83; - wordcode = svals[token_value]; - } - sflags[token_value] |= USED_SFLAG; - } - else - { put_token_back(); - if (grammar_version_number == 1) bytecode=0; - else { bytecode = 1; wordcode = 0; } - } - } - else if ((token_type==DIR_KEYWORD_TT)&&(token_value==HELD_DK)) - { if (grammar_version_number==1) bytecode=1; - else { bytecode=1; wordcode=1; } } - else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTI_DK)) - { if (grammar_version_number==1) bytecode=2; - else { bytecode=1; wordcode=2; } } - else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIHELD_DK)) - { if (grammar_version_number==1) bytecode=3; - else { bytecode=1; wordcode=3; } } - else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIEXCEPT_DK)) - { if (grammar_version_number==1) bytecode=4; - else { bytecode=1; wordcode=4; } } - else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIINSIDE_DK)) - { if 
(grammar_version_number==1) bytecode=5; - else { bytecode=1; wordcode=5; } } - else if ((token_type==DIR_KEYWORD_TT)&&(token_value==CREATURE_DK)) - { if (grammar_version_number==1) bytecode=6; - else { bytecode=1; wordcode=6; } } - else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SPECIAL_DK)) - { if (grammar_version_number==1) bytecode=7; - else { bytecode=1; wordcode=7; } } - else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NUMBER_DK)) - { if (grammar_version_number==1) bytecode=8; - else { bytecode=1; wordcode=8; } } - else if ((token_type==DIR_KEYWORD_TT)&&(token_value==TOPIC_DK)) - { if (grammar_version_number==1) - error("The 'topic' token is only available if you \ -are using Library 6/3 or later"); - else { bytecode=1; wordcode=9; } } - else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SCOPE_DK)) - { - /* scope = */ - - get_next_token(); - if (!((token_type==SEP_TT)&&(token_value==SETEQUALS_SEP))) - { discard_token_location(beginning_debug_location); - ebf_error("'=' after 'scope'", token_text); - panic_mode_error_recovery(); - return FALSE; - } - - get_next_token(); - if ((token_type != SYMBOL_TT) - || (stypes[token_value] != ROUTINE_T)) - { discard_token_location(beginning_debug_location); - ebf_error("routine name after 'scope='", token_text); - panic_mode_error_recovery(); - return FALSE; - } - - if (grammar_version_number == 1) - bytecode = 80 + - make_parsing_routine(svals[token_value]); - else { bytecode = 0x85; wordcode = svals[token_value]; } - sflags[token_value] |= USED_SFLAG; - } - else if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP)) - { discard_token_location(beginning_debug_location); - error("'=' is only legal here as 'noun=Routine'"); - panic_mode_error_recovery(); - return FALSE; - } - else { /* or tokens */ - - if ((token_type != SYMBOL_TT) - || ((stypes[token_value] != ATTRIBUTE_T) - && (stypes[token_value] != ROUTINE_T))) - { discard_token_location(beginning_debug_location); - error_named("No such grammar token as", token_text); - panic_mode_error_recovery(); - return FALSE; - } - if (stypes[token_value]==ATTRIBUTE_T) - { if (grammar_version_number == 1) - bytecode = 128 + svals[token_value]; - else { bytecode = 4; wordcode = svals[token_value]; } - } - else - { if (grammar_version_number == 1) - bytecode = 48 + - make_parsing_routine(svals[token_value]); - else { bytecode = 0x86; wordcode = svals[token_value]; } - } - sflags[token_value] |= USED_SFLAG; - } - - grammar_token++; no_grammar_tokens++; - if ((grammar_version_number == 1) && (grammar_token > 6)) - { if (grammar_token == 7) - warning("Grammar line cut short: you can only have up to 6 \ -tokens in any line (unless you're compiling with library 6/3 or later)"); - } - else - { if (slash_mode) - { if (bytecode != 0x42) - error("'/' can only be applied to prepositions"); - bytecode |= 0x10; - } - grammar_lines[mark++] = bytecode; - if (!glulx_mode) { - grammar_lines[mark++] = wordcode/256; - grammar_lines[mark++] = wordcode%256; - } - else { - grammar_lines[mark++] = ((wordcode >> 24) & 0xFF); - grammar_lines[mark++] = ((wordcode >> 16) & 0xFF); - grammar_lines[mark++] = ((wordcode >> 8) & 0xFF); - grammar_lines[mark++] = ((wordcode) & 0xFF); - } - } - - } while (TRUE); - - grammar_lines[mark++] = 15; - grammar_lines_top = mark; - - dont_enter_into_symbol_table = TRUE; - get_next_token(); - dont_enter_into_symbol_table = FALSE; - - if (token_type != DQ_TT) - { discard_token_location(beginning_debug_location); - ebf_error("name of new or existing action", token_text); - 
panic_mode_error_recovery(); - return FALSE; - } - - { assembly_operand AO = action_of_name(token_text); - j = AO.value; - if (j >= ((grammar_version_number==1)?256:4096)) - error_named("This is a fake action, not a real one:", token_text); - } - - reverse_action = FALSE; - get_next_token(); - if ((token_type == DIR_KEYWORD_TT) && (token_value == REVERSE_DK)) - { if (grammar_version_number == 1) - error("'reverse' actions can only be used with \ -Library 6/3 or later"); - reverse_action = TRUE; - } - else put_token_back(); - - mark = Inform_verbs[verbnum].l[line]; - - if (debugfile_switch) - { debug_file_printf("<table-entry>"); - debug_file_printf("<type>grammar line</type>"); - debug_file_printf("<address>"); - write_debug_grammar_backpatch(mark); - debug_file_printf("</address>"); - debug_file_printf("<end-address>"); - write_debug_grammar_backpatch(grammar_lines_top); - debug_file_printf("</end-address>"); - write_debug_locations - (get_token_location_end(beginning_debug_location)); - debug_file_printf("</table-entry>
"); - } - - if (!glulx_mode) { - if (reverse_action) - j = j + 0x400; - grammar_lines[mark++] = j/256; - grammar_lines[mark++] = j%256; - } - else { - grammar_lines[mark++] = ((j >> 8) & 0xFF); - grammar_lines[mark++] = ((j) & 0xFF); - grammar_lines[mark++] = (reverse_action ? 1 : 0); - } - - return TRUE; -} - -/* ------------------------------------------------------------------------- */ -/* The Verb directive: */ -/* */ -/* Verb [meta] "word-1" ... "word-n" | = "existing-English-verb" */ -/* | ... */ -/* */ -/* ------------------------------------------------------------------------- */ - -extern void make_verb(void) -{ - /* Parse an entire Verb ... directive. */ - - int Inform_verb, meta_verb_flag=FALSE, verb_equals_form=FALSE; - - char *English_verbs_given[32]; int no_given = 0, i; - - directive_keywords.enabled = TRUE; - - get_next_token(); - - if ((token_type == DIR_KEYWORD_TT) && (token_value == META_DK)) - { meta_verb_flag = TRUE; - get_next_token(); - } - - while ((token_type == DQ_TT) || (token_type == SQ_TT)) - { English_verbs_given[no_given++] = token_text; - get_next_token(); - } - - if (no_given == 0) - { ebf_error("English verb in quotes", token_text); - panic_mode_error_recovery(); return; - } - - if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP)) - { verb_equals_form = TRUE; - get_next_token(); - Inform_verb = get_verb(); - if (Inform_verb == -1) return; - get_next_token(); - if (!((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))) - ebf_error("';' after English verb", token_text); - } - else - { Inform_verb = no_Inform_verbs; - if (no_Inform_verbs == MAX_VERBS) - memoryerror("MAX_VERBS",MAX_VERBS); - } - - for (i=0; i */ -/* | "verb" | "replace" */ -/* | "first" */ -/* | "last" */ -/* */ -/* ------------------------------------------------------------------------- */ - -#define EXTEND_REPLACE 1 -#define EXTEND_FIRST 2 -#define EXTEND_LAST 3 - -extern void extend_verb(void) -{ - /* Parse an entire Extend ... directive. */ - - int Inform_verb = -1, k, l, lines, extend_mode; - - directive_keywords.enabled = TRUE; - directives.enabled = FALSE; - - get_next_token(); - if ((token_type == DIR_KEYWORD_TT) && (token_value == ONLY_DK)) - { l = -1; - if (no_Inform_verbs == MAX_VERBS) - memoryerror("MAX_VERBS", MAX_VERBS); - while (get_next_token(), - ((token_type == DQ_TT) || (token_type == SQ_TT))) - { Inform_verb = get_verb(); - if (Inform_verb == -1) return; - if ((l!=-1) && (Inform_verb!=l)) - warning_named("Verb disagrees with previous verbs:", token_text); - l = Inform_verb; - dictionary_set_verb_number(token_text, - (glulx_mode)?(0xffff-no_Inform_verbs):(0xff-no_Inform_verbs)); - /* make call to renumber verb in English_verb_list too */ - if (find_or_renumber_verb(token_text, &no_Inform_verbs) == -1) - warning_named("Verb to extend not found in English_verb_list:", - token_text); - } - - /* Copy the old Inform-verb into a new one which the list of - English-verbs given have had their dictionary entries modified - to point to */ - - Inform_verbs[no_Inform_verbs] = Inform_verbs[Inform_verb]; - Inform_verb = no_Inform_verbs++; - } - else - { Inform_verb = get_verb(); - if (Inform_verb == -1) return; - get_next_token(); - } - - /* Inform_verb now contains the number of the Inform-verb to extend... 
*/ - - extend_mode = EXTEND_LAST; - if ((token_type == SEP_TT) && (token_value == TIMES_SEP)) - put_token_back(); - else - { extend_mode = 0; - if ((token_type == DIR_KEYWORD_TT) && (token_value == REPLACE_DK)) - extend_mode = EXTEND_REPLACE; - if ((token_type == DIR_KEYWORD_TT) && (token_value == FIRST_DK)) - extend_mode = EXTEND_FIRST; - if ((token_type == DIR_KEYWORD_TT) && (token_value == LAST_DK)) - extend_mode = EXTEND_LAST; - - if (extend_mode==0) - { ebf_error("'replace', 'last', 'first' or '*'", token_text); - extend_mode = EXTEND_LAST; - } - } - - l = Inform_verbs[Inform_verb].lines; - lines = 0; - if (extend_mode == EXTEND_LAST) lines=l; - do - { if (extend_mode == EXTEND_FIRST) - for (k=l; k>0; k--) - Inform_verbs[Inform_verb].l[k+lines] - = Inform_verbs[Inform_verb].l[k-1+lines]; - } while (grammar_line(Inform_verb, lines++)); - - if (extend_mode == EXTEND_FIRST) - { Inform_verbs[Inform_verb].lines = l+lines-1; - for (k=0; k
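
A note on the English_verb_list records documented near the top of verbs.c (a one-byte offset to the next record, the Inform-verb number, then the null-terminated verb word): the sketch below is not part of Inform and not from the diff above; lookup_verb(), the hand-built buffer and the hard-coded "take" entry are illustrative only. It assumes the two-byte big-endian verb number that register_verb() actually writes (number/256 then number%256), even though the header comment still describes a single byte.

#include <stdio.h>
#include <string.h>

/* Walk a buffer of English_verb_list-style records and return the
   Inform-verb number for 'word', or -1 if the word is not present. */
static int lookup_verb(const unsigned char *list, const unsigned char *top,
                       const char *word)
{   const unsigned char *p = list;
    while (p < top)
    {   int verbnum = p[1]*256 + p[2];            /* 2-byte verb number   */
        if (strcmp((const char *)(p + 3), word) == 0) return verbnum;
        p += p[0];                                /* skip to next record  */
    }
    return -1;
}

int main(void)
{   /* Hand-build a single record for the word "take" as Inform-verb 0,
       laid out the way register_verb() stores it. */
    unsigned char list[64];
    int entrysize = 3 + (int)strlen("take") + 1;  /* header + word + NUL  */
    list[0] = (unsigned char) entrysize;
    list[1] = 0; list[2] = 0;                     /* verb number 0        */
    strcpy((char *)(list + 3), "take");
    printf("\"take\" -> Inform-verb %d\n",
        lookup_verb(list, list + entrysize, "take"));
    return 0;
}

Compiled on its own, this prints "take" -> Inform-verb 0; find_or_renumber_verb() in the file walks the real list in essentially this way.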