1 /* ------------------------------------------------------------------------- */
2 /* "verbs" : Manages actions and grammar tables; parses the directives */
5 /* Part of Inform 6.35 */
6 /* copyright (c) Graham Nelson 1993 - 2021 */
8 /* Inform is free software: you can redistribute it and/or modify */
9 /* it under the terms of the GNU General Public License as published by */
10 /* the Free Software Foundation, either version 3 of the License, or */
11 /* (at your option) any later version. */
13 /* Inform is distributed in the hope that it will be useful, */
14 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
15 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
16 /* GNU General Public License for more details. */
18 /* You should have received a copy of the GNU General Public License */
19 /* along with Inform. If not, see https://gnu.org/licenses/ */
21 /* ------------------------------------------------------------------------- */
25 int grammar_version_number; /* 1 for pre-Inform 6.06 table format */
/* (Version 2 = "GV2"; the value is chosen in init_verbs_vars below.) */
26 int32 grammar_version_symbol; /* Index of "Grammar__Version"
27 within symbols table */
29 /* ------------------------------------------------------------------------- */
31 /* ------------------------------------------------------------------------- */
32 /* Array defined below: */
34 /* int32 action_byte_offset[n] The (byte) offset in the Z-machine */
35 /* code area of the ...Sub routine */
36 /* for action n. (NB: This is left */
37 /* blank until the end of the */
38 /* compilation pass.) */
39 /* int32 action_symbol[n] The symbol table index of the n-th */
41 /* ------------------------------------------------------------------------- */
/* Counts of real and fake actions.  Fake actions are numbered from a high
   base upwards (256 in GV1, 4096 in GV2 -- see make_fake_action below). */
43 int no_actions, /* Number of actions made so far */
44 no_fake_actions; /* Number of fake actions made so far */
46 /* ------------------------------------------------------------------------- */
47 /* Adjectives. (The term "adjective" is traditional; they are mainly */
48 /* prepositions, such as "onto".) */
49 /* ------------------------------------------------------------------------- */
50 /* Arrays defined below: */
52 /* int32 adjectives[n] Byte address of dictionary entry */
53 /* for the nth adjective */
54 /* dict_word adjective_sort_code[n] Dictionary sort code of nth adj */
55 /* ------------------------------------------------------------------------- */
57 int no_adjectives; /* Number of adjectives made so far */
59 /* ------------------------------------------------------------------------- */
60 /* Verbs. Note that Inform-verbs are not quite the same as English verbs: */
61 /* for example the English verbs "take" and "drop" both normally */
62 /* correspond in a game's dictionary to the same Inform verb. An */
63 /* Inform verb is essentially a list of grammar lines. */
64 /* (Calling them "English verbs" is of course out of date. Read */
65 /* this as jargon for "dict words which are verbs".) */
66 /* ------------------------------------------------------------------------- */
67 /* Arrays defined below: */
69 /* verbt Inform_verbs[n] The n-th grammar line sequence: */
70 /* see "header.h" for the definition */
71 /* of the typedef struct verbt */
72 /* int32 grammar_token_routine[n] The byte offset from start of code */
73 /* area of the n-th one */
74 /* ------------------------------------------------------------------------- */
76 int no_Inform_verbs, /* Number of Inform-verbs made so far */
77 no_grammar_token_routines; /* Number of routines given in tokens */
79 /* ------------------------------------------------------------------------- */
80 /* We keep a list of English verb-words known (e.g. "take" or "eat") and */
81 /* which Inform-verbs they correspond to. (This list is needed for some */
82 /* of the grammar extension operations.) */
83 /* The format of this list is a sequence of variable-length records: */
85 /* Byte offset to start of next record (1 byte) */
86 /* Inform verb number this word corresponds to (1 byte) */
87 /* The English verb-word (reduced to lower case), null-terminated */
88 /* ------------------------------------------------------------------------- */
/* The English-verb register: a packed byte buffer of the variable-length
   records described in the comment block above.  Allocated to MAX_VERBSPACE
   bytes in verbs_allocate_arrays. */
90 static char *English_verb_list, /* First byte of first record */
91 *English_verb_list_top; /* Next byte free for new record */
93 static int English_verb_list_size; /* Size of the list in bytes
94 (redundant but convenient) */
96 /* Maximum synonyms in a single Verb/Extend directive */
97 #define MAX_VERB_SYNONYMS (32)
99 /* ------------------------------------------------------------------------- */
100 /* Arrays used by this file */
101 /* ------------------------------------------------------------------------- */
104 uchar *grammar_lines;
/* grammar_lines: byte buffer (MAX_LINESPACE long) into which grammar_line()
   compiles grammar-line data; grammar_lines_top is the next free index. */
105 int32 grammar_lines_top;
106 int no_grammar_lines, no_grammar_tokens;
108 int32 *action_byte_offset,
110 *grammar_token_routine,
/* NOTE(review): this declaration list appears truncated in this listing --
   action_symbol and adjectives are allocated and freed below but their
   declarations are not visible here; verify against the full source. */
112 static uchar *adjective_sort_code;
114 /* ------------------------------------------------------------------------- */
115 /* Tracing for compiler maintenance */
116 /* ------------------------------------------------------------------------- */
118 extern void list_verb_table(void)
/* Compiler-maintenance tracing aid: print, for each Inform-verb defined
   so far, how many grammar lines it has. */
120 for (i=0; i<no_Inform_verbs; i++)
121 printf("Verb %2d has %d lines\n", i, Inform_verbs[i].lines);
124 /* ------------------------------------------------------------------------- */
126 /* ------------------------------------------------------------------------- */
128 static void new_action(char *b, int c)
/* b: the action's name; c: the number allotted to it. */
130 /* Called whenever a new action (or fake action) is created (either
131 by using make_action above, or the Fake_Action directive, or by
132 the linker). At present just a hook for some tracing code. */
134 if (printprops_switch)
135 printf("Action '%s' is numbered %d\n",b,c);
138 /* Note that fake actions are numbered from a high base point upwards;
139 real actions are numbered from 0 upward in GV2. */
141 extern void make_fake_action(void)
/* Parse a Fake_Action directive: read the new action name, create the
   hidden "<name>__A" symbol for it, and assign it the next fake-action
   number (numbered upwards from 256 in GV1, 4096 in GV2).  Emits a
   <fake-action> record to the debug file if one is being written. */
143 char action_sub[MAX_IDENTIFIER_LENGTH+4];
144 debug_location_beginning beginning_debug_location =
145 get_token_location_beginning();
148 if (token_type != SYMBOL_TT)
149 { discard_token_location(beginning_debug_location);
150 ebf_error("new fake action name", token_text);
151 panic_mode_error_recovery(); return;
153 /* Action symbols (including fake_actions) may collide with other kinds of symbols. So we don't check that. */
155 snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", token_text);
156 i = symbol_index(action_sub, -1);
/* If FOO__A is already a known symbol, the user has (unknowingly)
   redefined it: report against the previous definition's line. */
158 if (!(sflags[i] & UNKNOWN_SFLAG))
159 { discard_token_location(beginning_debug_location);
160 /* The user didn't know they were defining FOO__A, but they were and it's a problem. */
161 ebf_symbol_error("new fake action name", action_sub, typename(stypes[i]), slines[i]);
162 panic_mode_error_recovery(); return;
165 assign_symbol(i, ((grammar_version_number==1)?256:4096)+no_fake_actions++,
168 new_action(token_text, i);
170 if (debugfile_switch)
171 { debug_file_printf("<fake-action>");
172 debug_file_printf("<identifier>##%s</identifier>", token_text);
173 debug_file_printf("<value>%d</value>", svals[i]);
175 write_debug_locations
176 (get_token_location_end(beginning_debug_location));
178 debug_file_printf("</fake-action>");
184 extern assembly_operand action_of_name(char *name)
186 /* Returns the action number of the given name, creating it as a new
187 action name if it isn't already known as such. */
189 char action_sub[MAX_IDENTIFIER_LENGTH+4];
/* Look up (or create) the hidden "<name>__A" symbol for this action. */
193 snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", name);
194 j = symbol_index(action_sub, -1);
/* Fake actions already have their value; just build the operand. */
196 if (stypes[j] == FAKE_ACTION_T)
200 AO.type = LONG_CONSTANT_OT;
202 set_constant_ot(&AO);
203 sflags[j] |= USED_SFLAG;
/* Unknown symbol: allot the next real action number to it. */
207 if (sflags[j] & UNKNOWN_SFLAG)
209 if (no_actions>=MAX_ACTIONS) memoryerror("MAX_ACTIONS",MAX_ACTIONS);
210 new_action(name, no_actions);
211 action_symbol[no_actions] = j;
212 assign_symbol(j, no_actions++, CONSTANT_T);
213 sflags[j] |= ACTION_SFLAG;
215 sflags[j] |= USED_SFLAG;
/* Mark the operand for backpatching as an action value; pick the
   constant size by target (Z-machine short/long vs. Glulx). */
219 AO.marker = ACTION_MV;
221 AO.type = (module_switch)?LONG_CONSTANT_OT:SHORT_CONSTANT_OT;
222 if (svals[j] >= 256) AO.type = LONG_CONSTANT_OT;
225 AO.type = CONSTANT_OT;
230 extern void find_the_actions(void)
/* At the end of compilation: for each action "Foo" (recorded via its
   "Foo__A" symbol), find the routine "FooSub" and record its code
   address in action_byte_offset.  Errors if no such routine exists. */
232 char action_name[MAX_IDENTIFIER_LENGTH+4];
233 char action_sub[MAX_IDENTIFIER_LENGTH+4];
236 for (i=0; i<no_actions; i++) action_byte_offset[i] = 0;
238 for (i=0; i<no_actions; i++)
239 { strcpy(action_name, (char *) symbs[action_symbol[i]]);
240 action_name[strlen(action_name) - 3] = '\0'; /* remove "__A" */
241 strcpy(action_sub, action_name);
242 strcat(action_sub, "Sub");
243 j = symbol_index(action_sub, -1);
244 if (sflags[j] & UNKNOWN_SFLAG)
246 error_named_at("No ...Sub action routine found for action:", action_name, slines[action_symbol[i]]);
/* The symbol exists but is not a routine: report both facts. */
249 if (stypes[j] != ROUTINE_T)
251 error_named_at("No ...Sub action routine found for action:", action_name, slines[action_symbol[i]]);
252 error_named_at("-- ...Sub symbol found, but not a routine:", action_sub, slines[j]);
255 { action_byte_offset[i] = svals[j];
256 sflags[j] |= USED_SFLAG;
261 /* ------------------------------------------------------------------------- */
263 /* ------------------------------------------------------------------------- */
265 static int make_adjective(char *English_word)
267 /* Returns adjective number of the English word supplied, creating
268 a new adjective number if need be.
270 Note that (partly for historical reasons) adjectives are numbered
271 from 0xff downwards. (And partly to make them stand out as tokens.)
273 This routine is used only in grammar version 1: the corresponding
274 table is left empty in GV2. */
277 uchar new_sort_code[MAX_DICT_WORD_BYTES];
279 if (no_adjectives >= MAX_ADJECTIVES)
280 memoryerror("MAX_ADJECTIVES", MAX_ADJECTIVES);
/* Search for an existing adjective with the same dictionary sort code;
   if found, reuse its number rather than creating a duplicate. */
282 dictionary_prepare(English_word, new_sort_code);
283 for (i=0; i<no_adjectives; i++)
284 if (compare_sorts(new_sort_code,
285 adjective_sort_code+i*DICT_WORD_BYTES) == 0)
/* Not found: add a dictionary entry and record the sort code. */
287 adjectives[no_adjectives]
288 = dictionary_add(English_word,8,0,0xff-no_adjectives);
289 copy_sorts(adjective_sort_code+no_adjectives*DICT_WORD_BYTES,
291 return(0xff-no_adjectives++);
294 /* ------------------------------------------------------------------------- */
295 /* Parsing routines. */
296 /* ------------------------------------------------------------------------- */
298 static int make_parsing_routine(int32 routine_address)
/* Return the index of routine_address within the grammar_token_routine
   table, adding it as a new entry if it is not already present.
   NOTE(review): the early return for an existing match inside the loop
   is not visible in this listing -- confirm against the full source. */
300 /* This routine is used only in grammar version 1: the corresponding
301 table is left empty in GV2. */
304 for (l=0; l<no_grammar_token_routines; l++)
305 if (grammar_token_routine[l] == routine_address)
/* Here l == no_grammar_token_routines: append the new routine. */
308 grammar_token_routine[l] = routine_address;
309 return(no_grammar_token_routines++);
312 /* ------------------------------------------------------------------------- */
313 /* The English-verb list. */
314 /* ------------------------------------------------------------------------- */
316 static int find_or_renumber_verb(char *English_verb, int *new_number)
318 /* If new_number is null, returns the Inform-verb number which the
319 * given English verb causes, or -1 if the given verb is not in the
322 /* If new_number is non-null, renumbers the Inform-verb number which
323 * English_verb matches in English_verb_list to account for the case
324 * when we are extending a verb. Returns 0 if successful, or -1 if
325 * the given verb is not in the dictionary (which shouldn't happen as
326 * get_verb has already run) */
/* Walk the variable-length records (format documented above): the verb
   word is at p+3, and bytes p[1],p[2] hold the Inform-verb number
   big-endian. */
330 while (p < English_verb_list_top)
331 { if (strcmp(English_verb, p+3) == 0)
333 { p[1] = (*new_number)/256;
334 p[2] = (*new_number)%256;
/* Lookup case: reassemble the 16-bit verb number from the record. */
337 return(256*((uchar)p[1]))+((uchar)p[2]);
344 static void register_verb(char *English_verb, int number)
346 /* Registers a new English verb as referring to the given Inform-verb
347 number. (See comments above for format of the list.) */
/* Duplicate definitions are an error: each English verb word may refer
   to only one Inform-verb. */
350 if (find_or_renumber_verb(English_verb, NULL) != -1)
351 { error_named("Two different verb definitions refer to", English_verb);
355 /* We set a hard limit of MAX_VERB_WORD_SIZE=120 because the
356 English_verb_list table stores length in a leading byte. (We could
357 raise that to 250, really, but there's little point when
358 MAX_DICT_WORD_SIZE is 40.) */
359 entrysize = strlen(English_verb)+4;
360 if (entrysize > MAX_VERB_WORD_SIZE+4)
361 error_numbered("Verb word is too long -- max length is", MAX_VERB_WORD_SIZE);
362 English_verb_list_size += entrysize;
363 if (English_verb_list_size >= MAX_VERBSPACE)
364 memoryerror("MAX_VERBSPACE", MAX_VERBSPACE);
/* Append a new record: [0]=record length, [1..2]=verb number
   (big-endian), [3..]=the verb word, null-terminated. */
366 English_verb_list_top[0] = entrysize;
367 English_verb_list_top[1] = number/256;
368 English_verb_list_top[2] = number%256;
369 strcpy(English_verb_list_top+3, English_verb);
370 English_verb_list_top += entrysize;
373 static int get_verb(void)
375 /* Look at the last-read token: if it's the name of an English verb
376 understood by Inform, in double-quotes, then return the Inform-verb
377 that word refers to: otherwise give an error and return -1. */
381 if ((token_type == DQ_TT) || (token_type == SQ_TT))
382 { j = find_or_renumber_verb(token_text, NULL);
/* (j == -1 here means the quoted word is not a known verb.) */
384 error_named("There is no previous grammar for the verb",
389 ebf_error("an English verb in quotes", token_text);
394 /* ------------------------------------------------------------------------- */
395 /* Grammar lines for Verb/Extend directives. */
396 /* ------------------------------------------------------------------------- */
398 static int grammar_line(int verbnum, int line)
400 /* Parse a grammar line, to be written into grammar_lines[mark] onward.
402 Syntax: * <token1> ... <token-n> -> <action>
404 is compiled to a table in the form:
406 <action number : word>
407 <token 1> ... <token n> <ENDIT>
409 where <ENDIT> is the byte 15, and each <token> is 3 bytes long.
411 If grammar_version_number is 1, the token holds
415 and otherwise a GV2 token.
417 Return TRUE if grammar continues after the line, FALSE if the
418 directive comes to an end. */
420 int j, bytecode, mark; int32 wordcode;
421 int grammar_token, slash_mode, last_was_slash;
422 int reverse_action, TOKEN_SIZE;
423 debug_location_beginning beginning_debug_location =
424 get_token_location_beginning();
/* A ';' here ends the whole directive; anything other than the '*'
   line-divider is an error. */
427 if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
428 { discard_token_location(beginning_debug_location);
431 if (!((token_type == SEP_TT) && (token_value == TIMES_SEP)))
432 { discard_token_location(beginning_debug_location);
433 ebf_error("'*' divider", token_text);
434 panic_mode_error_recovery();
438 /* Have we run out of lines or token space? */
440 if (line >= MAX_LINES_PER_VERB)
441 { discard_token_location(beginning_debug_location);
442 error("Too many lines of grammar for verb. This maximum is built \
443 into Inform, so suggest rewriting grammar using general parsing routines");
447 /* Internally, a line can be up to 3*32 + 1 + 2 = 99 bytes long */
448 /* In Glulx, that's 5*32 + 4 = 164 bytes */
450 mark = grammar_lines_top;
452 if (mark + 100 >= MAX_LINESPACE)
453 { discard_token_location(beginning_debug_location);
454 memoryerror("MAX_LINESPACE", MAX_LINESPACE);
458 if (mark + 165 >= MAX_LINESPACE)
459 { discard_token_location(beginning_debug_location);
460 memoryerror("MAX_LINESPACE", MAX_LINESPACE);
/* Record where this line's compiled data begins, for this verb/line. */
464 Inform_verbs[verbnum].l[line] = mark;
/* Token-scanning loop: each iteration reads one grammar token and
   compiles it to (bytecode, wordcode).  NOTE(review): the loop header
   and some get_next_token() calls are not visible in this listing. */
475 grammar_token = 0; last_was_slash = TRUE; slash_mode = FALSE;
480 bytecode = 0; wordcode = 0;
481 if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
482 { discard_token_location(beginning_debug_location);
483 ebf_error("'->' clause", token_text);
486 if ((token_type == SEP_TT) && (token_value == ARROW_SEP))
487 { if (last_was_slash && (grammar_token>0))
488 ebf_error("grammar token", token_text);
/* '/' joins alternative prepositions; only legal in GV2 and only
   directly after a preposition token (low nibble 2). */
492 if (!last_was_slash) slash_mode = FALSE;
493 if ((token_type == SEP_TT) && (token_value == DIVIDE_SEP))
494 { if (grammar_version_number == 1)
495 error("'/' can only be used with Library 6/3 or later");
497 ebf_error("grammar token or '->'", token_text);
499 { last_was_slash = TRUE;
501 if (((grammar_lines[mark-TOKEN_SIZE]) & 0x0f) != 2)
502 error("'/' can only be applied to prepositions");
503 grammar_lines[mark-TOKEN_SIZE] |= 0x20;
507 else last_was_slash = FALSE;
/* Quoted word: a preposition.  GV1 stores an adjective number; GV2
   stores the dictionary address. */
509 if ((token_type == DQ_TT) || (token_type == SQ_TT))
510 { if (grammar_version_number == 1)
511 bytecode = make_adjective(token_text);
514 wordcode = dictionary_add(token_text, 8, 0, 0);
517 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NOUN_DK))
519 if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
521 /* noun = <routine> */
524 if ((token_type != SYMBOL_TT)
525 || (stypes[token_value] != ROUTINE_T))
526 { discard_token_location(beginning_debug_location);
527 ebf_error("routine name after 'noun='", token_text);
528 panic_mode_error_recovery();
531 if (grammar_version_number == 1)
533 = 16 + make_parsing_routine(svals[token_value]);
536 wordcode = svals[token_value];
538 sflags[token_value] |= USED_SFLAG;
/* Plain 'noun' token. */
542 if (grammar_version_number == 1) bytecode=0;
543 else { bytecode = 1; wordcode = 0; }
546 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==HELD_DK))
547 { if (grammar_version_number==1) bytecode=1;
548 else { bytecode=1; wordcode=1; } }
549 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTI_DK))
550 { if (grammar_version_number==1) bytecode=2;
551 else { bytecode=1; wordcode=2; } }
552 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIHELD_DK))
553 { if (grammar_version_number==1) bytecode=3;
554 else { bytecode=1; wordcode=3; } }
555 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIEXCEPT_DK))
556 { if (grammar_version_number==1) bytecode=4;
557 else { bytecode=1; wordcode=4; } }
558 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIINSIDE_DK))
559 { if (grammar_version_number==1) bytecode=5;
560 else { bytecode=1; wordcode=5; } }
561 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==CREATURE_DK))
562 { if (grammar_version_number==1) bytecode=6;
563 else { bytecode=1; wordcode=6; } }
564 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SPECIAL_DK))
565 { if (grammar_version_number==1) bytecode=7;
566 else { bytecode=1; wordcode=7; } }
567 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NUMBER_DK))
568 { if (grammar_version_number==1) bytecode=8;
569 else { bytecode=1; wordcode=8; } }
570 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==TOPIC_DK))
571 { if (grammar_version_number==1)
572 error("The 'topic' token is only available if you \
573 are using Library 6/3 or later");
574 else { bytecode=1; wordcode=9; } }
575 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SCOPE_DK))
577 /* scope = <routine> */
580 if (!((token_type==SEP_TT)&&(token_value==SETEQUALS_SEP)))
581 { discard_token_location(beginning_debug_location);
582 ebf_error("'=' after 'scope'", token_text);
583 panic_mode_error_recovery();
588 if ((token_type != SYMBOL_TT)
589 || (stypes[token_value] != ROUTINE_T))
590 { discard_token_location(beginning_debug_location);
591 ebf_error("routine name after 'scope='", token_text);
592 panic_mode_error_recovery();
596 if (grammar_version_number == 1)
598 make_parsing_routine(svals[token_value]);
599 else { bytecode = 0x85; wordcode = svals[token_value]; }
600 sflags[token_value] |= USED_SFLAG;
602 else if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
603 { discard_token_location(beginning_debug_location);
604 error("'=' is only legal here as 'noun=Routine'");
605 panic_mode_error_recovery();
608 else { /* <attribute> or <general-parsing-routine> tokens */
610 if ((token_type != SYMBOL_TT)
611 || ((stypes[token_value] != ATTRIBUTE_T)
612 && (stypes[token_value] != ROUTINE_T)))
613 { discard_token_location(beginning_debug_location);
614 error_named("No such grammar token as", token_text);
615 panic_mode_error_recovery();
618 if (stypes[token_value]==ATTRIBUTE_T)
619 { if (grammar_version_number == 1)
620 bytecode = 128 + svals[token_value];
621 else { bytecode = 4; wordcode = svals[token_value]; }
624 { if (grammar_version_number == 1)
626 make_parsing_routine(svals[token_value]);
627 else { bytecode = 0x86; wordcode = svals[token_value]; }
629 sflags[token_value] |= USED_SFLAG;
632 grammar_token++; no_grammar_tokens++;
633 if ((grammar_version_number == 1) && (grammar_token > 6))
634 { if (grammar_token == 7)
635 warning("Grammar line cut short: you can only have up to 6 \
636 tokens in any line (unless you're compiling with library 6/3 or later)");
/* In slash mode only another preposition (GV2 token 0x42) may follow. */
640 { if (bytecode != 0x42)
641 error("'/' can only be applied to prepositions");
/* Emit the compiled token: bytecode followed by wordcode as two bytes
   (Z-machine) or four bytes (Glulx). */
644 grammar_lines[mark++] = bytecode;
646 grammar_lines[mark++] = wordcode/256;
647 grammar_lines[mark++] = wordcode%256;
650 grammar_lines[mark++] = ((wordcode >> 24) & 0xFF);
651 grammar_lines[mark++] = ((wordcode >> 16) & 0xFF);
652 grammar_lines[mark++] = ((wordcode >> 8) & 0xFF);
653 grammar_lines[mark++] = ((wordcode) & 0xFF);
/* Terminate the token list with the ENDIT byte (15). */
659 grammar_lines[mark++] = 15;
660 grammar_lines_top = mark;
/* Read the action name after '->' without entering it into the symbol
   table as a side effect. */
662 dont_enter_into_symbol_table = TRUE;
664 dont_enter_into_symbol_table = FALSE;
666 if (token_type != DQ_TT)
667 { discard_token_location(beginning_debug_location);
668 ebf_error("name of new or existing action", token_text);
669 panic_mode_error_recovery();
673 { assembly_operand AO = action_of_name(token_text);
/* Values at or above the fake-action base cannot be used here. */
675 if (j >= ((grammar_version_number==1)?256:4096))
676 error_named("This is a fake action, not a real one:", token_text);
679 reverse_action = FALSE;
681 if ((token_type == DIR_KEYWORD_TT) && (token_value == REVERSE_DK))
682 { if (grammar_version_number == 1)
683 error("'reverse' actions can only be used with \
684 Library 6/3 or later");
685 reverse_action = TRUE;
687 else put_token_back();
/* Rewind to the start of this line's data to patch in the action
   number (and, on Glulx, the reverse flag). */
689 mark = Inform_verbs[verbnum].l[line];
691 if (debugfile_switch)
692 { debug_file_printf("<table-entry>");
693 debug_file_printf("<type>grammar line</type>");
694 debug_file_printf("<address>");
695 write_debug_grammar_backpatch(mark);
696 debug_file_printf("</address>");
697 debug_file_printf("<end-address>");
698 write_debug_grammar_backpatch(grammar_lines_top);
699 debug_file_printf("</end-address>");
700 write_debug_locations
701 (get_token_location_end(beginning_debug_location));
702 debug_file_printf("</table-entry>");
708 grammar_lines[mark++] = j/256;
709 grammar_lines[mark++] = j%256;
712 grammar_lines[mark++] = ((j >> 8) & 0xFF);
713 grammar_lines[mark++] = ((j) & 0xFF);
714 grammar_lines[mark++] = (reverse_action ? 1 : 0);
720 /* ------------------------------------------------------------------------- */
721 /* The Verb directive: */
723 /* Verb [meta] "word-1" ... "word-n" | = "existing-English-verb" */
724 /* | <grammar-line-1> ... <g-line-n> */
726 /* ------------------------------------------------------------------------- */
728 extern void make_verb(void)
730 /* Parse an entire Verb ... directive. */
732 int Inform_verb, meta_verb_flag=FALSE, verb_equals_form=FALSE;
734 char *English_verbs_given[MAX_VERB_SYNONYMS];
737 directive_keywords.enabled = TRUE;
/* Optional 'meta' keyword marks all the verb words as meta verbs. */
741 if ((token_type == DIR_KEYWORD_TT) && (token_value == META_DK))
742 { meta_verb_flag = TRUE;
/* Collect the quoted synonym words (up to MAX_VERB_SYNONYMS). */
746 while ((token_type == DQ_TT) || (token_type == SQ_TT))
748 if (no_given >= MAX_VERB_SYNONYMS) {
749 error("Too many synonyms in a Verb directive.");
750 panic_mode_error_recovery(); return;
752 English_verbs_given[no_given++] = token_text;
757 { ebf_error("English verb in quotes", token_text);
758 panic_mode_error_recovery(); return;
/* "Verb ... = "existing-verb";" form: reuse an existing verb's grammar. */
761 if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
762 { verb_equals_form = TRUE;
764 Inform_verb = get_verb();
765 if (Inform_verb == -1) return;
767 if (!((token_type == SEP_TT) && (token_value == SEMICOLON_SEP)))
768 ebf_error("';' after English verb", token_text);
/* Otherwise allot a brand-new Inform-verb number. */
771 { Inform_verb = no_Inform_verbs;
772 if (no_Inform_verbs == MAX_VERBS)
773 memoryerror("MAX_VERBS",MAX_VERBS);
/* Enter each synonym in the dictionary (0x41 dictionary flags, +0x02
   for meta -- NOTE(review): flag bit meanings not shown here; confirm)
   and in the English-verb register. */
776 for (i=0; i<no_given; i++)
777 { dictionary_add(English_verbs_given[i],
778 0x41 + ((meta_verb_flag)?0x02:0x00),
779 (glulx_mode)?(0xffff-Inform_verb):(0xff-Inform_verb), 0);
780 register_verb(English_verbs_given[i], Inform_verb);
/* Parse the grammar lines (unless the '=' form supplied them). */
783 if (!verb_equals_form)
786 while (grammar_line(no_Inform_verbs, lines++)) ;
787 Inform_verbs[no_Inform_verbs++].lines = --lines;
790 directive_keywords.enabled = FALSE;
793 /* ------------------------------------------------------------------------- */
794 /* The Extend directive: */
796 /* Extend | only "verb-1" ... "verb-n" | <grammar-lines> */
797 /* | "verb" | "replace" */
801 /* ------------------------------------------------------------------------- */
/* Modes for the Extend directive: discard the verb's old grammar lines,
   insert the new lines before the old ones, or append them after. */
803 #define EXTEND_REPLACE 1
804 #define EXTEND_FIRST 2
805 #define EXTEND_LAST 3
807 extern void extend_verb(void)
809 /* Parse an entire Extend ... directive. */
811 int Inform_verb = -1, k, l, lines, extend_mode;
813 directive_keywords.enabled = TRUE;
814 directives.enabled = FALSE;
/* "Extend only ..." : split the named words off onto a fresh copy of
   the Inform-verb, leaving other synonyms with the old grammar. */
817 if ((token_type == DIR_KEYWORD_TT) && (token_value == ONLY_DK))
819 if (no_Inform_verbs == MAX_VERBS)
820 memoryerror("MAX_VERBS", MAX_VERBS);
821 while (get_next_token(),
822 ((token_type == DQ_TT) || (token_type == SQ_TT)))
823 { Inform_verb = get_verb();
824 if (Inform_verb == -1) return;
825 if ((l!=-1) && (Inform_verb!=l))
826 warning_named("Verb disagrees with previous verbs:", token_text);
/* Repoint this word's dictionary entry at the new verb number. */
828 dictionary_set_verb_number(token_text,
829 (glulx_mode)?(0xffff-no_Inform_verbs):(0xff-no_Inform_verbs));
830 /* make call to renumber verb in English_verb_list too */
831 if (find_or_renumber_verb(token_text, &no_Inform_verbs) == -1)
832 warning_named("Verb to extend not found in English_verb_list:",
836 /* Copy the old Inform-verb into a new one which the list of
837 English-verbs given have had their dictionary entries modified
840 Inform_verbs[no_Inform_verbs] = Inform_verbs[Inform_verb];
841 Inform_verb = no_Inform_verbs++;
/* Plain "Extend "verb" ..." : extend the existing verb in place. */
844 { Inform_verb = get_verb();
845 if (Inform_verb == -1) return;
849 /* Inform_verb now contains the number of the Inform-verb to extend... */
851 extend_mode = EXTEND_LAST;
852 if ((token_type == SEP_TT) && (token_value == TIMES_SEP))
856 if ((token_type == DIR_KEYWORD_TT) && (token_value == REPLACE_DK))
857 extend_mode = EXTEND_REPLACE;
858 if ((token_type == DIR_KEYWORD_TT) && (token_value == FIRST_DK))
859 extend_mode = EXTEND_FIRST;
860 if ((token_type == DIR_KEYWORD_TT) && (token_value == LAST_DK))
861 extend_mode = EXTEND_LAST;
864 { ebf_error("'replace', 'last', 'first' or '*'", token_text);
865 extend_mode = EXTEND_LAST;
/* Parse the new grammar lines.  'first' shuffles the old line offsets
   upward so the new lines can sit in front; 'last' appends after the
   existing l lines; 'replace' starts over from line 0. */
869 l = Inform_verbs[Inform_verb].lines;
871 if (extend_mode == EXTEND_LAST) lines=l;
873 { if (extend_mode == EXTEND_FIRST)
875 Inform_verbs[Inform_verb].l[k+lines]
876 = Inform_verbs[Inform_verb].l[k-1+lines];
877 } while (grammar_line(Inform_verb, lines++));
879 if (extend_mode == EXTEND_FIRST)
880 { Inform_verbs[Inform_verb].lines = l+lines-1;
882 Inform_verbs[Inform_verb].l[k+lines-1]
883 = Inform_verbs[Inform_verb].l[k+lines];
885 else Inform_verbs[Inform_verb].lines = --lines;
887 directive_keywords.enabled = FALSE;
888 directives.enabled = TRUE;
891 /* ========================================================================= */
892 /* Data structure management routines */
893 /* ------------------------------------------------------------------------- */
895 extern void init_verbs_vars(void)
/* Reset this module's globals to their pre-compilation state and pick
   the grammar version (1 or 2) for the target. */
899 no_grammar_lines = 0;
900 no_grammar_tokens = 0;
901 English_verb_list_size = 0;
904 action_byte_offset = NULL;
905 grammar_token_routine = NULL;
907 adjective_sort_code = NULL;
908 English_verb_list = NULL;
911 grammar_version_number = 1;
913 grammar_version_number = 2;
916 extern void verbs_begin_pass(void)
/* Zero the per-pass counters at the start of a compilation pass. */
918 no_Inform_verbs=0; no_adjectives=0;
919 no_grammar_token_routines=0;
923 grammar_lines_top = 0;
926 extern void verbs_allocate_arrays(void)
/* Allocate this module's fixed-size tables (freed in verbs_free_arrays). */
928 Inform_verbs = my_calloc(sizeof(verbt), MAX_VERBS, "verbs");
929 grammar_lines = my_malloc(MAX_LINESPACE, "grammar lines");
930 action_byte_offset = my_calloc(sizeof(int32), MAX_ACTIONS, "actions");
931 action_symbol = my_calloc(sizeof(int32), MAX_ACTIONS,
933 grammar_token_routine = my_calloc(sizeof(int32), MAX_ACTIONS,
934 "grammar token routines");
935 adjectives = my_calloc(sizeof(int32), MAX_ADJECTIVES,
937 adjective_sort_code = my_calloc(DICT_WORD_BYTES, MAX_ADJECTIVES,
938 "adjective sort codes");
940 English_verb_list = my_malloc(MAX_VERBSPACE, "register of verbs");
941 English_verb_list_top = English_verb_list;
944 extern void verbs_free_arrays(void)
/* Release every table allocated in verbs_allocate_arrays. */
946 my_free(&Inform_verbs, "verbs");
947 my_free(&grammar_lines, "grammar lines");
948 my_free(&action_byte_offset, "actions");
949 my_free(&action_symbol, "action symbols");
950 my_free(&grammar_token_routine, "grammar token routines");
951 my_free(&adjectives, "adjectives");
952 my_free(&adjective_sort_code, "adjective sort codes");
953 my_free(&English_verb_list, "register of verbs");
956 /* ========================================================================= */