1 /* ------------------------------------------------------------------------- */
2 /* "verbs" : Manages actions and grammar tables; parses the directives */
5 /* Part of Inform 6.35 */
6 /* copyright (c) Graham Nelson 1993 - 2020 */
8 /* Inform is free software: you can redistribute it and/or modify */
9 /* it under the terms of the GNU General Public License as published by */
10 /* the Free Software Foundation, either version 3 of the License, or */
11 /* (at your option) any later version. */
13 /* Inform is distributed in the hope that it will be useful, */
14 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
15 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
16 /* GNU General Public License for more details. */
18 /* You should have received a copy of the GNU General Public License */
19 /* along with Inform. If not, see https://gnu.org/licenses/ */
21 /* ------------------------------------------------------------------------- */
25 int grammar_version_number; /* 1 for pre-Inform 6.06 table format */
26 int32 grammar_version_symbol; /* Index of "Grammar__Version"
27 within symbols table */
29 /* ------------------------------------------------------------------------- */
31 /* ------------------------------------------------------------------------- */
32 /* Array defined below: */
34 /* int32 action_byte_offset[n] The (byte) offset in the Z-machine */
35 /* code area of the ...Sub routine */
36 /* for action n. (NB: This is left */
37 /* blank until the end of the */
38 /* compilation pass.) */
39 /* int32 action_symbol[n] The symbol table index of the n-th */
41 /* ------------------------------------------------------------------------- */
43 int no_actions, /* Number of actions made so far */
44 no_fake_actions; /* Number of fake actions made so far */
46 /* ------------------------------------------------------------------------- */
47 /* Adjectives. (The term "adjective" is traditional; they are mainly */
48 /* prepositions, such as "onto".) */
49 /* ------------------------------------------------------------------------- */
50 /* Arrays defined below: */
52 /* int32 adjectives[n] Byte address of dictionary entry */
53 /* for the nth adjective */
54 /* dict_word adjective_sort_code[n] Dictionary sort code of nth adj */
55 /* ------------------------------------------------------------------------- */
57 int no_adjectives; /* Number of adjectives made so far */
59 /* ------------------------------------------------------------------------- */
60 /* Verbs. Note that Inform-verbs are not quite the same as English verbs: */
61 /* for example the English verbs "take" and "drop" both normally */
62 /* correspond in a game's dictionary to the same Inform verb. An */
63 /* Inform verb is essentially a list of grammar lines. */
64 /* ------------------------------------------------------------------------- */
65 /* Arrays defined below: */
67 /* verbt Inform_verbs[n] The n-th grammar line sequence: */
68 /* see "header.h" for the definition */
69 /* of the typedef struct verbt */
70 /* int32 grammar_token_routine[n] The byte offset from start of code */
71 /* area of the n-th one */
72 /* ------------------------------------------------------------------------- */
74 int no_Inform_verbs, /* Number of Inform-verbs made so far */
75 no_grammar_token_routines; /* Number of routines given in tokens */
77 /* ------------------------------------------------------------------------- */
78 /* We keep a list of English verb-words known (e.g. "take" or "eat") and */
79 /* which Inform-verbs they correspond to. (This list is needed for some */
80 /* of the grammar extension operations.) */
81 /* The format of this list is a sequence of variable-length records: */
83 /* Byte offset to start of next record (1 byte) */
84 /* Inform verb number this word corresponds to (1 byte) */
85 /* The English verb-word (reduced to lower case), null-terminated */
86 /* ------------------------------------------------------------------------- */
88 static char *English_verb_list, /* First byte of first record */
89 *English_verb_list_top; /* Next byte free for new record */
91 static int English_verb_list_size; /* Size of the list in bytes
92 (redundant but convenient) */
94 /* ------------------------------------------------------------------------- */
95 /* Arrays used by this file */
96 /* ------------------------------------------------------------------------- */
100 int32 grammar_lines_top;
101 int no_grammar_lines, no_grammar_tokens;
103 int32 *action_byte_offset,
105 *grammar_token_routine,
107 static uchar *adjective_sort_code;
109 /* ------------------------------------------------------------------------- */
110 /* Tracing for compiler maintenance */
111 /* ------------------------------------------------------------------------- */
/* Debugging/tracing aid: print, for each Inform-verb defined so far, the
   number of grammar lines it holds. */
/* NOTE(review): this excerpt is gapped -- the opening brace, the
   declaration of the loop index 'i' and the closing brace are not
   visible here; confirm against the full source. */
113 extern void list_verb_table(void)
115 for (i=0; i<no_Inform_verbs; i++)
116 printf("Verb %2d has %d lines\n", i, Inform_verbs[i].lines);
119 /* ------------------------------------------------------------------------- */
121 /* ------------------------------------------------------------------------- */
/* Tracing hook invoked for every newly created action or fake action.
   b = the action's name, c = the number it has been assigned.
   Prints only when the -printprops tracing switch is active. */
/* NOTE(review): excerpt is gapped -- braces around the body are not
   visible here. */
123 static void new_action(char *b, int c)
125 /* Called whenever a new action (or fake action) is created (either
126 by using make_action above, or the Fake_Action directive, or by
127 the linker). At present just a hook for some tracing code. */
129 if (printprops_switch)
130 printf("Action '%s' is numbered %d\n",b,c);
133 /* Note that fake actions are numbered from a high base point upwards;
134 real actions are numbered from 0 upward in GV2. */
/* Parse the rest of a Fake_Action directive: read the new fake action's
   name token, define the symbol "<name>__A" with the next fake-action
   number, and emit debug-file records if requested.
   Fake actions are numbered upward from 256 (GV1) or 4096 (GV2). */
/* NOTE(review): excerpt is gapped -- the declaration of 'i', several
   closing braces and the get_next_token() call before the first test
   are not visible here. */
136 extern void make_fake_action(void)
138 char action_sub[MAX_IDENTIFIER_LENGTH+4];
139 debug_location_beginning beginning_debug_location =
140 get_token_location_beginning();
    /* The directive must be followed by a symbol token (the new name). */
143 if (token_type != SYMBOL_TT)
144 { discard_token_location(beginning_debug_location);
145 ebf_error("new fake action name", token_text);
146 panic_mode_error_recovery(); return;
148 /* Action symbols (including fake_actions) may collide with other kinds of symbols. So we don't check that. */
    /* Build the internal symbol name "<name>__A" and look it up. */
150 snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", token_text);
151 i = symbol_index(action_sub, -1);
    /* If <name>__A already exists, the user has (unknowingly) redefined it. */
153 if (!(sflags[i] & UNKNOWN_SFLAG))
154 { discard_token_location(beginning_debug_location);
155 /* The user didn't know they were defining FOO__A, but they were and it's a problem. */
156 ebf_symbol_error("new fake action name", action_sub, typename(stypes[i]), slines[i]);
157 panic_mode_error_recovery(); return;
    /* Assign the next fake-action number: base 256 in GV1, 4096 in GV2. */
160 assign_symbol(i, ((grammar_version_number==1)?256:4096)+no_fake_actions++,
163 new_action(token_text, i);
    /* Record the fake action in the debug information file, if open. */
165 if (debugfile_switch)
166 { debug_file_printf("<fake-action>");
167 debug_file_printf("<identifier>##%s</identifier>", token_text);
168 debug_file_printf("<value>%d</value>", svals[i]);
170 write_debug_locations
171 (get_token_location_end(beginning_debug_location));
173 debug_file_printf("</fake-action>");
/* Return an assembly operand holding the action number for the given
   English name, creating a brand-new action (symbol "<name>__A") if one
   does not already exist. Fake actions are returned as long constants;
   real actions carry an ACTION_MV backpatch marker. */
/* NOTE(review): excerpt is gapped -- declarations of 'j' and 'AO',
   AO.value assignments, return statements and several braces are not
   visible here. */
179 extern assembly_operand action_of_name(char *name)
181 /* Returns the action number of the given name, creating it as a new
182 action name if it isn't already known as such. */
184 char action_sub[MAX_IDENTIFIER_LENGTH+4];
    /* Look up (or create a slot for) the internal "<name>__A" symbol. */
188 snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", name);
189 j = symbol_index(action_sub, -1);
    /* Already known as a fake action: return its value directly. */
191 if (stypes[j] == FAKE_ACTION_T)
195 AO.type = LONG_CONSTANT_OT;
197 set_constant_ot(&AO);
198 sflags[j] |= USED_SFLAG;
    /* Unknown symbol: define a genuine new action numbered no_actions. */
202 if (sflags[j] & UNKNOWN_SFLAG)
204 if (no_actions>=MAX_ACTIONS) memoryerror("MAX_ACTIONS",MAX_ACTIONS);
205 new_action(name, no_actions);
206 action_symbol[no_actions] = j;
207 assign_symbol(j, no_actions++, CONSTANT_T);
208 sflags[j] |= ACTION_SFLAG;
210 sflags[j] |= USED_SFLAG;
    /* Real actions are backpatched at the end of compilation. */
214 AO.marker = ACTION_MV;
216 AO.type = (module_switch)?LONG_CONSTANT_OT:SHORT_CONSTANT_OT;
217 if (svals[j] >= 256) AO.type = LONG_CONSTANT_OT;
220 AO.type = CONSTANT_OT;
/* End-of-compilation pass: for every declared action "Foo", look up the
   routine "FooSub" and record its code address in action_byte_offset[].
   Reports an error if no such routine exists, or if the symbol exists
   but is not a routine. */
/* NOTE(review): excerpt is gapped -- declarations of 'i'/'j', 'else'
   keywords and several braces are not visible here. */
225 extern void find_the_actions(void)
227 char action_name[MAX_IDENTIFIER_LENGTH+4];
228 char action_sub[MAX_IDENTIFIER_LENGTH+4];
    /* Clear the offsets table before filling it in. */
231 for (i=0; i<no_actions; i++) action_byte_offset[i] = 0;
233 for (i=0; i<no_actions; i++)
234 { strcpy(action_name, (char *) symbs[action_symbol[i]]);
235 action_name[strlen(action_name) - 3] = '\0'; /* remove "__A" */
236 strcpy(action_sub, action_name);
237 strcat(action_sub, "Sub");
238 j = symbol_index(action_sub, -1);
239 if (sflags[j] & UNKNOWN_SFLAG)
241 error_named_at("No ...Sub action routine found for action:", action_name, slines[action_symbol[i]]);
244 if (stypes[j] != ROUTINE_T)
246 error_named_at("No ...Sub action routine found for action:", action_name, slines[action_symbol[i]]);
247 error_named_at("-- ...Sub symbol found, but not a routine:", action_sub, slines[j]);
    /* Found a routine: remember its address and mark it used. */
250 { action_byte_offset[i] = svals[j];
251 sflags[j] |= USED_SFLAG;
256 /* ------------------------------------------------------------------------- */
258 /* ------------------------------------------------------------------------- */
/* Return the adjective number for the given English word, creating a
   new one (numbered downward from 0xff) if it is not already present.
   GV1 only; the adjective table stays empty under GV2. */
/* NOTE(review): excerpt is gapped -- declaration of 'i', the early
   return inside the comparison loop, and closing braces are not
   visible here. */
260 static int make_adjective(char *English_word)
262 /* Returns adjective number of the English word supplied, creating
263 a new adjective number if need be.
265 Note that (partly for historical reasons) adjectives are numbered
266 from 0xff downwards. (And partly to make them stand out as tokens.)
268 This routine is used only in grammar version 1: the corresponding
269 table is left empty in GV2. */
272 uchar new_sort_code[MAX_DICT_WORD_BYTES];
274 if (no_adjectives >= MAX_ADJECTIVES)
275 memoryerror("MAX_ADJECTIVES", MAX_ADJECTIVES);
    /* Compute the word's dictionary sort code, then scan for a match
       among adjectives already made. */
277 dictionary_prepare(English_word, new_sort_code);
278 for (i=0; i<no_adjectives; i++)
279 if (compare_sorts(new_sort_code,
280 adjective_sort_code+i*DICT_WORD_BYTES) == 0)
    /* Not found: add a dictionary entry and record the sort code. */
282 adjectives[no_adjectives]
283 = dictionary_add(English_word,8,0,0xff-no_adjectives);
284 copy_sorts(adjective_sort_code+no_adjectives*DICT_WORD_BYTES,
286 return(0xff-no_adjectives++);
289 /* ------------------------------------------------------------------------- */
290 /* Parsing routines. */
291 /* ------------------------------------------------------------------------- */
/* Return the index of the given routine address in the
   grammar_token_routine[] table, registering it as a new entry if it
   has not been seen before. GV1 only. */
/* NOTE(review): excerpt is gapped -- declaration of 'l', the early
   return on a match, and braces are not visible here. After the loop
   falls through, l == no_grammar_token_routines, so the store at
   grammar_token_routine[l] appends a new entry. */
293 static int make_parsing_routine(int32 routine_address)
295 /* This routine is used only in grammar version 1: the corresponding
296 table is left empty in GV2. */
299 for (l=0; l<no_grammar_token_routines; l++)
300 if (grammar_token_routine[l] == routine_address)
303 grammar_token_routine[l] = routine_address;
304 return(no_grammar_token_routines++);
307 /* ------------------------------------------------------------------------- */
308 /* The English-verb list. */
309 /* ------------------------------------------------------------------------- */
/* Walk the English_verb_list records (see format comment above the
   list's declaration). Lookup mode (new_number == NULL): return the
   Inform-verb number stored for English_verb, or -1 if absent.
   Renumber mode (new_number != NULL): overwrite the stored verb number
   in the matching record. */
/* NOTE(review): excerpt is gapped -- the declaration of 'p', the
   new_number NULL test, the record-advance step (p += p[0]) and the
   final return are not visible here. */
311 static int find_or_renumber_verb(char *English_verb, int *new_number)
313 /* If new_number is null, returns the Inform-verb number which the
314 * given English verb causes, or -1 if the given verb is not in the
317 /* If new_number is non-null, renumbers the Inform-verb number which
318 * English_verb matches in English_verb_list to account for the case
319 * when we are extending a verb. Returns 0 if successful, or -1 if
320 * the given verb is not in the dictionary (which shouldn't happen as
321 * get_verb has already run) */
325 while (p < English_verb_list_top)
326 { if (strcmp(English_verb, p+3) == 0)
    /* Record layout: p[0] = record length, p[1..2] = verb number
       (big-endian 16-bit), p+3 = null-terminated verb word. */
328 { p[1] = (*new_number)/256;
329 p[2] = (*new_number)%256;
332 return(256*((uchar)p[1]))+((uchar)p[2]);
/* Append a new record to English_verb_list mapping the given English
   verb word to the given Inform-verb number. Rejects duplicate verb
   definitions and overlong words. */
/* NOTE(review): excerpt is gapped -- a 'return' after the duplicate
   error, the 'int entrysize;' declaration and closing braces are not
   visible here. Also note: after the too-long-word error the visible
   code continues to copy the word -- confirm against the full source
   whether a return/truncation is elided. */
339 static void register_verb(char *English_verb, int number)
341 /* Registers a new English verb as referring to the given Inform-verb
342 number. (See comments above for format of the list.) */
    /* Refuse to register the same English verb twice. */
345 if (find_or_renumber_verb(English_verb, NULL) != -1)
346 { error_named("Two different verb definitions refer to", English_verb);
350 /* We set a hard limit of MAX_VERB_WORD_SIZE=120 because the
351 English_verb_list table stores length in a leading byte. (We could
352 raise that to 250, really, but there's little point when
353 MAX_DICT_WORD_SIZE is 40.) */
354 entrysize = strlen(English_verb)+4;
355 if (entrysize > MAX_VERB_WORD_SIZE+4)
356 error_numbered("Verb word is too long -- max length is", MAX_VERB_WORD_SIZE);
357 English_verb_list_size += entrysize;
358 if (English_verb_list_size >= MAX_VERBSPACE)
359 memoryerror("MAX_VERBSPACE", MAX_VERBSPACE);
    /* Write the record: length byte, verb number (big-endian 16-bit),
       then the verb word itself, null-terminated. */
361 English_verb_list_top[0] = entrysize;
362 English_verb_list_top[1] = number/256;
363 English_verb_list_top[2] = number%256;
364 strcpy(English_verb_list_top+3, English_verb);
365 English_verb_list_top += entrysize;
/* Interpret the last-read token as a quoted English verb word and
   return the Inform-verb number it refers to; report an error and
   return -1 otherwise. */
/* NOTE(review): excerpt is gapped -- the declaration of 'j', the
   j == -1 test, the success return and the trailing return(-1) are
   not visible here. */
368 static int get_verb(void)
370 /* Look at the last-read token: if it's the name of an English verb
371 understood by Inform, in double-quotes, then return the Inform-verb
372 that word refers to: otherwise give an error and return -1. */
376 if ((token_type == DQ_TT) || (token_type == SQ_TT))
377 { j = find_or_renumber_verb(token_text, NULL);
379 error_named("There is no previous grammar for the verb",
384 ebf_error("an English verb in quotes", token_text);
389 /* ------------------------------------------------------------------------- */
390 /* Grammar lines for Verb/Extend directives. */
391 /* ------------------------------------------------------------------------- */
/* Parse one grammar line of a Verb/Extend directive and compile it into
   grammar_lines[] for Inform-verb 'verbnum' as its line number 'line'.
   Returns TRUE if more grammar lines follow, FALSE at the directive's
   end. */
/* NOTE(review): this excerpt is heavily gapped -- get_next_token()
   calls, TOKEN_SIZE setup, the Glulx/Z-machine branch keywords
   (if (!glulx_mode) / else), the token loop construct, returns and
   many braces are not visible here. Comments below annotate only what
   the visible lines show. */
393 static int grammar_line(int verbnum, int line)
395 /* Parse a grammar line, to be written into grammar_lines[mark] onward.
397 Syntax: * <token1> ... <token-n> -> <action>
399 is compiled to a table in the form:
401 <action number : word>
402 <token 1> ... <token n> <ENDIT>
404 where <ENDIT> is the byte 15, and each <token> is 3 bytes long.
406 If grammar_version_number is 1, the token holds
410 and otherwise a GV2 token.
412 Return TRUE if grammar continues after the line, FALSE if the
413 directive comes to an end. */
415 int j, bytecode, mark; int32 wordcode;
416 int grammar_token, slash_mode, last_was_slash;
417 int reverse_action, TOKEN_SIZE;
418 debug_location_beginning beginning_debug_location =
419 get_token_location_beginning();
    /* A ';' ends the directive; anything other than '*' is an error. */
422 if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
423 { discard_token_location(beginning_debug_location);
426 if (!((token_type == SEP_TT) && (token_value == TIMES_SEP)))
427 { discard_token_location(beginning_debug_location);
428 ebf_error("'*' divider", token_text);
429 panic_mode_error_recovery();
433 /* Have we run out of lines or token space? */
435 if (line >= MAX_LINES_PER_VERB)
436 { discard_token_location(beginning_debug_location);
437 error("Too many lines of grammar for verb. This maximum is built \
438 into Inform, so suggest rewriting grammar using general parsing routines");
442 /* Internally, a line can be up to 3*32 + 1 + 2 = 99 bytes long */
443 /* In Glulx, that's 5*32 + 4 = 164 bytes */
445 mark = grammar_lines_top;
447 if (mark + 100 >= MAX_LINESPACE)
448 { discard_token_location(beginning_debug_location);
449 memoryerror("MAX_LINESPACE", MAX_LINESPACE);
453 if (mark + 165 >= MAX_LINESPACE)
454 { discard_token_location(beginning_debug_location);
455 memoryerror("MAX_LINESPACE", MAX_LINESPACE);
    /* Record where this line's compiled form begins. */
459 Inform_verbs[verbnum].l[line] = mark;
470 grammar_token = 0; last_was_slash = TRUE; slash_mode = FALSE;
    /* --- Token loop: one iteration per grammar token --- */
475 bytecode = 0; wordcode = 0;
476 if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
477 { discard_token_location(beginning_debug_location);
478 ebf_error("'->' clause", token_text);
481 if ((token_type == SEP_TT) && (token_value == ARROW_SEP))
482 { if (last_was_slash && (grammar_token>0))
483 ebf_error("grammar token", token_text);
487 if (!last_was_slash) slash_mode = FALSE;
    /* '/' introduces alternative prepositions (GV2 only). */
488 if ((token_type == SEP_TT) && (token_value == DIVIDE_SEP))
489 { if (grammar_version_number == 1)
490 error("'/' can only be used with Library 6/3 or later");
492 ebf_error("grammar token or '->'", token_text);
494 { last_was_slash = TRUE;
496 if (((grammar_lines[mark-TOKEN_SIZE]) & 0x0f) != 2)
497 error("'/' can only be applied to prepositions");
498 grammar_lines[mark-TOKEN_SIZE] |= 0x20;
502 else last_was_slash = FALSE;
    /* Quoted word: a preposition token. */
504 if ((token_type == DQ_TT) || (token_type == SQ_TT))
505 { if (grammar_version_number == 1)
506 bytecode = make_adjective(token_text);
509 wordcode = dictionary_add(token_text, 8, 0, 0);
512 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NOUN_DK))
514 if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
516 /* noun = <routine> */
519 if ((token_type != SYMBOL_TT)
520 || (stypes[token_value] != ROUTINE_T))
521 { discard_token_location(beginning_debug_location);
522 ebf_error("routine name after 'noun='", token_text);
523 panic_mode_error_recovery();
526 if (grammar_version_number == 1)
528 = 16 + make_parsing_routine(svals[token_value]);
531 wordcode = svals[token_value];
533 sflags[token_value] |= USED_SFLAG;
    /* Plain 'noun' token. */
537 if (grammar_version_number == 1) bytecode=0;
538 else { bytecode = 1; wordcode = 0; }
    /* Built-in object-token keywords: held/multi/.../topic. */
541 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==HELD_DK))
542 { if (grammar_version_number==1) bytecode=1;
543 else { bytecode=1; wordcode=1; } }
544 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTI_DK))
545 { if (grammar_version_number==1) bytecode=2;
546 else { bytecode=1; wordcode=2; } }
547 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIHELD_DK))
548 { if (grammar_version_number==1) bytecode=3;
549 else { bytecode=1; wordcode=3; } }
550 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIEXCEPT_DK))
551 { if (grammar_version_number==1) bytecode=4;
552 else { bytecode=1; wordcode=4; } }
553 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIINSIDE_DK))
554 { if (grammar_version_number==1) bytecode=5;
555 else { bytecode=1; wordcode=5; } }
556 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==CREATURE_DK))
557 { if (grammar_version_number==1) bytecode=6;
558 else { bytecode=1; wordcode=6; } }
559 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SPECIAL_DK))
560 { if (grammar_version_number==1) bytecode=7;
561 else { bytecode=1; wordcode=7; } }
562 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NUMBER_DK))
563 { if (grammar_version_number==1) bytecode=8;
564 else { bytecode=1; wordcode=8; } }
565 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==TOPIC_DK))
566 { if (grammar_version_number==1)
567 error("The 'topic' token is only available if you \
568 are using Library 6/3 or later");
569 else { bytecode=1; wordcode=9; } }
570 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SCOPE_DK))
572 /* scope = <routine> */
575 if (!((token_type==SEP_TT)&&(token_value==SETEQUALS_SEP)))
576 { discard_token_location(beginning_debug_location);
577 ebf_error("'=' after 'scope'", token_text);
578 panic_mode_error_recovery();
583 if ((token_type != SYMBOL_TT)
584 || (stypes[token_value] != ROUTINE_T))
585 { discard_token_location(beginning_debug_location);
586 ebf_error("routine name after 'scope='", token_text);
587 panic_mode_error_recovery();
591 if (grammar_version_number == 1)
593 make_parsing_routine(svals[token_value]);
594 else { bytecode = 0x85; wordcode = svals[token_value]; }
595 sflags[token_value] |= USED_SFLAG;
597 else if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
598 { discard_token_location(beginning_debug_location);
599 error("'=' is only legal here as 'noun=Routine'");
600 panic_mode_error_recovery();
603 else { /* <attribute> or <general-parsing-routine> tokens */
605 if ((token_type != SYMBOL_TT)
606 || ((stypes[token_value] != ATTRIBUTE_T)
607 && (stypes[token_value] != ROUTINE_T)))
608 { discard_token_location(beginning_debug_location);
609 error_named("No such grammar token as", token_text);
610 panic_mode_error_recovery();
613 if (stypes[token_value]==ATTRIBUTE_T)
614 { if (grammar_version_number == 1)
615 bytecode = 128 + svals[token_value];
616 else { bytecode = 4; wordcode = svals[token_value]; }
619 { if (grammar_version_number == 1)
621 make_parsing_routine(svals[token_value]);
622 else { bytecode = 0x86; wordcode = svals[token_value]; }
624 sflags[token_value] |= USED_SFLAG;
    /* Count the token; GV1 lines are limited to 6 tokens. */
627 grammar_token++; no_grammar_tokens++;
628 if ((grammar_version_number == 1) && (grammar_token > 6))
629 { if (grammar_token == 7)
630 warning("Grammar line cut short: you can only have up to 6 \
631 tokens in any line (unless you're compiling with library 6/3 or later)");
635 { if (bytecode != 0x42)
636 error("'/' can only be applied to prepositions");
    /* Emit the token: one type byte, then the data word
       (2 bytes big-endian on Z-machine, 4 bytes on Glulx). */
639 grammar_lines[mark++] = bytecode;
641 grammar_lines[mark++] = wordcode/256;
642 grammar_lines[mark++] = wordcode%256;
645 grammar_lines[mark++] = ((wordcode >> 24) & 0xFF);
646 grammar_lines[mark++] = ((wordcode >> 16) & 0xFF);
647 grammar_lines[mark++] = ((wordcode >> 8) & 0xFF);
648 grammar_lines[mark++] = ((wordcode) & 0xFF);
    /* --- End of token loop: terminate the line with ENDIT (15). --- */
654 grammar_lines[mark++] = 15;
655 grammar_lines_top = mark;
    /* Read the action name after '->' without creating a symbol for it. */
657 dont_enter_into_symbol_table = TRUE;
659 dont_enter_into_symbol_table = FALSE;
661 if (token_type != DQ_TT)
662 { discard_token_location(beginning_debug_location);
663 ebf_error("name of new or existing action", token_text);
664 panic_mode_error_recovery();
668 { assembly_operand AO = action_of_name(token_text);
670 if (j >= ((grammar_version_number==1)?256:4096))
671 error_named("This is a fake action, not a real one:", token_text);
    /* Optional trailing 'reverse' keyword (GV2 only). */
674 reverse_action = FALSE;
676 if ((token_type == DIR_KEYWORD_TT) && (token_value == REVERSE_DK))
677 { if (grammar_version_number == 1)
678 error("'reverse' actions can only be used with \
679 Library 6/3 or later");
680 reverse_action = TRUE;
682 else put_token_back();
    /* Go back to the line's start to fill in the action-number word. */
684 mark = Inform_verbs[verbnum].l[line];
686 if (debugfile_switch)
687 { debug_file_printf("<table-entry>");
688 debug_file_printf("<type>grammar line</type>");
689 debug_file_printf("<address>");
690 write_debug_grammar_backpatch(mark);
691 debug_file_printf("</address>");
692 debug_file_printf("<end-address>");
693 write_debug_grammar_backpatch(grammar_lines_top);
694 debug_file_printf("</end-address>");
695 write_debug_locations
696 (get_token_location_end(beginning_debug_location));
697 debug_file_printf("</table-entry>");
    /* Write the action number (and, on Glulx, the reverse flag byte). */
703 grammar_lines[mark++] = j/256;
704 grammar_lines[mark++] = j%256;
707 grammar_lines[mark++] = ((j >> 8) & 0xFF);
708 grammar_lines[mark++] = ((j) & 0xFF);
709 grammar_lines[mark++] = (reverse_action ? 1 : 0);
715 /* ------------------------------------------------------------------------- */
716 /* The Verb directive: */
718 /* Verb [meta] "word-1" ... "word-n" | = "existing-English-verb" */
719 /* | <grammar-line-1> ... <g-line-n> */
721 /* ------------------------------------------------------------------------- */
/* Parse an entire Verb directive: optional 'meta' keyword, one or more
   quoted English verb words, then either "= existing-verb" (alias form)
   or a sequence of grammar lines. Registers the words in the dictionary
   and in English_verb_list. */
/* NOTE(review): excerpt is gapped -- the 'lines' declaration, several
   get_next_token()/put_token_back() calls, the 'else' introducing the
   new-verb branch and closing braces are not visible here. Also note:
   English_verbs_given holds at most 32 entries but no bound check on
   no_given is visible -- confirm against the full source. */
723 extern void make_verb(void)
725 /* Parse an entire Verb ... directive. */
727 int Inform_verb, meta_verb_flag=FALSE, verb_equals_form=FALSE;
729 char *English_verbs_given[32]; int no_given = 0, i;
731 directive_keywords.enabled = TRUE;
    /* Optional 'meta' marks the verb's dictionary entries as meta. */
735 if ((token_type == DIR_KEYWORD_TT) && (token_value == META_DK))
736 { meta_verb_flag = TRUE;
    /* Collect the quoted English verb words. */
740 while ((token_type == DQ_TT) || (token_type == SQ_TT))
741 { English_verbs_given[no_given++] = token_text;
746 { ebf_error("English verb in quotes", token_text);
747 panic_mode_error_recovery(); return;
    /* "= existing-verb": alias the new words to an existing verb. */
750 if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
751 { verb_equals_form = TRUE;
753 Inform_verb = get_verb();
754 if (Inform_verb == -1) return;
756 if (!((token_type == SEP_TT) && (token_value == SEMICOLON_SEP)))
757 ebf_error("';' after English verb", token_text);
    /* Otherwise allocate a brand-new Inform-verb number. */
760 { Inform_verb = no_Inform_verbs;
761 if (no_Inform_verbs == MAX_VERBS)
762 memoryerror("MAX_VERBS",MAX_VERBS);
    /* Enter each word in the dictionary (flag 0x41, plus 0x02 if meta)
       and in the English-verb register. */
765 for (i=0; i<no_given; i++)
766 { dictionary_add(English_verbs_given[i],
767 0x41 + ((meta_verb_flag)?0x02:0x00),
768 (glulx_mode)?(0xffff-Inform_verb):(0xff-Inform_verb), 0);
769 register_verb(English_verbs_given[i], Inform_verb);
    /* Parse the grammar lines for a newly defined verb. */
772 if (!verb_equals_form)
775 while (grammar_line(no_Inform_verbs, lines++)) ;
776 Inform_verbs[no_Inform_verbs++].lines = --lines;
779 directive_keywords.enabled = FALSE;
782 /* ------------------------------------------------------------------------- */
783 /* The Extend directive: */
785 /* Extend | only "verb-1" ... "verb-n" | <grammar-lines> */
786 /* | "verb" | "replace" */
790 /* ------------------------------------------------------------------------- */
/* Modes for the Extend directive: replace the old grammar entirely,
   or add the new lines before (first) or after (last) the old ones. */
792 #define EXTEND_REPLACE 1
793 #define EXTEND_FIRST 2
794 #define EXTEND_LAST 3
/* Parse an entire Extend directive: 'Extend [only] "verb" ...' followed
   by an optional replace/first/last keyword and new grammar lines.
   'only' splits the named words off into a copy of the verb so the
   original is unaffected. */
/* NOTE(review): excerpt is heavily gapped -- get_next_token() calls,
   the glulx_mode guard around the MAX_VERBS check, the else-branch
   braces, the EXTEND_REPLACE lines handling, and the 'for (k=...)'
   loop headers around the l[] shuffles are not visible here. In
   particular 'l' is read at the disagreement test before any visible
   assignment -- the initialisation must be among the elided lines;
   confirm against the full source. */
796 extern void extend_verb(void)
798 /* Parse an entire Extend ... directive. */
800 int Inform_verb = -1, k, l, lines, extend_mode;
802 directive_keywords.enabled = TRUE;
803 directives.enabled = FALSE;
    /* 'Extend only': copy the verb so only the named words change. */
806 if ((token_type == DIR_KEYWORD_TT) && (token_value == ONLY_DK))
808 if (no_Inform_verbs == MAX_VERBS)
809 memoryerror("MAX_VERBS", MAX_VERBS)
810 while (get_next_token(),
811 ((token_type == DQ_TT) || (token_type == SQ_TT)))
812 { Inform_verb = get_verb();
813 if (Inform_verb == -1) return;
814 if ((l!=-1) && (Inform_verb!=l))
815 warning_named("Verb disagrees with previous verbs:", token_text);
    /* Point the word's dictionary entry at the forthcoming copy. */
817 dictionary_set_verb_number(token_text,
818 (glulx_mode)?(0xffff-no_Inform_verbs):(0xff-no_Inform_verbs));
819 /* make call to renumber verb in English_verb_list too */
820 if (find_or_renumber_verb(token_text, &no_Inform_verbs) == -1)
821 warning_named("Verb to extend not found in English_verb_list:",
825 /* Copy the old Inform-verb into a new one which the list of
826 English-verbs given have had their dictionary entries modified
829 Inform_verbs[no_Inform_verbs] = Inform_verbs[Inform_verb];
830 Inform_verb = no_Inform_verbs++;
    /* Plain 'Extend "verb"': modify the verb in place. */
833 { Inform_verb = get_verb();
834 if (Inform_verb == -1) return;
838 /* Inform_verb now contains the number of the Inform-verb to extend... */
840 extend_mode = EXTEND_LAST;
841 if ((token_type == SEP_TT) && (token_value == TIMES_SEP))
845 if ((token_type == DIR_KEYWORD_TT) && (token_value == REPLACE_DK))
846 extend_mode = EXTEND_REPLACE;
847 if ((token_type == DIR_KEYWORD_TT) && (token_value == FIRST_DK))
848 extend_mode = EXTEND_FIRST;
849 if ((token_type == DIR_KEYWORD_TT) && (token_value == LAST_DK))
850 extend_mode = EXTEND_LAST;
853 { ebf_error("'replace', 'last', 'first' or '*'", token_text);
854 extend_mode = EXTEND_LAST;
    /* Splice the newly parsed lines into the verb's line table
       according to extend_mode. */
858 l = Inform_verbs[Inform_verb].lines;
860 if (extend_mode == EXTEND_LAST) lines=l;
862 { if (extend_mode == EXTEND_FIRST)
864 Inform_verbs[Inform_verb].l[k+lines]
865 = Inform_verbs[Inform_verb].l[k-1+lines];
866 } while (grammar_line(Inform_verb, lines++));
868 if (extend_mode == EXTEND_FIRST)
869 { Inform_verbs[Inform_verb].lines = l+lines-1;
871 Inform_verbs[Inform_verb].l[k+lines-1]
872 = Inform_verbs[Inform_verb].l[k+lines];
874 else Inform_verbs[Inform_verb].lines = --lines;
876 directive_keywords.enabled = FALSE;
877 directives.enabled = TRUE;
880 /* ========================================================================= */
881 /* Data structure management routines */
882 /* ------------------------------------------------------------------------- */
/* Reset this module's state at compiler start-up: zero the counters,
   null the lazily-allocated arrays, and choose the default grammar
   version (1 for Z-machine, 2 otherwise, per the visible assignments;
   the selecting condition is among the elided lines). */
/* NOTE(review): excerpt is gapped -- braces and the condition guarding
   the grammar_version_number assignments are not visible here. */
884 extern void init_verbs_vars(void)
888 no_grammar_lines = 0;
889 no_grammar_tokens = 0;
890 English_verb_list_size = 0;
893 action_byte_offset = NULL;
894 grammar_token_routine = NULL;
896 adjective_sort_code = NULL;
897 English_verb_list = NULL;
900 grammar_version_number = 1;
902 grammar_version_number = 2;
/* Reset the per-pass counters before each compilation pass. */
/* NOTE(review): excerpt is gapped -- braces and some counter resets
   are not visible here. */
905 extern void verbs_begin_pass(void)
907 no_Inform_verbs=0; no_adjectives=0;
908 no_grammar_token_routines=0;
912 grammar_lines_top = 0;
/* Allocate this module's fixed-size working arrays; each allocation is
   released again in verbs_free_arrays(). */
/* NOTE(review): excerpt is gapped -- braces and the continuation lines
   of some my_calloc calls (name-string arguments) are not visible
   here. */
917 extern void verbs_allocate_arrays(void)
918 Inform_verbs = my_calloc(sizeof(verbt), MAX_VERBS, "verbs");
918 grammar_lines = my_malloc(MAX_LINESPACE, "grammar lines");
919 action_byte_offset = my_calloc(sizeof(int32), MAX_ACTIONS, "actions");
920 action_symbol = my_calloc(sizeof(int32), MAX_ACTIONS,
922 grammar_token_routine = my_calloc(sizeof(int32), MAX_ACTIONS,
923 "grammar token routines");
924 adjectives = my_calloc(sizeof(int32), MAX_ADJECTIVES,
926 adjective_sort_code = my_calloc(DICT_WORD_BYTES, MAX_ADJECTIVES,
927 "adjective sort codes");
929 English_verb_list = my_malloc(MAX_VERBSPACE, "register of verbs");
930 English_verb_list_top = English_verb_list;
/* Release every array allocated in verbs_allocate_arrays(), in the
   same order. */
/* NOTE(review): excerpt is gapped -- braces are not visible here. */
933 extern void verbs_free_arrays(void)
935 my_free(&Inform_verbs, "verbs");
936 my_free(&grammar_lines, "grammar lines");
937 my_free(&action_byte_offset, "actions");
938 my_free(&action_symbol, "action symbols");
939 my_free(&grammar_token_routine, "grammar token routines");
940 my_free(&adjectives, "adjectives");
941 my_free(&adjective_sort_code, "adjective sort codes");
942 my_free(&English_verb_list, "register of verbs");
945 /* ========================================================================= */