1 /* ------------------------------------------------------------------------- */
2 /* "verbs" : Manages actions and grammar tables; parses the directives */
5 /* Copyright (c) Graham Nelson 1993 - 2020 */
7 /* This file is part of Inform. */
9 /* Inform is free software: you can redistribute it and/or modify */
10 /* it under the terms of the GNU General Public License as published by */
11 /* the Free Software Foundation, either version 3 of the License, or */
12 /* (at your option) any later version. */
14 /* Inform is distributed in the hope that it will be useful, */
15 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17 /* GNU General Public License for more details. */
19 /* You should have received a copy of the GNU General Public License */
20 /* along with Inform. If not, see https://gnu.org/licenses/ */
22 /* ------------------------------------------------------------------------- */
/* NOTE(review): the embedded line numbers in this listing are not contiguous,
   so several declarations referenced later (e.g. *action_symbol, *adjectives,
   Inform_verbs[]) appear to have been dropped from this copy -- confirm
   against the canonical verbs.c before rebuilding from this text. */
26 int grammar_version_number; /* 1 for pre-Inform 6.06 table format */
27 int32 grammar_version_symbol; /* Index of "Grammar__Version"
28 within symbols table */
30 /* ------------------------------------------------------------------------- */
32 /* ------------------------------------------------------------------------- */
33 /* Array defined below: */
35 /* int32 action_byte_offset[n] The (byte) offset in the Z-machine */
36 /* code area of the ...Sub routine */
37 /* for action n. (NB: This is left */
38 /* blank until the end of the */
39 /* compilation pass.) */
40 /* int32 action_symbol[n] The symbol table index of the n-th */
42 /* ------------------------------------------------------------------------- */
44 int no_actions, /* Number of actions made so far */
45 no_fake_actions; /* Number of fake actions made so far */
47 /* ------------------------------------------------------------------------- */
48 /* Adjectives. (The term "adjective" is traditional; they are mainly */
49 /* prepositions, such as "onto".) */
50 /* ------------------------------------------------------------------------- */
51 /* Arrays defined below: */
53 /* int32 adjectives[n] Byte address of dictionary entry */
54 /* for the nth adjective */
55 /* dict_word adjective_sort_code[n] Dictionary sort code of nth adj */
56 /* ------------------------------------------------------------------------- */
58 int no_adjectives; /* Number of adjectives made so far */
60 /* ------------------------------------------------------------------------- */
61 /* Verbs. Note that Inform-verbs are not quite the same as English verbs: */
62 /* for example the English verbs "take" and "drop" both normally */
63 /* correspond in a game's dictionary to the same Inform verb. An */
64 /* Inform verb is essentially a list of grammar lines. */
65 /* ------------------------------------------------------------------------- */
66 /* Arrays defined below: */
68 /* verbt Inform_verbs[n] The n-th grammar line sequence: */
69 /* see "header.h" for the definition */
70 /* of the typedef struct verbt */
71 /* int32 grammar_token_routine[n] The byte offset from start of code */
72 /* area of the n-th one */
73 /* ------------------------------------------------------------------------- */
75 int no_Inform_verbs, /* Number of Inform-verbs made so far */
76 no_grammar_token_routines; /* Number of routines given in tokens */
78 /* ------------------------------------------------------------------------- */
79 /* We keep a list of English verb-words known (e.g. "take" or "eat") and */
80 /* which Inform-verbs they correspond to. (This list is needed for some */
81 /* of the grammar extension operations.) */
82 /* The format of this list is a sequence of variable-length records: */
84 /* Byte offset to start of next record (1 byte) */
85 /* Inform verb number this word corresponds to (1 byte) */
86 /* The English verb-word (reduced to lower case), null-terminated */
87 /* ------------------------------------------------------------------------- */
89 static char *English_verb_list, /* First byte of first record */
90 *English_verb_list_top; /* Next byte free for new record */
92 static int English_verb_list_size; /* Size of the list in bytes
93 (redundant but convenient) */
95 /* ------------------------------------------------------------------------- */
96 /* Arrays used by this file */
97 /* ------------------------------------------------------------------------- */
100 uchar *grammar_lines;
101 int32 grammar_lines_top;
102 int no_grammar_lines, no_grammar_tokens;
/* NOTE(review): the declarator list below is syntactically incomplete here
   (a comma is followed directly by "static"); entries numbered 105 and 107
   -- presumably *action_symbol and *adjectives, both used later in this
   file -- are missing from this copy. */
104 int32 *action_byte_offset,
106 *grammar_token_routine,
108 static uchar *adjective_sort_code;
110 /* ------------------------------------------------------------------------- */
111 /* Tracing for compiler maintenance */
112 /* ------------------------------------------------------------------------- */
/* Maintenance tracing: print, for each Inform-verb, how many grammar lines
   it carries. (The opening brace and loop-variable declaration fall in a
   numbering gap of this listing.) */
114 extern void list_verb_table(void)
116 for (i=0; i<no_Inform_verbs; i++)
117 printf("Verb %2d has %d lines\n", i, Inform_verbs[i].lines);
120 /* ------------------------------------------------------------------------- */
122 /* ------------------------------------------------------------------------- */
/* Trace hook invoked when action 'b' is created with number 'c'; only
   prints when the -printprops switch is on. */
124 static void new_action(char *b, int c)
126 /* Called whenever a new action (or fake action) is created (either
127 by using make_action above, or the Fake_Action directive, or by
128 the linker). At present just a hook for some tracing code. */
130 if (printprops_switch)
131 printf("Action '%s' is numbered %d\n",b,c);
134 /* Note that fake actions are numbered from a high base point upwards;
135 real actions are numbered from 0 upward in GV2. */
/* Parse the Fake_Action directive: read one symbol name, create the symbol
   "<name>__A", and assign it the next fake-action number. Fake actions are
   numbered from 256 upward in GV1 and from 4096 upward in GV2, so they can
   never collide with real action numbers. Emits a <fake-action> record to
   the debug file when -k is in use. Errors fall into
   panic_mode_error_recovery(). */
137 extern void make_fake_action(void)
139 char action_sub[MAX_IDENTIFIER_LENGTH+4];
140 debug_location_beginning beginning_debug_location =
141 get_token_location_beginning();
144 if (token_type != SYMBOL_TT)
145 { discard_token_location(beginning_debug_location);
146 ebf_error("new fake action name", token_text);
147 panic_mode_error_recovery(); return;
150 snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", token_text);
151 i = symbol_index(action_sub, -1);
/* A known (non-UNKNOWN) symbol means the name is already taken: error. */
153 if (!(sflags[i] & UNKNOWN_SFLAG))
154 { discard_token_location(beginning_debug_location);
155 ebf_error("new fake action name", token_text);
156 panic_mode_error_recovery(); return;
159 assign_symbol(i, ((grammar_version_number==1)?256:4096)+no_fake_actions++,
162 new_action(token_text, i);
164 if (debugfile_switch)
165 { debug_file_printf("<fake-action>");
166 debug_file_printf("<identifier>##%s</identifier>", token_text);
167 debug_file_printf("<value>%d</value>", svals[i]);
169 write_debug_locations
170 (get_token_location_end(beginning_debug_location));
172 debug_file_printf("</fake-action>");
/* Look up (or create) the action named 'name' via its "<name>__A" symbol
   and return an assembly operand holding its value.
   - For an existing fake action: a (long) constant with USED_SFLAG set.
   - For an unknown symbol: a brand-new real action is registered
     (bounded by MAX_ACTIONS) and numbered sequentially from 0.
   The operand type depends on target/module mode; ACTION_MV marks it for
   backpatching. */
178 extern assembly_operand action_of_name(char *name)
180 /* Returns the action number of the given name, creating it as a new
181 action name if it isn't already known as such. */
183 char action_sub[MAX_IDENTIFIER_LENGTH+4];
187 snprintf(action_sub, MAX_IDENTIFIER_LENGTH+4, "%s__A", name);
188 j = symbol_index(action_sub, -1);
190 if (stypes[j] == FAKE_ACTION_T)
194 AO.type = LONG_CONSTANT_OT;
196 set_constant_ot(&AO);
197 sflags[j] |= USED_SFLAG;
/* Unknown symbol: mint a new real action and bind the symbol to it. */
201 if (sflags[j] & UNKNOWN_SFLAG)
203 if (no_actions>=MAX_ACTIONS) memoryerror("MAX_ACTIONS",MAX_ACTIONS);
204 new_action(name, no_actions);
205 action_symbol[no_actions] = j;
206 assign_symbol(j, no_actions++, CONSTANT_T);
207 sflags[j] |= ACTION_SFLAG;
209 sflags[j] |= USED_SFLAG;
213 AO.marker = ACTION_MV;
/* Values of 256 or more no longer fit a short (one-byte) constant. */
215 AO.type = (module_switch)?LONG_CONSTANT_OT:SHORT_CONSTANT_OT;
216 if (svals[j] >= 256) AO.type = LONG_CONSTANT_OT;
219 AO.type = CONSTANT_OT;
/* End-of-compilation pass: for every real action "<X>__A", locate the
   routine "<X>Sub" in the symbol table and record its code offset in
   action_byte_offset[]. Reports an error if the ...Sub symbol is missing,
   and a second error if it exists but is not a routine. */
224 extern void find_the_actions(void)
226 char action_name[MAX_IDENTIFIER_LENGTH+4];
227 char action_sub[MAX_IDENTIFIER_LENGTH+4];
230 for (i=0; i<no_actions; i++) action_byte_offset[i] = 0;
232 for (i=0; i<no_actions; i++)
233 { strcpy(action_name, (char *) symbs[action_symbol[i]]);
234 action_name[strlen(action_name) - 3] = '\0'; /* remove "__A" */
235 strcpy(action_sub, action_name);
236 strcat(action_sub, "Sub");
237 j = symbol_index(action_sub, -1);
238 if (sflags[j] & UNKNOWN_SFLAG)
240 error_named_at("No ...Sub action routine found for action:", action_name, slines[action_symbol[i]]);
243 if (stypes[j] != ROUTINE_T)
245 error_named_at("No ...Sub action routine found for action:", action_name, slines[action_symbol[i]]);
246 error_named_at("-- ...Sub symbol found, but not a routine:", action_sub, slines[j]);
/* Success path: remember the routine's code offset and mark it used. */
249 { action_byte_offset[i] = svals[j];
250 sflags[j] |= USED_SFLAG;
255 /* ------------------------------------------------------------------------- */
257 /* ------------------------------------------------------------------------- */
/* Return the adjective number (0xff counting downwards) for an English
   word, reusing an existing entry when the word's dictionary sort code
   matches one already stored; otherwise add it to the dictionary and to
   the adjective tables. GV1 only. */
259 static int make_adjective(char *English_word)
261 /* Returns adjective number of the English word supplied, creating
262 a new adjective number if need be.
264 Note that (partly for historical reasons) adjectives are numbered
265 from 0xff downwards. (And partly to make them stand out as tokens.)
267 This routine is used only in grammar version 1: the corresponding
268 table is left empty in GV2. */
271 uchar new_sort_code[MAX_DICT_WORD_BYTES];
273 if (no_adjectives >= MAX_ADJECTIVES)
274 memoryerror("MAX_ADJECTIVES", MAX_ADJECTIVES);
/* Linear search of existing sort codes: duplicates reuse their number. */
276 dictionary_prepare(English_word, new_sort_code);
277 for (i=0; i<no_adjectives; i++)
278 if (compare_sorts(new_sort_code,
279 adjective_sort_code+i*DICT_WORD_BYTES) == 0)
281 adjectives[no_adjectives]
282 = dictionary_add(English_word,8,0,0xff-no_adjectives);
283 copy_sorts(adjective_sort_code+no_adjectives*DICT_WORD_BYTES,
285 return(0xff-no_adjectives++);
288 /* ------------------------------------------------------------------------- */
289 /* Parsing routines. */
290 /* ------------------------------------------------------------------------- */
/* Return the index of the given routine address within
   grammar_token_routine[], adding it if not already present.
   GV1 only; the table is left empty in GV2. */
292 static int make_parsing_routine(int32 routine_address)
294 /* This routine is used only in grammar version 1: the corresponding
295 table is left empty in GV2. */
298 for (l=0; l<no_grammar_token_routines; l++)
299 if (grammar_token_routine[l] == routine_address)
302 grammar_token_routine[l] = routine_address;
303 return(no_grammar_token_routines++);
306 /* ------------------------------------------------------------------------- */
307 /* The English-verb list. */
308 /* ------------------------------------------------------------------------- */
/* Walk the English_verb_list records (see format comment above): in lookup
   mode (new_number == NULL) return the Inform-verb number stored for the
   matching word, or -1 if absent; in renumber mode overwrite the stored
   two-byte verb number with *new_number. */
310 static int find_or_renumber_verb(char *English_verb, int *new_number)
312 /* If new_number is null, returns the Inform-verb number which the
* given English verb causes, or -1 if the given verb is not in the
316 /* If new_number is non-null, renumbers the Inform-verb number which
* English_verb matches in English_verb_list to account for the case
* when we are extending a verb. Returns 0 if successful, or -1 if
* the given verb is not in the dictionary (which shouldn't happen as
* get_verb has already run) */
/* p[0] = record length, p[1..2] = big-endian verb number, p+3 = word. */
324 while (p < English_verb_list_top)
325 { if (strcmp(English_verb, p+3) == 0)
327 { p[1] = (*new_number)/256;
328 p[2] = (*new_number)%256;
331 return(256*((uchar)p[1]))+((uchar)p[2]);
/* Append a record to English_verb_list mapping 'English_verb' to the given
   Inform-verb 'number'. Errors if the word is already registered or too
   long, and if the list would exceed MAX_VERBSPACE. */
338 static void register_verb(char *English_verb, int number)
340 /* Registers a new English verb as referring to the given Inform-verb
341 number. (See comments above for format of the list.) */
344 if (find_or_renumber_verb(English_verb, NULL) != -1)
345 { error_named("Two different verb definitions refer to", English_verb);
349 /* We set a hard limit of MAX_VERB_WORD_SIZE=120 because the
350 English_verb_list table stores length in a leading byte. (We could
351 raise that to 250, really, but there's little point when
352 MAX_DICT_WORD_SIZE is 40.) */
/* +4 = 1 length byte + 2 verb-number bytes + trailing NUL. */
353 entrysize = strlen(English_verb)+4;
354 if (entrysize > MAX_VERB_WORD_SIZE+4)
355 error_numbered("Verb word is too long -- max length is", MAX_VERB_WORD_SIZE);
356 English_verb_list_size += entrysize;
357 if (English_verb_list_size >= MAX_VERBSPACE)
358 memoryerror("MAX_VERBSPACE", MAX_VERBSPACE);
360 English_verb_list_top[0] = entrysize;
361 English_verb_list_top[1] = number/256;
362 English_verb_list_top[2] = number%256;
363 strcpy(English_verb_list_top+3, English_verb);
364 English_verb_list_top += entrysize;
/* Interpret the last-read token as a quoted English verb word and return
   its Inform-verb number; issues an error (and, per the gap in this
   listing, presumably returns -1) when the token is not a known quoted
   verb. */
367 static int get_verb(void)
369 /* Look at the last-read token: if it's the name of an English verb
370 understood by Inform, in double-quotes, then return the Inform-verb
371 that word refers to: otherwise give an error and return -1. */
375 if ((token_type == DQ_TT) || (token_type == SQ_TT))
376 { j = find_or_renumber_verb(token_text, NULL);
378 error_named("There is no previous grammar for the verb",
383 ebf_error("an English verb in quotes", token_text);
388 /* ------------------------------------------------------------------------- */
389 /* Grammar lines for Verb/Extend directives. */
390 /* ------------------------------------------------------------------------- */
/* Parse one "* <tokens> -> <action>" grammar line for Inform-verb
   'verbnum', writing the encoded line into grammar_lines[] and recording
   its start offset in Inform_verbs[verbnum].l[line]. Token encoding
   depends on grammar_version_number (GV1: 1-byte tokens; GV2: type byte
   plus operand word); the Glulx branches use wider fields. Returns TRUE
   if another line follows, FALSE at the directive's end. */
392 static int grammar_line(int verbnum, int line)
394 /* Parse a grammar line, to be written into grammar_lines[mark] onward.
396 Syntax: * <token1> ... <token-n> -> <action>
398 is compiled to a table in the form:
400 <action number : word>
401 <token 1> ... <token n> <ENDIT>
403 where <ENDIT> is the byte 15, and each <token> is 3 bytes long.
405 If grammar_version_number is 1, the token holds
409 and otherwise a GV2 token.
411 Return TRUE if grammar continues after the line, FALSE if the
412 directive comes to an end. */
414 int j, bytecode, mark; int32 wordcode;
415 int grammar_token, slash_mode, last_was_slash;
416 int reverse_action, TOKEN_SIZE;
417 debug_location_beginning beginning_debug_location =
418 get_token_location_beginning();
/* A ';' here means the whole directive has ended. A missing '*' divider
   is an error. */
421 if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
422 { discard_token_location(beginning_debug_location);
425 if (!((token_type == SEP_TT) && (token_value == TIMES_SEP)))
426 { discard_token_location(beginning_debug_location);
427 ebf_error("'*' divider", token_text);
428 panic_mode_error_recovery();
432 /* Have we run out of lines or token space? */
434 if (line >= MAX_LINES_PER_VERB)
435 { discard_token_location(beginning_debug_location);
436 error("Too many lines of grammar for verb. This maximum is built \
437 into Inform, so suggest rewriting grammar using general parsing routines");
441 /* Internally, a line can be up to 3*32 + 1 + 2 = 99 bytes long */
442 /* In Glulx, that's 5*32 + 4 = 164 bytes */
444 mark = grammar_lines_top;
446 if (mark + 100 >= MAX_LINESPACE)
447 { discard_token_location(beginning_debug_location);
448 memoryerror("MAX_LINESPACE", MAX_LINESPACE);
452 if (mark + 165 >= MAX_LINESPACE)
453 { discard_token_location(beginning_debug_location);
454 memoryerror("MAX_LINESPACE", MAX_LINESPACE);
458 Inform_verbs[verbnum].l[line] = mark;
/* Main token loop: read tokens until '->' terminates the list. */
469 grammar_token = 0; last_was_slash = TRUE; slash_mode = FALSE;
474 bytecode = 0; wordcode = 0;
475 if ((token_type == SEP_TT) && (token_value == SEMICOLON_SEP))
476 { discard_token_location(beginning_debug_location);
477 ebf_error("'->' clause", token_text);
480 if ((token_type == SEP_TT) && (token_value == ARROW_SEP))
481 { if (last_was_slash && (grammar_token>0))
482 ebf_error("grammar token", token_text);
/* '/' joins alternative prepositions; only legal in GV2 and only after
   a preposition token (low nibble 2), which then gets flag 0x20. */
486 if (!last_was_slash) slash_mode = FALSE;
487 if ((token_type == SEP_TT) && (token_value == DIVIDE_SEP))
488 { if (grammar_version_number == 1)
489 error("'/' can only be used with Library 6/3 or later");
491 ebf_error("grammar token or '->'", token_text);
493 { last_was_slash = TRUE;
495 if (((grammar_lines[mark-TOKEN_SIZE]) & 0x0f) != 2)
496 error("'/' can only be applied to prepositions");
497 grammar_lines[mark-TOKEN_SIZE] |= 0x20;
501 else last_was_slash = FALSE;
/* Quoted word: a preposition (GV1 adjective number / GV2 dict word). */
503 if ((token_type == DQ_TT) || (token_type == SQ_TT))
504 { if (grammar_version_number == 1)
505 bytecode = make_adjective(token_text);
508 wordcode = dictionary_add(token_text, 8, 0, 0);
511 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NOUN_DK))
513 if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
515 /* noun = <routine> */
518 if ((token_type != SYMBOL_TT)
519 || (stypes[token_value] != ROUTINE_T))
520 { discard_token_location(beginning_debug_location);
521 ebf_error("routine name after 'noun='", token_text);
522 panic_mode_error_recovery();
525 if (grammar_version_number == 1)
527 = 16 + make_parsing_routine(svals[token_value]);
530 wordcode = svals[token_value];
532 sflags[token_value] |= USED_SFLAG;
536 if (grammar_version_number == 1) bytecode=0;
537 else { bytecode = 1; wordcode = 0; }
/* Built-in parser tokens: GV1 encodes the token in the single byte,
   GV2 uses type byte 1 with the token number in the word. */
540 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==HELD_DK))
541 { if (grammar_version_number==1) bytecode=1;
542 else { bytecode=1; wordcode=1; } }
543 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTI_DK))
544 { if (grammar_version_number==1) bytecode=2;
545 else { bytecode=1; wordcode=2; } }
546 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIHELD_DK))
547 { if (grammar_version_number==1) bytecode=3;
548 else { bytecode=1; wordcode=3; } }
549 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIEXCEPT_DK))
550 { if (grammar_version_number==1) bytecode=4;
551 else { bytecode=1; wordcode=4; } }
552 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==MULTIINSIDE_DK))
553 { if (grammar_version_number==1) bytecode=5;
554 else { bytecode=1; wordcode=5; } }
555 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==CREATURE_DK))
556 { if (grammar_version_number==1) bytecode=6;
557 else { bytecode=1; wordcode=6; } }
558 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SPECIAL_DK))
559 { if (grammar_version_number==1) bytecode=7;
560 else { bytecode=1; wordcode=7; } }
561 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==NUMBER_DK))
562 { if (grammar_version_number==1) bytecode=8;
563 else { bytecode=1; wordcode=8; } }
564 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==TOPIC_DK))
565 { if (grammar_version_number==1)
566 error("The 'topic' token is only available if you \
567 are using Library 6/3 or later");
568 else { bytecode=1; wordcode=9; } }
569 else if ((token_type==DIR_KEYWORD_TT)&&(token_value==SCOPE_DK))
571 /* scope = <routine> */
574 if (!((token_type==SEP_TT)&&(token_value==SETEQUALS_SEP)))
575 { discard_token_location(beginning_debug_location);
576 ebf_error("'=' after 'scope'", token_text);
577 panic_mode_error_recovery();
582 if ((token_type != SYMBOL_TT)
583 || (stypes[token_value] != ROUTINE_T))
584 { discard_token_location(beginning_debug_location);
585 ebf_error("routine name after 'scope='", token_text);
586 panic_mode_error_recovery();
590 if (grammar_version_number == 1)
592 make_parsing_routine(svals[token_value]);
593 else { bytecode = 0x85; wordcode = svals[token_value]; }
594 sflags[token_value] |= USED_SFLAG;
596 else if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
597 { discard_token_location(beginning_debug_location);
598 error("'=' is only legal here as 'noun=Routine'");
599 panic_mode_error_recovery();
602 else { /* <attribute> or <general-parsing-routine> tokens */
604 if ((token_type != SYMBOL_TT)
605 || ((stypes[token_value] != ATTRIBUTE_T)
606 && (stypes[token_value] != ROUTINE_T)))
607 { discard_token_location(beginning_debug_location);
608 error_named("No such grammar token as", token_text);
609 panic_mode_error_recovery();
612 if (stypes[token_value]==ATTRIBUTE_T)
613 { if (grammar_version_number == 1)
614 bytecode = 128 + svals[token_value];
615 else { bytecode = 4; wordcode = svals[token_value]; }
618 { if (grammar_version_number == 1)
620 make_parsing_routine(svals[token_value]);
621 else { bytecode = 0x86; wordcode = svals[token_value]; }
623 sflags[token_value] |= USED_SFLAG;
/* Token accepted: emit it (GV1 lines are capped at 6 tokens). */
626 grammar_token++; no_grammar_tokens++;
627 if ((grammar_version_number == 1) && (grammar_token > 6))
628 { if (grammar_token == 7)
629 warning("Grammar line cut short: you can only have up to 6 \
630 tokens in any line (unless you're compiling with library 6/3 or later)");
634 { if (bytecode != 0x42)
635 error("'/' can only be applied to prepositions");
638 grammar_lines[mark++] = bytecode;
640 grammar_lines[mark++] = wordcode/256;
641 grammar_lines[mark++] = wordcode%256;
/* Glulx branch: operand word is 4 bytes, big-endian. */
644 grammar_lines[mark++] = ((wordcode >> 24) & 0xFF);
645 grammar_lines[mark++] = ((wordcode >> 16) & 0xFF);
646 grammar_lines[mark++] = ((wordcode >> 8) & 0xFF);
647 grammar_lines[mark++] = ((wordcode) & 0xFF);
/* End of token list: write the ENDIT byte (15). */
653 grammar_lines[mark++] = 15;
654 grammar_lines_top = mark;
/* Read the action name after '->' without entering it in the symbol
   table (it may be a new action, created via action_of_name below). */
656 dont_enter_into_symbol_table = TRUE;
658 dont_enter_into_symbol_table = FALSE;
660 if (token_type != DQ_TT)
661 { discard_token_location(beginning_debug_location);
662 ebf_error("name of new or existing action", token_text);
663 panic_mode_error_recovery();
667 { assembly_operand AO = action_of_name(token_text);
669 if (j >= ((grammar_version_number==1)?256:4096))
670 error_named("This is a fake action, not a real one:", token_text);
/* Optional trailing 'reverse' keyword (GV2 only). */
673 reverse_action = FALSE;
675 if ((token_type == DIR_KEYWORD_TT) && (token_value == REVERSE_DK))
676 { if (grammar_version_number == 1)
677 error("'reverse' actions can only be used with \
678 Library 6/3 or later");
679 reverse_action = TRUE;
681 else put_token_back();
/* Go back and fill in the action-number word at the line's start. */
683 mark = Inform_verbs[verbnum].l[line];
685 if (debugfile_switch)
686 { debug_file_printf("<table-entry>");
687 debug_file_printf("<type>grammar line</type>");
688 debug_file_printf("<address>");
689 write_debug_grammar_backpatch(mark);
690 debug_file_printf("</address>");
691 debug_file_printf("<end-address>");
692 write_debug_grammar_backpatch(grammar_lines_top);
693 debug_file_printf("</end-address>");
694 write_debug_locations
695 (get_token_location_end(beginning_debug_location));
696 debug_file_printf("</table-entry>");
702 grammar_lines[mark++] = j/256;
703 grammar_lines[mark++] = j%256;
706 grammar_lines[mark++] = ((j >> 8) & 0xFF);
707 grammar_lines[mark++] = ((j) & 0xFF);
708 grammar_lines[mark++] = (reverse_action ? 1 : 0);
714 /* ------------------------------------------------------------------------- */
715 /* The Verb directive: */
717 /* Verb [meta] "word-1" ... "word-n" | = "existing-English-verb" */
718 /* | <grammar-line-1> ... <g-line-n> */
720 /* ------------------------------------------------------------------------- */
/* Parse an entire Verb directive: optional 'meta' keyword, a list of quoted
   English verb words, then either "= existing-verb" (alias form) or a
   sequence of grammar lines for a brand-new Inform-verb. Each given word
   is added to the dictionary (flag 0x41, plus 0x02 for meta) carrying the
   verb number counted downward from 0xff (0xffff in Glulx). */
722 extern void make_verb(void)
724 /* Parse an entire Verb ... directive. */
726 int Inform_verb, meta_verb_flag=FALSE, verb_equals_form=FALSE;
/* NOTE(review): no_given is incremented below with no visible bound
   check against the 32-entry English_verbs_given array -- a directive
   with more than 32 verb words would overflow it; confirm whether a
   check exists in the lines missing from this listing. */
728 char *English_verbs_given[32]; int no_given = 0, i;
730 directive_keywords.enabled = TRUE;
734 if ((token_type == DIR_KEYWORD_TT) && (token_value == META_DK))
735 { meta_verb_flag = TRUE;
739 while ((token_type == DQ_TT) || (token_type == SQ_TT))
740 { English_verbs_given[no_given++] = token_text;
745 { ebf_error("English verb in quotes", token_text);
746 panic_mode_error_recovery(); return;
/* "= verb" alias form: reuse the existing Inform-verb's grammar. */
749 if ((token_type == SEP_TT) && (token_value == SETEQUALS_SEP))
750 { verb_equals_form = TRUE;
752 Inform_verb = get_verb();
753 if (Inform_verb == -1) return;
755 if (!((token_type == SEP_TT) && (token_value == SEMICOLON_SEP)))
756 ebf_error("';' after English verb", token_text);
759 { Inform_verb = no_Inform_verbs;
760 if (no_Inform_verbs == MAX_VERBS)
761 memoryerror("MAX_VERBS",MAX_VERBS);
764 for (i=0; i<no_given; i++)
765 { dictionary_add(English_verbs_given[i],
766 0x41 + ((meta_verb_flag)?0x02:0x00),
767 (glulx_mode)?(0xffff-Inform_verb):(0xff-Inform_verb), 0);
768 register_verb(English_verbs_given[i], Inform_verb);
771 if (!verb_equals_form)
774 while (grammar_line(no_Inform_verbs, lines++)) ;
775 Inform_verbs[no_Inform_verbs++].lines = --lines;
778 directive_keywords.enabled = FALSE;
781 /* ------------------------------------------------------------------------- */
782 /* The Extend directive: */
784 /* Extend | only "verb-1" ... "verb-n" | <grammar-lines> */
785 /* | "verb" | "replace" */
789 /* ------------------------------------------------------------------------- */
/* Modes for the Extend directive: replace the old grammar entirely,
   prepend new lines, or (default) append them. */
791 #define EXTEND_REPLACE 1
792 #define EXTEND_FIRST 2
793 #define EXTEND_LAST 3
/* Parse an entire Extend directive. With 'only', the named English words
   are split off onto a copy of their Inform-verb (so other words keeping
   the old grammar are unaffected); otherwise the named verb itself is
   extended in place. Then grammar lines are read and spliced according to
   EXTEND_REPLACE / EXTEND_FIRST / EXTEND_LAST. */
795 extern void extend_verb(void)
797 /* Parse an entire Extend ... directive. */
799 int Inform_verb = -1, k, l, lines, extend_mode;
801 directive_keywords.enabled = TRUE;
802 directives.enabled = FALSE;
805 if ((token_type == DIR_KEYWORD_TT) && (token_value == ONLY_DK))
807 if (no_Inform_verbs == MAX_VERBS)
808 memoryerror("MAX_VERBS", MAX_VERBS)
809 while (get_next_token(),
810 ((token_type == DQ_TT) || (token_type == SQ_TT)))
811 { Inform_verb = get_verb();
812 if (Inform_verb == -1) return;
/* NOTE(review): 'l' is compared here before any visible assignment --
   presumably initialised/updated in lines missing from this listing. */
813 if ((l!=-1) && (Inform_verb!=l))
814 warning_named("Verb disagrees with previous verbs:", token_text);
/* Repoint each quoted word's dictionary entry (and its English_verb_list
   record) at the new Inform-verb number being created. */
816 dictionary_set_verb_number(token_text,
817 (glulx_mode)?(0xffff-no_Inform_verbs):(0xff-no_Inform_verbs));
818 /* make call to renumber verb in English_verb_list too */
819 if (find_or_renumber_verb(token_text, &no_Inform_verbs) == -1)
820 warning_named("Verb to extend not found in English_verb_list:",
824 /* Copy the old Inform-verb into a new one which the list of
825 English-verbs given have had their dictionary entries modified
828 Inform_verbs[no_Inform_verbs] = Inform_verbs[Inform_verb];
829 Inform_verb = no_Inform_verbs++;
832 { Inform_verb = get_verb();
833 if (Inform_verb == -1) return;
837 /* Inform_verb now contains the number of the Inform-verb to extend... */
839 extend_mode = EXTEND_LAST;
840 if ((token_type == SEP_TT) && (token_value == TIMES_SEP))
844 if ((token_type == DIR_KEYWORD_TT) && (token_value == REPLACE_DK))
845 extend_mode = EXTEND_REPLACE;
846 if ((token_type == DIR_KEYWORD_TT) && (token_value == FIRST_DK))
847 extend_mode = EXTEND_FIRST;
848 if ((token_type == DIR_KEYWORD_TT) && (token_value == LAST_DK))
849 extend_mode = EXTEND_LAST;
852 { ebf_error("'replace', 'last', 'first' or '*'", token_text);
853 extend_mode = EXTEND_LAST;
/* Splice the newly parsed lines into the verb's line table: 'first'
   shifts existing lines up to make room, 'last' appends after them. */
857 l = Inform_verbs[Inform_verb].lines;
859 if (extend_mode == EXTEND_LAST) lines=l;
861 { if (extend_mode == EXTEND_FIRST)
863 Inform_verbs[Inform_verb].l[k+lines]
864 = Inform_verbs[Inform_verb].l[k-1+lines];
865 } while (grammar_line(Inform_verb, lines++));
867 if (extend_mode == EXTEND_FIRST)
868 { Inform_verbs[Inform_verb].lines = l+lines-1;
870 Inform_verbs[Inform_verb].l[k+lines-1]
871 = Inform_verbs[Inform_verb].l[k+lines];
873 else Inform_verbs[Inform_verb].lines = --lines;
875 directive_keywords.enabled = FALSE;
876 directives.enabled = TRUE;
879 /* ========================================================================= */
880 /* Data structure management routines */
881 /* ------------------------------------------------------------------------- */
/* Reset this module's globals to their pre-compilation state and pick the
   default grammar version (GV1 for Z-machine here; the GV2 assignment on
   the next visible line presumably sits in an else-branch for Glulx --
   the condition falls in a numbering gap of this listing). */
883 extern void init_verbs_vars(void)
887 no_grammar_lines = 0;
888 no_grammar_tokens = 0;
889 English_verb_list_size = 0;
892 action_byte_offset = NULL;
893 grammar_token_routine = NULL;
895 adjective_sort_code = NULL;
896 English_verb_list = NULL;
899 grammar_version_number = 1;
901 grammar_version_number = 2;
/* Per-pass reset: zero the counters and the grammar-lines write position. */
904 extern void verbs_begin_pass(void)
906 no_Inform_verbs=0; no_adjectives=0;
907 no_grammar_token_routines=0;
911 grammar_lines_top = 0;
914 extern void verbs_allocate_arrays(void)
916 Inform_verbs = my_calloc(sizeof(verbt), MAX_VERBS, "verbs");
917 grammar_lines = my_malloc(MAX_LINESPACE, "grammar lines");
918 action_byte_offset = my_calloc(sizeof(int32), MAX_ACTIONS, "actions");
919 action_symbol = my_calloc(sizeof(int32), MAX_ACTIONS,
921 grammar_token_routine = my_calloc(sizeof(int32), MAX_ACTIONS,
922 "grammar token routines");
923 adjectives = my_calloc(sizeof(int32), MAX_ADJECTIVES,
925 adjective_sort_code = my_calloc(DICT_WORD_BYTES, MAX_ADJECTIVES,
926 "adjective sort codes");
928 English_verb_list = my_malloc(MAX_VERBSPACE, "register of verbs");
929 English_verb_list_top = English_verb_list;
/* Release every table allocated in verbs_allocate_arrays(), in the same
   order. my_free takes the address of each pointer (so it can also null
   it, presumably -- its definition is outside this file). */
932 extern void verbs_free_arrays(void)
934 my_free(&Inform_verbs, "verbs");
935 my_free(&grammar_lines, "grammar lines");
936 my_free(&action_byte_offset, "actions");
937 my_free(&action_symbol, "action symbols");
938 my_free(&grammar_token_routine, "grammar token routines");
939 my_free(&adjectives, "adjectives");
940 my_free(&adjective_sort_code, "adjective sort codes");
941 my_free(&English_verb_list, "register of verbs");
944 /* ========================================================================= */