// tokenizer.h
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Author: kenton@google.com (Kenton Varda)
//  Based on original Protocol Buffers design by
//  Sanjay Ghemawat, Jeff Dean, and others.
//
// Class for parsing tokenized text from a ZeroCopyInputStream.
  35. #ifndef GOOGLE_PROTOBUF_IO_TOKENIZER_H__
  36. #define GOOGLE_PROTOBUF_IO_TOKENIZER_H__
  37. #include <string>
  38. #include <vector>
  39. #include <google/protobuf/stubs/common.h>
  40. #include <google/protobuf/stubs/logging.h>
  41. namespace google {
  42. namespace protobuf {
  43. namespace io {
  44. class ZeroCopyInputStream; // zero_copy_stream.h
  45. // Defined in this file.
  46. class ErrorCollector;
  47. class Tokenizer;
  48. // By "column number", the proto compiler refers to a count of the number
  49. // of bytes before a given byte, except that a tab character advances to
  50. // the next multiple of 8 bytes. Note in particular that column numbers
  51. // are zero-based, while many user interfaces use one-based column numbers.
  52. typedef int ColumnNumber;
  53. // Abstract interface for an object which collects the errors that occur
  54. // during parsing. A typical implementation might simply print the errors
  55. // to stdout.
  56. class LIBPROTOBUF_EXPORT ErrorCollector {
  57. public:
  58. inline ErrorCollector() {}
  59. virtual ~ErrorCollector();
  60. // Indicates that there was an error in the input at the given line and
  61. // column numbers. The numbers are zero-based, so you may want to add
  62. // 1 to each before printing them.
  63. virtual void AddError(int line, ColumnNumber column,
  64. const string& message) = 0;
  65. // Indicates that there was a warning in the input at the given line and
  66. // column numbers. The numbers are zero-based, so you may want to add
  67. // 1 to each before printing them.
  68. virtual void AddWarning(int line, ColumnNumber column,
  69. const string& message) { }
  70. private:
  71. GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ErrorCollector);
  72. };
  73. // This class converts a stream of raw text into a stream of tokens for
  74. // the protocol definition parser to parse. The tokens recognized are
  75. // similar to those that make up the C language; see the TokenType enum for
  76. // precise descriptions. Whitespace and comments are skipped. By default,
  77. // C- and C++-style comments are recognized, but other styles can be used by
  78. // calling set_comment_style().
  79. class LIBPROTOBUF_EXPORT Tokenizer {
  80. public:
  81. // Construct a Tokenizer that reads and tokenizes text from the given
  82. // input stream and writes errors to the given error_collector.
  83. // The caller keeps ownership of input and error_collector.
  84. Tokenizer(ZeroCopyInputStream* input, ErrorCollector* error_collector);
  85. ~Tokenizer();
  86. enum TokenType {
  87. TYPE_START, // Next() has not yet been called.
  88. TYPE_END, // End of input reached. "text" is empty.
  89. TYPE_IDENTIFIER, // A sequence of letters, digits, and underscores, not
  90. // starting with a digit. It is an error for a number
  91. // to be followed by an identifier with no space in
  92. // between.
  93. TYPE_INTEGER, // A sequence of digits representing an integer. Normally
  94. // the digits are decimal, but a prefix of "0x" indicates
  95. // a hex number and a leading zero indicates octal, just
  96. // like with C numeric literals. A leading negative sign
  97. // is NOT included in the token; it's up to the parser to
  98. // interpret the unary minus operator on its own.
  99. TYPE_FLOAT, // A floating point literal, with a fractional part and/or
  100. // an exponent. Always in decimal. Again, never
  101. // negative.
  102. TYPE_STRING, // A quoted sequence of escaped characters. Either single
  103. // or double quotes can be used, but they must match.
  104. // A string literal cannot cross a line break.
  105. TYPE_SYMBOL, // Any other printable character, like '!' or '+'.
  106. // Symbols are always a single character, so "!+$%" is
  107. // four tokens.
  108. };
  109. // Structure representing a token read from the token stream.
  110. struct Token {
  111. TokenType type;
  112. string text; // The exact text of the token as it appeared in
  113. // the input. e.g. tokens of TYPE_STRING will still
  114. // be escaped and in quotes.
  115. // "line" and "column" specify the position of the first character of
  116. // the token within the input stream. They are zero-based.
  117. int line;
  118. ColumnNumber column;
  119. ColumnNumber end_column;
  120. };
  121. // Get the current token. This is updated when Next() is called. Before
  122. // the first call to Next(), current() has type TYPE_START and no contents.
  123. const Token& current();
  124. // Return the previous token -- i.e. what current() returned before the
  125. // previous call to Next().
  126. const Token& previous();
  127. // Advance to the next token. Returns false if the end of the input is
  128. // reached.
  129. bool Next();
  130. // Like Next(), but also collects comments which appear between the previous
  131. // and next tokens.
  132. //
  133. // Comments which appear to be attached to the previous token are stored
  134. // in *prev_tailing_comments. Comments which appear to be attached to the
  135. // next token are stored in *next_leading_comments. Comments appearing in
  136. // between which do not appear to be attached to either will be added to
  137. // detached_comments. Any of these parameters can be NULL to simply discard
  138. // the comments.
  139. //
  140. // A series of line comments appearing on consecutive lines, with no other
  141. // tokens appearing on those lines, will be treated as a single comment.
  142. //
  143. // Only the comment content is returned; comment markers (e.g. //) are
  144. // stripped out. For block comments, leading whitespace and an asterisk will
  145. // be stripped from the beginning of each line other than the first. Newlines
  146. // are included in the output.
  147. //
  148. // Examples:
  149. //
  150. // optional int32 foo = 1; // Comment attached to foo.
  151. // // Comment attached to bar.
  152. // optional int32 bar = 2;
  153. //
  154. // optional string baz = 3;
  155. // // Comment attached to baz.
  156. // // Another line attached to baz.
  157. //
  158. // // Comment attached to qux.
  159. // //
  160. // // Another line attached to qux.
  161. // optional double qux = 4;
  162. //
  163. // // Detached comment. This is not attached to qux or corge
  164. // // because there are blank lines separating it from both.
  165. //
  166. // optional string corge = 5;
  167. // /* Block comment attached
  168. // * to corge. Leading asterisks
  169. // * will be removed. */
  170. // /* Block comment attached to
  171. // * grault. */
  172. // optional int32 grault = 6;
  173. bool NextWithComments(string* prev_trailing_comments,
  174. std::vector<string>* detached_comments,
  175. string* next_leading_comments);
  176. // Parse helpers ---------------------------------------------------
  177. // Parses a TYPE_FLOAT token. This never fails, so long as the text actually
  178. // comes from a TYPE_FLOAT token parsed by Tokenizer. If it doesn't, the
  179. // result is undefined (possibly an assert failure).
  180. static double ParseFloat(const string& text);
  181. // Parses a TYPE_STRING token. This never fails, so long as the text actually
  182. // comes from a TYPE_STRING token parsed by Tokenizer. If it doesn't, the
  183. // result is undefined (possibly an assert failure).
  184. static void ParseString(const string& text, string* output);
  185. // Identical to ParseString, but appends to output.
  186. static void ParseStringAppend(const string& text, string* output);
  187. // Parses a TYPE_INTEGER token. Returns false if the result would be
  188. // greater than max_value. Otherwise, returns true and sets *output to the
  189. // result. If the text is not from a Token of type TYPE_INTEGER originally
  190. // parsed by a Tokenizer, the result is undefined (possibly an assert
  191. // failure).
  192. static bool ParseInteger(const string& text, uint64 max_value,
  193. uint64* output);
  194. // Options ---------------------------------------------------------
  195. // Set true to allow floats to be suffixed with the letter 'f'. Tokens
  196. // which would otherwise be integers but which have the 'f' suffix will be
  197. // forced to be interpreted as floats. For all other purposes, the 'f' is
  198. // ignored.
  199. void set_allow_f_after_float(bool value) { allow_f_after_float_ = value; }
  200. // Valid values for set_comment_style().
  201. enum CommentStyle {
  202. // Line comments begin with "//", block comments are delimited by "/*" and
  203. // "*/".
  204. CPP_COMMENT_STYLE,
  205. // Line comments begin with "#". No way to write block comments.
  206. SH_COMMENT_STYLE
  207. };
  208. // Sets the comment style.
  209. void set_comment_style(CommentStyle style) { comment_style_ = style; }
  210. // Whether to require whitespace between a number and a field name.
  211. // Default is true. Do not use this; for Google-internal cleanup only.
  212. void set_require_space_after_number(bool require) {
  213. require_space_after_number_ = require;
  214. }
  215. // Whether to allow string literals to span multiple lines. Default is false.
  216. // Do not use this; for Google-internal cleanup only.
  217. void set_allow_multiline_strings(bool allow) {
  218. allow_multiline_strings_ = allow;
  219. }
  220. // External helper: validate an identifier.
  221. static bool IsIdentifier(const string& text);
  222. // -----------------------------------------------------------------
  223. private:
  224. GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Tokenizer);
  225. Token current_; // Returned by current().
  226. Token previous_; // Returned by previous().
  227. ZeroCopyInputStream* input_;
  228. ErrorCollector* error_collector_;
  229. char current_char_; // == buffer_[buffer_pos_], updated by NextChar().
  230. const char* buffer_; // Current buffer returned from input_.
  231. int buffer_size_; // Size of buffer_.
  232. int buffer_pos_; // Current position within the buffer.
  233. bool read_error_; // Did we previously encounter a read error?
  234. // Line and column number of current_char_ within the whole input stream.
  235. int line_;
  236. ColumnNumber column_;
  237. // String to which text should be appended as we advance through it.
  238. // Call RecordTo(&str) to start recording and StopRecording() to stop.
  239. // E.g. StartToken() calls RecordTo(&current_.text). record_start_ is the
  240. // position within the current buffer where recording started.
  241. string* record_target_;
  242. int record_start_;
  243. // Options.
  244. bool allow_f_after_float_;
  245. CommentStyle comment_style_;
  246. bool require_space_after_number_;
  247. bool allow_multiline_strings_;
  248. // Since we count columns we need to interpret tabs somehow. We'll take
  249. // the standard 8-character definition for lack of any way to do better.
  250. // This must match the documentation of ColumnNumber.
  251. static const int kTabWidth = 8;
  252. // -----------------------------------------------------------------
  253. // Helper methods.
  254. // Consume this character and advance to the next one.
  255. void NextChar();
  256. // Read a new buffer from the input.
  257. void Refresh();
  258. inline void RecordTo(string* target);
  259. inline void StopRecording();
  260. // Called when the current character is the first character of a new
  261. // token (not including whitespace or comments).
  262. inline void StartToken();
  263. // Called when the current character is the first character after the
  264. // end of the last token. After this returns, current_.text will
  265. // contain all text consumed since StartToken() was called.
  266. inline void EndToken();
  267. // Convenience method to add an error at the current line and column.
  268. void AddError(const string& message) {
  269. error_collector_->AddError(line_, column_, message);
  270. }
  271. // -----------------------------------------------------------------
  272. // The following four methods are used to consume tokens of specific
  273. // types. They are actually used to consume all characters *after*
  274. // the first, since the calling function consumes the first character
  275. // in order to decide what kind of token is being read.
  276. // Read and consume a string, ending when the given delimiter is
  277. // consumed.
  278. void ConsumeString(char delimiter);
  279. // Read and consume a number, returning TYPE_FLOAT or TYPE_INTEGER
  280. // depending on what was read. This needs to know if the first
  281. // character was a zero in order to correctly recognize hex and octal
  282. // numbers.
  283. // It also needs to know if the first character was a . to parse floating
  284. // point correctly.
  285. TokenType ConsumeNumber(bool started_with_zero, bool started_with_dot);
  286. // Consume the rest of a line.
  287. void ConsumeLineComment(string* content);
  288. // Consume until "*/".
  289. void ConsumeBlockComment(string* content);
  290. enum NextCommentStatus {
  291. // Started a line comment.
  292. LINE_COMMENT,
  293. // Started a block comment.
  294. BLOCK_COMMENT,
  295. // Consumed a slash, then realized it wasn't a comment. current_ has
  296. // been filled in with a slash token. The caller should return it.
  297. SLASH_NOT_COMMENT,
  298. // We do not appear to be starting a comment here.
  299. NO_COMMENT
  300. };
  301. // If we're at the start of a new comment, consume it and return what kind
  302. // of comment it is.
  303. NextCommentStatus TryConsumeCommentStart();
  304. // -----------------------------------------------------------------
  305. // These helper methods make the parsing code more readable. The
  306. // "character classes" referred to are defined at the top of the .cc file.
  307. // Basically it is a C++ class with one method:
  308. // static bool InClass(char c);
  309. // The method returns true if c is a member of this "class", like "Letter"
  310. // or "Digit".
  311. // Returns true if the current character is of the given character
  312. // class, but does not consume anything.
  313. template<typename CharacterClass>
  314. inline bool LookingAt();
  315. // If the current character is in the given class, consume it and return
  316. // true. Otherwise return false.
  317. // e.g. TryConsumeOne<Letter>()
  318. template<typename CharacterClass>
  319. inline bool TryConsumeOne();
  320. // Like above, but try to consume the specific character indicated.
  321. inline bool TryConsume(char c);
  322. // Consume zero or more of the given character class.
  323. template<typename CharacterClass>
  324. inline void ConsumeZeroOrMore();
  325. // Consume one or more of the given character class or log the given
  326. // error message.
  327. // e.g. ConsumeOneOrMore<Digit>("Expected digits.");
  328. template<typename CharacterClass>
  329. inline void ConsumeOneOrMore(const char* error);
  330. };
  331. // inline methods ====================================================
  332. inline const Tokenizer::Token& Tokenizer::current() {
  333. return current_;
  334. }
  335. inline const Tokenizer::Token& Tokenizer::previous() {
  336. return previous_;
  337. }
  338. inline void Tokenizer::ParseString(const string& text, string* output) {
  339. output->clear();
  340. ParseStringAppend(text, output);
  341. }
  342. } // namespace io
  343. } // namespace protobuf
  344. } // namespace google
  345. #endif // GOOGLE_PROTOBUF_IO_TOKENIZER_H__