clang  6.0.0svn
Lexer.cpp
Go to the documentation of this file.
1 //===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the Lexer and Token interfaces.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "clang/Lex/Lexer.h"
15 #include "UnicodeCharSets.h"
16 #include "clang/Basic/CharInfo.h"
21 #include "clang/Lex/Preprocessor.h"
23 #include "llvm/ADT/StringExtras.h"
24 #include "llvm/ADT/StringSwitch.h"
25 #include "llvm/Support/Compiler.h"
26 #include "llvm/Support/ConvertUTF.h"
27 #include "llvm/Support/MathExtras.h"
28 #include "llvm/Support/MemoryBuffer.h"
29 #include "llvm/Support/UnicodeCharRanges.h"
30 #include <algorithm>
31 #include <cassert>
32 #include <cstddef>
33 #include <cstdint>
34 #include <cstring>
35 #include <string>
36 #include <tuple>
37 #include <utility>
38 
39 using namespace clang;
40 
41 //===----------------------------------------------------------------------===//
42 // Token Class Implementation
43 //===----------------------------------------------------------------------===//
44 
/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
// NOTE(review): this listing dropped the function's signature line and the
// guard that declares II (presumably an IdentifierInfo* obtained from the
// token) — restore them from the original Lexer.cpp; as shown, II is
// undeclared and the trailing 'return false;' covers the no-identifier case.
 if (isAnnotation())
   return false;
 // Compare the identifier's ObjC keyword kind against the requested one.
 return II->getObjCKeywordID() == objcKey;
 return false;
}
53 
/// getObjCKeywordID - Return the ObjC keyword kind.
// NOTE(review): the signature line and the declaration of 'specId'
// (presumably the token's IdentifierInfo*) were dropped by this listing;
// restore them from the original Lexer.cpp.
 if (isAnnotation())
   return tok::objc_not_keyword;
 // No identifier info means this cannot be an ObjC keyword.
 return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
}
61 
62 //===----------------------------------------------------------------------===//
63 // Lexer Class Implementation
64 //===----------------------------------------------------------------------===//
65 
// Out-of-line "anchor" method (LLVM convention): defining one out-of-line
// member pins the class's vtable/type info to this translation unit.
// NOTE(review): assumes anchor() is declared virtual in Lexer.h — confirm.
void Lexer::anchor() { }
67 
68 void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
69  const char *BufEnd) {
70  BufferStart = BufStart;
71  BufferPtr = BufPtr;
72  BufferEnd = BufEnd;
73 
74  assert(BufEnd[0] == 0 &&
75  "We assume that the input buffer has a null character at the end"
76  " to simplify lexing!");
77 
78  // Check whether we have a BOM in the beginning of the buffer. If yes - act
79  // accordingly. Right now we support only UTF-8 with and without BOM, so, just
80  // skip the UTF-8 BOM if it's present.
81  if (BufferStart == BufferPtr) {
82  // Determine the size of the BOM.
83  StringRef Buf(BufferStart, BufferEnd - BufferStart);
84  size_t BOMLength = llvm::StringSwitch<size_t>(Buf)
85  .StartsWith("\xEF\xBB\xBF", 3) // UTF-8 BOM
86  .Default(0);
87 
88  // Skip the BOM.
89  BufferPtr += BOMLength;
90  }
91 
92  Is_PragmaLexer = false;
93  CurrentConflictMarkerState = CMK_None;
94 
95  // Start of the file is a start of line.
96  IsAtStartOfLine = true;
97  IsAtPhysicalStartOfLine = true;
98 
99  HasLeadingSpace = false;
100  HasLeadingEmptyMacro = false;
101 
102  // We are not after parsing a #.
103  ParsingPreprocessorDirective = false;
104 
105  // We are not after parsing #include.
106  ParsingFilename = false;
107 
108  // We are not in raw mode. Raw mode disables diagnostics and interpretation
109  // of tokens (e.g. identifiers, thus disabling macro expansion). It is used
110  // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
111  // or otherwise skipping over tokens.
112  LexingRawMode = false;
113 
114  // Default to not keeping comments.
115  ExtendedTokenMode = 0;
116 }
117 
/// Lexer constructor - Create a new lexer object for the specified buffer
/// with the specified preprocessor managing the lexing process. This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *InputFile, Preprocessor &PP)
  : PreprocessorLexer(&PP, FID),
    FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
    LangOpts(PP.getLangOpts()) {

  // Lex the whole buffer from its beginning.
  InitLexer(InputFile->getBufferStart(), InputFile->getBufferStart(),
            InputFile->getBufferEnd());

  // NOTE(review): the embedded numbering skips a line here — a statement
  // (in upstream Lexer.cpp, the resetExtendedTokenMode() call) was dropped
  // by this listing; restore it before compiling.
}
132 
// NOTE(review): this listing dropped the signature line (upstream:
// 'void Lexer::resetExtendedTokenMode()') and the statement forming the
// else-branch below; restore both from the original Lexer.cpp.
 // Requires an owning preprocessor: the retained-token policy comes from it.
 assert(PP && "Cannot reset token mode without a preprocessor");
 // Traditional (K&R-style) preprocessing preserves whitespace exactly.
 if (LangOpts.TraditionalCPP)
   SetKeepWhitespaceMode(true);
 else
}
140 
/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
             const char *BufStart, const char *BufPtr, const char *BufEnd)
  : FileLoc(fileloc), LangOpts(langOpts) {

  InitLexer(BufStart, BufPtr, BufEnd);

  // We *are* in raw mode. This must be set after InitLexer(), which resets
  // LexingRawMode to false.
  LexingRawMode = true;
}
153 
/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
// Delegates to the SourceLocation-based raw-lexer constructor above, lexing
// the whole memory buffer from its start.
Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile,
             const SourceManager &SM, const LangOptions &langOpts)
    : Lexer(SM.getLocForStartOfFile(FID), langOpts, FromFile->getBufferStart(),
            FromFile->getBufferStart(), FromFile->getBufferEnd()) {}
161 
/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
/// _Pragma expansion. This has a variety of magic semantics that this method
/// sets up. It returns a new'd Lexer that must be delete'd when done.
///
/// On entrance to this routine, TokStartLoc is a macro location which has a
/// spelling loc that indicates the bytes to be lexed for the token and an
/// expansion location that indicates where all lexed tokens should be
/// "expanded from".
///
/// TODO: It would really be nice to make _Pragma just be a wrapper around a
/// normal lexer that remaps tokens as they fly by. This would require making
/// Preprocessor::Lex virtual. Given that, we could just dump in a magic lexer
/// interface that could handle this stuff. This would pull GetMappedTokenLoc
/// out of the critical path of the lexer!
///
// NOTE(review): this listing dropped the declarator line (return type, name,
// and first 'SourceLocation SpellingLoc' parameter) as well as the local
// SourceManager reference 'SM' used below; restore from original Lexer.cpp.
                                 SourceLocation ExpansionLocStart,
                                 SourceLocation ExpansionLocEnd,
                                 unsigned TokLen, Preprocessor &PP) {

  // Create the lexer as if we were going to lex the file normally.
  FileID SpellingFID = SM.getFileID(SpellingLoc);
  const llvm::MemoryBuffer *InputFile = SM.getBuffer(SpellingFID);
  Lexer *L = new Lexer(SpellingFID, InputFile, PP);

  // Now that the lexer is created, change the start/end locations so that we
  // just lex the subsection of the file that we want. This is lexing from a
  // scratch buffer.
  const char *StrData = SM.getCharacterData(SpellingLoc);

  L->BufferPtr = StrData;
  L->BufferEnd = StrData+TokLen;
  assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");

  // Set the SourceLocation with the remapping information. This ensures that
  // GetMappedTokenLoc will remap the tokens as they are lexed.
  L->FileLoc = SM.createExpansionLoc(SM.getLocForStartOfFile(SpellingFID),
                                     ExpansionLocStart,
                                     ExpansionLocEnd, TokLen);

  // Ensure that the lexer thinks it is inside a directive, so that end \n will
  // return an EOD token.
  // NOTE(review): the assignment implementing the comment above was dropped
  // by this listing; restore it from the original Lexer.cpp.

  // This lexer really is for _Pragma.
  L->Is_PragmaLexer = true;
  return L;
}
211 
212 /// Stringify - Convert the specified string into a C string, with surrounding
213 /// ""'s, and with escaped \ and " characters.
214 std::string Lexer::Stringify(StringRef Str, bool Charify) {
215  std::string Result = Str;
216  char Quote = Charify ? '\'' : '"';
217  for (unsigned i = 0, e = Result.size(); i != e; ++i) {
218  if (Result[i] == '\\' || Result[i] == Quote) {
219  Result.insert(Result.begin()+i, '\\');
220  ++i; ++e;
221  }
222  }
223  return Result;
224 }
225 
/// Stringify - Convert the specified string into a C string by escaping '\'
/// and " characters. This does not add surrounding ""'s to the string.
// NOTE(review): the signature line of this in-place overload (upstream it
// takes a SmallVectorImpl<char> &Str) was dropped by this listing.
 for (unsigned i = 0, e = Str.size(); i != e; ++i) {
   if (Str[i] == '\\' || Str[i] == '"') {
     // Insert the escape, then step past both it and the escaped character;
     // the container grew by one, so bump the end index as well.
     Str.insert(Str.begin()+i, '\\');
     ++i; ++e;
   }
 }
}
236 
237 //===----------------------------------------------------------------------===//
238 // Token Spelling
239 //===----------------------------------------------------------------------===//
240 
/// \brief Slow case of getSpelling. Extract the characters comprising the
/// spelling of this token from the provided input buffer.
///
/// \param Tok the token being spelled; must have the needs-cleaning flag set.
/// \param BufPtr start of the token's bytes in the source buffer.
/// \param Spelling caller-provided output buffer; must hold at least
///        Tok.getLength() bytes (the result is never longer — see the final
///        assert).
/// \returns the number of bytes written to \p Spelling.
static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
                              const LangOptions &LangOpts, char *Spelling) {
  assert(Tok.needsCleaning() && "getSpellingSlow called on simple token");

  size_t Length = 0;
  const char *BufEnd = BufPtr + Tok.getLength();

  if (tok::isStringLiteral(Tok.getKind())) {
    // Munch the encoding-prefix and opening double-quote.
    while (BufPtr < BufEnd) {
      unsigned Size;
      // getCharAndSizeNoWarn folds trigraphs/escaped newlines into one char.
      Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
      BufPtr += Size;

      if (Spelling[Length - 1] == '"')
        break;
    }

    // Raw string literals need special handling; trigraph expansion and line
    // splicing do not occur within their d-char-sequence nor within their
    // r-char-sequence.
    if (Length >= 2 &&
        Spelling[Length - 2] == 'R' && Spelling[Length - 1] == '"') {
      // Search backwards from the end of the token to find the matching closing
      // quote.
      const char *RawEnd = BufEnd;
      do --RawEnd; while (*RawEnd != '"');
      size_t RawLength = RawEnd - BufPtr + 1;

      // Everything between the quotes is included verbatim in the spelling.
      memcpy(Spelling + Length, BufPtr, RawLength);
      Length += RawLength;
      BufPtr += RawLength;

      // The rest of the token is lexed normally.
    }
  }

  // Copy (and clean) whatever remains of the token.
  while (BufPtr < BufEnd) {
    unsigned Size;
    Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
    BufPtr += Size;
  }

  assert(Length < Tok.getLength() &&
         "NeedsCleaning flag set on token that didn't need cleaning!");
  return Length;
}
291 
/// getSpelling() - Return the 'spelling' of this token. The spelling of a
/// token are the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding. In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs
/// UCNs, etc.
// NOTE(review): the signature line was dropped by this listing (upstream this
// is the StringRef Lexer::getSpelling overload taking a SourceLocation loc).
                               SmallVectorImpl<char> &buffer,
                               const SourceManager &SM,
                               const LangOptions &options,
                               bool *invalid) {
  // Break down the source location.
  std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);

  // Try to load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (invalid) *invalid = true;
    return StringRef();
  }

  const char *tokenBegin = file.data() + locInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(locInfo.first), options,
              file.begin(), tokenBegin, file.end());
  Token token;
  lexer.LexFromRawLexer(token);

  unsigned length = token.getLength();

  // Common case: no need for cleaning; return a view into the file buffer.
  if (!token.needsCleaning())
    return StringRef(tokenBegin, length);

  // Hard case, we need to relex the characters into the string. The second
  // resize trims to the number of bytes actually produced.
  buffer.resize(length);
  buffer.resize(getSpellingSlow(token, tokenBegin, options, buffer.data()));
  return StringRef(buffer.data(), buffer.size());
}
332 
333 /// getSpelling() - Return the 'spelling' of this token. The spelling of a
334 /// token are the characters used to represent the token in the source file
335 /// after trigraph expansion and escaped-newline folding. In particular, this
336 /// wants to get the true, uncanonicalized, spelling of things like digraphs
337 /// UCNs, etc.
338 std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
339  const LangOptions &LangOpts, bool *Invalid) {
340  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
341 
342  bool CharDataInvalid = false;
343  const char *TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
344  &CharDataInvalid);
345  if (Invalid)
346  *Invalid = CharDataInvalid;
347  if (CharDataInvalid)
348  return std::string();
349 
350  // If this token contains nothing interesting, return it directly.
351  if (!Tok.needsCleaning())
352  return std::string(TokStart, TokStart + Tok.getLength());
353 
354  std::string Result;
355  Result.resize(Tok.getLength());
356  Result.resize(getSpellingSlow(Tok, TokStart, LangOpts, &*Result.begin()));
357  return Result;
358 }
359 
360 /// getSpelling - This method is used to get the spelling of a token into a
361 /// preallocated buffer, instead of as an std::string. The caller is required
362 /// to allocate enough space for the token, which is guaranteed to be at least
363 /// Tok.getLength() bytes long. The actual length of the token is returned.
364 ///
365 /// Note that this method may do two possible things: it may either fill in
366 /// the buffer specified with characters, or it may *change the input pointer*
367 /// to point to a constant buffer with the data already in it (avoiding a
368 /// copy). The caller is not allowed to modify the returned buffer pointer
369 /// if an internal buffer is returned.
370 unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
371  const SourceManager &SourceMgr,
372  const LangOptions &LangOpts, bool *Invalid) {
373  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
374 
375  const char *TokStart = nullptr;
376  // NOTE: this has to be checked *before* testing for an IdentifierInfo.
377  if (Tok.is(tok::raw_identifier))
378  TokStart = Tok.getRawIdentifier().data();
379  else if (!Tok.hasUCN()) {
380  if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
381  // Just return the string from the identifier table, which is very quick.
382  Buffer = II->getNameStart();
383  return II->getLength();
384  }
385  }
386 
387  // NOTE: this can be checked even after testing for an IdentifierInfo.
388  if (Tok.isLiteral())
389  TokStart = Tok.getLiteralData();
390 
391  if (!TokStart) {
392  // Compute the start of the token in the input lexer buffer.
393  bool CharDataInvalid = false;
394  TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
395  if (Invalid)
396  *Invalid = CharDataInvalid;
397  if (CharDataInvalid) {
398  Buffer = "";
399  return 0;
400  }
401  }
402 
403  // If this token contains nothing interesting, return it directly.
404  if (!Tok.needsCleaning()) {
405  Buffer = TokStart;
406  return Tok.getLength();
407  }
408 
409  // Otherwise, hard case, relex the characters into the string.
410  return getSpellingSlow(Tok, TokStart, LangOpts, const_cast<char*>(Buffer));
411 }
412 
/// MeasureTokenLength - Relex the token at the specified location and return
/// its length in bytes in the input file. If the token needs cleaning (e.g.
/// includes a trigraph or an escaped newline) then this count includes bytes
/// that are part of that.
// NOTE(review): the first signature line (return type, name, and the
// 'SourceLocation Loc' parameter) was dropped by this listing.
                                   const SourceManager &SM,
                                   const LangOptions &LangOpts) {
  Token TheTok;
  // A failed relex (e.g. invalid buffer or whitespace at Loc) measures as 0.
  if (getRawToken(Loc, TheTok, SM, LangOpts))
    return 0;
  return TheTok.getLength();
}
425 
/// \brief Relex the token at the specified location.
/// \returns true if there was a failure, false on success.
// NOTE(review): the first signature line (upstream:
// 'bool Lexer::getRawToken(SourceLocation Loc, Token &Result,') was dropped
// by this listing.
                        const SourceManager &SM,
                        const LangOptions &LangOpts,
                        bool IgnoreWhiteSpace) {
  // TODO: this could be special cased for common tokens like identifiers, ')',
  // etc to make this faster, if it mattered. Just look at StrData[0] to handle
  // all obviously single-char tokens. This could use
  // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
  // something.

  // If this comes from a macro expansion, we really do want the macro name, not
  // the token this macro expanded to.
  Loc = SM.getExpansionLoc(Loc);
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return true;

  const char *StrData = Buffer.data()+LocInfo.second;

  // Unless the caller opts in, refuse to relex when Loc points at whitespace
  // rather than at the start of a token.
  if (!IgnoreWhiteSpace && isWhitespace(StrData[0]))
    return true;

  // Create a lexer starting at the beginning of this token.
  Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
                 Buffer.begin(), StrData, Buffer.end());
  TheLexer.SetCommentRetentionState(true);
  TheLexer.LexFromRawLexer(Result);
  return false;
}
459 
460 /// Returns the pointer that points to the beginning of line that contains
461 /// the given offset, or null if the offset if invalid.
462 static const char *findBeginningOfLine(StringRef Buffer, unsigned Offset) {
463  const char *BufStart = Buffer.data();
464  if (Offset >= Buffer.size())
465  return nullptr;
466 
467  const char *LexStart = BufStart + Offset;
468  for (; LexStart != BufStart; --LexStart) {
469  if (isVerticalWhitespace(LexStart[0]) &&
470  !Lexer::isNewLineEscaped(BufStart, LexStart)) {
471  // LexStart should point at first character of logical line.
472  ++LexStart;
473  break;
474  }
475  }
476  return LexStart;
477 }
478 
// Return the start location of the token containing Loc (a file location),
// or Loc itself when it cannot be refined (invalid buffer, whitespace, etc.).
// NOTE(review): the first signature line (upstream:
// 'static SourceLocation getBeginningOfFileToken(SourceLocation Loc,') was
// dropped by this listing.
                                              const SourceManager &SM,
                                              const LangOptions &LangOpts) {
  assert(Loc.isFileID());
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  if (LocInfo.first.isInvalid())
    return Loc;

  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return Loc;

  // Back up from the current location until we hit the beginning of a line
  // (or the buffer). We'll relex from that point.
  const char *StrData = Buffer.data() + LocInfo.second;
  const char *LexStart = findBeginningOfLine(Buffer, LocInfo.second);
  if (!LexStart || LexStart == StrData)
    return Loc;

  // Create a lexer starting at the beginning of this token.
  SourceLocation LexerStartLoc = Loc.getLocWithOffset(-LocInfo.second);
  Lexer TheLexer(LexerStartLoc, LangOpts, Buffer.data(), LexStart,
                 Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  // Lex tokens until we find the token that contains the source location.
  Token TheTok;
  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (TheLexer.getBufferLocation() > StrData) {
      // Lexing this token has taken the lexer past the source location we're
      // looking for. If the current token encompasses our source location,
      // return the beginning of that token.
      if (TheLexer.getBufferLocation() - TheTok.getLength() <= StrData)
        return TheTok.getLocation();

      // We ended up skipping over the source location entirely, which means
      // that it points into whitespace. We're done here.
      break;
    }
  } while (TheTok.getKind() != tok::eof);

  // We've passed our source location; just return the original source location.
  return Loc;
}
526 
// Return the start location of the token containing Loc, handling both plain
// file locations and macro-argument expansion locations.
// NOTE(review): the first signature line (upstream:
// 'SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,') was
// dropped by this listing.
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isFileID())
    return getBeginningOfFileToken(Loc, SM, LangOpts);

  // Only macro-argument expansions can be mapped back meaningfully here.
  if (!SM.isMacroArgExpansion(Loc))
    return Loc;

  // Compute the token start on the spelling location, then translate the
  // resulting backwards offset onto the original (macro) location.
  SourceLocation FileLoc = SM.getSpellingLoc(Loc);
  SourceLocation BeginFileLoc = getBeginningOfFileToken(FileLoc, SM, LangOpts);
  std::pair<FileID, unsigned> FileLocInfo = SM.getDecomposedLoc(FileLoc);
  std::pair<FileID, unsigned> BeginFileLocInfo =
      SM.getDecomposedLoc(BeginFileLoc);
  assert(FileLocInfo.first == BeginFileLocInfo.first &&
         FileLocInfo.second >= BeginFileLocInfo.second);
  return Loc.getLocWithOffset(BeginFileLocInfo.second - FileLocInfo.second);
}
545 
namespace {

// Classification of a preprocessor directive encountered while scanning for
// the preamble (see ComputePreamble below).
// NOTE(review): the 'enum PreambleDirectiveKind {' opener was dropped by this
// listing.
  PDK_Skipped,   // Directive is allowed in a preamble; keep scanning.
  PDK_Unknown    // Unrecognized directive; the preamble ends at its '#'.
  };

} // end anonymous namespace
554 
// Scan the start of \p Buffer and compute how far the "preamble" — the
// leading region consisting only of comments and preprocessor directives —
// extends, optionally capped at \p MaxLines lines.
// NOTE(review): the first signature line (upstream:
// 'PreambleBounds Lexer::ComputePreamble(StringRef Buffer,') was dropped by
// this listing.
                                      const LangOptions &LangOpts,
                                      unsigned MaxLines) {
  // Create a lexer starting at the beginning of the file. Note that we use a
  // "fake" file source location at offset 1 so that the lexer will track our
  // position within the file.
  const unsigned StartOffset = 1;
  SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset);
  Lexer TheLexer(FileLoc, LangOpts, Buffer.begin(), Buffer.begin(),
                 Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  bool InPreprocessorDirective = false;
  Token TheTok;
  SourceLocation ActiveCommentLoc;

  // Translate the MaxLines limit into a byte offset up front, so the token
  // loop below can compare raw offsets instead of recounting newlines.
  unsigned MaxLineOffset = 0;
  if (MaxLines) {
    const char *CurPtr = Buffer.begin();
    unsigned CurLine = 0;
    while (CurPtr != Buffer.end()) {
      char ch = *CurPtr++;
      if (ch == '\n') {
        ++CurLine;
        if (CurLine == MaxLines)
          break;
      }
    }
    if (CurPtr != Buffer.end())
      MaxLineOffset = CurPtr - Buffer.begin();
  }

  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (InPreprocessorDirective) {
      // If we've hit the end of the file, we're done.
      if (TheTok.getKind() == tok::eof) {
        break;
      }

      // If we haven't hit the end of the preprocessor directive, skip this
      // token.
      if (!TheTok.isAtStartOfLine())
        continue;

      // We've passed the end of the preprocessor directive, and will look
      // at this token again below.
      InPreprocessorDirective = false;
    }

    // Keep track of the # of lines in the preamble.
    if (TheTok.isAtStartOfLine()) {
      unsigned TokOffset = TheTok.getLocation().getRawEncoding() - StartOffset;

      // If we were asked to limit the number of lines in the preamble,
      // and we're about to exceed that limit, we're done.
      if (MaxLineOffset && TokOffset >= MaxLineOffset)
        break;
    }

    // Comments are okay; skip over them.
    if (TheTok.getKind() == tok::comment) {
      if (ActiveCommentLoc.isInvalid())
        ActiveCommentLoc = TheTok.getLocation();
      continue;
    }

    if (TheTok.isAtStartOfLine() && TheTok.getKind() == tok::hash) {
      // This is the start of a preprocessor directive.
      Token HashTok = TheTok;
      InPreprocessorDirective = true;
      ActiveCommentLoc = SourceLocation();

      // Figure out which directive this is. Since we're lexing raw tokens,
      // we don't have an identifier table available. Instead, just look at
      // the raw identifier to recognize and categorize preprocessor directives.
      TheLexer.LexFromRawLexer(TheTok);
      if (TheTok.getKind() == tok::raw_identifier && !TheTok.needsCleaning()) {
        StringRef Keyword = TheTok.getRawIdentifier();
        // NOTE(review): the declaration of 'PDK' (a PreambleDirectiveKind)
        // was dropped by this listing; restore it before compiling.
          = llvm::StringSwitch<PreambleDirectiveKind>(Keyword)
          .Case("include", PDK_Skipped)
          .Case("__include_macros", PDK_Skipped)
          .Case("define", PDK_Skipped)
          .Case("undef", PDK_Skipped)
          .Case("line", PDK_Skipped)
          .Case("error", PDK_Skipped)
          .Case("pragma", PDK_Skipped)
          .Case("import", PDK_Skipped)
          .Case("include_next", PDK_Skipped)
          .Case("warning", PDK_Skipped)
          .Case("ident", PDK_Skipped)
          .Case("sccs", PDK_Skipped)
          .Case("assert", PDK_Skipped)
          .Case("unassert", PDK_Skipped)
          .Case("if", PDK_Skipped)
          .Case("ifdef", PDK_Skipped)
          .Case("ifndef", PDK_Skipped)
          .Case("elif", PDK_Skipped)
          .Case("else", PDK_Skipped)
          .Case("endif", PDK_Skipped)
          .Default(PDK_Unknown);

        switch (PDK) {
        case PDK_Skipped:
          continue;

        case PDK_Unknown:
          // We don't know what this directive is; stop at the '#'.
          break;
        }
      }

      // We only end up here if we didn't recognize the preprocessor
      // directive or it was one that can't occur in the preamble at this
      // point. Roll back the current token to the location of the '#'.
      InPreprocessorDirective = false;
      TheTok = HashTok;
    }

    // We hit a token that we don't recognize as being in the
    // "preprocessing only" part of the file, so we're no longer in
    // the preamble.
    break;
  } while (true);

  // NOTE(review): the declaration of 'End' (a SourceLocation) was dropped by
  // this listing; restore it before compiling.
  if (ActiveCommentLoc.isValid())
    End = ActiveCommentLoc; // don't truncate a decl comment.
  else
    End = TheTok.getLocation();

  return PreambleBounds(End.getRawEncoding() - FileLoc.getRawEncoding(),
                        TheTok.isAtStartOfLine());
}
691 
/// AdvanceToTokenCharacter - Given a location that specifies the start of a
/// token, return a new location that specifies a character within the token.
// NOTE(review): the first signature line (upstream:
// 'SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,')
// was dropped by this listing.
                                              unsigned CharNo,
                                              const SourceManager &SM,
                                              const LangOptions &LangOpts) {
  // Figure out how many physical characters away the specified expansion
  // character is. This needs to take into consideration newlines and
  // trigraphs.
  bool Invalid = false;
  const char *TokPtr = SM.getCharacterData(TokStart, &Invalid);

  // If they request the first char of the token, we're trivially done.
  if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
    return TokStart;

  unsigned PhysOffset = 0;

  // The usual case is that tokens don't contain anything interesting. Skip
  // over the uninteresting characters. If a token only consists of simple
  // chars, this method is extremely fast.
  while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
    if (CharNo == 0)
      return TokStart.getLocWithOffset(PhysOffset);
    ++TokPtr;
    --CharNo;
    ++PhysOffset;
  }

  // If we have a character that may be a trigraph or escaped newline, use a
  // lexer to parse it correctly.
  for (; CharNo; --CharNo) {
    unsigned Size;
    Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts);
    TokPtr += Size;
    PhysOffset += Size;
  }

  // Final detail: if we end up on an escaped newline, we want to return the
  // location of the actual byte of the token. For example foo<newline>bar
  // advanced by 3 should return the location of b, not of \\. One compounding
  // detail of this is that the escape may be made by a trigraph.
  if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
    PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;

  return TokStart.getLocWithOffset(PhysOffset);
}
739 
/// \brief Computes the source location just past the end of the
/// token at this source location.
///
/// This routine can be used to produce a source location that
/// points just past the end of the token referenced by \p Loc, and
/// is generally used when a diagnostic needs to point just after a
/// token where it expected something different that it received. If
/// the returned source location would not be meaningful (e.g., if
/// it points into a macro), this routine returns an invalid
/// source location.
///
/// \param Offset an offset from the end of the token, where the source
/// location should refer to. The default offset (0) produces a source
/// location pointing just past the end of the token; an offset of 1 produces
/// a source location pointing to the last character in the token, etc.
// NOTE(review): the first signature line (upstream:
// 'SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,')
// was dropped by this listing.
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isInvalid())
    return SourceLocation();

  if (Loc.isMacroID()) {
    // Only a location at the very end of a macro expansion can be mapped to
    // a meaningful file location.
    if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
      return SourceLocation(); // Points inside the macro expansion.
  }

  unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  if (Len > Offset)
    Len = Len - Offset;
  else
    return Loc;

  return Loc.getLocWithOffset(Len);
}
774 
/// \brief Returns true if the given MacroID location points at the first
/// token of the macro expansion.
// NOTE(review): the first signature line (upstream:
// 'bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,') was dropped
// by this listing.
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts,
                                          SourceLocation *MacroBegin) {
  assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");

  SourceLocation expansionLoc;
  if (!SM.isAtStartOfImmediateMacroExpansion(loc, &expansionLoc))
    return false;

  if (expansionLoc.isFileID()) {
    // No other macro expansions, this is the first.
    if (MacroBegin)
      *MacroBegin = expansionLoc;
    return true;
  }

  // Still inside a macro: recurse upward through nested expansions.
  return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin);
}
796 
/// \brief Returns true if the given MacroID location points at the last
/// token of the macro expansion.
// NOTE(review): the first signature line (upstream:
// 'bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,') was dropped by
// this listing.
                                        const SourceManager &SM,
                                        const LangOptions &LangOpts,
                                        SourceLocation *MacroEnd) {
  assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");

  // Measure the token so we can test the location just past it.
  SourceLocation spellLoc = SM.getSpellingLoc(loc);
  unsigned tokLen = MeasureTokenLength(spellLoc, SM, LangOpts);
  if (tokLen == 0)
    return false;

  SourceLocation afterLoc = loc.getLocWithOffset(tokLen);
  SourceLocation expansionLoc;
  if (!SM.isAtEndOfImmediateMacroExpansion(afterLoc, &expansionLoc))
    return false;

  if (expansionLoc.isFileID()) {
    // No other macro expansions.
    if (MacroEnd)
      *MacroEnd = expansionLoc;
    return true;
  }

  // Still inside a macro: recurse upward through nested expansions.
  return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts, MacroEnd);
}
824 
// Convert a range whose endpoints are both file locations into a character
// range within a single file, or an invalid CharSourceRange on failure.
// NOTE(review): the first signature line (upstream:
// 'static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,') was
// dropped by this listing.
                                             const SourceManager &SM,
                                             const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  assert(Begin.isFileID() && End.isFileID());
  if (Range.isTokenRange()) {
    // Token ranges end at the start of their final token; push End past it so
    // the result is a proper character range.
    End = Lexer::getLocForEndOfToken(End, 0, SM,LangOpts);
    if (End.isInvalid())
      return CharSourceRange();
  }

  // Break down the source locations.
  FileID FID;
  unsigned BeginOffs;
  std::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
  if (FID.isInvalid())
    return CharSourceRange();

  // Both endpoints must land in the same file, in order.
  unsigned EndOffs;
  if (!SM.isInFileID(End, FID, &EndOffs) ||
      BeginOffs > EndOffs)
    return CharSourceRange();

  return CharSourceRange::getCharRange(Begin, End);
}
851 
// Map \p Range (whose endpoints may be macro locations) onto a character
// range of real file locations, stepping endpoints out of macro expansions
// when they fall on expansion boundaries. Returns an invalid range when no
// such mapping exists.
// NOTE(review): the first signature line (upstream:
// 'CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,') was
// dropped by this listing.
                                         const SourceManager &SM,
                                         const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  if (Begin.isInvalid() || End.isInvalid())
    return CharSourceRange();

  // Easy case: both endpoints are already file locations.
  if (Begin.isFileID() && End.isFileID())
    return makeRangeFromFileLocs(Range, SM, LangOpts);

  // Begin is in a macro: it must sit at the very start of an expansion to be
  // mappable to a file location.
  if (Begin.isMacroID() && End.isFileID()) {
    if (!isAtStartOfMacroExpansion(Begin, SM, LangOpts, &Begin))
      return CharSourceRange();
    Range.setBegin(Begin);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // End is in a macro: for a token range it must end an expansion; for a char
  // range (which excludes End) it must start one.
  if (Begin.isFileID() && End.isMacroID()) {
    if ((Range.isTokenRange() && !isAtEndOfMacroExpansion(End, SM, LangOpts,
                                                          &End)) ||
        (Range.isCharRange() && !isAtStartOfMacroExpansion(End, SM, LangOpts,
                                                           &End)))
      return CharSourceRange();
    Range.setEnd(End);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // Both endpoints are macro locations: try to step both out simultaneously.
  assert(Begin.isMacroID() && End.isMacroID());
  SourceLocation MacroBegin, MacroEnd;
  if (isAtStartOfMacroExpansion(Begin, SM, LangOpts, &MacroBegin) &&
      ((Range.isTokenRange() && isAtEndOfMacroExpansion(End, SM, LangOpts,
                                                        &MacroEnd)) ||
       (Range.isCharRange() && isAtStartOfMacroExpansion(End, SM, LangOpts,
                                                         &MacroEnd)))) {
    Range.setBegin(MacroBegin);
    Range.setEnd(MacroEnd);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // Last resort: if both endpoints are spelled inside arguments of the same
  // macro invocation, retry on their spelling locations.
  bool Invalid = false;
  const SrcMgr::SLocEntry &BeginEntry = SM.getSLocEntry(SM.getFileID(Begin),
                                                        &Invalid);
  if (Invalid)
    return CharSourceRange();

  if (BeginEntry.getExpansion().isMacroArgExpansion()) {
    const SrcMgr::SLocEntry &EndEntry = SM.getSLocEntry(SM.getFileID(End),
                                                        &Invalid);
    if (Invalid)
      return CharSourceRange();

    if (EndEntry.getExpansion().isMacroArgExpansion() &&
        BeginEntry.getExpansion().getExpansionLocStart() ==
            EndEntry.getExpansion().getExpansionLocStart()) {
      Range.setBegin(SM.getImmediateSpellingLoc(Begin));
      Range.setEnd(SM.getImmediateSpellingLoc(End));
      return makeFileCharRange(Range, SM, LangOpts);
    }
  }

  return CharSourceRange();
}
915 
                                 const SourceManager &SM,
                                 const LangOptions &LangOpts,
                                 bool *Invalid) {
  // NOTE(review): the opening line of this definition (StringRef
  // Lexer::getSourceText(CharSourceRange Range,) was lost in extraction.
  //
  // Returns the source text covered by Range after mapping it down to a
  // single-file character range; on any failure returns an empty StringRef
  // and sets *Invalid (when provided).
  Range = makeFileCharRange(Range, SM, LangOpts);
  if (Range.isInvalid()) {
    if (Invalid) *Invalid = true;
    return StringRef();
  }

  // Break down the source location.
  std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(Range.getBegin());
  if (beginInfo.first.isInvalid()) {
    if (Invalid) *Invalid = true;
    return StringRef();
  }

  // Both endpoints must land in the same FileID and be correctly ordered.
  unsigned EndOffs;
  if (!SM.isInFileID(Range.getEnd(), beginInfo.first, &EndOffs) ||
      beginInfo.second > EndOffs) {
    if (Invalid) *Invalid = true;
    return StringRef();
  }

  // Try to load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(beginInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (Invalid) *Invalid = true;
    return StringRef();
  }

  if (Invalid) *Invalid = false;
  return file.substr(beginInfo.second, EndOffs - beginInfo.second);
}
951 
                                         const SourceManager &SM,
                                         const LangOptions &LangOpts) {
  // NOTE(review): the opening line of this definition (StringRef
  // Lexer::getImmediateMacroName(SourceLocation Loc,) was lost in extraction.
  assert(Loc.isMacroID() && "Only reasonble to call this on macros");

  // Find the location of the immediate macro expansion.
  while (true) {
    FileID FID = SM.getFileID(Loc);
    const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
    const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
    Loc = Expansion.getExpansionLocStart();
    if (!Expansion.isMacroArgExpansion())
      break;

    // For macro arguments we need to check that the argument did not come
    // from an inner macro, e.g: "MAC1( MAC2(foo) )"

    // Loc points to the argument id of the macro definition, move to the
    // macro expansion.
    Loc = SM.getImmediateExpansionRange(Loc).first;
    SourceLocation SpellLoc = Expansion.getSpellingLoc();
    if (SpellLoc.isFileID())
      break; // No inner macro.

    // If spelling location resides in the same FileID as macro expansion
    // location, it means there is no inner macro.
    FileID MacroFID = SM.getFileID(Loc);
    if (SM.isInFileID(SpellLoc, MacroFID))
      break;

    // Argument came from inner macro.
    Loc = SpellLoc;
  }

  // Find the spelling location of the start of the non-argument expansion
  // range. This is where the macro name was spelled in order to begin
  // expanding this macro.
  Loc = SM.getSpellingLoc(Loc);

  // Dig out the buffer where the macro name was spelled and the extents of the
  // name so that we can render it into the expansion note.
  std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
  unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
  return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}
998 
    SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts) {
  // NOTE(review): the line introducing this definition (StringRef
  // Lexer::getImmediateMacroNameForDiagnostics() was lost in extraction.
  assert(Loc.isMacroID() && "Only reasonble to call this on macros");
  // Walk past macro argument expansions.
  while (SM.isMacroArgExpansion(Loc))
    Loc = SM.getImmediateExpansionRange(Loc).first;

  // If the macro's spelling has no FileID, then it's actually a token paste
  // or stringization (or similar) and not a macro at all.
  if (!SM.getFileEntryForID(SM.getFileID(SM.getSpellingLoc(Loc))))
    return StringRef();

  // Find the spelling location of the start of the non-argument expansion
  // range. This is where the macro name was spelled in order to begin
  // expanding this macro.
  Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).first);

  // Dig out the buffer where the macro name was spelled and the extents of the
  // name so that we can render it into the expansion note.
  std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
  unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
  return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}
1023 
1024 bool Lexer::isIdentifierBodyChar(char c, const LangOptions &LangOpts) {
1025  return isIdentifierBody(c, LangOpts.DollarIdents);
1026 }
1027 
1028 bool Lexer::isNewLineEscaped(const char *BufferStart, const char *Str) {
1029  assert(isVerticalWhitespace(Str[0]));
1030  if (Str - 1 < BufferStart)
1031  return false;
1032 
1033  if ((Str[0] == '\n' && Str[-1] == '\r') ||
1034  (Str[0] == '\r' && Str[-1] == '\n')) {
1035  if (Str - 2 < BufferStart)
1036  return false;
1037  --Str;
1038  }
1039  --Str;
1040 
1041  // Rewind to first non-space character:
1042  while (Str > BufferStart && isHorizontalWhitespace(*Str))
1043  --Str;
1044 
1045  return *Str == '\\';
1046 }
1047 
                                   const SourceManager &SM) {
  // NOTE(review): the opening line of this definition (StringRef
  // Lexer::getIndentationForLine(SourceLocation Loc,) was lost in extraction.
  //
  // Returns the leading whitespace (spaces/tabs) of the physical line
  // containing Loc, or "" when it cannot be determined.
  if (Loc.isInvalid() || Loc.isMacroID())
    return "";
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  if (LocInfo.first.isInvalid())
    return "";
  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return "";
  // Walk back to the beginning of the physical line containing Loc.
  const char *Line = findBeginningOfLine(Buffer, LocInfo.second);
  if (!Line)
    return "";
  StringRef Rest = Buffer.substr(Line - Buffer.data());
  size_t NumWhitespaceChars = Rest.find_first_not_of(" \t");
  // npos means the line is all whitespace; report no indentation then.
  return NumWhitespaceChars == StringRef::npos
             ? ""
             : Rest.take_front(NumWhitespaceChars);
}
1068 
1069 //===----------------------------------------------------------------------===//
1070 // Diagnostics forwarding code.
1071 //===----------------------------------------------------------------------===//
1072 
/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
/// lexer buffer was all expanded at a single point, perform the mapping.
/// This is currently only used for _Pragma implementation, so it is the slow
/// path of the hot getSourceLocation method.  Do not allow it to be inlined.
static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
    Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen);
                                        SourceLocation FileLoc,
                                        unsigned CharNo, unsigned TokLen) {
  // NOTE(review): the opening line of this definition (static SourceLocation
  // GetMappedTokenLoc(Preprocessor &PP,) and the declaration of the local
  // SourceManager reference `SM` (presumably obtained from PP) were lost in
  // extraction -- confirm against upstream.
  assert(FileLoc.isMacroID() && "Must be a macro expansion");

  // Otherwise, we're lexing "mapped tokens".  This is used for things like
  // _Pragma handling.  Combine the expansion location of FileLoc with the
  // spelling location.

  // Create a new SLoc which is expanded from Expansion(FileLoc) but whose
  // characters come from spelling(FileLoc)+Offset.
  SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
  SpellingLoc = SpellingLoc.getLocWithOffset(CharNo);

  // Figure out the expansion loc range, which is the range covered by the
  // original _Pragma(...) sequence.
  std::pair<SourceLocation,SourceLocation> II =
    SM.getImmediateExpansionRange(FileLoc);

  return SM.createExpansionLoc(SpellingLoc, II.first, II.second, TokLen);
}
1101 
/// getSourceLocation - Return a source location identifier for the specified
/// offset in the current file.
                                       unsigned TokLen) const {
  // NOTE(review): the opening line of this definition (SourceLocation
  // Lexer::getSourceLocation(const char *Loc,) was lost in extraction.
  assert(Loc >= BufferStart && Loc <= BufferEnd &&
         "Location out of range for this buffer!");

  // In the normal case, we're just lexing from a simple file buffer, return
  // the file id from FileLoc with the offset specified.
  unsigned CharNo = Loc-BufferStart;
  if (FileLoc.isFileID())
    return FileLoc.getLocWithOffset(CharNo);

  // Otherwise, this is the _Pragma lexer case, which pretends that all of the
  // tokens are lexed from where the _Pragma was defined.
  assert(PP && "This doesn't work on raw lexers");
  return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
}
1120 
/// Diag - Forwarding function for diagnostics.  This translates a source
/// position in the current buffer into a SourceLocation object for rendering.
DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
  // Requires a non-null preprocessor (i.e. not a raw lexer); see the assert
  // in getSourceLocation.
  return PP->Diag(getSourceLocation(Loc), DiagID);
}
1126 
1127 //===----------------------------------------------------------------------===//
1128 // Trigraph and Escaped Newline Handling Code.
1129 //===----------------------------------------------------------------------===//
1130 
/// GetTrigraphCharForLetter - Given the character that occurs after a "??"
/// pair, return the character the trigraph decodes to, or '\0' if the
/// sequence is not a trigraph.
static char GetTrigraphCharForLetter(char Letter) {
  // Trigraph third characters and their decoded forms, index-aligned.
  static const char Source[]  = "=)(!'>/<-";
  static const char Decoded[] = "#][|^}\\{~";
  if (Letter == '\0')
    return 0;
  const char *Pos = std::strchr(Source, Letter);
  return Pos ? Decoded[Pos - Source] : 0;
}
1147 
1148 /// DecodeTrigraphChar - If the specified character is a legal trigraph when
1149 /// prefixed with ??, emit a trigraph warning. If trigraphs are enabled,
1150 /// return the result character. Finally, emit a warning about trigraph use
1151 /// whether trigraphs are enabled or not.
1152 static char DecodeTrigraphChar(const char *CP, Lexer *L) {
1153  char Res = GetTrigraphCharForLetter(*CP);
1154  if (!Res || !L) return Res;
1155 
1156  if (!L->getLangOpts().Trigraphs) {
1157  if (!L->isLexingRawMode())
1158  L->Diag(CP-2, diag::trigraph_ignored);
1159  return 0;
1160  }
1161 
1162  if (!L->isLexingRawMode())
1163  L->Diag(CP-2, diag::trigraph_converted) << StringRef(&Res, 1);
1164  return Res;
1165 }
1166 
1167 /// getEscapedNewLineSize - Return the size of the specified escaped newline,
1168 /// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a
1169 /// trigraph equivalent on entry to this function.
1170 unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
1171  unsigned Size = 0;
1172  while (isWhitespace(Ptr[Size])) {
1173  ++Size;
1174 
1175  if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
1176  continue;
1177 
1178  // If this is a \r\n or \n\r, skip the other half.
1179  if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
1180  Ptr[Size-1] != Ptr[Size])
1181  ++Size;
1182 
1183  return Size;
1184  }
1185 
1186  // Not an escaped newline, must be a \t or something else.
1187  return 0;
1188 }
1189 
1190 /// SkipEscapedNewLines - If P points to an escaped newline (or a series of
1191 /// them), skip over them and return the first non-escaped-newline found,
1192 /// otherwise return P.
1193 const char *Lexer::SkipEscapedNewLines(const char *P) {
1194  while (true) {
1195  const char *AfterEscape;
1196  if (*P == '\\') {
1197  AfterEscape = P+1;
1198  } else if (*P == '?') {
1199  // If not a trigraph for escape, bail out.
1200  if (P[1] != '?' || P[2] != '/')
1201  return P;
1202  // FIXME: Take LangOpts into account; the language might not
1203  // support trigraphs.
1204  AfterEscape = P+3;
1205  } else {
1206  return P;
1207  }
1208 
1209  unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
1210  if (NewLineSize == 0) return P;
1211  P = AfterEscape+NewLineSize;
1212  }
1213 }
1214 
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  // NOTE(review): the opening line of this definition (Optional<Token>
  // Lexer::findNextToken(SourceLocation Loc,) was lost in extraction.
  //
  // Raw-lexes and returns the first token that begins after Loc, or None on
  // failure.
  if (Loc.isMacroID()) {
    // Only macro locations that sit at the end of their expansion can be
    // rewritten to a usable file location.
    if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
      return None;
  }
  Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);

  // Break down the source location.
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);

  // Try to load the file buffer.
  bool InvalidTemp = false;
  StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
  if (InvalidTemp)
    return None;

  const char *TokenBegin = File.data() + LocInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(),
              TokenBegin, File.end());
  // Find the token.
  Token Tok;
  lexer.LexFromRawLexer(Tok);
  return Tok;
}
1243 
/// \brief Checks that the given token is the first token that occurs after the
/// given location (this excludes comments and whitespace). Returns the location
/// immediately after the specified token. If the token is not found or the
/// location is inside a macro, the returned source location will be invalid.
    SourceLocation Loc, tok::TokenKind TKind, const SourceManager &SM,
    const LangOptions &LangOpts, bool SkipTrailingWhitespaceAndNewLine) {
  // NOTE(review): the line introducing this definition (SourceLocation
  // Lexer::findLocationAfterToken() was lost in extraction.
  Optional<Token> Tok = findNextToken(Loc, SM, LangOpts);
  if (!Tok || Tok->isNot(TKind))
    return SourceLocation();
  SourceLocation TokenLoc = Tok->getLocation();

  // Calculate how much whitespace needs to be skipped if any.
  unsigned NumWhitespaceChars = 0;
  if (SkipTrailingWhitespaceAndNewLine) {
    const char *TokenEnd = SM.getCharacterData(TokenLoc) + Tok->getLength();
    unsigned char C = *TokenEnd;
    // Consume any run of horizontal whitespace after the token.
    while (isHorizontalWhitespace(C)) {
      C = *(++TokenEnd);
      NumWhitespaceChars++;
    }

    // Skip \r, \n, \r\n, or \n\r
    if (C == '\n' || C == '\r') {
      char PrevC = C;
      C = *(++TokenEnd);
      NumWhitespaceChars++;
      // Count the second half of a two-character newline pair.
      if ((C == '\n' || C == '\r') && C != PrevC)
        NumWhitespaceChars++;
    }
  }

  return TokenLoc.getLocWithOffset(Tok->getLength() + NumWhitespaceChars);
}
1278 
/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it.  This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///
/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters between the slash and
    // newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      // Warn if there was whitespace between the backslash and newline.
      if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
        Diag(Ptr, diag::backslash_newline_space);

      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlow(Ptr, Size, Tok);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    // Pass the Lexer only when a Token is being formed, so raw peeks don't
    // emit diagnostics.
    if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : nullptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      // A decoded '\' may itself begin an escaped newline; re-enter the
      // escape handling above.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
1346 
/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &LangOpts) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (LangOpts.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      // A decoded '\' may begin an escaped newline; re-enter escape handling.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
1393 
1394 //===----------------------------------------------------------------------===//
1395 // Helper methods for lexing.
1396 //===----------------------------------------------------------------------===//
1397 
1398 /// \brief Routine that indiscriminately sets the offset into the source file.
1399 void Lexer::SetByteOffset(unsigned Offset, bool StartOfLine) {
1400  BufferPtr = BufferStart + Offset;
1401  if (BufferPtr > BufferEnd)
1402  BufferPtr = BufferEnd;
1403  // FIXME: What exactly does the StartOfLine bit mean? There are two
1404  // possible meanings for the "start" of the line: the first token on the
1405  // unexpanded line, or the first token on the expanded line.
1406  IsAtStartOfLine = StartOfLine;
1407  IsAtPhysicalStartOfLine = StartOfLine;
1408 }
1409 
// Return true if the code point C may appear anywhere in an identifier under
// the active language mode's extended-identifier rules.
static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts) {
  if (LangOpts.AsmPreprocessor) {
    // The asm preprocessor accepts no extended identifier characters.
    return false;
  } else if (LangOpts.CPlusPlus11 || LangOpts.C11) {
    static const llvm::sys::UnicodeCharSet C11AllowedIDChars(
    // NOTE(review): the argument line naming the C11 allowed-ID-char range
    // table was lost in extraction.
    return C11AllowedIDChars.contains(C);
  } else if (LangOpts.CPlusPlus) {
    static const llvm::sys::UnicodeCharSet CXX03AllowedIDChars(
    // NOTE(review): the range-table argument line was lost in extraction.
    return CXX03AllowedIDChars.contains(C);
  } else {
    static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
    // NOTE(review): the range-table argument line was lost in extraction.
    return C99AllowedIDChars.contains(C);
  }
}
1427 
// Return true if the code point C may appear as the *first* character of an
// identifier.  Must only be called on code points already accepted by
// isAllowedIDChar (see the assert).
static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts) {
  assert(isAllowedIDChar(C, LangOpts));
  if (LangOpts.AsmPreprocessor) {
    return false;
  } else if (LangOpts.CPlusPlus11 || LangOpts.C11) {
    static const llvm::sys::UnicodeCharSet C11DisallowedInitialIDChars(
    // NOTE(review): the argument line naming the disallowed-initial range
    // table was lost in extraction.
    return !C11DisallowedInitialIDChars.contains(C);
  } else if (LangOpts.CPlusPlus) {
    // C++98/03 imposes no additional restriction on the initial character.
    return true;
  } else {
    static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
    // NOTE(review): the range-table argument line was lost in extraction.
    return !C99DisallowedInitialIDChars.contains(C);
  }
}
1444 
// Build a CharSourceRange covering [Begin, End) in L's buffer.
static inline CharSourceRange makeCharRange(Lexer &L, const char *Begin,
                                            const char *End) {
  // NOTE(review): the first line of the return statement (constructing the
  // char range from L.getSourceLocation(Begin) and ...) was lost in
  // extraction.
                              L.getSourceLocation(End));
}
1450 
// Emit -Wc99-compat / -Wc++98-compat warnings when an extended identifier
// character is valid in the current mode but would not have been valid in the
// earlier standard.  IsFirst indicates the character starts the identifier.
static void maybeDiagnoseIDCharCompat(DiagnosticsEngine &Diags, uint32_t C,
                                      CharSourceRange Range, bool IsFirst) {
  // Check C99 compatibility.
  if (!Diags.isIgnored(diag::warn_c99_compat_unicode_id, Range.getBegin())) {
    enum {
      CannotAppearInIdentifier = 0,
      CannotStartIdentifier
    };

    static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
    // NOTE(review): the range-table argument line was lost in extraction.
    static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
    // NOTE(review): the range-table argument line was lost in extraction.
    if (!C99AllowedIDChars.contains(C)) {
      Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
        << Range
        << CannotAppearInIdentifier;
    } else if (IsFirst && C99DisallowedInitialIDChars.contains(C)) {
      Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
        << Range
        << CannotStartIdentifier;
    }
  }

  // Check C++98 compatibility.
  if (!Diags.isIgnored(diag::warn_cxx98_compat_unicode_id, Range.getBegin())) {
    static const llvm::sys::UnicodeCharSet CXX03AllowedIDChars(
    // NOTE(review): the range-table argument line was lost in extraction.
    if (!CXX03AllowedIDChars.contains(C)) {
      Diags.Report(Range.getBegin(), diag::warn_cxx98_compat_unicode_id)
        << Range;
    }
  }
}
1485 
1486 bool Lexer::tryConsumeIdentifierUCN(const char *&CurPtr, unsigned Size,
1487  Token &Result) {
1488  const char *UCNPtr = CurPtr + Size;
1489  uint32_t CodePoint = tryReadUCN(UCNPtr, CurPtr, /*Token=*/nullptr);
1490  if (CodePoint == 0 || !isAllowedIDChar(CodePoint, LangOpts))
1491  return false;
1492 
1493  if (!isLexingRawMode())
1494  maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
1495  makeCharRange(*this, CurPtr, UCNPtr),
1496  /*IsFirst=*/false);
1497 
1498  Result.setFlag(Token::HasUCN);
1499  if ((UCNPtr - CurPtr == 6 && CurPtr[1] == 'u') ||
1500  (UCNPtr - CurPtr == 10 && CurPtr[1] == 'U'))
1501  CurPtr = UCNPtr;
1502  else
1503  while (CurPtr != UCNPtr)
1504  (void)getAndAdvanceChar(CurPtr, Result);
1505  return true;
1506 }
1507 
1508 bool Lexer::tryConsumeIdentifierUTF8Char(const char *&CurPtr) {
1509  const char *UnicodePtr = CurPtr;
1510  llvm::UTF32 CodePoint;
1511  llvm::ConversionResult Result =
1512  llvm::convertUTF8Sequence((const llvm::UTF8 **)&UnicodePtr,
1513  (const llvm::UTF8 *)BufferEnd,
1514  &CodePoint,
1515  llvm::strictConversion);
1516  if (Result != llvm::conversionOK ||
1517  !isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts))
1518  return false;
1519 
1520  if (!isLexingRawMode())
1521  maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
1522  makeCharRange(*this, CurPtr, UnicodePtr),
1523  /*IsFirst=*/false);
1524 
1525  CurPtr = UnicodePtr;
1526  return true;
1527 }
1528 
/// LexIdentifier - Lex the remainder of a raw identifier; CurPtr points just
/// past the first, already-matched identifier character.
bool Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
  unsigned Size;
  unsigned char C = *CurPtr++;
  while (isIdentifierBody(C))
    C = *CurPtr++;

  --CurPtr;   // Back up over the skipped character.

  // Fast path, no $,\,? in identifier found.  '\' might be an escaped newline
  // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
  //
  // TODO: Could merge these checks into an InfoTable flag to make the
  // comparison cheaper
  if (isASCII(C) && C != '\\' && C != '?' &&
      (C != '$' || !LangOpts.DollarIdents)) {
FinishIdentifier:
    const char *IdStart = BufferPtr;
    FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
    Result.setRawIdentifierData(IdStart);

    // If we are in raw mode, return this identifier raw.  There is no need to
    // look up identifier information or attempt to macro expand it.
    if (LexingRawMode)
      return true;

    // Fill in Result.IdentifierInfo and update the token kind,
    // looking up the identifier in the identifier table.
    IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);

    // Finally, now that we know we have an identifier, pass this off to the
    // preprocessor, which may macro expand it or something.
    if (II->isHandleIdentifierCase())
      return PP->HandleIdentifier(Result);

    // Plain identifiers at a code-completion point become code-completion
    // tokens and stop further lexing.
    if (II->getTokenID() == tok::identifier && isCodeCompletionPoint(CurPtr)
        && II->getPPKeywordID() == tok::pp_not_keyword
        && II->getObjCKeywordID() == tok::objc_not_keyword) {
      // Return the code-completion token.
      Result.setKind(tok::code_completion);
      cutOffLexing();
      return true;
    }
    return true;
  }

  // Otherwise, $,\,? in identifier found.  Enter slower path.

  C = getCharAndSize(CurPtr, Size);
  while (true) {
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!LangOpts.DollarIdents) goto FinishIdentifier;

      // Otherwise, emit a diagnostic and continue.
      if (!isLexingRawMode())
        Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
      continue;

    } else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {
      // Consumed a UCN identifier character; keep going.
      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {
      // Consumed a UTF-8 identifier character; keep going.
      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isIdentifierBody(C)) {
      goto FinishIdentifier;
    }

    // Otherwise, this character is good, consume it.
    CurPtr = ConsumeChar(CurPtr, Size, Result);

    C = getCharAndSize(CurPtr, Size);
    while (isIdentifierBody(C)) {
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    }
  }
}
1610 
1611 /// isHexaLiteral - Return true if Start points to a hex constant.
1612 /// in microsoft mode (where this is supposed to be several different tokens).
1613 bool Lexer::isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
1614  unsigned Size;
1615  char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
1616  if (C1 != '0')
1617  return false;
1618  char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
1619  return (C2 == 'x' || C2 == 'X');
1620 }
1621 
1622 /// LexNumericConstant - Lex the remainder of a integer or floating point
1623 /// constant. From[-1] is the first character lexed. Return the end of the
1624 /// constant.
1625 bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
1626  unsigned Size;
1627  char C = getCharAndSize(CurPtr, Size);
1628  char PrevCh = 0;
1629  while (isPreprocessingNumberBody(C)) {
1630  CurPtr = ConsumeChar(CurPtr, Size, Result);
1631  PrevCh = C;
1632  C = getCharAndSize(CurPtr, Size);
1633  }
1634 
1635  // If we fell out, check for a sign, due to 1e+12. If we have one, continue.
1636  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
1637  // If we are in Microsoft mode, don't continue if the constant is hex.
1638  // For example, MSVC will accept the following as 3 tokens: 0x1234567e+1
1639  if (!LangOpts.MicrosoftExt || !isHexaLiteral(BufferPtr, LangOpts))
1640  return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
1641  }
1642 
1643  // If we have a hex FP constant, continue.
1644  if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p')) {
1645  // Outside C99 and C++17, we accept hexadecimal floating point numbers as a
1646  // not-quite-conforming extension. Only do so if this looks like it's
1647  // actually meant to be a hexfloat, and not if it has a ud-suffix.
1648  bool IsHexFloat = true;
1649  if (!LangOpts.C99) {
1650  if (!isHexaLiteral(BufferPtr, LangOpts))
1651  IsHexFloat = false;
1652  else if (!getLangOpts().CPlusPlus1z &&
1653  std::find(BufferPtr, CurPtr, '_') != CurPtr)
1654  IsHexFloat = false;
1655  }
1656  if (IsHexFloat)
1657  return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
1658  }
1659 
1660  // If we have a digit separator, continue.
1661  if (C == '\'' && getLangOpts().CPlusPlus14) {
1662  unsigned NextSize;
1663  char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, getLangOpts());
1664  if (isIdentifierBody(Next)) {
1665  if (!isLexingRawMode())
1666  Diag(CurPtr, diag::warn_cxx11_compat_digit_separator);
1667  CurPtr = ConsumeChar(CurPtr, Size, Result);
1668  CurPtr = ConsumeChar(CurPtr, NextSize, Result);
1669  return LexNumericConstant(Result, CurPtr);
1670  }
1671  }
1672 
1673  // If we have a UCN or UTF-8 character (perhaps in a ud-suffix), continue.
1674  if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
1675  return LexNumericConstant(Result, CurPtr);
1676  if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
1677  return LexNumericConstant(Result, CurPtr);
1678 
1679  // Update the location of token as well as BufferPtr.
1680  const char *TokStart = BufferPtr;
1681  FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
1682  Result.setLiteralData(TokStart);
1683  return true;
1684 }
1685 
/// LexUDSuffix - Lex the ud-suffix production for user-defined literal suffixes
/// in C++11, or warn on a ud-suffix in C++98.
const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
                               bool IsStringLiteral) {
  assert(getLangOpts().CPlusPlus);

  // Maximally munch an identifier.
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  bool Consumed = false;

  if (!isIdentifierHead(C)) {
    // A suffix may also begin with a UCN or a UTF-8-encoded character.
    if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
      Consumed = true;
    else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
      Consumed = true;
    else
      return CurPtr;
  }

  if (!getLangOpts().CPlusPlus11) {
    // Pre-C++11: the suffix is not lexed as part of the literal; only warn.
    // NOTE(review): the continuation line terminating this Diag statement
    // (a streamed fix-it hint ending with ';') was lost in extraction.
    if (!isLexingRawMode())
      Diag(CurPtr,
           C == '_' ? diag::warn_cxx11_compat_user_defined_literal
                    : diag::warn_cxx11_compat_reserved_user_defined_literal)
    return CurPtr;
  }

  // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix
  // that does not start with an underscore is ill-formed. As a conforming
  // extension, we treat all such suffixes as if they had whitespace before
  // them. We assume a suffix beginning with a UCN or UTF-8 character is more
  // likely to be a ud-suffix than a macro, however, and accept that.
  if (!Consumed) {
    bool IsUDSuffix = false;
    if (C == '_')
      IsUDSuffix = true;
    else if (IsStringLiteral && getLangOpts().CPlusPlus14) {
      // In C++1y, we need to look ahead a few characters to see if this is a
      // valid suffix for a string literal or a numeric literal (this could be
      // the 'operator""if' defining a numeric literal operator).
      const unsigned MaxStandardSuffixLength = 3;
      char Buffer[MaxStandardSuffixLength] = { C };
      unsigned Consumed = Size;
      unsigned Chars = 1;
      while (true) {
        unsigned NextSize;
        char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize,
                                         getLangOpts());
        if (!isIdentifierBody(Next)) {
          // End of suffix. Check whether this is on the whitelist.
          const StringRef CompleteSuffix(Buffer, Chars);
          // NOTE(review): the line assigning IsUDSuffix from the
          // standard-suffix whitelist check was lost in extraction.
                                              CompleteSuffix);
          break;
        }

        if (Chars == MaxStandardSuffixLength)
          // Too long: can't be a standard suffix.
          break;

        Buffer[Chars++] = Next;
        Consumed += NextSize;
      }
    }

    if (!IsUDSuffix) {
      // NOTE(review): the fix-it continuation line terminating this Diag
      // statement was lost in extraction.
      if (!isLexingRawMode())
        Diag(CurPtr, getLangOpts().MSVCCompat
                         ? diag::ext_ms_reserved_user_defined_literal
                         : diag::ext_reserved_user_defined_literal)
      return CurPtr;
    }

    CurPtr = ConsumeChar(CurPtr, Size, Result);
  }

  // Consume the rest of the suffix identifier (including UCNs and UTF-8).
  Result.setFlag(Token::HasUDSuffix);
  while (true) {
    C = getCharAndSize(CurPtr, Size);
    if (isIdentifierBody(C)) { CurPtr = ConsumeChar(CurPtr, Size, Result); }
    else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {}
    else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {}
    else break;
  }

  return CurPtr;
}
1776 
1777 /// LexStringLiteral - Lex the remainder of a string literal, after having lexed
1778 /// either " or L" or u8" or u" or U".
bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
                             tok::TokenKind Kind) {
  // Does this string contain the \0 character?
  const char *NulCharacter = nullptr;

  // Unicode string literals are a compatibility hazard for older standards;
  // warn unless we're just skimming.
  if (!isLexingRawMode() &&
      (Kind == tok::utf8_string_literal ||
       Kind == tok::utf16_string_literal ||
       Kind == tok::utf32_string_literal))
    Diag(BufferPtr, getLangOpts().CPlusPlus
                        ? diag::warn_cxx98_compat_unicode_literal
                        : diag::warn_c99_compat_unicode_literal);

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters. Escaped newlines will already be processed by
    // getAndAdvanceChar.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      // Unterminated string: diagnose (the << 1 selects the "string" wording)
      // and hand back an unknown token covering what we consumed.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 1;
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }

    if (C == 0) {
      if (isCodeCompletionPoint(CurPtr-1)) {
        // NOTE(review): a line appears to be missing from this rendering
        // (likely a natural-language code-completion callback on PP) —
        // verify against upstream.
        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
        cutOffLexing();
        return true;
      }

      // Remember the embedded nul so we can warn once after the loop.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (getLangOpts().CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, true);

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 1;

  // Update the location of the token as well as the BufferPtr instance var.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}
1834 
1835 /// LexRawStringLiteral - Lex the remainder of a raw string literal, after
1836 /// having lexed R", LR", u8R", uR", or UR".
1837 bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
1838  tok::TokenKind Kind) {
1839  // This function doesn't use getAndAdvanceChar because C++0x [lex.pptoken]p3:
1840  // Between the initial and final double quote characters of the raw string,
1841  // any transformations performed in phases 1 and 2 (trigraphs,
1842  // universal-character-names, and line splicing) are reverted.
1843 
1844  if (!isLexingRawMode())
1845  Diag(BufferPtr, diag::warn_cxx98_compat_raw_string_literal);
1846 
1847  unsigned PrefixLen = 0;
1848 
1849  while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen]))
1850  ++PrefixLen;
1851 
1852  // If the last character was not a '(', then we didn't lex a valid delimiter.
1853  if (CurPtr[PrefixLen] != '(') {
1854  if (!isLexingRawMode()) {
1855  const char *PrefixEnd = &CurPtr[PrefixLen];
1856  if (PrefixLen == 16) {
1857  Diag(PrefixEnd, diag::err_raw_delim_too_long);
1858  } else {
1859  Diag(PrefixEnd, diag::err_invalid_char_raw_delim)
1860  << StringRef(PrefixEnd, 1);
1861  }
1862  }
1863 
1864  // Search for the next '"' in hopes of salvaging the lexer. Unfortunately,
1865  // it's possible the '"' was intended to be part of the raw string, but
1866  // there's not much we can do about that.
1867  while (true) {
1868  char C = *CurPtr++;
1869 
1870  if (C == '"')
1871  break;
1872  if (C == 0 && CurPtr-1 == BufferEnd) {
1873  --CurPtr;
1874  break;
1875  }
1876  }
1877 
1878  FormTokenWithChars(Result, CurPtr, tok::unknown);
1879  return true;
1880  }
1881 
1882  // Save prefix and move CurPtr past it
1883  const char *Prefix = CurPtr;
1884  CurPtr += PrefixLen + 1; // skip over prefix and '('
1885 
1886  while (true) {
1887  char C = *CurPtr++;
1888 
1889  if (C == ')') {
1890  // Check for prefix match and closing quote.
1891  if (strncmp(CurPtr, Prefix, PrefixLen) == 0 && CurPtr[PrefixLen] == '"') {
1892  CurPtr += PrefixLen + 1; // skip over prefix and '"'
1893  break;
1894  }
1895  } else if (C == 0 && CurPtr-1 == BufferEnd) { // End of file.
1896  if (!isLexingRawMode())
1897  Diag(BufferPtr, diag::err_unterminated_raw_string)
1898  << StringRef(Prefix, PrefixLen);
1899  FormTokenWithChars(Result, CurPtr-1, tok::unknown);
1900  return true;
1901  }
1902  }
1903 
1904  // If we are in C++11, lex the optional ud-suffix.
1905  if (getLangOpts().CPlusPlus)
1906  CurPtr = LexUDSuffix(Result, CurPtr, true);
1907 
1908  // Update the location of token as well as BufferPtr.
1909  const char *TokStart = BufferPtr;
1910  FormTokenWithChars(Result, CurPtr, Kind);
1911  Result.setLiteralData(TokStart);
1912  return true;
1913 }
1914 
1915 /// LexAngledStringLiteral - Lex the remainder of an angled string literal,
1916 /// after having lexed the '<' character. This is used for #include filenames.
1917 bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
1918  // Does this string contain the \0 character?
1919  const char *NulCharacter = nullptr;
1920  const char *AfterLessPos = CurPtr;
1921  char C = getAndAdvanceChar(CurPtr, Result);
1922  while (C != '>') {
1923  // Skip escaped characters.
1924  if (C == '\\' && CurPtr < BufferEnd) {
1925  // Skip the escaped character.
1926  getAndAdvanceChar(CurPtr, Result);
1927  } else if (C == '\n' || C == '\r' || // Newline.
1928  (C == 0 && (CurPtr-1 == BufferEnd || // End of file.
1929  isCodeCompletionPoint(CurPtr-1)))) {
1930  // If the filename is unterminated, then it must just be a lone <
1931  // character. Return this as such.
1932  FormTokenWithChars(Result, AfterLessPos, tok::less);
1933  return true;
1934  } else if (C == 0) {
1935  NulCharacter = CurPtr-1;
1936  }
1937  C = getAndAdvanceChar(CurPtr, Result);
1938  }
1939 
1940  // If a nul character existed in the string, warn about it.
1941  if (NulCharacter && !isLexingRawMode())
1942  Diag(NulCharacter, diag::null_in_char_or_string) << 1;
1943 
1944  // Update the location of token as well as BufferPtr.
1945  const char *TokStart = BufferPtr;
1946  FormTokenWithChars(Result, CurPtr, tok::angle_string_literal);
1947  Result.setLiteralData(TokStart);
1948  return true;
1949 }
1950 
1951 /// LexCharConstant - Lex the remainder of a character constant, after having
1952 /// lexed either ' or L' or u8' or u' or U'.
bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
                            tok::TokenKind Kind) {
  // Does this character contain the \0 character?
  const char *NulCharacter = nullptr;

  // Unicode character literals are a compatibility hazard for older
  // standards; warn unless we're just skimming.
  if (!isLexingRawMode()) {
    if (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant)
      Diag(BufferPtr, getLangOpts().CPlusPlus
                          ? diag::warn_cxx98_compat_unicode_literal
                          : diag::warn_c99_compat_unicode_literal);
    else if (Kind == tok::utf8_char_constant)
      Diag(BufferPtr, diag::warn_cxx14_compat_u8_character_literal);
  }

  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    // '' is not a valid character constant; diagnose and return unknown.
    if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
      Diag(BufferPtr, diag::ext_empty_character);
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  while (C != '\'') {
    // Skip escaped characters.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      // Unterminated constant: diagnose (the << 0 selects the "character"
      // wording) and hand back an unknown token.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 0;
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }

    if (C == 0) {
      if (isCodeCompletionPoint(CurPtr-1)) {
        // NOTE(review): a line appears to be missing from this rendering
        // (likely a natural-language code-completion callback on PP) —
        // verify against upstream.
        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
        cutOffLexing();
        return true;
      }

      // Remember the embedded nul so we can warn once after the loop.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (getLangOpts().CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, false);

  // If a nul character existed in the character, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 0;

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}
2015 
2016 /// SkipWhitespace - Efficiently skip over a series of whitespace characters.
2017 /// Update BufferPtr to point to the next non-whitespace character and return.
2018 ///
2019 /// This method forms a token and returns true if KeepWhitespaceMode is enabled.
2020 ///
bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
                           bool &TokAtPhysicalStartOfLine) {
  // Whitespace - Skip it, then return the token after the whitespace.
  // CurPtr[-1] is the first whitespace char; if it was vertical, the next
  // token starts a new line.
  bool SawNewline = isVerticalWhitespace(CurPtr[-1]);

  unsigned char Char = *CurPtr;

  // Skip consecutive spaces efficiently.
  while (true) {
    // Skip horizontal whitespace very aggressively.
    while (isHorizontalWhitespace(Char))
      Char = *++CurPtr;

    // Otherwise if we have something other than whitespace, we're done.
    if (!isVerticalWhitespace(Char))
      break;

    // NOTE(review): a line appears to be missing from this rendering here —
    // likely "if (ParsingPreprocessorDirective) {", which the orphaned
    // closing brace below belongs to. Verify against upstream.
      // End of preprocessor directive line, let LexTokenInternal handle this.
      BufferPtr = CurPtr;
      return false;
    }

    // OK, but handle newline.
    SawNewline = true;
    Char = *++CurPtr;
  }

  // If the client wants us to return whitespace, return it now.
  if (isKeepWhitespaceMode()) {
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    if (SawNewline) {
      IsAtStartOfLine = true;
      IsAtPhysicalStartOfLine = true;
    }
    // FIXME: The next token will not have LeadingSpace set.
    return true;
  }

  // If this isn't immediately after a newline, there is leading space.
  char PrevChar = CurPtr[-1];
  bool HasLeadingSpace = !isVerticalWhitespace(PrevChar);

  Result.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
  if (SawNewline) {
    Result.setFlag(Token::StartOfLine);
    TokAtPhysicalStartOfLine = true;
  }

  BufferPtr = CurPtr;
  return false;
}
2073 
2074 /// We have just read the // characters from input. Skip until we find the
2075 /// newline character that terminates the comment. Then update BufferPtr and
2076 /// return.
2077 ///
2078 /// If we're in KeepCommentMode or any CommentHandler has inserted
2079 /// some tokens, this will store the first token and return true.
bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
                            bool &TokAtPhysicalStartOfLine) {
  // If Line comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!LangOpts.LineComment && !isLexingRawMode()) {
    Diag(BufferPtr, diag::ext_line_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    LangOpts.LineComment = true;
  }

  // Scan over the body of the comment. The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them. As such, optimize for this case with the inner loop.
  //
  // This loop terminates with CurPtr pointing at the newline (or end of buffer)
  // character that ends the line comment.
  char C;
  while (true) {
    C = *CurPtr;
    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    const char *NextLine = CurPtr;
    if (C != 0) {
      // We found a newline, see if it's escaped.
      const char *EscapePtr = CurPtr-1;
      bool HasSpace = false;
      while (isHorizontalWhitespace(*EscapePtr)) { // Skip whitespace.
        --EscapePtr;
        HasSpace = true;
      }

      if (*EscapePtr == '\\')
        // Escaped newline.
        CurPtr = EscapePtr;
      else if (EscapePtr[0] == '/' && EscapePtr[-1] == '?' &&
               EscapePtr[-2] == '?' && LangOpts.Trigraphs)
        // Trigraph-escaped newline ("??/" spells a backslash).
        CurPtr = EscapePtr-2;
      else
        break; // This is a newline, we're done.

      // If there was space between the backslash and newline, warn about it.
      if (HasSpace && !isLexingRawMode())
        Diag(EscapePtr, diag::backslash_newline_space);
    }

    // Otherwise, this is a hard case. Fall back on getAndAdvanceChar to
    // properly decode the character. Read it in raw mode to avoid emitting
    // diagnostics about things like trigraphs. If we see an escaped newline,
    // we'll handle it below.
    const char *OldPtr = CurPtr;
    bool OldRawMode = isLexingRawMode();
    LexingRawMode = true;
    C = getAndAdvanceChar(CurPtr, Result);
    LexingRawMode = OldRawMode;

    // If we only read only one character, then no special handling is needed.
    // We're done and can skip forward to the newline.
    if (C != 0 && CurPtr == OldPtr+1) {
      CurPtr = NextLine;
      break;
    }

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment. Emit diagnostic
    // unless the next line is also a // comment.
    if (CurPtr != OldPtr + 1 && C != '/' &&
        (CurPtr == BufferEnd + 1 || CurPtr[0] != '/')) {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          // Okay, we found a // comment that ends in a newline, if the next
          // line is also a // comment, but has spaces, don't emit a diagnostic.
          if (isWhitespace(C)) {
            const char *ForwardPtr = CurPtr;
            while (isWhitespace(*ForwardPtr))  // Skip whitespace.
              ++ForwardPtr;
            if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
              break;
          }

          if (!isLexingRawMode())
            Diag(OldPtr-1, diag::ext_multi_line_line_comment);
          break;
        }
    }

    if (C == '\r' || C == '\n' || CurPtr == BufferEnd + 1) {
      --CurPtr;
      break;
    }

    if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
      // NOTE(review): a line appears to be missing from this rendering
      // (likely a natural-language code-completion callback on PP) — verify
      // against upstream.
      cutOffLexing();
      return false;
    }
  }

  // Found but did not consume the newline. Notify comment handlers about the
  // comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode())
    return SaveLineComment(Result, CurPtr);

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOD token.
  if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, eat the \n character. We don't care if this is a \n\r or
  // \r\n sequence. This is an efficiency hack (because we know the \n can't
  // contribute to another token), it isn't needed for correctness. Note that
  // this is ok even in KeepWhitespaceMode, because we would have returned the
  // comment above in that mode.
  ++CurPtr;

  // The next returned token is at the start of the line.
  Result.setFlag(Token::StartOfLine);
  TokAtPhysicalStartOfLine = true;
  // No leading whitespace seen so far.
  // NOTE(review): a line appears to be missing from this rendering here
  // (likely "Result.clearFlag(Token::LeadingSpace);") — verify against
  // upstream.
  BufferPtr = CurPtr;
  return false;
}
2218 
2219 /// If in save-comment mode, package up this Line comment in an appropriate
2220 /// way and return it.
bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) {
  // If we're not in a preprocessor directive, just return the // comment
  // directly.
  FormTokenWithChars(Result, CurPtr, tok::comment);

  // NOTE(review): the guard condition line appears to be missing from this
  // rendering (likely "if (!ParsingPreprocessorDirective || LexingRawMode)")
  // — verify against upstream.
    return true;

  // If this Line-style comment is in a macro definition, transmogrify it into
  // a C-style block comment.
  bool Invalid = false;
  std::string Spelling = PP->getSpelling(Result, &Invalid);
  if (Invalid)
    return true;

  assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not line comment?");
  Spelling[1] = '*';   // Change prefix to "/*".
  Spelling += "*/";    // add suffix.

  Result.setKind(tok::comment);
  PP->CreateString(Spelling, Result,
                   Result.getLocation(), Result.getLocation());
  return true;
}
2245 
2246 /// isEndOfBlockCommentWithEscapedNewLine - Return true if the specified newline
2247 /// character (either \\n or \\r) is part of an escaped newline sequence. Issue
2248 /// a diagnostic if so. We know that the newline is inside of a block comment.
2249 static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
2250  Lexer *L) {
2251  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
2252 
2253  // Back up off the newline.
2254  --CurPtr;
2255 
2256  // If this is a two-character newline sequence, skip the other character.
2257  if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
2258  // \n\n or \r\r -> not escaped newline.
2259  if (CurPtr[0] == CurPtr[1])
2260  return false;
2261  // \n\r or \r\n -> skip the newline.
2262  --CurPtr;
2263  }
2264 
2265  // If we have horizontal whitespace, skip over it. We allow whitespace
2266  // between the slash and newline.
2267  bool HasSpace = false;
2268  while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
2269  --CurPtr;
2270  HasSpace = true;
2271  }
2272 
2273  // If we have a slash, we know this is an escaped newline.
2274  if (*CurPtr == '\\') {
2275  if (CurPtr[-1] != '*') return false;
2276  } else {
2277  // It isn't a slash, is it the ?? / trigraph?
2278  if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
2279  CurPtr[-3] != '*')
2280  return false;
2281 
2282  // This is the trigraph ending the comment. Emit a stern warning!
2283  CurPtr -= 2;
2284 
2285  // If no trigraphs are enabled, warn that we ignored this trigraph and
2286  // ignore this * character.
2287  if (!L->getLangOpts().Trigraphs) {
2288  if (!L->isLexingRawMode())
2289  L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
2290  return false;
2291  }
2292  if (!L->isLexingRawMode())
2293  L->Diag(CurPtr, diag::trigraph_ends_block_comment);
2294  }
2295 
2296  // Warn about having an escaped newline between the */ characters.
2297  if (!L->isLexingRawMode())
2298  L->Diag(CurPtr, diag::escaped_newline_block_comment_end);
2299 
2300  // If there was space between the backslash and newline, warn about it.
2301  if (HasSpace && !L->isLexingRawMode())
2302  L->Diag(CurPtr, diag::backslash_newline_space);
2303 
2304  return true;
2305 }
2306 
2307 #ifdef __SSE2__
2308 #include <emmintrin.h>
2309 #elif __ALTIVEC__
2310 #include <altivec.h>
2311 #undef bool
2312 #endif
2313 
2314 /// We have just read from input the / and * characters that started a comment.
2315 /// Read until we find the * and / characters that terminate the comment.
2316 /// Note that we don't bother decoding trigraphs or escaped newlines in block
2317 /// comments, because they cannot cause the comment to end. The only thing
2318 /// that can happen is the comment could end with an escaped newline between
2319 /// the terminating * and /.
2320 ///
2321 /// If we're in KeepCommentMode or any CommentHandler has inserted
2322 /// some tokens, this will store the first token and return true.
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
                             bool &TokAtPhysicalStartOfLine) {
  // Scan one character past where we should, looking for a '/' character. Once
  // we find it, check to see if it was preceded by a *. This common
  // optimization helps people who like to put a lot of * characters in their
  // comments.

  // The first character we get with newlines and trigraphs skipped to handle
  // the degenerate /*/ case below correctly if the * has an escaped newline
  // after it.
  unsigned CharSize;
  unsigned char C = getCharAndSize(CurPtr, CharSize);
  CurPtr += CharSize;
  if (C == 0 && CurPtr == BufferEnd+1) {
    // "/*" immediately at end of file: unterminated comment.
    if (!isLexingRawMode())
      Diag(BufferPtr, diag::err_unterminated_block_comment);
    --CurPtr;

    // KeepWhitespaceMode should return this broken comment as a token. Since
    // it isn't a well formed comment, just return it as an 'unknown' token.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      return true;
    }

    BufferPtr = CurPtr;
    return false;
  }

  // Check to see if the first character after the '/*' is another /. If so,
  // then this slash does not end the block comment, it is part of it.
  if (C == '/')
    C = *CurPtr++;

  while (true) {
    // Skip over all non-interesting characters until we find end of buffer or a
    // (probably ending) '/' character.
    if (CurPtr + 24 < BufferEnd &&
        // If there is a code-completion point avoid the fast scan because it
        // doesn't check for '\0'.
        !(PP && PP->getCodeCompletionFileLoc() == FileLoc)) {
      // While not aligned to a 16-byte boundary.
      while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
        C = *CurPtr++;

      if (C == '/') goto FoundSlash;

#ifdef __SSE2__
      // Vectorized scan: compare 16 bytes at a time against '/'.
      __m128i Slashes = _mm_set1_epi8('/');
      while (CurPtr+16 <= BufferEnd) {
        int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr,
                                    Slashes));
        if (cmp != 0) {
          // Adjust the pointer to point directly after the first slash. It's
          // not necessary to set C here, it will be overwritten at the end of
          // the outer loop.
          CurPtr += llvm::countTrailingZeros<unsigned>(cmp) + 1;
          goto FoundSlash;
        }
        CurPtr += 16;
      }
#elif __ALTIVEC__
      __vector unsigned char Slashes = {
        '/', '/', '/', '/', '/', '/', '/', '/',
        '/', '/', '/', '/', '/', '/', '/', '/'
      };
      while (CurPtr+16 <= BufferEnd &&
             !vec_any_eq(*(const vector unsigned char*)CurPtr, Slashes))
        CurPtr += 16;
#else
      // Scan for '/' quickly. Many block comments are very large.
      while (CurPtr[0] != '/' &&
             CurPtr[1] != '/' &&
             CurPtr[2] != '/' &&
             CurPtr[3] != '/' &&
             CurPtr+4 < BufferEnd) {
        CurPtr += 4;
      }
#endif

      // It has to be one of the bytes scanned, increment to it and read one.
      C = *CurPtr++;
    }

    // Loop to scan the remainder.
    while (C != '/' && C != '\0')
      C = *CurPtr++;

    if (C == '/') {
  FoundSlash:
      if (CurPtr[-2] == '*')  // We found the final */. We're done!
        break;

      if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
          // We found the final */, though it had an escaped newline between the
          // * and /. We're done!
          break;
        }
      }
      if (CurPtr[0] == '*' && CurPtr[1] != '/') {
        // If this is a /* inside of the comment, emit a warning. Don't do this
        // if this is a /*/, which will end the comment. This misses cases with
        // embedded escaped newlines, but oh well.
        if (!isLexingRawMode())
          Diag(CurPtr-1, diag::warn_nested_block_comment);
      }
    } else if (C == 0 && CurPtr == BufferEnd+1) {
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_block_comment);
      // Note: the user probably forgot a */. We could continue immediately
      // after the /*, but this would involve lexing a lot of what really is the
      // comment, which surely would confuse the parser.
      --CurPtr;

      // KeepWhitespaceMode should return this broken comment as a token. Since
      // it isn't a well formed comment, just return it as an 'unknown' token.
      if (isKeepWhitespaceMode()) {
        FormTokenWithChars(Result, CurPtr, tok::unknown);
        return true;
      }

      BufferPtr = CurPtr;
      return false;
    } else if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
      // NOTE(review): a line appears to be missing from this rendering
      // (likely a natural-language code-completion callback on PP) — verify
      // against upstream.
      cutOffLexing();
      return false;
    }

    C = *CurPtr++;
  }

  // Notify comment handlers about the comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode()) {
    FormTokenWithChars(Result, CurPtr, tok::comment);
    return true;
  }

  // It is common for the tokens immediately after a /**/ comment to be
  // whitespace. Instead of going through the big switch, handle it
  // efficiently now. This is safe even in KeepWhitespaceMode because we would
  // have already returned above with the comment as a token.
  if (isHorizontalWhitespace(*CurPtr)) {
    SkipWhitespace(Result, CurPtr+1, TokAtPhysicalStartOfLine);
    return false;
  }

  // Otherwise, just return so that the next character will be lexed as a token.
  BufferPtr = CurPtr;
  Result.setFlag(Token::LeadingSpace);
  return false;
}
2484 
2485 //===----------------------------------------------------------------------===//
2486 // Primary Lexing Entry Points
2487 //===----------------------------------------------------------------------===//
2488 
/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
/// uninterpreted string. This switches the lexer out of directive mode.
// NOTE(review): the function signature line appears to be missing from this
// rendering (presumably "void Lexer::ReadToEndOfLine(SmallVectorImpl<char>
// *Result) {" given the uses of Result below) — verify against upstream.
  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
         "Must be in a preprocessing directive!");
  Token Tmp;

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;
  while (true) {
    char Char = getAndAdvanceChar(CurPtr, Tmp);
    switch (Char) {
    default:
      // Ordinary character: accumulate it if the caller wants the text.
      if (Result)
        Result->push_back(Char);
      break;
    case 0: // Null.
      // Found end of file?
      if (CurPtr-1 != BufferEnd) {
        if (isCodeCompletionPoint(CurPtr-1)) {
          // NOTE(review): a line appears to be missing from this rendering
          // (likely a natural-language code-completion callback on PP) —
          // verify against upstream.
          cutOffLexing();
          return;
        }

        // Nope, normal character, continue.
        if (Result)
          Result->push_back(Char);
        break;
      }
      // FALL THROUGH.
      LLVM_FALLTHROUGH;
    case '\r':
    case '\n':
      // Okay, we found the end of the line. First, back up past the \0, \r, \n.
      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
      BufferPtr = CurPtr-1;

      // Next, lex the character, which should handle the EOD transition.
      Lex(Tmp);
      if (Tmp.is(tok::code_completion)) {
        if (PP)
          // NOTE(review): the body of this 'if' appears to be missing from
          // this rendering (likely a code-completion callback on PP) —
          // verify against upstream.
        Lex(Tmp);
      }
      assert(Tmp.is(tok::eod) && "Unexpected token!");

      // Finally, we're done;
      return;
    }
  }
}
2541 
2542 /// LexEndOfFile - CurPtr points to the end of this file. Handle this
2543 /// condition, reporting diagnostics and handling other edge cases as required.
2544 /// This returns true if Result contains a token, false if PP.Lex should be
2545 /// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first. The next token returned will
  // then be the end of file.
  // NOTE(review): the guard line appears to be missing from this rendering
  // (likely "if (ParsingPreprocessorDirective) {", which the brace-enclosed
  // region below belongs to) — verify against upstream.
    // Done parsing the "line".
    // NOTE(review): a line appears to be missing here (likely clearing
    // ParsingPreprocessorDirective) — verify against upstream.
    // Update the location of token as well as BufferPtr.
    FormTokenWithChars(Result, CurPtr, tok::eod);

    // Restore comment saving mode, in case it was disabled for directive.
    if (PP)
      // NOTE(review): the body of this 'if' appears to be missing from this
      // rendering — verify against upstream.
    return true; // Have a token.
  }

  // If we are in raw mode, return this event as an EOF token. Let the caller
  // that put us in raw mode handle the event.
  if (isLexingRawMode()) {
    Result.startToken();
    BufferPtr = BufferEnd;
    FormTokenWithChars(Result, BufferEnd, tok::eof);
    return true;
  }

  if (PP->isRecordingPreamble() && PP->isInPrimaryFile()) {
    // NOTE(review): a line appears to be missing here (likely handing the
    // ConditionalStack to PP for preamble recording) — verify against
    // upstream.
    ConditionalStack.clear();
  }

  // Issue diagnostics for unterminated #if and missing newline.

  // If we are in a #if directive, emit an error.
  while (!ConditionalStack.empty()) {
    if (PP->getCodeCompletionFileLoc() != FileLoc)
      PP->Diag(ConditionalStack.back().IfLoc,
               diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
  // a pedwarn.
  if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) {
    DiagnosticsEngine &Diags = PP->getDiagnostics();
    SourceLocation EndLoc = getSourceLocation(BufferEnd);
    unsigned DiagID;

    if (LangOpts.CPlusPlus11) {
      // C++11 [lex.phases] 2.2 p2
      // Prefer the C++98 pedantic compatibility warning over the generic,
      // non-extension, user-requested "missing newline at EOF" warning.
      if (!Diags.isIgnored(diag::warn_cxx98_compat_no_newline_eof, EndLoc)) {
        DiagID = diag::warn_cxx98_compat_no_newline_eof;
      } else {
        DiagID = diag::warn_no_newline_eof;
      }
    } else {
      DiagID = diag::ext_no_newline_eof;
    }

    Diag(BufferEnd, DiagID)
      << FixItHint::CreateInsertion(EndLoc, "\n");
  }

  BufferPtr = CurPtr;

  // Finally, let the preprocessor handle this.
  return PP->HandleEndOfFile(Result, isPragmaLexer());
}
2615 
2616 /// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
2617 /// the specified lexer will return a tok::l_paren token, 0 if it is something
2618 /// else and 2 if there are no more tokens in the buffer controlled by the
2619 /// lexer.
2620 unsigned Lexer::isNextPPTokenLParen() {
2621  assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");
2622 
2623  // Switch to 'skipping' mode. This will ensure that we can lex a token
2624  // without emitting diagnostics, disables macro expansion, and will cause EOF
2625  // to return an EOF token instead of popping the include stack.
2626  LexingRawMode = true;
2627 
2628  // Save state that can be changed while lexing so that we can restore it.
2629  const char *TmpBufferPtr = BufferPtr;
2630  bool inPPDirectiveMode = ParsingPreprocessorDirective;
2631  bool atStartOfLine = IsAtStartOfLine;
2632  bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
2633  bool leadingSpace = HasLeadingSpace;
2634 
2635  Token Tok;
2636  Lex(Tok);
2637 
2638  // Restore state that may have changed.
2639  BufferPtr = TmpBufferPtr;
2640  ParsingPreprocessorDirective = inPPDirectiveMode;
2641  HasLeadingSpace = leadingSpace;
2642  IsAtStartOfLine = atStartOfLine;
2643  IsAtPhysicalStartOfLine = atPhysicalStartOfLine;
2644 
2645  // Restore the lexer back to non-skipping mode.
2646  LexingRawMode = false;
2647 
2648  if (Tok.is(tok::eof))
2649  return 2;
2650  return Tok.is(tok::l_paren);
2651 }
2652 
2653 /// \brief Find the end of a version control conflict marker.
2654 static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd,
2655  ConflictMarkerKind CMK) {
2656  const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>";
2657  size_t TermLen = CMK == CMK_Perforce ? 5 : 7;
2658  auto RestOfBuffer = StringRef(CurPtr, BufferEnd - CurPtr).substr(TermLen);
2659  size_t Pos = RestOfBuffer.find(Terminator);
2660  while (Pos != StringRef::npos) {
2661  // Must occur at start of line.
2662  if (Pos == 0 ||
2663  (RestOfBuffer[Pos - 1] != '\r' && RestOfBuffer[Pos - 1] != '\n')) {
2664  RestOfBuffer = RestOfBuffer.substr(Pos+TermLen);
2665  Pos = RestOfBuffer.find(Terminator);
2666  continue;
2667  }
2668  return RestOfBuffer.data()+Pos;
2669  }
2670  return nullptr;
2671 }
2672 
2673 /// IsStartOfConflictMarker - If the specified pointer is the start of a version
2674 /// control conflict marker like '<<<<<<<', recognize it as such, emit an error
2675 /// and recover nicely. This returns true if it is a conflict marker and false
2676 /// if not.
2677 bool Lexer::IsStartOfConflictMarker(const char *CurPtr) {
2678  // Only a conflict marker if it starts at the beginning of a line.
2679  if (CurPtr != BufferStart &&
2680  CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
2681  return false;
2682 
2683  // Check to see if we have <<<<<<< or >>>>.
2684  if (!StringRef(CurPtr, BufferEnd - CurPtr).startswith("<<<<<<<") &&
2685  !StringRef(CurPtr, BufferEnd - CurPtr).startswith(">>>> "))
2686  return false;
2687 
2688  // If we have a situation where we don't care about conflict markers, ignore
2689  // it.
2690  if (CurrentConflictMarkerState || isLexingRawMode())
2691  return false;
2692 
2693  ConflictMarkerKind Kind = *CurPtr == '<' ? CMK_Normal : CMK_Perforce;
2694 
2695  // Check to see if there is an ending marker somewhere in the buffer at the
2696  // start of a line to terminate this conflict marker.
2697  if (FindConflictEnd(CurPtr, BufferEnd, Kind)) {
2698  // We found a match. We are really in a conflict marker.
2699  // Diagnose this, and ignore to the end of line.
2700  Diag(CurPtr, diag::err_conflict_marker);
2701  CurrentConflictMarkerState = Kind;
2702 
2703  // Skip ahead to the end of line. We know this exists because the
2704  // end-of-conflict marker starts with \r or \n.
2705  while (*CurPtr != '\r' && *CurPtr != '\n') {
2706  assert(CurPtr != BufferEnd && "Didn't find end of line");
2707  ++CurPtr;
2708  }
2709  BufferPtr = CurPtr;
2710  return true;
2711  }
2712 
2713  // No end of conflict marker found.
2714  return false;
2715 }
2716 
2717 /// HandleEndOfConflictMarker - If this is a '====' or '||||' or '>>>>', or if
2718 /// it is '<<<<' and the conflict marker started with a '>>>>' marker, then it
2719 /// is the end of a conflict marker. Handle it by ignoring up until the end of
2720 /// the line. This returns true if it is a conflict marker and false if not.
2721 bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) {
2722  // Only a conflict marker if it starts at the beginning of a line.
2723  if (CurPtr != BufferStart &&
2724  CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
2725  return false;
2726 
2727  // If we have a situation where we don't care about conflict markers, ignore
2728  // it.
2729  if (!CurrentConflictMarkerState || isLexingRawMode())
2730  return false;
2731 
2732  // Check to see if we have the marker (4 characters in a row).
2733  for (unsigned i = 1; i != 4; ++i)
2734  if (CurPtr[i] != CurPtr[0])
2735  return false;
2736 
2737  // If we do have it, search for the end of the conflict marker. This could
2738  // fail if it got skipped with a '#if 0' or something. Note that CurPtr might
2739  // be the end of conflict marker.
2740  if (const char *End = FindConflictEnd(CurPtr, BufferEnd,
2741  CurrentConflictMarkerState)) {
2742  CurPtr = End;
2743 
2744  // Skip ahead to the end of line.
2745  while (CurPtr != BufferEnd && *CurPtr != '\r' && *CurPtr != '\n')
2746  ++CurPtr;
2747 
2748  BufferPtr = CurPtr;
2749 
2750  // No longer in the conflict marker.
2751  CurrentConflictMarkerState = CMK_None;
2752  return true;
2753  }
2754 
2755  return false;
2756 }
2757 
/// Scan [CurPtr, BufferEnd) for the editor-placeholder terminator "#>".
/// Returns a pointer just past the terminator, or nullptr if none exists.
static const char *findPlaceholderEnd(const char *CurPtr,
                                      const char *BufferEnd) {
  // An empty range cannot contain the two-character terminator.
  if (CurPtr == BufferEnd)
    return nullptr;
  // Stop one short of the end so the two-character probe stays in bounds.
  const char *Last = BufferEnd - 1;
  while (CurPtr != Last) {
    if (*CurPtr == '#' && *(CurPtr + 1) == '>')
      return CurPtr + 2; // Point past the closing "#>".
    ++CurPtr;
  }
  return nullptr;
}
2769 
2770 bool Lexer::lexEditorPlaceholder(Token &Result, const char *CurPtr) {
2771  assert(CurPtr[-1] == '<' && CurPtr[0] == '#' && "Not a placeholder!");
2773  return false;
2774  const char *End = findPlaceholderEnd(CurPtr + 1, BufferEnd);
2775  if (!End)
2776  return false;
2777  const char *Start = CurPtr - 1;
2778  if (!LangOpts.AllowEditorPlaceholders)
2779  Diag(Start, diag::err_placeholder_in_source);
2780  Result.startToken();
2781  FormTokenWithChars(Result, End, tok::raw_identifier);
2782  Result.setRawIdentifierData(Start);
2783  PP->LookUpIdentifierInfo(Result);
2785  BufferPtr = End;
2786  return true;
2787 }
2788 
2789 bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
2790  if (PP && PP->isCodeCompletionEnabled()) {
2791  SourceLocation Loc = FileLoc.getLocWithOffset(CurPtr-BufferStart);
2792  return Loc == PP->getCodeCompletionLoc();
2793  }
2794 
2795  return false;
2796 }
2797 
/// tryReadUCN - Try to lex a universal character name (\uXXXX or \UXXXXXXXX)
/// whose 'u'/'U' kind character starts at StartPtr; SlashLoc is the location
/// of the introducing backslash and is used only for diagnostics. Returns the
/// decoded code point and advances StartPtr past the UCN on success, or
/// returns 0 on failure. If the spelling is malformed (wrong kind character,
/// too few hex digits, or pre-C99 C), StartPtr is left unchanged; if the UCN
/// is well-formed but names a disallowed code point, StartPtr is still
/// advanced past it. When Result is non-null, diagnostics may be emitted and
/// Token::HasUCN is set on it.
uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
                           Token *Result) {
  unsigned CharSize;
  // Peek at the kind character; getCharAndSize sees through trigraphs and
  // escaped newlines in the spelling.
  char Kind = getCharAndSize(StartPtr, CharSize);

  // '\u' takes 4 hex digits, '\U' takes 8; anything else is not a UCN.
  unsigned NumHexDigits;
  if (Kind == 'u')
    NumHexDigits = 4;
  else if (Kind == 'U')
    NumHexDigits = 8;
  else
    return 0;

  // UCNs exist only in C99 and C++ modes; warn and bail in C89.
  if (!LangOpts.CPlusPlus && !LangOpts.C99) {
    if (Result && !isLexingRawMode())
      Diag(SlashLoc, diag::warn_ucn_not_valid_in_c89);
    return 0;
  }

  const char *CurPtr = StartPtr + CharSize;
  const char *KindLoc = &CurPtr[-1];

  // Accumulate the hex digits into the code point, most significant first.
  uint32_t CodePoint = 0;
  for (unsigned i = 0; i < NumHexDigits; ++i) {
    char C = getCharAndSize(CurPtr, CharSize);

    unsigned Value = llvm::hexDigitValue(C);
    if (Value == -1U) {
      // Ran out of hex digits early: diagnose (outside raw mode) and bail
      // without consuming anything.
      if (Result && !isLexingRawMode()) {
        if (i == 0) {
          Diag(BufferPtr, diag::warn_ucn_escape_no_digits)
            << StringRef(KindLoc, 1);
        } else {
          Diag(BufferPtr, diag::warn_ucn_escape_incomplete);

          // If the user wrote \U1234, suggest a fixit to \u.
          if (i == 4 && NumHexDigits == 8) {
            CharSourceRange URange = makeCharRange(*this, KindLoc, KindLoc + 1);
            Diag(KindLoc, diag::note_ucn_four_not_eight)
              << FixItHint::CreateReplacement(URange, "u");
          }
        }
      }

      return 0;
    }

    CodePoint <<= 4;
    CodePoint += Value;

    CurPtr += CharSize;
  }

  if (Result) {
    Result->setFlag(Token::HasUCN);
    // NumHexDigits + 2 covers the backslash plus the kind character. If the
    // spelling is exactly that long it contains no trigraphs/escaped newlines
    // and StartPtr can jump straight to the end; otherwise re-walk it with
    // getAndAdvanceChar so Result is updated for the cleaned spelling.
    if (CurPtr - StartPtr == (ptrdiff_t)NumHexDigits + 2)
      StartPtr = CurPtr;
    else
      while (StartPtr != CurPtr)
        (void)getAndAdvanceChar(StartPtr, *Result);
  } else {
    StartPtr = CurPtr;
  }

  // Don't apply C family restrictions to UCNs in assembly mode
  if (LangOpts.AsmPreprocessor)
    return CodePoint;

  // C99 6.4.3p2: A universal character name shall not specify a character whose
  // short identifier is less than 00A0 other than 0024 ($), 0040 (@), or
  // 0060 (`), nor one in the range D800 through DFFF inclusive.)
  // C++11 [lex.charset]p2: If the hexadecimal value for a
  // universal-character-name corresponds to a surrogate code point (in the
  // range 0xD800-0xDFFF, inclusive), the program is ill-formed. Additionally,
  // if the hexadecimal value for a universal-character-name outside the
  // c-char-sequence, s-char-sequence, or r-char-sequence of a character or
  // string literal corresponds to a control character (in either of the
  // ranges 0x00-0x1F or 0x7F-0x9F, both inclusive) or to a character in the
  // basic source character set, the program is ill-formed.
  if (CodePoint < 0xA0) {
    // $, @ and ` are the only sub-0xA0 characters a UCN may name.
    if (CodePoint == 0x24 || CodePoint == 0x40 || CodePoint == 0x60)
      return CodePoint;

    // We don't use isLexingRawMode() here because we need to warn about bad
    // UCNs even when skipping preprocessing tokens in a #if block.
    if (Result && PP) {
      if (CodePoint < 0x20 || CodePoint >= 0x7F)
        Diag(BufferPtr, diag::err_ucn_control_character);
      else {
        char C = static_cast<char>(CodePoint);
        Diag(BufferPtr, diag::err_ucn_escape_basic_scs) << StringRef(&C, 1);
      }
    }

    return 0;

  } else if (CodePoint >= 0xD800 && CodePoint <= 0xDFFF) {
    // C++03 allows UCNs representing surrogate characters. C99 and C++11 don't.
    // We don't use isLexingRawMode() here because we need to diagnose bad
    // UCNs even when skipping preprocessing tokens in a #if block.
    if (Result && PP) {
      if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus11)
        Diag(BufferPtr, diag::warn_ucn_escape_surrogate);
      else
        Diag(BufferPtr, diag::err_ucn_escape_invalid);
    }
    return 0;
  }

  return CodePoint;
}
2909 
2910 bool Lexer::CheckUnicodeWhitespace(Token &Result, uint32_t C,
2911  const char *CurPtr) {
2912  static const llvm::sys::UnicodeCharSet UnicodeWhitespaceChars(
2914  if (!isLexingRawMode() && !PP->isPreprocessedOutput() &&
2915  UnicodeWhitespaceChars.contains(C)) {
2916  Diag(BufferPtr, diag::ext_unicode_whitespace)
2917  << makeCharRange(*this, BufferPtr, CurPtr);
2918 
2919  Result.setFlag(Token::LeadingSpace);
2920  return true;
2921  }
2922  return false;
2923 }
2924 
2925 bool Lexer::LexUnicode(Token &Result, uint32_t C, const char *CurPtr) {
2926  if (isAllowedIDChar(C, LangOpts) && isAllowedInitiallyIDChar(C, LangOpts)) {
2928  !PP->isPreprocessedOutput()) {
2930  makeCharRange(*this, BufferPtr, CurPtr),
2931  /*IsFirst=*/true);
2932  }
2933 
2934  MIOpt.ReadToken();
2935  return LexIdentifier(Result, CurPtr);
2936  }
2937 
2939  !PP->isPreprocessedOutput() &&
2940  !isASCII(*BufferPtr) && !isAllowedIDChar(C, LangOpts)) {
2941  // Non-ASCII characters tend to creep into source code unintentionally.
2942  // Instead of letting the parser complain about the unknown token,
2943  // just drop the character.
2944  // Note that we can /only/ do this when the non-ASCII character is actually
2945  // spelled as Unicode, not written as a UCN. The standard requires that
2946  // we not throw away any possible preprocessor tokens, but there's a
2947  // loophole in the mapping of Unicode characters to basic character set
2948  // characters that allows us to map these particular characters to, say,
2949  // whitespace.
2950  Diag(BufferPtr, diag::err_non_ascii)
2951  << FixItHint::CreateRemoval(makeCharRange(*this, BufferPtr, CurPtr));
2952 
2953  BufferPtr = CurPtr;
2954  return false;
2955  }
2956 
2957  // Otherwise, we have an explicit UCN or a character that's unlikely to show
2958  // up by accident.
2959  MIOpt.ReadToken();
2960  FormTokenWithChars(Result, CurPtr, tok::unknown);
2961  return true;
2962 }
2963 
2964 void Lexer::PropagateLineStartLeadingSpaceInfo(Token &Result) {
2965  IsAtStartOfLine = Result.isAtStartOfLine();
2966  HasLeadingSpace = Result.hasLeadingSpace();
2967  HasLeadingEmptyMacro = Result.hasLeadingEmptyMacro();
2968  // Note that this doesn't affect IsAtPhysicalStartOfLine.
2969 }
2970 
2971 bool Lexer::Lex(Token &Result) {
2972  // Start a new token.
2973  Result.startToken();
2974 
2975  // Set up misc whitespace flags for LexTokenInternal.
2976  if (IsAtStartOfLine) {
2977  Result.setFlag(Token::StartOfLine);
2978  IsAtStartOfLine = false;
2979  }
2980 
2981  if (HasLeadingSpace) {
2982  Result.setFlag(Token::LeadingSpace);
2983  HasLeadingSpace = false;
2984  }
2985 
2986  if (HasLeadingEmptyMacro) {
2988  HasLeadingEmptyMacro = false;
2989  }
2990 
2991  bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
2992  IsAtPhysicalStartOfLine = false;
2993  bool isRawLex = isLexingRawMode();
2994  (void) isRawLex;
2995  bool returnedToken = LexTokenInternal(Result, atPhysicalStartOfLine);
2996  // (After the LexTokenInternal call, the lexer might be destroyed.)
2997  assert((returnedToken || !isRawLex) && "Raw lex must succeed");
2998  return returnedToken;
2999 }
3000 
3001 /// LexTokenInternal - This implements a simple C family lexer. It is an
3002 /// extremely performance critical piece of code. This assumes that the buffer
3003 /// has a null character at the end of the file. This returns a preprocessing
3004 /// token, not a normal token, as such, it is an internal interface. It assumes
3005 /// that the Flags of result have been cleared before calling this.
3006 bool Lexer::LexTokenInternal(Token &Result, bool TokAtPhysicalStartOfLine) {
3007 LexNextToken:
3008  // New token, can't need cleaning yet.
3010  Result.setIdentifierInfo(nullptr);
3011 
3012  // CurPtr - Cache BufferPtr in an automatic variable.
3013  const char *CurPtr = BufferPtr;
3014 
3015  // Small amounts of horizontal whitespace is very common between tokens.
3016  if ((*CurPtr == ' ') || (*CurPtr == '\t')) {
3017  ++CurPtr;
3018  while ((*CurPtr == ' ') || (*CurPtr == '\t'))
3019  ++CurPtr;
3020 
3021  // If we are keeping whitespace and other tokens, just return what we just
3022  // skipped. The next lexer invocation will return the token after the
3023  // whitespace.
3024  if (isKeepWhitespaceMode()) {
3025  FormTokenWithChars(Result, CurPtr, tok::unknown);
3026  // FIXME: The next token will not have LeadingSpace set.
3027  return true;
3028  }
3029 
3030  BufferPtr = CurPtr;
3031  Result.setFlag(Token::LeadingSpace);
3032  }
3033 
3034  unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below.
3035 
3036  // Read a character, advancing over it.
3037  char Char = getAndAdvanceChar(CurPtr, Result);
3039 
3040  switch (Char) {
3041  case 0: // Null.
3042  // Found end of file?
3043  if (CurPtr-1 == BufferEnd)
3044  return LexEndOfFile(Result, CurPtr-1);
3045 
3046  // Check if we are performing code completion.
3047  if (isCodeCompletionPoint(CurPtr-1)) {
3048  // Return the code-completion token.
3049  Result.startToken();
3050  FormTokenWithChars(Result, CurPtr, tok::code_completion);
3051  return true;
3052  }
3053 
3054  if (!isLexingRawMode())
3055  Diag(CurPtr-1, diag::null_in_file);
3056  Result.setFlag(Token::LeadingSpace);
3057  if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
3058  return true; // KeepWhitespaceMode
3059 
3060  // We know the lexer hasn't changed, so just try again with this lexer.
3061  // (We manually eliminate the tail call to avoid recursion.)
3062  goto LexNextToken;
3063 
3064  case 26: // DOS & CP/M EOF: "^Z".
3065  // If we're in Microsoft extensions mode, treat this as end of file.
3066  if (LangOpts.MicrosoftExt) {
3067  if (!isLexingRawMode())
3068  Diag(CurPtr-1, diag::ext_ctrl_z_eof_microsoft);
3069  return LexEndOfFile(Result, CurPtr-1);
3070  }
3071 
3072  // If Microsoft extensions are disabled, this is just random garbage.
3073  Kind = tok::unknown;
3074  break;
3075 
3076  case '\r':
3077  if (CurPtr[0] == '\n')
3078  Char = getAndAdvanceChar(CurPtr, Result);
3079  LLVM_FALLTHROUGH;
3080  case '\n':
3081  // If we are inside a preprocessor directive and we see the end of line,
3082  // we know we are done with the directive, so return an EOD token.
3084  // Done parsing the "line".
3086 
3087  // Restore comment saving mode, in case it was disabled for directive.
3088  if (PP)
3090 
3091  // Since we consumed a newline, we are back at the start of a line.
3092  IsAtStartOfLine = true;
3093  IsAtPhysicalStartOfLine = true;
3094 
3095  Kind = tok::eod;
3096  break;
3097  }
3098 
3099  // No leading whitespace seen so far.
3101 
3102  if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
3103  return true; // KeepWhitespaceMode
3104 
3105  // We only saw whitespace, so just try again with this lexer.
3106  // (We manually eliminate the tail call to avoid recursion.)
3107  goto LexNextToken;
3108  case ' ':
3109  case '\t':
3110  case '\f':
3111  case '\v':
3112  SkipHorizontalWhitespace:
3113  Result.setFlag(Token::LeadingSpace);
3114  if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
3115  return true; // KeepWhitespaceMode
3116 
3117  SkipIgnoredUnits:
3118  CurPtr = BufferPtr;
3119 
3120  // If the next token is obviously a // or /* */ comment, skip it efficiently
3121  // too (without going through the big switch stmt).
3122  if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
3123  LangOpts.LineComment &&
3124  (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) {
3125  if (SkipLineComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
3126  return true; // There is a token to return.
3127  goto SkipIgnoredUnits;
3128  } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
3129  if (SkipBlockComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
3130  return true; // There is a token to return.
3131  goto SkipIgnoredUnits;
3132  } else if (isHorizontalWhitespace(*CurPtr)) {
3133  goto SkipHorizontalWhitespace;
3134  }
3135  // We only saw whitespace, so just try again with this lexer.
3136  // (We manually eliminate the tail call to avoid recursion.)
3137  goto LexNextToken;
3138 
3139  // C99 6.4.4.1: Integer Constants.
3140  // C99 6.4.4.2: Floating Constants.
3141  case '0': case '1': case '2': case '3': case '4':
3142  case '5': case '6': case '7': case '8': case '9':
3143  // Notify MIOpt that we read a non-whitespace/non-comment token.
3144  MIOpt.ReadToken();
3145  return LexNumericConstant(Result, CurPtr);
3146 
3147  case 'u': // Identifier (uber) or C11/C++11 UTF-8 or UTF-16 string literal
3148  // Notify MIOpt that we read a non-whitespace/non-comment token.
3149  MIOpt.ReadToken();
3150 
3151  if (LangOpts.CPlusPlus11 || LangOpts.C11) {
3152  Char = getCharAndSize(CurPtr, SizeTmp);
3153 
3154  // UTF-16 string literal
3155  if (Char == '"')
3156  return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3157  tok::utf16_string_literal);
3158 
3159  // UTF-16 character constant
3160  if (Char == '\'')
3161  return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3162  tok::utf16_char_constant);
3163 
3164  // UTF-16 raw string literal
3165  if (Char == 'R' && LangOpts.CPlusPlus11 &&
3166  getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
3167  return LexRawStringLiteral(Result,
3168  ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3169  SizeTmp2, Result),
3170  tok::utf16_string_literal);
3171 
3172  if (Char == '8') {
3173  char Char2 = getCharAndSize(CurPtr + SizeTmp, SizeTmp2);
3174 
3175  // UTF-8 string literal
3176  if (Char2 == '"')
3177  return LexStringLiteral(Result,
3178  ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3179  SizeTmp2, Result),
3180  tok::utf8_string_literal);
3181  if (Char2 == '\'' && LangOpts.CPlusPlus1z)
3182  return LexCharConstant(
3183  Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3184  SizeTmp2, Result),
3185  tok::utf8_char_constant);
3186 
3187  if (Char2 == 'R' && LangOpts.CPlusPlus11) {
3188  unsigned SizeTmp3;
3189  char Char3 = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
3190  // UTF-8 raw string literal
3191  if (Char3 == '"') {
3192  return LexRawStringLiteral(Result,
3193  ConsumeChar(ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3194  SizeTmp2, Result),
3195  SizeTmp3, Result),
3196  tok::utf8_string_literal);
3197  }
3198  }
3199  }
3200  }
3201 
3202  // treat u like the start of an identifier.
3203  return LexIdentifier(Result, CurPtr);
3204 
3205  case 'U': // Identifier (Uber) or C11/C++11 UTF-32 string literal
3206  // Notify MIOpt that we read a non-whitespace/non-comment token.
3207  MIOpt.ReadToken();
3208 
3209  if (LangOpts.CPlusPlus11 || LangOpts.C11) {
3210  Char = getCharAndSize(CurPtr, SizeTmp);
3211 
3212  // UTF-32 string literal
3213  if (Char == '"')
3214  return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3215  tok::utf32_string_literal);
3216 
3217  // UTF-32 character constant
3218  if (Char == '\'')
3219  return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3220  tok::utf32_char_constant);
3221 
3222  // UTF-32 raw string literal
3223  if (Char == 'R' && LangOpts.CPlusPlus11 &&
3224  getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
3225  return LexRawStringLiteral(Result,
3226  ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3227  SizeTmp2, Result),
3228  tok::utf32_string_literal);
3229  }
3230 
3231  // treat U like the start of an identifier.
3232  return LexIdentifier(Result, CurPtr);
3233 
3234  case 'R': // Identifier or C++0x raw string literal
3235  // Notify MIOpt that we read a non-whitespace/non-comment token.
3236  MIOpt.ReadToken();
3237 
3238  if (LangOpts.CPlusPlus11) {
3239  Char = getCharAndSize(CurPtr, SizeTmp);
3240 
3241  if (Char == '"')
3242  return LexRawStringLiteral(Result,
3243  ConsumeChar(CurPtr, SizeTmp, Result),
3244  tok::string_literal);
3245  }
3246 
3247  // treat R like the start of an identifier.
3248  return LexIdentifier(Result, CurPtr);
3249 
3250  case 'L': // Identifier (Loony) or wide literal (L'x' or L"xyz").
3251  // Notify MIOpt that we read a non-whitespace/non-comment token.
3252  MIOpt.ReadToken();
3253  Char = getCharAndSize(CurPtr, SizeTmp);
3254 
3255  // Wide string literal.
3256  if (Char == '"')
3257  return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3258  tok::wide_string_literal);
3259 
3260  // Wide raw string literal.
3261  if (LangOpts.CPlusPlus11 && Char == 'R' &&
3262  getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
3263  return LexRawStringLiteral(Result,
3264  ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3265  SizeTmp2, Result),
3266  tok::wide_string_literal);
3267 
3268  // Wide character constant.
3269  if (Char == '\'')
3270  return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3271  tok::wide_char_constant);
3272  // FALL THROUGH, treating L like the start of an identifier.
3273  LLVM_FALLTHROUGH;
3274 
3275  // C99 6.4.2: Identifiers.
3276  case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
3277  case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N':
3278  case 'O': case 'P': case 'Q': /*'R'*/case 'S': case 'T': /*'U'*/
3279  case 'V': case 'W': case 'X': case 'Y': case 'Z':
3280  case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
3281  case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
3282  case 'o': case 'p': case 'q': case 'r': case 's': case 't': /*'u'*/
3283  case 'v': case 'w': case 'x': case 'y': case 'z':
3284  case '_':
3285  // Notify MIOpt that we read a non-whitespace/non-comment token.
3286  MIOpt.ReadToken();
3287  return LexIdentifier(Result, CurPtr);
3288 
3289  case '$': // $ in identifiers.
3290  if (LangOpts.DollarIdents) {
3291  if (!isLexingRawMode())
3292  Diag(CurPtr-1, diag::ext_dollar_in_identifier);
3293  // Notify MIOpt that we read a non-whitespace/non-comment token.
3294  MIOpt.ReadToken();
3295  return LexIdentifier(Result, CurPtr);
3296  }
3297 
3298  Kind = tok::unknown;
3299  break;
3300 
3301  // C99 6.4.4: Character Constants.
3302  case '\'':
3303  // Notify MIOpt that we read a non-whitespace/non-comment token.
3304  MIOpt.ReadToken();
3305  return LexCharConstant(Result, CurPtr, tok::char_constant);
3306 
3307  // C99 6.4.5: String Literals.
3308  case '"':
3309  // Notify MIOpt that we read a non-whitespace/non-comment token.
3310  MIOpt.ReadToken();
3311  return LexStringLiteral(Result, CurPtr, tok::string_literal);
3312 
3313  // C99 6.4.6: Punctuators.
3314  case '?':
3315  Kind = tok::question;
3316  break;
3317  case '[':
3318  Kind = tok::l_square;
3319  break;
3320  case ']':
3321  Kind = tok::r_square;
3322  break;
3323  case '(':
3324  Kind = tok::l_paren;
3325  break;
3326  case ')':
3327  Kind = tok::r_paren;
3328  break;
3329  case '{':
3330  Kind = tok::l_brace;
3331  break;
3332  case '}':
3333  Kind = tok::r_brace;
3334  break;
3335  case '.':
3336  Char = getCharAndSize(CurPtr, SizeTmp);
3337  if (Char >= '0' && Char <= '9') {
3338  // Notify MIOpt that we read a non-whitespace/non-comment token.
3339  MIOpt.ReadToken();
3340 
3341  return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
3342  } else if (LangOpts.CPlusPlus && Char == '*') {
3343  Kind = tok::periodstar;
3344  CurPtr += SizeTmp;
3345  } else if (Char == '.' &&
3346  getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
3347  Kind = tok::ellipsis;
3348  CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3349  SizeTmp2, Result);
3350  } else {
3351  Kind = tok::period;
3352  }
3353  break;
3354  case '&':
3355  Char = getCharAndSize(CurPtr, SizeTmp);
3356  if (Char == '&') {
3357  Kind = tok::ampamp;
3358  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3359  } else if (Char == '=') {
3360  Kind = tok::ampequal;
3361  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3362  } else {
3363  Kind = tok::amp;
3364  }
3365  break;
3366  case '*':
3367  if (getCharAndSize(CurPtr, SizeTmp) == '=') {
3368  Kind = tok::starequal;
3369  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3370  } else {
3371  Kind = tok::star;
3372  }
3373  break;
3374  case '+':
3375  Char = getCharAndSize(CurPtr, SizeTmp);
3376  if (Char == '+') {
3377  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3378  Kind = tok::plusplus;
3379  } else if (Char == '=') {
3380  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3381  Kind = tok::plusequal;
3382  } else {
3383  Kind = tok::plus;
3384  }
3385  break;
3386  case '-':
3387  Char = getCharAndSize(CurPtr, SizeTmp);
3388  if (Char == '-') { // --
3389  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3390  Kind = tok::minusminus;
3391  } else if (Char == '>' && LangOpts.CPlusPlus &&
3392  getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') { // C++ ->*
3393  CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3394  SizeTmp2, Result);
3395  Kind = tok::arrowstar;
3396  } else if (Char == '>') { // ->
3397  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3398  Kind = tok::arrow;
3399  } else if (Char == '=') { // -=
3400  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3401  Kind = tok::minusequal;
3402  } else {
3403  Kind = tok::minus;
3404  }
3405  break;
3406  case '~':
3407  Kind = tok::tilde;
3408  break;
3409  case '!':
3410  if (getCharAndSize(CurPtr, SizeTmp) == '=') {
3411  Kind = tok::exclaimequal;
3412  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3413  } else {
3414  Kind = tok::exclaim;
3415  }
3416  break;
3417  case '/':
3418  // 6.4.9: Comments
3419  Char = getCharAndSize(CurPtr, SizeTmp);
3420  if (Char == '/') { // Line comment.
3421  // Even if Line comments are disabled (e.g. in C89 mode), we generally
3422  // want to lex this as a comment. There is one problem with this though,
3423  // that in one particular corner case, this can change the behavior of the
3424  // resultant program. For example, In "foo //**/ bar", C89 would lex
3425  // this as "foo / bar" and langauges with Line comments would lex it as
3426  // "foo". Check to see if the character after the second slash is a '*'.
3427  // If so, we will lex that as a "/" instead of the start of a comment.
3428  // However, we never do this if we are just preprocessing.
3429  bool TreatAsComment = LangOpts.LineComment &&
3430  (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP);
3431  if (!TreatAsComment)
3432  if (!(PP && PP->isPreprocessedOutput()))
3433  TreatAsComment = getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*';
3434 
3435  if (TreatAsComment) {
3436  if (SkipLineComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3437  TokAtPhysicalStartOfLine))
3438  return true; // There is a token to return.
3439 
3440  // It is common for the tokens immediately after a // comment to be
3441  // whitespace (indentation for the next line). Instead of going through
3442  // the big switch, handle it efficiently now.
3443  goto SkipIgnoredUnits;
3444  }
3445  }
3446 
3447  if (Char == '*') { // /**/ comment.
3448  if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
3449  TokAtPhysicalStartOfLine))
3450  return true; // There is a token to return.
3451 
3452  // We only saw whitespace, so just try again with this lexer.
3453  // (We manually eliminate the tail call to avoid recursion.)
3454  goto LexNextToken;
3455  }
3456 
3457  if (Char == '=') {
3458  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3459  Kind = tok::slashequal;
3460  } else {
3461  Kind = tok::slash;
3462  }
3463  break;
3464  case '%':
3465  Char = getCharAndSize(CurPtr, SizeTmp);
3466  if (Char == '=') {
3467  Kind = tok::percentequal;
3468  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3469  } else if (LangOpts.Digraphs && Char == '>') {
3470  Kind = tok::r_brace; // '%>' -> '}'
3471  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3472  } else if (LangOpts.Digraphs && Char == ':') {
3473  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3474  Char = getCharAndSize(CurPtr, SizeTmp);
3475  if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
3476  Kind = tok::hashhash; // '%:%:' -> '##'
3477  CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3478  SizeTmp2, Result);
3479  } else if (Char == '@' && LangOpts.MicrosoftExt) {// %:@ -> #@ -> Charize
3480  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3481  if (!isLexingRawMode())
3482  Diag(BufferPtr, diag::ext_charize_microsoft);
3483  Kind = tok::hashat;
3484  } else { // '%:' -> '#'
3485  // We parsed a # character. If this occurs at the start of the line,
3486  // it's actually the start of a preprocessing directive. Callback to
3487  // the preprocessor to handle it.
3488  // TODO: -fpreprocessed mode??
3489  if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
3490  goto HandleDirective;
3491 
3492  Kind = tok::hash;
3493  }
3494  } else {
3495  Kind = tok::percent;
3496  }
3497  break;
3498  case '<':
3499  Char = getCharAndSize(CurPtr, SizeTmp);
3500  if (ParsingFilename) {
3501  return LexAngledStringLiteral(Result, CurPtr);
3502  } else if (Char == '<') {
3503  char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
3504  if (After == '=') {
3505  Kind = tok::lesslessequal;
3506  CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3507  SizeTmp2, Result);
3508  } else if (After == '<' && IsStartOfConflictMarker(CurPtr-1)) {
3509  // If this is actually a '<<<<<<<' version control conflict marker,
3510  // recognize it as such and recover nicely.
3511  goto LexNextToken;
3512  } else if (After == '<' && HandleEndOfConflictMarker(CurPtr-1)) {
3513  // If this is '<<<<' and we're in a Perforce-style conflict marker,
3514  // ignore it.
3515  goto LexNextToken;
3516  } else if (LangOpts.CUDA && After == '<') {
3517  Kind = tok::lesslessless;
3518  CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3519  SizeTmp2, Result);
3520  } else {
3521  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3522  Kind = tok::lessless;
3523  }
3524  } else if (Char == '=') {
3525  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3526  Kind = tok::lessequal;
3527  } else if (LangOpts.Digraphs && Char == ':') { // '<:' -> '['
3528  if (LangOpts.CPlusPlus11 &&
3529  getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == ':') {
3530  // C++0x [lex.pptoken]p3:
3531  // Otherwise, if the next three characters are <:: and the subsequent
3532  // character is neither : nor >, the < is treated as a preprocessor
3533  // token by itself and not as the first character of the alternative
3534  // token <:.
3535  unsigned SizeTmp3;
3536  char After = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
3537  if (After != ':' && After != '>') {
3538  Kind = tok::less;
3539  if (!isLexingRawMode())
3540  Diag(BufferPtr, diag::warn_cxx98_compat_less_colon_colon);
3541  break;
3542  }
3543  }
3544 
3545  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3546  Kind = tok::l_square;
3547  } else if (LangOpts.Digraphs && Char == '%') { // '<%' -> '{'
3548  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3549  Kind = tok::l_brace;
3550  } else if (Char == '#' && /*Not a trigraph*/ SizeTmp == 1 &&
3551  lexEditorPlaceholder(Result, CurPtr)) {
3552  return true;
3553  } else {
3554  Kind = tok::less;
3555  }
3556  break;
3557  case '>':
3558  Char = getCharAndSize(CurPtr, SizeTmp);
3559  if (Char == '=') {
3560  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3561  Kind = tok::greaterequal;
3562  } else if (Char == '>') {
3563  char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
3564  if (After == '=') {
3565  CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3566  SizeTmp2, Result);
3567  Kind = tok::greatergreaterequal;
3568  } else if (After == '>' && IsStartOfConflictMarker(CurPtr-1)) {
3569  // If this is actually a '>>>>' conflict marker, recognize it as such
3570  // and recover nicely.
3571  goto LexNextToken;
3572  } else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) {
3573  // If this is '>>>>>>>' and we're in a conflict marker, ignore it.
3574  goto LexNextToken;
3575  } else if (LangOpts.CUDA && After == '>') {
3576  Kind = tok::greatergreatergreater;
3577  CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
3578  SizeTmp2, Result);
3579  } else {
3580  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3581  Kind = tok::greatergreater;
3582  }
3583  } else {
3584  Kind = tok::greater;
3585  }
3586  break;
3587  case '^':
3588  Char = getCharAndSize(CurPtr, SizeTmp);
3589  if (Char == '=') {
3590  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3591  Kind = tok::caretequal;
3592  } else if (LangOpts.OpenCL && Char == '^') {
3593  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3594  Kind = tok::caretcaret;
3595  } else {
3596  Kind = tok::caret;
3597  }
3598  break;
3599  case '|':
3600  Char = getCharAndSize(CurPtr, SizeTmp);
3601  if (Char == '=') {
3602  Kind = tok::pipeequal;
3603  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3604  } else if (Char == '|') {
3605  // If this is '|||||||' and we're in a conflict marker, ignore it.
3606  if (CurPtr[1] == '|' && HandleEndOfConflictMarker(CurPtr-1))
3607  goto LexNextToken;
3608  Kind = tok::pipepipe;
3609  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3610  } else {
3611  Kind = tok::pipe;
3612  }
3613  break;
3614  case ':':
3615  Char = getCharAndSize(CurPtr, SizeTmp);
3616  if (LangOpts.Digraphs && Char == '>') {
3617  Kind = tok::r_square; // ':>' -> ']'
3618  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3619  } else if ((LangOpts.CPlusPlus ||
3620  LangOpts.DoubleSquareBracketAttributes) &&
3621  Char == ':') {
3622  Kind = tok::coloncolon;
3623  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3624  } else {
3625  Kind = tok::colon;
3626  }
3627  break;
3628  case ';':
3629  Kind = tok::semi;
3630  break;
3631  case '=':
3632  Char = getCharAndSize(CurPtr, SizeTmp);
3633  if (Char == '=') {
3634  // If this is '====' and we're in a conflict marker, ignore it.
3635  if (CurPtr[1] == '=' && HandleEndOfConflictMarker(CurPtr-1))
3636  goto LexNextToken;
3637 
3638  Kind = tok::equalequal;
3639  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3640  } else {
3641  Kind = tok::equal;
3642  }
3643  break;
3644  case ',':
3645  Kind = tok::comma;
3646  break;
3647  case '#':
3648  Char = getCharAndSize(CurPtr, SizeTmp);
3649  if (Char == '#') {
3650  Kind = tok::hashhash;
3651  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3652  } else if (Char == '@' && LangOpts.MicrosoftExt) { // #@ -> Charize
3653  Kind = tok::hashat;
3654  if (!isLexingRawMode())
3655  Diag(BufferPtr, diag::ext_charize_microsoft);
3656  CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
3657  } else {
3658  // We parsed a # character. If this occurs at the start of the line,
3659  // it's actually the start of a preprocessing directive. Callback to
3660  // the preprocessor to handle it.
3661  // TODO: -fpreprocessed mode??
3662  if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
3663  goto HandleDirective;
3664 
3665  Kind = tok::hash;
3666  }
3667  break;
3668 
3669  case '@':
3670  // Objective C support.
3671  if (CurPtr[-1] == '@' && LangOpts.ObjC1)
3672  Kind = tok::at;
3673  else
3674  Kind = tok::unknown;
3675  break;
3676 
3677  // UCNs (C99 6.4.3, C++11 [lex.charset]p2)
3678  case '\\':
3679  if (!LangOpts.AsmPreprocessor) {
3680  if (uint32_t CodePoint = tryReadUCN(CurPtr, BufferPtr, &Result)) {
3681  if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
3682  if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
3683  return true; // KeepWhitespaceMode
3684 
3685  // We only saw whitespace, so just try again with this lexer.
3686  // (We manually eliminate the tail call to avoid recursion.)
3687  goto LexNextToken;
3688  }
3689 
3690  return LexUnicode(Result, CodePoint, CurPtr);
3691  }
3692  }
3693 
3694  Kind = tok::unknown;
3695  break;
3696 
3697  default: {
3698  if (isASCII(Char)) {
3699  Kind = tok::unknown;
3700  break;
3701  }
3702 
3703  llvm::UTF32 CodePoint;
3704 
3705  // We can't just reset CurPtr to BufferPtr because BufferPtr may point to
3706  // an escaped newline.
3707  --CurPtr;
3708  llvm::ConversionResult Status =
3709  llvm::convertUTF8Sequence((const llvm::UTF8 **)&CurPtr,
3710  (const llvm::UTF8 *)BufferEnd,
3711  &CodePoint,
3712  llvm::strictConversion);
3713  if (Status == llvm::conversionOK) {
3714  if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
3715  if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
3716  return true; // KeepWhitespaceMode
3717 
3718  // We only saw whitespace, so just try again with this lexer.
3719  // (We manually eliminate the tail call to avoid recursion.)
3720  goto LexNextToken;
3721  }
3722  return LexUnicode(Result, CodePoint, CurPtr);
3723  }
3724 
3726  PP->isPreprocessedOutput()) {
3727  ++CurPtr;
3728  Kind = tok::unknown;
3729  break;
3730  }
3731 
3732  // Non-ASCII characters tend to creep into source code unintentionally.
3733  // Instead of letting the parser complain about the unknown token,
3734  // just diagnose the invalid UTF-8, then drop the character.
3735  Diag(CurPtr, diag::err_invalid_utf8);
3736 
3737  BufferPtr = CurPtr+1;
3738  // We're pretending the character didn't exist, so just try again with
3739  // this lexer.
3740  // (We manually eliminate the tail call to avoid recursion.)
3741  goto LexNextToken;
3742  }
3743  }
3744 
3745  // Notify MIOpt that we read a non-whitespace/non-comment token.
3746  MIOpt.ReadToken();
3747 
3748  // Update the location of token as well as BufferPtr.
3749  FormTokenWithChars(Result, CurPtr, Kind);
3750  return true;
3751 
3752 HandleDirective:
3753  // We parsed a # character and it's the start of a preprocessing directive.
3754 
3755  FormTokenWithChars(Result, CurPtr, tok::hash);
3756  PP->HandleDirective(Result);
3757 
3758  if (PP->hadModuleLoaderFatalFailure()) {
3759  // With a fatal failure in the module loader, we abort parsing.
3760  assert(Result.is(tok::eof) && "Preprocessor did not set tok:eof");
3761  return true;
3762  }
3763 
3764  // We parsed the directive; lex a token with the new state.
3765  return false;
3766 }
SourceLocation getLocForStartOfFile(FileID FID) const
Return the source location corresponding to the first byte of the specified file. ...
Describes the bounds (start, size) of the preamble and a flag required by PreprocessorOptions::Precom...
Definition: Lexer.h:45
static unsigned getSpelling(const Token &Tok, const char *&Buffer, const SourceManager &SourceMgr, const LangOptions &LangOpts, bool *Invalid=nullptr)
getSpelling - This method is used to get the spelling of a token into a preallocated buffer...
Definition: Lexer.cpp:370
Lexer - This provides a simple interface that turns a text buffer into a stream of tokens...
Definition: Lexer.h:63
SourceLocation getLocWithOffset(int Offset) const
Return a source location with the specified offset from this SourceLocation.
This is a discriminated union of FileInfo and ExpansionInfo.
unsigned getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it...
SourceLocation getSpellingLoc() const
void setFlagValue(TokenFlags Flag, bool Val)
Set a flag to either true or false.
Definition: Token.h:257
to be on a line of there are analogous operations *that might be executed after the last line has been for finding a split after the last line that needs *to be for calculating the line length in *columns of the remainder of the token
static const llvm::sys::UnicodeCharRange C11AllowedIDCharRanges[]
void setBegin(SourceLocation b)
static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a, vector signed char __b)
Definition: altivec.h:14868
bool is(tok::TokenKind K) const
is/isNot - Predicates to check if this token is a specific kind, as in "if (Tok.is(tok::l_brace)) {...
Definition: Token.h:95
Defines the SourceManager interface.
LLVM_READNONE bool isASCII(char c)
Returns true if this is an ASCII character.
Definition: CharInfo.h:43
static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts)
Definition: Lexer.cpp:1410
bool isInPrimaryFile() const
Return true if we're in the top-level file, not in a #include.
StringRef P
const char * getCharacterData(SourceLocation SL, bool *Invalid=nullptr) const
Return a pointer to the start of the specified location in the appropriate spelling MemoryBuffer...
Each ExpansionInfo encodes the expansion location - where the token was ultimately expanded...
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1205
void setFlag(TokenFlags Flag)
Set the specified flag.
Definition: Token.h:234
static char getCharAndSizeNoWarn(const char *Ptr, unsigned &Size, const LangOptions &LangOpts)
getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever emit a warning.
Definition: Lexer.h:496
StringRef getBufferData(FileID FID, bool *Invalid=nullptr) const
Return a StringRef to the source buffer data for the specified FileID.
bool hadModuleLoaderFatalFailure() const
Definition: Preprocessor.h:780
static bool isAtStartOfMacroExpansion(SourceLocation loc, const SourceManager &SM, const LangOptions &LangOpts, SourceLocation *MacroBegin=nullptr)
Returns true if the given MacroID location points at the first token of the macro expansion...
Definition: Lexer.cpp:777
LLVM_READONLY bool isHorizontalWhitespace(unsigned char c)
Returns true if this character is horizontal ASCII whitespace: ' ', '\t', '\f', '\v'.
Definition: CharInfo.h:71
bool isStringLiteral(TokenKind K)
Return true if this is a C or C++ string-literal (or C++11 user-defined-string-literal) token...
Definition: TokenKinds.h:79
ConflictMarkerKind
ConflictMarkerKind - Kinds of conflict marker which the lexer might be recovering from...
Definition: Lexer.h:31
static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen)
GetMappedTokenLoc - If lexing out of a &#39;mapped buffer&#39;, where we pretend the lexer buffer was all exp...
Definition: Lexer.cpp:1079
Like System, but searched after the system directories.
SourceLocation getCodeCompletionFileLoc() const
Returns the start location of the file of code-completion point.
static Lexer * Create_PragmaLexer(SourceLocation SpellingLoc, SourceLocation ExpansionLocStart, SourceLocation ExpansionLocEnd, unsigned TokLen, Preprocessor &PP)
Create_PragmaLexer: Lexer constructor - Create a new lexer object for _Pragma expansion.
Definition: Lexer.cpp:177
bool isAnnotation() const
Return true if this is any of tok::annot_* kind tokens.
Definition: Token.h:118
tok::TokenKind getKind() const
Definition: Token.h:90
bool isLiteral() const
Return true if this is a "literal", like a numeric constant, string, etc.
Definition: Token.h:113
One of these records is kept for each identifier that is lexed.
static StringRef getIndentationForLine(SourceLocation Loc, const SourceManager &SM)
Returns the leading whitespace for line that corresponds to the given location Loc.
Definition: Lexer.cpp:1048
SourceLocation getBegin() const
bool ParsingPreprocessorDirective
True when parsing #XXX; turns &#39;\n&#39; into a tok::eod token.
void setRawIdentifierData(const char *Ptr)
Definition: Token.h:207
static SourceLocation getFromRawEncoding(unsigned Encoding)
Turn a raw encoding of a SourceLocation object into a real SourceLocation.
SmallVector< PPConditionalInfo, 4 > ConditionalStack
Information about the set of #if/#ifdef/#ifndef blocks we are currently in.
Token - This structure provides full information about a lexed token.
Definition: Token.h:35
void setKind(tok::TokenKind K)
Definition: Token.h:91
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:48
bool isPragmaLexer() const
isPragmaLexer - Returns true if this Lexer is being used to lex a pragma.
Definition: Lexer.h:160
void resetExtendedTokenMode()
Sets the extended token mode back to its initial value, according to the language options and preproc...
Definition: Lexer.cpp:133
A Perforce-style conflict marker, initiated by 4 ">"s, separated by 4 "="s, and terminated by 4 "<"s...
Definition: Lexer.h:39
bool isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const
Return true if we have an ObjC keyword identifier.
Definition: Lexer.cpp:46
SourceLocation getSourceLocation() override
getSourceLocation - Return a source location for the next character in the current file...
Definition: Lexer.h:238
bool isInvalid() const
bool isAtEndOfImmediateMacroExpansion(SourceLocation Loc, SourceLocation *MacroEnd=nullptr) const
Returns true if the given MacroID location points at the character end of the immediate macro expansi...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi8(char __b)
Initializes all values in a 128-bit vector of [16 x i8] with the specified 8-bit value.
Definition: emmintrin.h:3839
static SourceLocation getBeginningOfFileToken(SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts)
Definition: Lexer.cpp:479
static bool isNewLineEscaped(const char *BufferStart, const char *Str)
Checks whether new line pointed by Str is preceded by escape sequence.
Definition: Lexer.cpp:1028
LLVM_READONLY bool isWhitespace(unsigned char c)
Return true if this character is horizontal or vertical ASCII whitespace: ' ', '\t', '\f', '\v', '\n', '\r'.
Definition: CharInfo.h:88
StringRef getSpelling(SourceLocation loc, SmallVectorImpl< char > &buffer, bool *invalid=nullptr) const
Return the &#39;spelling&#39; of the token at the given location; does not go up to the spelling location or ...
tok::TokenKind getTokenID() const
If this is a source-language token (e.g.
uint32_t Offset
Definition: CacheTokens.cpp:43
const FormatToken & Tok
SourceLocation getExpansionLoc(SourceLocation Loc) const
Given a SourceLocation object Loc, return the expansion location referenced by the ID...
static bool getRawToken(SourceLocation Loc, Token &Result, const SourceManager &SM, const LangOptions &LangOpts, bool IgnoreWhiteSpace=false)
Relex the token at the specified location.
Definition: Lexer.cpp:428
void HandleDirective(Token &Result)
Callback invoked when the lexer sees a # token at the start of a line.
Concrete class used by the front-end to report problems and issues.
Definition: Diagnostic.h:147
SourceLocation getSpellingLoc(SourceLocation Loc) const
Given a SourceLocation object, return the spelling location referenced by the ID. ...
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type...
Definition: opencl-c.h:75
const FileID FID
The SourceManager FileID corresponding to the file being lexed.
static SourceLocation AdvanceToTokenCharacter(SourceLocation TokStart, unsigned Character, const SourceManager &SM, const LangOptions &LangOpts)
AdvanceToTokenCharacter - If the current SourceLocation specifies a location at the start of a token...
Definition: Lexer.cpp:694
LLVM_READONLY bool isRawStringDelimBody(unsigned char c)
Return true if this is the body character of a C++ raw string delimiter.
Definition: CharInfo.h:155
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr, Lexer *L)
isBlockCommentEndOfEscapedNewLine - Return true if the specified newline character (either \n or \r) ...
Definition: Lexer.cpp:2249
static bool isValidUDSuffix(const LangOptions &LangOpts, StringRef Suffix)
Determine whether a suffix is a valid ud-suffix.
static CharSourceRange makeCharRange(Lexer &L, const char *Begin, const char *End)
Definition: Lexer.cpp:1445
A little helper class used to produce diagnostics.
Definition: Diagnostic.h:953
bool ParsingFilename
True after #include; turns <xx> into a tok::angle_string_literal token.
const LangOptions & getLangOpts() const
getLangOpts - Return the language features currently enabled.
Definition: Lexer.h:145
static const llvm::sys::UnicodeCharRange C11DisallowedInitialIDCharRanges[]
bool isInFileID(SourceLocation Loc, FileID FID, unsigned *RelativeOffset=nullptr) const
Given a specific FileID, returns true if Loc is inside that FileID chunk and sets relative offset (of...
LLVM_READONLY bool isIdentifierHead(unsigned char c, bool AllowDollar=false)
Returns true if this is a valid first character of a C identifier, which is [a-zA-Z_].
Definition: CharInfo.h:49
static StringRef getSourceText(CharSourceRange Range, const SourceManager &SM, const LangOptions &LangOpts, bool *Invalid=nullptr)
Returns a string for the source that the range encompasses.
Definition: Lexer.cpp:916
IdentifierInfo * LookUpIdentifierInfo(Token &Identifier) const
Given a tok::raw_identifier token, look up the identifier information for the token and install it in...
static bool isAtEndOfMacroExpansion(SourceLocation loc, const SourceManager &SM, const LangOptions &LangOpts, SourceLocation *MacroEnd=nullptr)
Returns true if the given MacroID location points at the last token of the macro expansion.
Definition: Lexer.cpp:799
bool isCodeCompletionEnabled() const
Determine if we are performing code completion.
SourceLocation getImmediateSpellingLoc(SourceLocation Loc) const
Given a SourceLocation object, return the spelling location referenced by the ID. ...
bool LexingRawMode
True if in raw mode.
static SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset, const SourceManager &SM, const LangOptions &LangOpts)
Computes the source location just past the end of the token at this source location.
Definition: Lexer.cpp:755
SourceLocation End
Represents a character-granular source range.
bool isKeepWhitespaceMode() const
isKeepWhitespaceMode - Return true if the lexer should return tokens for every character in the file...
Definition: Lexer.h:183
static PreambleBounds ComputePreamble(StringRef Buffer, const LangOptions &LangOpts, unsigned MaxLines=0)
Compute the preamble of the given file.
Definition: Lexer.cpp:555
static unsigned MeasureTokenLength(SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts)
MeasureTokenLength - Relex the token at the specified location and return its length in bytes in the ...
Definition: Lexer.cpp:417
const FileEntry * getFileEntryForID(FileID FID) const
Returns the FileEntry record for the provided FileID.
const AnnotatedLine * Line
static SourceLocation findLocationAfterToken(SourceLocation loc, tok::TokenKind TKind, const SourceManager &SM, const LangOptions &LangOpts, bool SkipTrailingWhitespaceAndNewLine)
Checks that the given token is the first token that occurs after the given location (this excludes co...
Definition: Lexer.cpp:1248
bool hasLeadingEmptyMacro() const
Return true if this token has an empty macro before it.
Definition: Token.h:287
SourceLocation getSourceLocation(const char *Loc, unsigned TokLen=1) const
getSourceLocation - Return a source location identifier for the specified offset in the current file...
Definition: Lexer.cpp:1104
SourceLocation getLocation() const
Return a source location identifier for the specified offset in the current file. ...
Definition: Token.h:124
Defines the clang::Preprocessor interface.
tok::ObjCKeywordKind getObjCKeywordID() const
Return the Objective-C keyword ID for the this identifier.
PreambleDirectiveKind
Definition: Lexer.cpp:548
SourceLocation Begin
MultipleIncludeOpt MIOpt
A state machine that detects the #ifndef-wrapping a file idiom for the multiple-include optimization...
void setEnd(SourceLocation e)
bool getCommentRetentionState() const
Definition: Preprocessor.h:795
Defines the clang::IdentifierInfo, clang::IdentifierTable, and clang::Selector interfaces.
static const char * findBeginningOfLine(StringRef Buffer, unsigned Offset)
Returns the pointer that points to the beginning of the line that contains the given offset, or null if the offset is invalid.
Definition: Lexer.cpp:462
bool HandleEndOfFile(Token &Result, bool isEndOfMacro=false)
Callback invoked when the lexer hits the end of the current file.
SourceLocation createExpansionLoc(SourceLocation Loc, SourceLocation ExpansionLocStart, SourceLocation ExpansionLocEnd, unsigned TokLength, int LoadedID=0, unsigned LoadedOffset=0)
Return a new SourceLocation that encodes the fact that a token from SpellingLoc should actually be re...
The result type of a method or function.
float __ovld __cnfn length(float p)
Return the length of vector p, i.e., sqrt(p.x2 + p.y 2 + ...)
const SourceManager & SM
Definition: Format.cpp:1327
ObjCKeywordKind
Provides a namespace for Objective-C keywords which start with an &#39;@&#39;.
Definition: TokenKinds.h:41
const ExpansionInfo & getExpansion() const
bool isRecordingPreamble() const
static CharSourceRange getCharRange(SourceRange R)
SourceManager & getSourceManager() const
Definition: Preprocessor.h:755
bool isAtStartOfImmediateMacroExpansion(SourceLocation Loc, SourceLocation *MacroBegin=nullptr) const
Returns true if the given MacroID location points at the beginning of the immediate macro expansion...
Kind
llvm::MemoryBuffer * getBuffer(FileID FID, SourceLocation Loc, bool *Invalid=nullptr) const
Return the buffer for the specified FileID.
Encodes a location in the source.
std::pair< SourceLocation, SourceLocation > getImmediateExpansionRange(SourceLocation Loc) const
Return the start/end of the expansion information for an expansion location.
IdentifierInfo * getIdentifierInfo() const
Definition: Token.h:177
static Optional< Token > findNextToken(SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts)
Finds the token that comes right after the given location.
Definition: Lexer.cpp:1215
static void maybeDiagnoseIDCharCompat(DiagnosticsEngine &Diags, uint32_t C, CharSourceRange Range, bool IsFirst)
Definition: Lexer.cpp:1451
void setIdentifierInfo(IdentifierInfo *II)
Definition: Token.h:186
static const llvm::sys::UnicodeCharRange C99DisallowedInitialIDCharRanges[]
bool isAtStartOfLine() const
isAtStartOfLine - Return true if this token is at the start of a line.
Definition: Token.h:266
static SourceLocation GetBeginningOfToken(SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts)
Given a location any where in a source buffer, find the location that corresponds to the beginning of...
Definition: Lexer.cpp:527
static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range, const SourceManager &SM, const LangOptions &LangOpts)
Definition: Lexer.cpp:825
TokenKind
Provides a simple uniform namespace for tokens from all C languages.
Definition: TokenKinds.h:25
tok::ObjCKeywordKind getObjCKeywordID() const
Return the ObjC keyword kind.
Definition: Lexer.cpp:55
SourceLocation getCodeCompletionLoc() const
Returns the location of the code-completion point.
SourceLocation getExpansionLocStart() const
DiagnosticBuilder Diag(const char *Loc, unsigned DiagID) const
Diag - Forwarding function for diagnostics.
Definition: Lexer.cpp:1123
static const llvm::sys::UnicodeCharRange C99AllowedIDCharRanges[]
__PTRDIFF_TYPE__ ptrdiff_t
A signed integer type that is the result of subtracting two pointers.
Definition: opencl-c.h:68
static StringRef getImmediateMacroName(SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts)
Retrieve the name of the immediate macro expansion.
Definition: Lexer.cpp:952
static StringRef getImmediateMacroNameForDiagnostics(SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts)
Retrieve the name of the immediate macro expansion.
Definition: Lexer.cpp:999
bool inKeepCommentMode() const
inKeepCommentMode - Return true if the lexer should return comments as tokens.
Definition: Lexer.h:197
bool isTokenRange() const
Return true if the end of this range specifies the start of the last token.
static const llvm::sys::UnicodeCharRange UnicodeWhitespaceCharRanges[]
An opaque identifier used by SourceManager which refers to a source file (MemoryBuffer) along with it...
Dataflow Directional Tag Classes.
bool isValid() const
Return true if this is a valid SourceLocation object.
LLVM_READONLY bool isVerticalWhitespace(unsigned char c)
Returns true if this character is vertical ASCII whitespace: '\n', '\r'.
Definition: CharInfo.h:79
static CharSourceRange makeFileCharRange(CharSourceRange Range, const SourceManager &SM, const LangOptions &LangOpts)
Accepts a range and returns a character range with file locations.
Definition: Lexer.cpp:852
static size_t getSpellingSlow(const Token &Tok, const char *BufPtr, const LangOptions &LangOpts, char *Spelling)
Slow case of getSpelling.
Definition: Lexer.cpp:243
static FixItHint CreateRemoval(CharSourceRange RemoveRange)
Create a code modification hint that removes the given source range.
Definition: Diagnostic.h:116
bool isHandleIdentifierCase() const
Return true if the Preprocessor::HandleIdentifier must be called on a token of this identifier...
bool isLexingRawMode() const
Return true if this lexer is in raw mode or not.
LLVM_READONLY bool isIdentifierBody(unsigned char c, bool AllowDollar=false)
Returns true if this is a body character of a C identifier, which is [a-zA-Z0-9_].
Definition: CharInfo.h:59
tok::PPKeywordKind getPPKeywordID() const
Return the preprocessor keyword ID for this identifier.
void CodeCompleteNaturalLanguage()
Hook used by the lexer to invoke the "natural language" code completion point.
unsigned getLength() const
Definition: Token.h:127
static const char * findPlaceholderEnd(const char *CurPtr, const char *BufferEnd)
Definition: Lexer.cpp:2758
bool isMacroID() const
void setLiteralData(const char *Ptr)
Definition: Token.h:219
const char * getLiteralData() const
getLiteralData - For a literal token (numeric constant, string, etc), this returns a pointer to the s...
Definition: Token.h:215
FileID getFileID(SourceLocation SpellingLoc) const
Return the FileID for a SourceLocation.
static const llvm::sys::UnicodeCharRange CXX03AllowedIDCharRanges[]
bool isMacroArgExpansion(SourceLocation Loc, SourceLocation *StartLoc=nullptr) const
Tests whether the given source location represents a macro argument&#39;s expansion into the function-lik...
bool HandleIdentifier(Token &Identifier)
Callback invoked when the lexer reads an identifier and has filled in the tokens IdentifierInfo membe...
void CreateString(StringRef Str, Token &Tok, SourceLocation ExpansionLocStart=SourceLocation(), SourceLocation ExpansionLocEnd=SourceLocation())
Plop the specified string into a scratch buffer and set the specified token&#39;s location and length to ...
static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts)
Definition: Lexer.cpp:1428
SourceLocation getEnd() const
static FixItHint CreateInsertion(SourceLocation InsertionLoc, StringRef Code, bool BeforePreviousInsertions=false)
Create a code modification hint that inserts the given code string at a specific location.
Definition: Diagnostic.h:90
PreprocessorOptions & getPreprocessorOpts() const
Retrieve the preprocessor options used to initialize this preprocessor.
Definition: Preprocessor.h:746
const SrcMgr::SLocEntry & getSLocEntry(FileID FID, bool *Invalid=nullptr) const
static char GetTrigraphCharForLetter(char Letter)
GetTrigraphCharForLetter - Given a character that occurs after a ?? pair, return the decoded trigraph...
Definition: Lexer.cpp:1133
static bool isIdentifierBodyChar(char c, const LangOptions &LangOpts)
Returns true if the given character could appear in an identifier.
Definition: Lexer.cpp:1024
bool HandleComment(Token &Token, SourceRange Comment)
void ReadToEndOfLine(SmallVectorImpl< char > *Result=nullptr)
ReadToEndOfLine - Read the rest of the current preprocessor line as an uninterpreted string...
Definition: Lexer.cpp:2491
DiagnosticsEngine & getDiagnostics() const
Definition: Preprocessor.h:748
StringRef getRawIdentifier() const
getRawIdentifier - For a raw identifier token (i.e., an identifier lexed in raw mode), returns a reference to the text substring in the buffer if known.
Definition: Token.h:203
Not within a conflict marker.
Definition: Lexer.h:33
static char DecodeTrigraphChar(const char *CP, Lexer *L)
DecodeTrigraphChar - If the specified character is a legal trigraph when prefixed with ...
Definition: Lexer.cpp:1152
static const char * FindConflictEnd(const char *CurPtr, const char *BufferEnd, ConflictMarkerKind CMK)
Find the end of a version control conflict marker.
Definition: Lexer.cpp:2654
static FixItHint CreateReplacement(CharSourceRange RemoveRange, StringRef Code)
Create a code modification hint that replaces the given source range with the given code string...
Definition: Diagnostic.h:127
void SetCommentRetentionState(bool Mode)
SetCommentRetentionMode - Change the comment retention mode of the lexer to the specified mode...
Definition: Lexer.h:204
bool needsCleaning() const
Return true if this token has trigraphs or escaped newlines in it.
Definition: Token.h:283
static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a)
Copies the values of the most significant bits from each 8-bit element in a 128-bit integer vector of...
Definition: emmintrin.h:4341
bool isIgnored(unsigned DiagID, SourceLocation Loc) const
Determine whether the diagnostic is known to be ignored.
Definition: Diagnostic.h:732
A normal or diff3 conflict marker, initiated by at least 7 "<"s, separated by at least 7 "="s or "|"s...
Definition: Lexer.h:36
A trivial tuple used to represent a source range.
void clearFlag(TokenFlags Flag)
Unset the specified flag.
Definition: Token.h:244
bool hasUCN() const
Returns true if this token contains a universal character name.
Definition: Token.h:294
bool isPreprocessedOutput() const
Returns true if the preprocessor is responsible for generating output, false if it is producing token...
Definition: Preprocessor.h:816
void SetKeepWhitespaceMode(bool Val)
SetKeepWhitespaceMode - This method lets clients enable or disable whitespace retention mode...
Definition: Lexer.h:189
LLVM_READONLY bool isPreprocessingNumberBody(unsigned char c)
Return true if this is the body character of a C preprocessing number, which is [a-zA-Z0-9_.
Definition: CharInfo.h:148
bool hasLeadingSpace() const
Return true if this token has whitespace before it.
Definition: Token.h:270
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) const
Forwarding function for diagnostics.
void setRecordedPreambleConditionalStack(ArrayRef< PPConditionalInfo > s)
This class handles loading and caching of source files into memory.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding 8-bit values of the 128-bit integer vectors for equality...
Definition: emmintrin.h:3168
void startToken()
Reset all flags to cleared.
Definition: Token.h:169
std::pair< FileID, unsigned > getDecomposedLoc(SourceLocation Loc) const
Decompose the specified location into a raw FileID + Offset pair.
static std::string Stringify(StringRef Str, bool Charify=false)
Stringify - Convert the specified string into a C string by escaping '\' and " characters. This does not add surrounding ""'s to the string.
Definition: Lexer.cpp:214
Engages in a tight little dance with the lexer to efficiently preprocess tokens.
Definition: Preprocessor.h:98
bool LexEditorPlaceholders
When enabled, the preprocessor will construct editor placeholder tokens.