#include "llvm/Support/Regex.h"
    llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
      Style(Style), IdentTable(IdentTable), Keywords(IdentTable),
      Encoding(Encoding), Allocator(Allocator), FirstInLineIndex(0),
      FormattingDisabled(false), FormatOffRegex(Style.OneLineFormatOffRegex),
      MacroBlockBeginRegex(Style.MacroBlockBegin),
      MacroBlockEndRegex(Style.MacroBlockEnd) {
  Lex.reset(new Lexer(ID, SourceMgr.getBufferOrFake(ID), SourceMgr, LangOpts));
  Lex->SetKeepWhitespaceMode(true);
  for (const std::string &ForEachMacro : Style.ForEachMacros) {
    auto Identifier = &IdentTable.get(ForEachMacro);
    Macros.insert({Identifier, TT_ForEachMacro});
  }
  for (const std::string &IfMacro : Style.IfMacros) {
    auto Identifier = &IdentTable.get(IfMacro);
    Macros.insert({Identifier, TT_IfMacro});
  }
  for (const std::string &AttributeMacro : Style.AttributeMacros) {
    auto Identifier = &IdentTable.get(AttributeMacro);
    Macros.insert({Identifier, TT_AttributeMacro});
  }
  for (const std::string &StatementMacro : Style.StatementMacros) {
    auto Identifier = &IdentTable.get(StatementMacro);
    Macros.insert({Identifier, TT_StatementMacro});
  }
  for (const std::string &TypenameMacro : Style.TypenameMacros) {
    auto Identifier = &IdentTable.get(TypenameMacro);
    Macros.insert({Identifier, TT_TypenameMacro});
  }
  for (const std::string &NamespaceMacro : Style.NamespaceMacros) {
    auto Identifier = &IdentTable.get(NamespaceMacro);
    Macros.insert({Identifier, TT_NamespaceMacro});
  }
  for (const std::string &WhitespaceSensitiveMacro :
       Style.WhitespaceSensitiveMacros) {
    auto Identifier = &IdentTable.get(WhitespaceSensitiveMacro);
    Macros.insert({Identifier, TT_UntouchableMacroFunc});
  }
  for (const std::string &StatementAttributeLikeMacro :
       Style.StatementAttributeLikeMacros) {
    auto Identifier = &IdentTable.get(StatementAttributeLikeMacro);
    Macros.insert({Identifier, TT_StatementAttributeLikeMacro});
  }
  for (const auto &Macro : Style.MacrosSkippedByRemoveParentheses)
    MacrosSkippedByRemoveParentheses.insert(&IdentTable.get(Macro));
  for (const auto &TypeName : Style.TypeNames)
    TypeNames.insert(&IdentTable.get(TypeName));
  for (const auto &VariableTemplate : Style.VariableTemplates)
    VariableTemplates.insert(&IdentTable.get(VariableTemplate));
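// Main lexing loop: pull raw tokens until EOF, run language-specific
// handlers, merge adjacent tokens where needed, and track the format-off
// state driven by OneLineFormatOffRegex.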
  assert(Tokens.empty());
  assert(FirstInLineIndex == 0);
  enum { FO_None, FO_CurrentLine, FO_NextLine } FormatOff = FO_None;
  do {
    Tokens.push_back(getNextToken());
    auto &Tok = *Tokens.back();
      Tok.Finalized = true;
      FormatOff = FO_CurrentLine;
      Tok.Finalized = true;
    if (!FormattingDisabled && FormatOffRegex.match(Tok.TokenText)) {
      if (Tok.is(tok::comment) &&
        Tok.Finalized = true;
        FormatOff = FO_NextLine;
      for (auto *Token : reverse(Tokens)) {
        Token->Finalized = true;
        if (Token->NewlinesBefore > 0)
          break;
      }
      FormatOff = FO_CurrentLine;
    }
    if (Style.isJavaScript()) {
      tryParseJSRegexLiteral();
      handleTemplateStrings();
    } else if (Style.isTextProto()) {
      tryParsePythonComment();
    }
    tryMergePreviousTokens();
    if (Style.isCSharp()) {
      handleCSharpVerbatimAndInterpolatedStrings();
    } else if (Style.isTableGen()) {
      handleTableGenMultilineString();
      handleTableGenNumericLikeIdentifier();
    }
    if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
      FirstInLineIndex = Tokens.size() - 1;
  } while (Tokens.back()->isNot(tok::eof));
  if (Style.InsertNewlineAtEOF) {
    auto &TokEOF = *Tokens.back();
    if (TokEOF.NewlinesBefore == 0) {
      TokEOF.NewlinesBefore = 1;
      TokEOF.OriginalColumn = 0;
    }
  }
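// Try to merge the token sequence at the end of Tokens into a single token,
// dispatching on the language being formatted.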
void FormatTokenLexer::tryMergePreviousTokens() {
  if (tryMerge_TMacro())
    return;
  if (tryMergeConflictMarkers())
    return;
  if (tryMergeLessLess())
    return;
  if (tryMergeGreaterGreater())
    return;
  if (tryMergeForEach())
    return;
  if (Style.isCpp() && tryTransformTryUsageForC())
    return;

  if ((Style.Language == FormatStyle::LK_Cpp ||
       Style.Language == FormatStyle::LK_ObjC) &&
      tryMergeUserDefinedLiteral()) {
    return;
  }

  if (Style.isJavaScript() || Style.isCSharp()) {
    static const tok::TokenKind NullishCoalescingOperator[] = {tok::question,
                                                               tok::question};
    static const tok::TokenKind NullPropagatingOperator[] = {tok::question,
                                                             tok::period};
    static const tok::TokenKind FatArrow[] = {tok::equal, tok::greater};

    if (tryMergeTokens(FatArrow, TT_FatArrow))
      return;
    if (tryMergeTokens(NullishCoalescingOperator, TT_NullCoalescingOperator)) {
      Tokens.back()->Tok.setKind(tok::pipepipe);
      return;
    }
    if (tryMergeTokens(NullPropagatingOperator, TT_NullPropagatingOperator)) {
      Tokens.back()->Tok.setKind(tok::period);
      return;
    }
    if (tryMergeNullishCoalescingEqual())
      return;

    if (Style.isCSharp()) {
      static const tok::TokenKind CSharpNullConditionalLSquare[] = {
          tok::question, tok::l_square};

      if (tryMergeCSharpKeywordVariables())
        return;
      if (tryMergeCSharpStringLiteral())
        return;
      if (tryTransformCSharpForEach())
        return;
      if (tryMergeTokens(CSharpNullConditionalLSquare,
                         TT_CSharpNullConditionalLSquare)) {
        Tokens.back()->Tok.setKind(tok::l_square);
        return;
      }
    }
  }

  if (tryMergeNSStringLiteral())
    return;

  if (Style.isJavaScript()) {
    static const tok::TokenKind JSIdentity[] = {tok::equalequal, tok::equal};
    static const tok::TokenKind JSNotIdentity[] = {tok::exclaimequal,
                                                   tok::equal};
    static const tok::TokenKind JSShiftEqual[] = {tok::greater, tok::greater,
                                                  tok::greaterequal};
    static const tok::TokenKind JSExponentiation[] = {tok::star, tok::star};
    static const tok::TokenKind JSExponentiationEqual[] = {tok::star,
                                                           tok::starequal};
    static const tok::TokenKind JSPipePipeEqual[] = {tok::pipepipe, tok::equal};
    static const tok::TokenKind JSAndAndEqual[] = {tok::ampamp, tok::equal};

    if (tryMergeTokens(JSIdentity, TT_BinaryOperator))
      return;
    if (tryMergeTokens(JSNotIdentity, TT_BinaryOperator))
      return;
    if (tryMergeTokens(JSShiftEqual, TT_BinaryOperator))
      return;
    if (tryMergeTokens(JSExponentiation, TT_JsExponentiation))
      return;
    if (tryMergeTokens(JSExponentiationEqual, TT_JsExponentiationEqual)) {
      Tokens.back()->Tok.setKind(tok::starequal);
      return;
    }
    if (tryMergeTokens(JSAndAndEqual, TT_JsAndAndEqual) ||
        tryMergeTokens(JSPipePipeEqual, TT_JsPipePipeEqual)) {
      Tokens.back()->Tok.setKind(tok::equal);
      return;
    }
    if (tryMergeJSPrivateIdentifier())
      return;
  } else if (Style.isJava()) {
    static const tok::TokenKind JavaRightLogicalShiftAssign[] = {
        tok::greater, tok::greater, tok::greaterequal};
    if (tryMergeTokens(JavaRightLogicalShiftAssign, TT_BinaryOperator))
      return;
  } else if (Style.isVerilog()) {
    if (Tokens.size() >= 3 && Tokens.end()[-3]->is(TT_VerilogNumberBase) &&
        Tokens.end()[-2]->is(tok::numeric_constant) &&
        Tokens.back()->isOneOf(tok::numeric_constant, tok::identifier,
                               tok::question) &&
        tryMergeTokens(2, TT_Unknown)) {
      return;
    }
    if (tryMergeTokensAny({{tok::minus, tok::colon}, {tok::plus, tok::colon}},
                          TT_BinaryOperator)) {
      return;
    }
    if (Tokens.back()->TokenText.size() == 1 &&
        tryMergeTokensAny({{tok::caret, tok::tilde}, {tok::tilde, tok::caret}},
                          TT_BinaryOperator)) {
      Tokens.back()->Tok.setKind(tok::caret);
      return;
    }
    if (tryMergeTokens({tok::less, tok::less}, TT_BinaryOperator)) {
      Tokens.back()->Tok.setKind(tok::lessless);
      return;
    }
    if (tryMergeTokens({tok::greater, tok::greater}, TT_BinaryOperator)) {
      Tokens.back()->Tok.setKind(tok::greatergreater);
      return;
    }
    if (tryMergeTokensAny({{tok::lessless, tok::equal},
                           {tok::lessless, tok::lessequal},
                           {tok::greatergreater, tok::equal},
                           {tok::greatergreater, tok::greaterequal},
                           {tok::colon, tok::equal},
                           {tok::colon, tok::slash}},
                          TT_BinaryOperator)) {
      return;
    }
    if (tryMergeTokensAny({{tok::star, tok::star},
                           {tok::lessless, tok::less},
                           {tok::greatergreater, tok::greater},
                           {tok::exclaimequal, tok::equal},
                           {tok::exclaimequal, tok::question},
                           {tok::equalequal, tok::equal},
                           {tok::equalequal, tok::question}},
                          TT_BinaryOperator)) {
      return;
    }
    if (tryMergeTokensAny({{tok::plusequal, tok::greater},
                           {tok::plus, tok::star, tok::greater},
                           {tok::minusequal, tok::greater},
                           {tok::minus, tok::star, tok::greater},
                           {tok::less, tok::arrow},
                           {tok::equal, tok::greater},
                           {tok::star, tok::greater},
                           {tok::pipeequal, tok::greater},
                           {tok::pipe, tok::arrow}},
                          TT_BinaryOperator) ||
        Tokens.back()->is(tok::arrow)) {
      return;
    }
    if (Tokens.size() >= 3 &&
        Tokens[Tokens.size() - 3]->is(Keywords.kw_verilogHash) &&
        Tokens[Tokens.size() - 2]->isOneOf(tok::minus, tok::equal) &&
        Tokens[Tokens.size() - 1]->is(Keywords.kw_verilogHash) &&
        tryMergeTokens(3, TT_BinaryOperator)) {
      Tokens.back()->setFinalizedType(TT_BinaryOperator);
    }
  } else if (Style.isTableGen()) {
    if (tryMergeTokens({tok::l_square, tok::l_brace},
                       TT_TableGenMultiLineString)) {
      Tokens.back()->setFinalizedType(TT_TableGenMultiLineString);
      Tokens.back()->Tok.setKind(tok::string_literal);
      return;
    }
    if (tryMergeTokens({tok::exclaim, tok::identifier},
                       TT_TableGenBangOperator)) {
      Tokens.back()->Tok.setKind(tok::identifier);
      Tokens.back()->Tok.setIdentifierInfo(nullptr);
      if (Tokens.back()->TokenText == "!cond")
        Tokens.back()->setFinalizedType(TT_TableGenCondOperator);
      else
        Tokens.back()->setFinalizedType(TT_TableGenBangOperator);
      return;
    }
    if (tryMergeTokens({tok::exclaim, tok::kw_if}, TT_TableGenBangOperator)) {
      Tokens.back()->Tok.setKind(tok::identifier);
      Tokens.back()->Tok.setIdentifierInfo(nullptr);
      Tokens.back()->setFinalizedType(TT_TableGenBangOperator);
      return;
    }
    if (tryMergeTokens({tok::plus, tok::numeric_constant}, TT_Unknown)) {
      Tokens.back()->Tok.setKind(tok::numeric_constant);
      return;
    }
    if (tryMergeTokens({tok::minus, tok::numeric_constant}, TT_Unknown)) {
      Tokens.back()->Tok.setKind(tok::numeric_constant);
      return;
    }
  }
}
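// Merge an "@" followed by a string literal into a single Objective-C string
// literal token (TT_ObjCStringLiteral).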
bool FormatTokenLexer::tryMergeNSStringLiteral() {
  if (Tokens.size() < 2)
    return false;
  auto &At = *(Tokens.end() - 2);
  auto &String = *(Tokens.end() - 1);
  if (At->isNot(tok::at) || String->isNot(tok::string_literal))
    return false;
  At->Tok.setKind(tok::string_literal);
  At->TokenText = StringRef(At->TokenText.begin(),
                            String->TokenText.end() - At->TokenText.begin());
  At->ColumnWidth += String->ColumnWidth;
  At->setType(TT_ObjCStringLiteral);
  Tokens.erase(Tokens.end() - 1);
  return true;
}
bool FormatTokenLexer::tryMergeJSPrivateIdentifier() {
  if (Tokens.size() < 2)
    return false;
  auto &Hash = *(Tokens.end() - 2);
  auto &Identifier = *(Tokens.end() - 1);
  if (Hash->isNot(tok::hash) || Identifier->isNot(tok::identifier))
    return false;
  Hash->Tok.setKind(tok::identifier);
  Hash->TokenText =
      StringRef(Hash->TokenText.begin(),
                Identifier->TokenText.end() - Hash->TokenText.begin());
  Hash->ColumnWidth += Identifier->ColumnWidth;
  Hash->setType(TT_JsPrivateIdentifier);
  Tokens.erase(Tokens.end() - 1);
  return true;
}
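// Merge the "@"/"$" prefix of a C# verbatim or interpolated string into the
// following string literal and mark it TT_CSharpStringLiteral.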
bool FormatTokenLexer::tryMergeCSharpStringLiteral() {
  if (Tokens.size() < 2)
    return false;

  const auto String = *(Tokens.end() - 1);
  if (String->isNot(tok::string_literal))
    return false;

  auto Prefix = *(Tokens.end() - 2);
  if (Prefix->isNot(tok::at) && Prefix->TokenText != "$")
    return false;

  if (Tokens.size() > 2) {
    const auto Tok = *(Tokens.end() - 3);
    if ((Tok->TokenText == "$" && Prefix->is(tok::at)) ||
        (Tok->is(tok::at) && Prefix->TokenText == "$")) {
      // This looks like $@"..." or @$"..."; fold the extra prefix token in.
      Tok->ColumnWidth += Prefix->ColumnWidth;
      Tokens.erase(Tokens.end() - 2);
      Prefix = Tok;
    }
  }

  Prefix->Tok.setKind(tok::string_literal);
  Prefix->TokenText =
      StringRef(Prefix->TokenText.begin(),
                String->TokenText.end() - Prefix->TokenText.begin());
  Prefix->ColumnWidth += String->ColumnWidth;
  Prefix->setType(TT_CSharpStringLiteral);
  Tokens.erase(Tokens.end() - 1);
  return true;
}
const llvm::StringSet<> FormatTokenLexer::CSharpAttributeTargets = {
    "assembly", "module",   "field",  "event", "method",
    "param",    "property", "return", "type",
};
bool FormatTokenLexer::tryMergeNullishCoalescingEqual() {
  if (Tokens.size() < 2)
    return false;
  auto &NullishCoalescing = *(Tokens.end() - 2);
  auto &Equal = *(Tokens.end() - 1);
  if (NullishCoalescing->isNot(TT_NullCoalescingOperator) ||
      Equal->isNot(tok::equal)) {
    return false;
  }
  NullishCoalescing->Tok.setKind(tok::equal);
  NullishCoalescing->TokenText =
      StringRef(NullishCoalescing->TokenText.begin(),
                Equal->TokenText.end() - NullishCoalescing->TokenText.begin());
  NullishCoalescing->ColumnWidth += Equal->ColumnWidth;
  NullishCoalescing->setType(TT_NullCoalescingEqual);
  Tokens.erase(Tokens.end() - 1);
  return true;
}
bool FormatTokenLexer::tryMergeCSharpKeywordVariables() {
  if (Tokens.size() < 2)
    return false;
  const auto At = *(Tokens.end() - 2);
  if (At->isNot(tok::at))
    return false;
  const auto Keyword = *(Tokens.end() - 1);
  if (!Keywords.isCSharpKeyword(*Keyword))
    return false;

  At->Tok.setKind(tok::identifier);
  At->TokenText = StringRef(At->TokenText.begin(),
                            Keyword->TokenText.end() - At->TokenText.begin());
  At->ColumnWidth += Keyword->ColumnWidth;
  At->setType(Keyword->getType());
  Tokens.erase(Tokens.end() - 1);
  return true;
}
bool FormatTokenLexer::tryTransformCSharpForEach() {
  auto &Identifier = *(Tokens.end() - 1);
  if (Identifier->isNot(tok::identifier))
    return false;
  if (Identifier->TokenText != "foreach")
    return false;

  Identifier->setType(TT_ForEachMacro);
  Identifier->Tok.setKind(tok::kw_for);
  return true;
}
bool FormatTokenLexer::tryMergeForEach() {
  if (Tokens.size() < 2)
    return false;
  auto &For = *(Tokens.end() - 2);
  auto &Each = *(Tokens.end() - 1);
  if (For->isNot(tok::kw_for))
    return false;
  if (Each->isNot(tok::identifier))
    return false;
  if (Each->TokenText != "each")
    return false;

  For->setType(TT_ForEachMacro);
  For->Tok.setKind(tok::kw_for);
  For->TokenText = StringRef(For->TokenText.begin(),
                             Each->TokenText.end() - For->TokenText.begin());
  For->ColumnWidth += Each->ColumnWidth;
  Tokens.erase(Tokens.end() - 1);
  return true;
}
bool FormatTokenLexer::tryTransformTryUsageForC() {
  if (Tokens.size() < 2)
    return false;
  auto &Try = *(Tokens.end() - 2);
  if (Try->isNot(tok::kw_try))
    return false;
  auto &Next = *(Tokens.end() - 1);
  if (Next->isOneOf(tok::l_brace, tok::colon, tok::hash, tok::comment))
    return false;

  if (Tokens.size() > 2) {
    auto &At = *(Tokens.end() - 3);
    if (At->is(tok::at))
      return false;
  }

  Try->Tok.setKind(tok::identifier);
  return true;
}
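// Merge two adjacent "<" tokens into a single "<<" unless a neighboring token
// is itself a "<" or there is whitespace between them.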
bool FormatTokenLexer::tryMergeLessLess() {
  // Merge X,less,less,Y into X,lessless,Y unless X or Y is less.
  if (Tokens.size() < 3)
    return false;

  auto First = Tokens.end() - 3;

  // Only merge if there currently is no whitespace between the two "<".
  if (First[1]->hasWhitespaceBefore())
    return false;

  auto X = Tokens.size() > 3 ? First[-1] : nullptr;
  if (X && X->is(tok::less))
    return false;

  auto Y = First[2];
  if ((!X || X->isNot(tok::kw_operator)) && Y->is(tok::less))
    return false;

  First[0]->Tok.setKind(tok::lessless);
  First[0]->TokenText = "<<";
  First[0]->ColumnWidth += 1;
  Tokens.erase(Tokens.end() - 2);
  return true;
}
bool FormatTokenLexer::tryMergeGreaterGreater() {
  if (Tokens.size() < 2)
    return false;

  auto First = Tokens.end() - 2;

  // Only merge if there currently is no whitespace between the two ">".
  if (First[1]->hasWhitespaceBefore())
    return false;

  auto Tok = Tokens.size() > 2 ? First[-1] : nullptr;
  if (Tok && Tok->isNot(tok::kw_operator))
    return false;

  First[0]->Tok.setKind(tok::greatergreater);
  First[0]->TokenText = ">>";
  First[0]->ColumnWidth += 1;
  Tokens.erase(Tokens.end() - 1);
  return true;
}
bool FormatTokenLexer::tryMergeUserDefinedLiteral() {
  if (Tokens.size() < 2)
    return false;

  auto *First = Tokens.end() - 2;
  auto &Suffix = First[1];
  if (Suffix->hasWhitespaceBefore() || Suffix->TokenText != "$")
    return false;

  if (!Text.ends_with("_"))
    return false;

  Tokens.erase(&Suffix);
  return true;
}
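// Merge the last Kinds.size() tokens if they match the given token kinds; the
// Count overload below performs the actual text and width merging.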
bool FormatTokenLexer::tryMergeTokens(ArrayRef<tok::TokenKind> Kinds,
                                      TokenType NewType) {
  if (Tokens.size() < Kinds.size())
    return false;

  const auto *First = Tokens.end() - Kinds.size();
  for (unsigned i = 0; i < Kinds.size(); ++i)
    if (First[i]->isNot(Kinds[i]))
      return false;

  return tryMergeTokens(Kinds.size(), NewType);
}
bool FormatTokenLexer::tryMergeTokens(size_t Count, TokenType NewType) {
  if (Tokens.size() < Count)
    return false;

  const auto *First = Tokens.end() - Count;
  unsigned AddLength = 0;
  for (size_t i = 1; i < Count; ++i) {
    // If whitespace separates a token from the previous one, do not merge.
    if (First[i]->hasWhitespaceBefore())
      return false;
    AddLength += First[i]->TokenText.size();
  }

  Tokens.resize(Tokens.size() - Count + 1);
  First[0]->TokenText = StringRef(First[0]->TokenText.data(),
                                  First[0]->TokenText.size() + AddLength);
  First[0]->ColumnWidth += AddLength;
  First[0]->setType(NewType);
  return true;
}
bool FormatTokenLexer::tryMergeTokensAny(
    ArrayRef<ArrayRef<tok::TokenKind>> Kinds, TokenType NewType) {
  return llvm::any_of(Kinds, [this, NewType](ArrayRef<tok::TokenKind> Kinds) {
    return tryMergeTokens(Kinds, NewType);
  });
}
// Returns \c true if \p Tok can only be followed by an operand in JavaScript.
bool FormatTokenLexer::precedesOperand(FormatToken *Tok) {
  return Tok->isOneOf(tok::period, tok::l_paren, tok::comma, tok::l_brace,
                      tok::r_brace, tok::l_square, tok::semi, tok::exclaim,
                      tok::colon, tok::question, tok::tilde) ||
         Tok->isOneOf(tok::kw_return, tok::kw_do, tok::kw_case, tok::kw_throw,
                      tok::kw_else, tok::kw_void, tok::kw_typeof,
                      Keywords.kw_instanceof, Keywords.kw_in) ||
         Tok->isPlacementOperator() || Tok->isBinaryOperator();
}
bool FormatTokenLexer::canPrecedeRegexLiteral(FormatToken *Prev) {
  // After a postfix ++/-- or a "!", look one token further back to decide
  // whether an operand (and thus a regex literal) can follow.
  if (Prev->isOneOf(tok::plusplus, tok::minusminus, tok::exclaim))
    return Tokens.size() < 3 || precedesOperand(Tokens[Tokens.size() - 3]);

  if (!precedesOperand(Prev))
    return false;

  return true;
}
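// Detect a Java text block ("""...""") starting at the current token and
// re-lex past it.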
void FormatTokenLexer::tryParseJavaTextBlock() {
  if (FormatTok->TokenText != "\"\"")
    return;

  const auto *S = Lex->getBufferLocation();
  const auto *End = Lex->getBuffer().end();

  if (S == End || *S != '\"')
    return;

  // Scan forward for the """ that closes the text block.
  for (int Count = 0; Count < 3 && S < End; ++S) {

  resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(S)));
}
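// Heuristically lex a JavaScript regex literal starting at a "/" token,
// tracking character classes so that "/" inside [...] does not end the regex.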
void FormatTokenLexer::tryParseJSRegexLiteral() {
  FormatToken *RegexToken = Tokens.back();
  if (RegexToken->isNoneOf(tok::slash, tok::slashequal))
    return;

  // Find the last non-comment token to decide whether a regex can start here.
  FormatToken *Prev = nullptr;
  for (FormatToken *FT : llvm::drop_begin(llvm::reverse(Tokens))) {
    if (FT->isNot(tok::comment)) {
      Prev = FT;
      break;
    }
  }

  if (!canPrecedeRegexLiteral(Prev))
    return;

  // Lex the regex body directly from the file buffer.
  const char *Offset = Lex->getBufferLocation();
  const char *RegexBegin = Offset - RegexToken->TokenText.size();
  StringRef Buffer = Lex->getBuffer();
  bool InCharacterClass = false;
  bool HaveClosingSlash = false;
  for (; !HaveClosingSlash && Offset != Buffer.end(); ++Offset) {
        InCharacterClass = true;
        InCharacterClass = false;
      if (!InCharacterClass)
        HaveClosingSlash = true;
  }

  RegexToken->setType(TT_RegexLiteral);
  RegexToken->Tok.setKind(tok::string_literal);
  RegexToken->TokenText = StringRef(RegexBegin, Offset - RegexBegin);
  RegexToken->ColumnWidth = RegexToken->TokenText.size();

  resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
}
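// Fragment of a file-local helper that scans the body of a C# verbatim or
// interpolated string literal, tracking repeated quotes and unmatched "{".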
  auto Repeated = [&Begin, End]() {
    return Begin + 1 < End && Begin[1] == Begin[0];
  };

  for (int UnmatchedOpeningBraceCount = 0; Begin < End; ++Begin) {
        ++UnmatchedOpeningBraceCount;
      else if (UnmatchedOpeningBraceCount > 0)
        --UnmatchedOpeningBraceCount;
      if (UnmatchedOpeningBraceCount > 0)
      if (Verbatim && Repeated()) {
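// Re-lex C# verbatim (@"...") and interpolated ($"...") string literals and
// adjust the token's column widths, handling multiline literals.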
void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
  FormatToken *CSharpStringLiteral = Tokens.back();
  if (CSharpStringLiteral->isNot(TT_CSharpStringLiteral))
    return;

  auto &TokenText = CSharpStringLiteral->TokenText;

  bool Verbatim = false;
  bool Interpolated = false;
  } else if (TokenText.starts_with(R"(@")")) {
    Verbatim = true;
  } else if (TokenText.starts_with(R"($")")) {
    Interpolated = true;
  }

  if (!Verbatim && !Interpolated)
    return;

  const char *StrBegin = Lex->getBufferLocation() - TokenText.size();
  const char *Offset = StrBegin;
  Offset += Verbatim && Interpolated ? 3 : 2;

  const auto End = Lex->getBuffer().end();

  StringRef LiteralText(StrBegin, Offset - StrBegin + 1);

  // Adjust width for potentially multiline string literals.
  size_t FirstBreak = LiteralText.find('\n');
  StringRef FirstLineText = FirstBreak == StringRef::npos
                                ? LiteralText
                                : LiteralText.substr(0, FirstBreak);
  CSharpStringLiteral->ColumnWidth = encoding::columnWidthWithTabs(
      FirstLineText, CSharpStringLiteral->OriginalColumn, Style.TabWidth,
      Encoding);
  size_t LastBreak = LiteralText.rfind('\n');
  if (LastBreak != StringRef::npos) {
    CSharpStringLiteral->IsMultiline = true;
    unsigned StartColumn = 0;
    CSharpStringLiteral->LastLineColumnWidth = encoding::columnWidthWithTabs(
        LiteralText.substr(LastBreak + 1), StartColumn, Style.TabWidth,
        Encoding);
  }

  assert(Offset < End);
  resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset + 1)));
}
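// Extend a TableGen multiline string token ("[{" ... "}]") to its closing
// delimiter and recompute its column widths.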
void FormatTokenLexer::handleTableGenMultilineString() {
  auto *MultiLineString = Tokens.back();
  if (MultiLineString->isNot(TT_TableGenMultiLineString))
    return;

  auto OpenOffset = Lex->getCurrentBufferOffset() - 2;
  // "}]" is the end of the multiline string.
  auto CloseOffset = Lex->getBuffer().find("}]", OpenOffset);
  if (CloseOffset == StringRef::npos)
    return;
  auto Text = Lex->getBuffer().substr(OpenOffset, CloseOffset - OpenOffset + 2);
  MultiLineString->TokenText = Text;
  resetLexer(SourceMgr.getFileOffset(
      Lex->getSourceLocation(Lex->getBufferLocation() - 2 + Text.size())));
  auto FirstLineText = Text;
  auto FirstBreak = Text.find('\n');
  // Set IsMultiline and LastLineColumnWidth when the string spans lines.
  if (FirstBreak != StringRef::npos) {
    MultiLineString->IsMultiline = true;
    FirstLineText = Text.substr(0, FirstBreak + 1);
    auto LastBreak = Text.rfind('\n');
    MultiLineString->LastLineColumnWidth = encoding::columnWidthWithTabs(
        Text.substr(LastBreak + 1), MultiLineString->OriginalColumn,
        Style.TabWidth, Encoding);
  }
  // ColumnWidth holds the width of the first line.
  MultiLineString->ColumnWidth = encoding::columnWidthWithTabs(
      FirstLineText, MultiLineString->OriginalColumn, Style.TabWidth, Encoding);
}
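// In TableGen, tokens lexed as numeric constants may actually be identifiers
// that merely start with digits; reclassify them here.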
void FormatTokenLexer::handleTableGenNumericLikeIdentifier() {
  auto *Tok = Tokens.back();
  if (Tok->isNot(tok::numeric_constant))
    return;
  StringRef Text = Tok->TokenText;

  const auto NonDigitPos = Text.find_if([](char C) { return !isdigit(C); });
  // All the characters are digits: a real number.
  if (NonDigitPos == StringRef::npos)
    return;
  char FirstNonDigit = Text[NonDigitPos];
  if (NonDigitPos < Text.size() - 1) {
    char TheNext = Text[NonDigitPos + 1];
    // Regarded as a binary number.
    if (FirstNonDigit == 'b' && (TheNext == '0' || TheNext == '1'))
      return;
    // Regarded as a hexadecimal number.
    if (FirstNonDigit == 'x' && isxdigit(TheNext))
      return;
  }
  if (isalpha(FirstNonDigit) || FirstNonDigit == '_') {
    // This is actually an identifier in TableGen.
    Tok->Tok.setKind(tok::identifier);
    Tok->Tok.setIdentifierInfo(nullptr);
  }
}
void FormatTokenLexer::handleTemplateStrings() {
  FormatToken *BacktickToken = Tokens.back();

  if (BacktickToken->is(tok::l_brace)) {
  if (BacktickToken->is(tok::r_brace)) {
    if (StateStack.size() == 1)
      return;
  } else if (BacktickToken->is(tok::unknown) &&
             BacktickToken->TokenText == "`") {

  // Lex the template string body directly from the file buffer.
  const char *Offset = Lex->getBufferLocation();
  const char *TmplBegin = Offset - BacktickToken->TokenText.size();
  for (; Offset != Lex->getBuffer().end(); ++Offset) {
    if (Offset[0] == '`') {
    if (Offset[0] == '\\') {
    } else if (Offset + 1 < Lex->getBuffer().end() && Offset[0] == '$' &&
               Offset[1] == '{') {

  StringRef LiteralText(TmplBegin, Offset - TmplBegin);
  BacktickToken->setType(TT_TemplateString);
  BacktickToken->Tok.setKind(tok::string_literal);
  BacktickToken->TokenText = LiteralText;

  // Adjust width for potentially multiline string literals.
  size_t FirstBreak = LiteralText.find('\n');
  StringRef FirstLineText = FirstBreak == StringRef::npos
                                ? LiteralText
                                : LiteralText.substr(0, FirstBreak);
  BacktickToken->ColumnWidth = encoding::columnWidthWithTabs(
      FirstLineText, BacktickToken->OriginalColumn, Style.TabWidth, Encoding);
  size_t LastBreak = LiteralText.rfind('\n');
  if (LastBreak != StringRef::npos) {
    BacktickToken->IsMultiline = true;
    unsigned StartColumn = 0;
    BacktickToken->LastLineColumnWidth =
        encoding::columnWidthWithTabs(LiteralText.substr(LastBreak + 1),
                                      StartColumn, Style.TabWidth, Encoding);
  }

  SourceLocation loc = Lex->getSourceLocation(Offset);
  resetLexer(SourceMgr.getFileOffset(loc));
}
void FormatTokenLexer::tryParsePythonComment() {
  FormatToken *HashToken = Tokens.back();
  if (HashToken->isNoneOf(tok::hash, tok::hashhash))
    return;
  // Turn the remainder of this line into a comment.
  const char *CommentBegin =
      Lex->getBufferLocation() - HashToken->TokenText.size();
  size_t From = CommentBegin - Lex->getBuffer().begin();
  size_t To = Lex->getBuffer().find_first_of('\n', From);
  if (To == StringRef::npos)
    To = Lex->getBuffer().size();
  size_t Len = To - From;
  HashToken->setType(TT_LineComment);
  HashToken->Tok.setKind(tok::comment);
  HashToken->TokenText = Lex->getBuffer().substr(From, Len);
  SourceLocation Loc = To < Lex->getBuffer().size()
                           ? Lex->getSourceLocation(CommentBegin + Len)
                           : SourceMgr.getLocForEndOfFile(ID);
  resetLexer(SourceMgr.getFileOffset(Loc));
}
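// Merge _T("...") into a single string literal token, copying position
// information from the _T macro token.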
bool FormatTokenLexer::tryMerge_TMacro() {
  if (Tokens.size() < 4)
    return false;
  FormatToken *Last = Tokens.back();
  if (Last->isNot(tok::r_paren))
    return false;

  FormatToken *String = Tokens[Tokens.size() - 2];
  if (String->isNot(tok::string_literal) || String->IsMultiline)
    return false;

  if (Tokens[Tokens.size() - 3]->isNot(tok::l_paren))
    return false;

  FormatToken *Macro = Tokens[Tokens.size() - 4];
  if (Macro->TokenText != "_T")
    return false;

  const char *Start = Macro->TokenText.data();
  const char *End = Last->TokenText.data() + Last->TokenText.size();
  String->TokenText = StringRef(Start, End - Start);
  String->IsFirst = Macro->IsFirst;
  String->LastNewlineOffset = Macro->LastNewlineOffset;
  String->WhitespaceRange = Macro->WhitespaceRange;
  String->OriginalColumn = Macro->OriginalColumn;
  String->ColumnWidth = encoding::columnWidthWithTabs(
      String->TokenText, String->OriginalColumn, Style.TabWidth, Encoding);
  String->NewlinesBefore = Macro->NewlinesBefore;
  String->HasUnescapedNewline = Macro->HasUnescapedNewline;

  Tokens.pop_back();
  Tokens.pop_back();
  Tokens.pop_back();
  Tokens.back() = String;
  if (FirstInLineIndex >= Tokens.size())
    FirstInLineIndex = Tokens.size() - 1;
  return true;
}
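// Collapse a line that starts with a version-control conflict marker into a
// single token so the parser can skip it without reformatting it.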
bool FormatTokenLexer::tryMergeConflictMarkers() {
  if (Tokens.back()->NewlinesBefore == 0 && Tokens.back()->isNot(tok::eof))
    return false;

  // Get the position of the first token in the line.
  unsigned FirstInLineOffset;
  std::tie(ID, FirstInLineOffset) = SourceMgr.getDecomposedLoc(
      Tokens[FirstInLineIndex]->getStartOfNonWhitespace());
  StringRef Buffer = SourceMgr.getBufferOrFake(ID).getBuffer();
  // Calculate the offset of the start of the line by finding the previous
  // newline.
  auto LineOffset = Buffer.rfind('\n', FirstInLineOffset);
  if (LineOffset == StringRef::npos)
    LineOffset = 0;
  else
    ++LineOffset;

  auto FirstSpace = Buffer.find_first_of(" \n", LineOffset);
  StringRef LineStart;
  if (FirstSpace == StringRef::npos)
    LineStart = Buffer.substr(LineOffset);
  else
    LineStart = Buffer.substr(LineOffset, FirstSpace - LineOffset);

  TokenType Type = TT_Unknown;
  if (LineStart == "<<<<<<<" || LineStart == ">>>>") {
    Type = TT_ConflictStart;
  } else if (LineStart == "|||||||" || LineStart == "=======" ||
             LineStart == "====") {
    Type = TT_ConflictAlternative;
  } else if (LineStart == ">>>>>>>" || LineStart == "<<<<") {
    Type = TT_ConflictEnd;
  }

  if (Type != TT_Unknown) {
    FormatToken *Next = Tokens.back();

    Tokens.resize(FirstInLineIndex + 1);
    // No complete token is needed here; the parser skips conflict markers
    // without touching the whitespace around them.
    Tokens.back()->setType(Type);
    Tokens.back()->Tok.setKind(tok::kw___unknown_anytype);

    Tokens.push_back(Next);
    return true;
  }

  return false;
}
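// Build the stashed token that is returned after a ">>" or "<<" was split
// into two tokens in getNextToken().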
FormatToken *FormatTokenLexer::getStashedToken() {
  Token Tok = FormatTok->Tok;
  StringRef TokenText = FormatTok->TokenText;

  FormatTok = new (Allocator.Allocate()) FormatToken;
  FormatTok->Tok = Tok;
  SourceLocation TokLocation =
      FormatTok->Tok.getLocation().getLocWithOffset(Tok.getLength() - 1);
  FormatTok->Tok.setLocation(TokLocation);
  FormatTok->WhitespaceRange = SourceRange(TokLocation, TokLocation);
  FormatTok->TokenText = TokenText;
  FormatTok->ColumnWidth = 1;

  return FormatTok;
}
void FormatTokenLexer::truncateToken(size_t NewLen) {
  assert(NewLen <= FormatTok->TokenText.size());
  resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(
      Lex->getBufferLocation() - FormatTok->TokenText.size() + NewLen)));
  FormatTok->TokenText = FormatTok->TokenText.substr(0, NewLen);
  FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
      FormatTok->TokenText, FormatTok->OriginalColumn, Style.TabWidth,
      Encoding);
  FormatTok->Tok.setLength(NewLen);
}
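// Fragment of a file-local helper that counts the leading whitespace of a raw
// token, including backslash-newline line splices.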
  const unsigned char *const Begin = Text.bytes_begin();
  const unsigned char *const End = Text.bytes_end();
  const unsigned char *Cur = Begin;
    } else if (Cur[0] == '\\') {
      const auto *Lookahead = Cur + 1;
      Cur = Lookahead + 1;
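// Body of getNextToken(): reads a raw token, folds leading whitespace into
// column and newline bookkeeping, and applies per-language token fix-ups.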
    return getStashedToken();

  FormatTok = new (Allocator.Allocate()) FormatToken;
  readRawToken(*FormatTok);
  SourceLocation WhitespaceStart =
      FormatTok->Tok.getLocation().getLocWithOffset(-TrailingWhitespace);
  FormatTok->IsFirst = IsFirstToken;
  IsFirstToken = false;

  // Consume and record whitespace until a significant token is found.
  unsigned WhitespaceLength = TrailingWhitespace;
  while (FormatTok->isNot(tok::eof)) {
    if (LeadingWhitespace == 0)
      break;
    if (LeadingWhitespace < FormatTok->TokenText.size())
      truncateToken(LeadingWhitespace);
    StringRef Text = FormatTok->TokenText;
    bool InEscape = false;
    for (int i = 0, e = Text.size(); i != e; ++i) {
        if (i + 1 < e && Text[i + 1] == '\n')
            i > 0 && Text[i - 1] == '\n' &&
            ((i + 1 < e && Text[i + 1] == '\n') ||
             (i + 2 < e && Text[i + 1] == '\r' && Text[i + 2] == '\n'))) {
          Style.TabWidth - (Style.TabWidth ? Column % Style.TabWidth : 0);
        assert([&]() -> bool {
          return j < Text.size() && (Text[j] == '\n' || Text[j] == '\r');
    }
    WhitespaceLength += Text.size();
    readRawToken(*FormatTok);
  }

  if (FormatTok->is(tok::unknown))
    FormatTok->setType(TT_ImplicitStringLiteral);
  const bool IsCpp = Style.isCpp();

  if (const auto Text = FormatTok->TokenText;
      Text.starts_with("//") &&
      (IsCpp || Style.isJavaScript() || Style.isJava())) {
    assert(FormatTok->is(tok::comment));
    for (auto Pos = Text.find('\\'); Pos++ != StringRef::npos;
         Pos = Text.find('\\', Pos)) {
      if (Pos < Text.size() && Text[Pos] == '\n' &&
          (!IsCpp || Text.substr(Pos + 1).ltrim().starts_with("//"))) {

  if (Style.isVerilog()) {
    static const llvm::Regex NumberBase("^s?[bdho]", llvm::Regex::IgnoreCase);
    SmallVector<StringRef, 1> Matches;
    if (FormatTok->is(tok::numeric_constant)) {
      // Split the size off a Verilog number at the quote, e.g. "16'hdead".
      auto Quote = FormatTok->TokenText.find('\'');
      if (Quote != StringRef::npos)
        truncateToken(Quote);
    } else if (FormatTok->isOneOf(tok::hash, tok::hashhash)) {
      FormatTok->Tok.setKind(tok::raw_identifier);
    } else if (FormatTok->is(tok::raw_identifier)) {
      if (FormatTok->TokenText == "`") {
        FormatTok->Tok.setIdentifierInfo(nullptr);
        FormatTok->Tok.setKind(tok::hash);
      } else if (FormatTok->TokenText == "``") {
        FormatTok->Tok.setIdentifierInfo(nullptr);
        FormatTok->Tok.setKind(tok::hashhash);
      } else if (!Tokens.empty() && Tokens.back()->is(Keywords.kw_apostrophe) &&
                 NumberBase.match(FormatTok->TokenText, &Matches)) {
        // The base of a Verilog number (e.g. the "h" in "'h2e") was lexed as
        // part of an identifier; split it off.
        truncateToken(Matches[0].size());
        FormatTok->setFinalizedType(TT_VerilogNumberBase);
      }
    }
  }
  FormatTok->WhitespaceRange = SourceRange(
      WhitespaceStart, WhitespaceStart.getLocWithOffset(WhitespaceLength));

  FormatTok->OriginalColumn = Column;

  TrailingWhitespace = 0;
  if (FormatTok->is(tok::comment)) {
    StringRef UntrimmedText = FormatTok->TokenText;
    FormatTok->TokenText = FormatTok->TokenText.rtrim(" \t\v\f");
    TrailingWhitespace = UntrimmedText.size() - FormatTok->TokenText.size();
  } else if (FormatTok->is(tok::raw_identifier)) {
    IdentifierInfo &Info = IdentTable.get(FormatTok->TokenText);
    FormatTok->Tok.setIdentifierInfo(&Info);
    FormatTok->Tok.setKind(Info.getTokenID());
    if (Style.isJava() &&
        FormatTok->isOneOf(tok::kw_struct, tok::kw_union, tok::kw_delete,
                           tok::kw_operator)) {
      FormatTok->Tok.setKind(tok::identifier);
    } else if (Style.isJavaScript() &&
               FormatTok->isOneOf(tok::kw_struct, tok::kw_union,
                                  tok::kw_operator)) {
      FormatTok->Tok.setKind(tok::identifier);
    } else if (Style.isTableGen() && !Keywords.isTableGenKeyword(*FormatTok)) {
      FormatTok->Tok.setKind(tok::identifier);
    }
  } else if (const bool Greater = FormatTok->is(tok::greatergreater);
             Greater || FormatTok->is(tok::lessless)) {
    FormatTok->Tok.setKind(Greater ? tok::greater : tok::less);
    FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
  } else if (Style.isJava() && FormatTok->is(tok::string_literal)) {
    tryParseJavaTextBlock();
  }

  if (Style.isVerilog() && !Tokens.empty() &&
      Tokens.back()->is(TT_VerilogNumberBase) &&
      FormatTok->Tok.isOneOf(tok::identifier, tok::question)) {
    // Mark the token following a Verilog number base as a numeric constant.
    FormatTok->Tok.setKind(tok::numeric_constant);
  }

  // Now FormatTok is the next non-whitespace token.
  StringRef Text = FormatTok->TokenText;
  size_t FirstNewlinePos = Text.find('\n');
  if (FirstNewlinePos == StringRef::npos) {
    FormatTok->ColumnWidth =
        encoding::columnWidthWithTabs(Text, Column, Style.TabWidth, Encoding);
    Column += FormatTok->ColumnWidth;
  } else {
    FormatTok->IsMultiline = true;
    FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
        Text.substr(0, FirstNewlinePos), Column, Style.TabWidth, Encoding);
    FormatTok->LastLineColumnWidth = encoding::columnWidthWithTabs(
        Text.substr(Text.find_last_of('\n') + 1), 0, Style.TabWidth, Encoding);
    Column = FormatTok->LastLineColumnWidth;
  }
  auto *Identifier = FormatTok->Tok.getIdentifierInfo();
  auto it = Macros.find(Identifier);
  if ((Tokens.empty() || !Tokens.back()->Tok.getIdentifierInfo() ||
       Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() !=
           tok::pp_define) &&
      it != Macros.end()) {
    FormatTok->setType(it->second);
    if (it->second == TT_IfMacro)
      FormatTok->Tok.setKind(tok::kw_if);
  } else if (FormatTok->is(tok::identifier)) {
    if (MacroBlockBeginRegex.match(Text))
      FormatTok->setType(TT_MacroBlockBegin);
    else if (MacroBlockEndRegex.match(Text))
      FormatTok->setType(TT_MacroBlockEnd);
    else if (MacrosSkippedByRemoveParentheses.contains(Identifier))
      FormatTok->setFinalizedType(TT_FunctionLikeMacro);
    else if (TemplateNames.contains(Identifier))
      FormatTok->setFinalizedType(TT_TemplateName);
    else if (TypeNames.contains(Identifier))
      FormatTok->setFinalizedType(TT_TypeName);
    else if (VariableTemplates.contains(Identifier))
      FormatTok->setFinalizedType(TT_VariableTemplate);
  }

  return FormatTok;
}
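// Verilog-specific raw lexing: handle backtick directives and escaped
// identifiers that the standard raw lexer would split incorrectly.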
bool FormatTokenLexer::readRawTokenVerilogSpecific(Token &Tok) {
  const char *Start = Lex->getBufferLocation();

    if (Start[1] == '`')

    if (Start[1] == '\r' || Start[1] == '\n')

  while (Start[Len] != '\0' && Start[Len] != '\f' && Start[Len] != '\n' &&
         Start[Len] != '\r' && Start[Len] != '\t' && Start[Len] != '\v' &&
         Start[Len] != ' ') {
    if (Start[Len] == '\\' && Start[Len + 1] == '\r' &&
        Start[Len + 2] == '\n') {
    } else if (Start[Len] == '\\' &&
               (Start[Len + 1] == '\r' || Start[Len + 1] == '\n')) {
  }

  Tok.setKind(tok::raw_identifier);
  Tok.setLocation(Lex->getSourceLocation(Start, Len));
  Tok.setRawIdentifierData(Start);
  Lex->seek(Lex->getCurrentBufferOffset() + Len, false);
  return true;
}
void FormatTokenLexer::readRawToken(FormatToken &Tok) {
  // For Verilog, first see if there is a special token, and fall back to the
  // normal lexer if there isn't one.
  if (!Style.isVerilog() || !readRawTokenVerilogSpecific(Tok.Tok))
    Lex->LexFromRawLexer(Tok.Tok);
  Tok.TokenText = StringRef(SourceMgr.getCharacterData(Tok.Tok.getLocation()),
                            Tok.Tok.getLength());

  if (Tok.is(tok::unknown)) {
    if (Tok.TokenText.starts_with("\"")) {
      Tok.Tok.setKind(tok::string_literal);
      Tok.IsUnterminatedLiteral = true;
    } else if (Style.isJavaScript() && Tok.TokenText == "''") {
      Tok.Tok.setKind(tok::string_literal);
    }
  }

  if ((Style.isJavaScript() || Style.isProto()) && Tok.is(tok::char_constant))
    Tok.Tok.setKind(tok::string_literal);

  if (Tok.TokenText == "// clang-format on" ||
      Tok.TokenText == "/* clang-format on */") {
    FormattingDisabled = false;
  }

  Tok.Finalized = FormattingDisabled;

  if (Tok.TokenText == "// clang-format off" ||
      Tok.TokenText == "/* clang-format off */") {
    FormattingDisabled = true;
  }
}
void FormatTokenLexer::resetLexer(unsigned Offset) {
  StringRef Buffer = SourceMgr.getBufferData(ID);
  Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID), LangOpts,
                      Buffer.begin(), Buffer.begin() + Offset, Buffer.end()));
  Lex->SetKeepWhitespaceMode(true);
  TrailingWhitespace = 0;
}