kbobyrev updated this revision to Diff 294614.
kbobyrev marked an inline comment as not done.
kbobyrev added a comment.

Update the patch. Still WIP; stashing progress. This extracts the preprocessor keyword lookup from IdentifierInfo::getPPKeywordID() into tok::getPPKeywordFromSpelling() and uses it in clangd to produce folding ranges for #if/#endif blocks.
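
For reference, a minimal sketch of how the extracted helper is meant to be called (the example() wrapper and the literals below are illustration only, not part of the patch). The spelling has to be null-terminated, which string literals and std::string buffers both guarantee:

  #include "clang/Basic/TokenKinds.h"
  #include "llvm/ADT/StringRef.h"
  #include <cassert>

  void example() {
    // Directive names map to their PPKeywordKind.
    assert(clang::tok::getPPKeywordFromSpelling("ifdef") ==
           clang::tok::pp_ifdef);
    assert(clang::tok::getPPKeywordFromSpelling("pragma") ==
           clang::tok::pp_pragma);
    // Anything else falls back to pp_not_keyword.
    assert(clang::tok::getPPKeywordFromSpelling("region") ==
           clang::tok::pp_not_keyword);
  }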


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D87831/new/

https://reviews.llvm.org/D87831

Files:
  clang-tools-extra/clangd/SemanticSelection.cpp
  clang-tools-extra/clangd/unittests/SemanticSelectionTests.cpp
  clang/include/clang/Basic/TokenKinds.h
  clang/lib/Basic/IdentifierTable.cpp
  clang/lib/Basic/TokenKinds.cpp

Index: clang/lib/Basic/TokenKinds.cpp
===================================================================
--- clang/lib/Basic/TokenKinds.cpp
+++ clang/lib/Basic/TokenKinds.cpp
@@ -65,3 +65,59 @@
   }
   return false;
 }
+
+tok::PPKeywordKind tok::getPPKeywordFromSpelling(llvm::StringRef Spelling) {
+  // We use a perfect hash function here involving the length of the keyword
+  // and its first and third characters. For preprocessor IDs there are no
+  // collisions (if there were, the switch below would complain about duplicate
+  // case values). Note that this relies on the spelling being null-terminated,
+  // so that reading the third character of 'if' stays in bounds.
+  unsigned Len = Spelling.size();
+  if (Len < 2)
+    return tok::pp_not_keyword;
+  assert(*(Spelling.begin() + Len) == '\0' &&
+         "Spelling must be a null-terminated string.");
+
+#define HASH(LEN, FIRST, THIRD)                                                \
+  (LEN << 5) + (((FIRST - 'a') + (THIRD - 'a')) & 31)
+#define CASE(LEN, FIRST, THIRD, NAME)                                          \
+  case HASH(LEN, FIRST, THIRD):                                                \
+    return memcmp(Spelling.begin(), #NAME, LEN) ? tok::pp_not_keyword          \
+                                                : tok::pp_##NAME
+
+  switch (HASH(Len, Spelling[0], *(Spelling.begin() + 2))) {
+  default:
+    return tok::pp_not_keyword;
+    CASE(2, 'i', '\0', if);
+    CASE(4, 'e', 'i', elif);
+    CASE(4, 'e', 's', else);
+    CASE(4, 'l', 'n', line);
+    CASE(4, 's', 'c', sccs);
+    CASE(5, 'e', 'd', endif);
+    CASE(5, 'e', 'r', error);
+    CASE(5, 'i', 'e', ident);
+    CASE(5, 'i', 'd', ifdef);
+    CASE(5, 'u', 'd', undef);
+
+    CASE(6, 'a', 's', assert);
+    CASE(6, 'd', 'f', define);
+    CASE(6, 'i', 'n', ifndef);
+    CASE(6, 'i', 'p', import);
+    CASE(6, 'p', 'a', pragma);
+
+    CASE(7, 'd', 'f', defined);
+    CASE(7, 'i', 'c', include);
+    CASE(7, 'w', 'r', warning);
+
+    CASE(8, 'u', 'a', unassert);
+    CASE(12, 'i', 'c', include_next);
+
+    CASE(14, '_', 'p', __public_macro);
+
+    CASE(15, '_', 'p', __private_macro);
+
+    CASE(16, '_', 'i', __include_macros);
+#undef CASE
+#undef HASH
+  }
+}
Index: clang/lib/Basic/IdentifierTable.cpp
===================================================================
--- clang/lib/Basic/IdentifierTable.cpp
+++ clang/lib/Basic/IdentifierTable.cpp
@@ -271,54 +271,7 @@
 }
 
 tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
-  // We use a perfect hash function here involving the length of the keyword,
-  // the first and third character.  For preprocessor ID's there are no
-  // collisions (if there were, the switch below would complain about duplicate
-  // case values).  Note that this depends on 'if' being null terminated.
-
-#define HASH(LEN, FIRST, THIRD) \
-  (LEN << 5) + (((FIRST-'a') + (THIRD-'a')) & 31)
-#define CASE(LEN, FIRST, THIRD, NAME) \
-  case HASH(LEN, FIRST, THIRD): \
-    return memcmp(Name, #NAME, LEN) ? tok::pp_not_keyword : tok::pp_ ## NAME
-
-  unsigned Len = getLength();
-  if (Len < 2) return tok::pp_not_keyword;
-  const char *Name = getNameStart();
-  switch (HASH(Len, Name[0], Name[2])) {
-  default: return tok::pp_not_keyword;
-  CASE( 2, 'i', '\0', if);
-  CASE( 4, 'e', 'i', elif);
-  CASE( 4, 'e', 's', else);
-  CASE( 4, 'l', 'n', line);
-  CASE( 4, 's', 'c', sccs);
-  CASE( 5, 'e', 'd', endif);
-  CASE( 5, 'e', 'r', error);
-  CASE( 5, 'i', 'e', ident);
-  CASE( 5, 'i', 'd', ifdef);
-  CASE( 5, 'u', 'd', undef);
-
-  CASE( 6, 'a', 's', assert);
-  CASE( 6, 'd', 'f', define);
-  CASE( 6, 'i', 'n', ifndef);
-  CASE( 6, 'i', 'p', import);
-  CASE( 6, 'p', 'a', pragma);
-
-  CASE( 7, 'd', 'f', defined);
-  CASE( 7, 'i', 'c', include);
-  CASE( 7, 'w', 'r', warning);
-
-  CASE( 8, 'u', 'a', unassert);
-  CASE(12, 'i', 'c', include_next);
-
-  CASE(14, '_', 'p', __public_macro);
-
-  CASE(15, '_', 'p', __private_macro);
-
-  CASE(16, '_', 'i', __include_macros);
-#undef CASE
-#undef HASH
-  }
+  return tok::getPPKeywordFromSpelling(getNameStart());
 }
 
 //===----------------------------------------------------------------------===//
Index: clang/include/clang/Basic/TokenKinds.h
===================================================================
--- clang/include/clang/Basic/TokenKinds.h
+++ clang/include/clang/Basic/TokenKinds.h
@@ -96,6 +96,9 @@
 /// Return true if this is an annotation token representing a pragma.
 bool isPragmaAnnotation(TokenKind K);
 
+/// \p Spelling must be a null-terminated C string.
+PPKeywordKind getPPKeywordFromSpelling(llvm::StringRef Spelling);
+
 } // end namespace tok
 } // end namespace clang
 
Index: clang-tools-extra/clangd/unittests/SemanticSelectionTests.cpp
===================================================================
--- clang-tools-extra/clangd/unittests/SemanticSelectionTests.cpp
+++ clang-tools-extra/clangd/unittests/SemanticSelectionTests.cpp
@@ -234,6 +234,23 @@
   }
 }
 
+TEST(FoldingRanges, Preprocessor) {
+  const char *Tests[] = {
+      R"cpp(
+        [[#if true
+        void foo();
+        #endif]]
+      )cpp",
+  };
+  for (const char *Test : Tests) {
+    auto T = Annotations(Test);
+    auto AST = TestTU::withCode(T.code()).build();
+    EXPECT_THAT(gatherFoldingRanges(llvm::cantFail(getFoldingRanges(AST))),
+                UnorderedElementsAreArray(T.ranges()))
+        << Test;
+  }
+}
+
 } // namespace
 } // namespace clangd
 } // namespace clang
Index: clang-tools-extra/clangd/SemanticSelection.cpp
===================================================================
--- clang-tools-extra/clangd/SemanticSelection.cpp
+++ clang-tools-extra/clangd/SemanticSelection.cpp
@@ -5,6 +5,7 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
+
 #include "SemanticSelection.h"
 #include "FindSymbols.h"
 #include "ParsedAST.h"
@@ -13,8 +14,11 @@
 #include "SourceCode.h"
 #include "clang/AST/DeclBase.h"
 #include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/TokenKinds.h"
+#include "clang/Tooling/Syntax/Tokens.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/Support/Error.h"
+#include <stack>
 
 namespace clang {
 namespace clangd {
@@ -41,6 +45,39 @@
     collectFoldingRanges(Child, Result);
 }
 
+// Compute folding ranges for preprocessor #if/#endif blocks by scanning the
+// spelled tokens of the main file. Nested blocks are matched with a stack.
+void collectPreprocessorFoldingRanges(const syntax::TokenBuffer &TokenBuf,
+                                      const SourceManager &SM,
+                                      std::vector<FoldingRange> &Result) {
+  const auto Tokens = TokenBuf.spelledTokens(SM.getMainFileID());
+  std::stack<SourceLocation> FoldingRangeBeginnings;
+  for (size_t I = 1; I < Tokens.size(); ++I) {
+    // Only directives introduced by a '#' token are interesting.
+    if (Tokens[I - 1].kind() != tok::hash)
+      continue;
+    const auto Keyword =
+        tok::getPPKeywordFromSpelling(Tokens[I].text(SM).str());
+    if (Keyword == tok::pp_if) {
+      FoldingRangeBeginnings.push(Tokens[I - 1].location());
+    } else if (Keyword == tok::pp_endif && !FoldingRangeBeginnings.empty()) {
+      const SourceLocation StartLoc = FoldingRangeBeginnings.top();
+      FoldingRangeBeginnings.pop();
+      const SourceLocation EndLoc = Tokens[I].endLocation();
+      // FoldingRange positions are zero-based, so convert through Position
+      // rather than using one-based spelling line/column numbers.
+      const Position Start = sourceLocToPosition(SM, StartLoc);
+      const Position End = sourceLocToPosition(SM, EndLoc);
+      FoldingRange NextRange;
+      NextRange.startLine = Start.line;
+      NextRange.startCharacter = Start.character;
+      NextRange.endLine = End.line;
+      NextRange.endCharacter = End.character;
+      Result.push_back(NextRange);
+    }
+  }
+}
+
 } // namespace
 
 llvm::Expected<SelectionRange> getSemanticRanges(ParsedAST &AST, Position Pos) {
@@ -113,6 +150,8 @@
   std::vector<FoldingRange> Result;
   for (const auto &Symbol : *DocumentSymbols)
     collectFoldingRanges(Symbol, Result);
+  collectPreprocessorFoldingRanges(AST.getTokens(), AST.getSourceManager(),
+                                   Result);
   return Result;
 }
 