[llvm-branch-commits] [mlir] a7eab64 - [mlir] Change ABI breaking use of NDEBUG to LLVM_ENABLE_ABI_BREAKING_CHECKS
Author: Markus Böck
Date: 2021-08-03T09:14:57+02:00
New Revision: a7eab64b7c45d40154ec6f04d3a1f38b13dacfab
URL:
https://github.com/llvm/llvm-project/commit/a7eab64b7c45d40154ec6f04d3a1f38b13dacfab
DIFF:
https://github.com/llvm/llvm-project/commit/a7eab64b7c45d40154ec6f04d3a1f38b13dacfab.diff
LOG: [mlir] Change ABI breaking use of NDEBUG to LLVM_ENABLE_ABI_BREAKING_CHECKS
The `DataLayout` class currently contains the member `layoutStack` which is
hidden behind a preprocessor region dependent on the NDEBUG macro. Code-wise
this makes a lot of sense, as the `layoutStack` is used for extra assertions
that users will want when compiling a debug build.
It however has the uncomfortable consequence of leading to a different ABI in
Debug and Release builds. This I think is a bit annoying for downstream
projects and others as they may want to build against a stable Release of MLIR
in Release mode, but be able to debug their own project depending on MLIR.
This patch changes the related uses of NDEBUG to
LLVM_ENABLE_ABI_BREAKING_CHECKS. As the macro is computed at configure time of
LLVM, it may not change based on compiler settings of downstream projects
like NDEBUG would.
Differential Revision: https://reviews.llvm.org/D107227
(cherry picked from commit 97335ad13fd4f231a75163a1e5c232aed5efe921)
Added:
Modified:
mlir/include/mlir/Interfaces/DataLayoutInterfaces.h
mlir/lib/Interfaces/DataLayoutInterfaces.cpp
Removed:
diff --git a/mlir/include/mlir/Interfaces/DataLayoutInterfaces.h
b/mlir/include/mlir/Interfaces/DataLayoutInterfaces.h
index 0633eb341f11e..84fee54086539 100644
--- a/mlir/include/mlir/Interfaces/DataLayoutInterfaces.h
+++ b/mlir/include/mlir/Interfaces/DataLayoutInterfaces.h
@@ -163,7 +163,7 @@ class DataLayout {
/// Combined layout spec at the given scope.
const DataLayoutSpecInterface originalLayout;
-#ifndef NDEBUG
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
/// List of enclosing layout specs.
SmallVector<DataLayoutSpecInterface> layoutStack;
#endif
diff --git a/mlir/lib/Interfaces/DataLayoutInterfaces.cpp
b/mlir/lib/Interfaces/DataLayoutInterfaces.cpp
index 6244ac13ead95..5a63b287f5f41 100644
--- a/mlir/lib/Interfaces/DataLayoutInterfaces.cpp
+++ b/mlir/lib/Interfaces/DataLayoutInterfaces.cpp
@@ -269,7 +269,7 @@ mlir::DataLayout::DataLayout() : DataLayout(ModuleOp()) {}
mlir::DataLayout::DataLayout(DataLayoutOpInterface op)
: originalLayout(getCombinedDataLayout(op)), scope(op) {
-#ifndef NDEBUG
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
checkMissingLayout(originalLayout, op);
collectParentLayouts(op, layoutStack);
#endif
@@ -277,7 +277,7 @@ mlir::DataLayout::DataLayout(DataLayoutOpInterface op)
mlir::DataLayout::DataLayout(ModuleOp op)
: originalLayout(getCombinedDataLayout(op)), scope(op) {
-#ifndef NDEBUG
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
checkMissingLayout(originalLayout, op);
collectParentLayouts(op, layoutStack);
#endif
@@ -297,7 +297,7 @@ mlir::DataLayout mlir::DataLayout::closest(Operation *op) {
}
void mlir::DataLayout::checkValid() const {
-#ifndef NDEBUG
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
SmallVector<DataLayoutSpecInterface> specs;
collectParentLayouts(scope, specs);
assert(specs.size() == layoutStack.size() &&
___
llvm-branch-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
[llvm-branch-commits] [clang] 4ae3353 - [clang] fix concepts crash on substitution failure during normalization
Author: Matheus Izvekov
Date: 2021-08-03T12:53:18+02:00
New Revision: 4ae33534bd8c52c4f054bb4676632c37f49f56b2
URL:
https://github.com/llvm/llvm-project/commit/4ae33534bd8c52c4f054bb4676632c37f49f56b2
DIFF:
https://github.com/llvm/llvm-project/commit/4ae33534bd8c52c4f054bb4676632c37f49f56b2.diff
LOG: [clang] fix concepts crash on substitution failure during normalization
When substitution failed on the first constrained template argument (but
only the first), we would assert / crash. Checking for failure was only
being performed from the second constraint on.
This changes it so the checking is performed in that case,
and the code is also now simplified a little bit to hopefully
avoid this confusion.
Signed-off-by: Matheus Izvekov
Reviewed By: rsmith
Differential Revision: https://reviews.llvm.org/D106907
Added:
Modified:
clang/lib/Sema/SemaConcept.cpp
clang/test/CXX/temp/temp.constr/temp.constr.normal/p1.cpp
Removed:
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index f2c70d0a56efb..931c9e3e2738d 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -742,22 +742,15 @@ Optional
NormalizedConstraint::fromConstraintExprs(Sema &S, NamedDecl *D,
ArrayRef E) {
assert(E.size() != 0);
- auto First = fromConstraintExpr(S, D, E[0]);
- if (E.size() == 1)
-return First;
- auto Second = fromConstraintExpr(S, D, E[1]);
- if (!Second)
+ auto Conjunction = fromConstraintExpr(S, D, E[0]);
+ if (!Conjunction)
return None;
- llvm::Optional Conjunction;
- Conjunction.emplace(S.Context, std::move(*First), std::move(*Second),
- CCK_Conjunction);
- for (unsigned I = 2; I < E.size(); ++I) {
+ for (unsigned I = 1; I < E.size(); ++I) {
auto Next = fromConstraintExpr(S, D, E[I]);
if (!Next)
- return llvm::Optional{};
-NormalizedConstraint NewConjunction(S.Context, std::move(*Conjunction),
+ return None;
+*Conjunction = NormalizedConstraint(S.Context, std::move(*Conjunction),
std::move(*Next), CCK_Conjunction);
-*Conjunction = std::move(NewConjunction);
}
return Conjunction;
}
diff --git a/clang/test/CXX/temp/temp.constr/temp.constr.normal/p1.cpp
b/clang/test/CXX/temp/temp.constr/temp.constr.normal/p1.cpp
index 153d4a56bea31..2134968101470 100644
--- a/clang/test/CXX/temp/temp.constr/temp.constr.normal/p1.cpp
+++ b/clang/test/CXX/temp/temp.constr/temp.constr.normal/p1.cpp
@@ -67,3 +67,18 @@ namespace non_type_pack {
static_assert((foo<1>(), true));
}
+
+namespace PR47174 {
+// This checks that we don't crash with a failed substitution on the first
constrained argument when
+// performing normalization.
+template
+requires true struct S3; // expected-note {{template is declared here}}
+template
+requires true struct S3; // expected-error {{class template partial
specialization is not more specialized than the primary template}}
+
+// Same as above, for the second position (but this was already working).
+template
+requires true struct S4; // expected-note {{template is declared here}}
+template
+requires true struct S4; // expected-error {{class template partial
specialization is not more specialized than the primary template}}
+} // namespace PR47174
___
llvm-branch-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
[llvm-branch-commits] [lldb] 45d9885 - [lldb] Add "memory tag write" command
Author: David Spickett
Date: 2021-08-03T12:25:35Z
New Revision: 45d98857f8f979552d7e1e7e781d6ca92a5e9a48
URL:
https://github.com/llvm/llvm-project/commit/45d98857f8f979552d7e1e7e781d6ca92a5e9a48
DIFF:
https://github.com/llvm/llvm-project/commit/45d98857f8f979552d7e1e7e781d6ca92a5e9a48.diff
LOG: [lldb] Add "memory tag write" command
This adds a new command for writing memory tags.
It is based on the existing "memory write" command.
Syntax: memory tag write <address-expression> <value> [<value> [...]]
(where "value" is a tag value)
(lldb) memory tag write mte_buf 1 2
(lldb) memory tag read mte_buf mte_buf+32
Logical tag: 0x0
Allocation tags:
[0xf7ff9000, 0xf7ff9010): 0x1
[0xf7ff9010, 0xf7ff9020): 0x2
The range you are writing to will be calculated by
aligning the address down to a granule boundary then
adding as many granules as there are tags.
(a repeating mode with an end address will be in a follow
up patch)
This is why "memory tag write" uses MakeTaggedRange but has
some extra steps to get this specific behaviour.
The command does all the usual argument validation:
* Address must evaluate
* You must supply at least one tag value
(though lldb-server would just treat that as a nop anyway)
* Those tag values must be valid for your tagging scheme
(e.g. for MTE the value must be > 0 and < 0xf)
* The calculated range must be memory tagged
That last error will show you the final range, not just
the start address you gave the command.
(lldb) memory tag write mte_buf_2+page_size-16 6
(lldb) memory tag write mte_buf_2+page_size-16 6 7
error: Address range 0xf7ffaff0:0xf7ffb010 is not in a memory tagged
region
(note that we do not check if the region is writeable
since lldb can write to it anyway)
The read and write tag tests have been merged into
a single set of "tag access" tests as their test programs would
have been almost identical.
(also I have renamed some of the buffers to better
show what each one is used for)
Reviewed By: omjavaid
Differential Revision: https://reviews.llvm.org/D105182
(cherry picked from commit 6a7a2ee8161da84d9a58a88b497b0b47c8df99f3)
Added:
lldb/test/API/linux/aarch64/mte_tag_access/Makefile
lldb/test/API/linux/aarch64/mte_tag_access/TestAArch64LinuxMTEMemoryTagAccess.py
lldb/test/API/linux/aarch64/mte_tag_access/main.c
Modified:
lldb/source/Commands/CommandObjectMemoryTag.cpp
lldb/test/API/functionalities/memory/tag/TestMemoryTag.py
Removed:
lldb/test/API/linux/aarch64/mte_tag_read/Makefile
lldb/test/API/linux/aarch64/mte_tag_read/TestAArch64LinuxMTEMemoryTagRead.py
lldb/test/API/linux/aarch64/mte_tag_read/main.c
diff --git a/lldb/source/Commands/CommandObjectMemoryTag.cpp
b/lldb/source/Commands/CommandObjectMemoryTag.cpp
index 1dfb32a92f3bb..7c244befe0da4 100644
--- a/lldb/source/Commands/CommandObjectMemoryTag.cpp
+++ b/lldb/source/Commands/CommandObjectMemoryTag.cpp
@@ -115,6 +115,114 @@ class CommandObjectMemoryTagRead : public
CommandObjectParsed {
}
};
+#define LLDB_OPTIONS_memory_tag_write
+#include "CommandOptions.inc"
+
+class CommandObjectMemoryTagWrite : public CommandObjectParsed {
+public:
+ CommandObjectMemoryTagWrite(CommandInterpreter &interpreter)
+ : CommandObjectParsed(interpreter, "tag",
+"Write memory tags starting from the granule that "
+"contains the given address.",
+nullptr,
+eCommandRequiresTarget | eCommandRequiresProcess |
+eCommandProcessMustBePaused) {
+// Address
+m_arguments.push_back(
+
CommandArgumentEntry{CommandArgumentData(eArgTypeAddressOrExpression)});
+// One or more tag values
+m_arguments.push_back(CommandArgumentEntry{
+CommandArgumentData(eArgTypeValue, eArgRepeatPlus)});
+ }
+
+ ~CommandObjectMemoryTagWrite() override = default;
+
+protected:
+ bool DoExecute(Args &command, CommandReturnObject &result) override {
+if (command.GetArgumentCount() < 2) {
+ result.AppendError("wrong number of arguments; expected "
+ " [ [...]]");
+ return false;
+}
+
+Status error;
+addr_t start_addr = OptionArgParser::ToAddress(
+&m_exe_ctx, command[0].ref(), LLDB_INVALID_ADDRESS, &error);
+if (start_addr == LLDB_INVALID_ADDRESS) {
+ result.AppendErrorWithFormatv("Invalid address expression, {0}",
+error.AsCString());
+ return false;
+}
+
+command.Shift(); // shift off start address
+
+std::vector tags;
+for (auto &entry : command) {
+ lldb::addr_t tag_value;
+ // getAsInteger returns true on failure
+ if (entry.ref().getAsInteger(0, tag_value)) {
+result.AppendErrorWithFormat(
+"'%s' is not a valid unsigned decimal string value.\n",
+entry.c_str());
+return
[llvm-branch-commits] [lldb] dc00e19 - [lldb] Add "memory tag write" --end-addr option
Author: David Spickett
Date: 2021-08-03T12:25:35Z
New Revision: dc00e1915e66533de4b9c778528e8dd7b4922a22
URL:
https://github.com/llvm/llvm-project/commit/dc00e1915e66533de4b9c778528e8dd7b4922a22
DIFF:
https://github.com/llvm/llvm-project/commit/dc00e1915e66533de4b9c778528e8dd7b4922a22.diff
LOG: [lldb] Add "memory tag write" --end-addr option
The default mode of "memory tag write" is to calculate the
range from the start address and the number of tags given.
(just like "memory write" does)
(lldb) memory tag write mte_buf 1 2
(lldb) memory tag read mte_buf mte_buf+48
Logical tag: 0x0
Allocation tags:
[0xf7ff9000, 0xf7ff9010): 0x1
[0xf7ff9010, 0xf7ff9020): 0x2
[0xf7ff9020, 0xf7ff9030): 0x0
This new option allows you to set an end address and have
the tags repeat until that point.
(lldb) memory tag write mte_buf 1 2 --end-addr mte_buf+64
(lldb) memory tag read mte_buf mte_buf+80
Logical tag: 0x0
Allocation tags:
[0xf7ff9000, 0xf7ff9010): 0x1
[0xf7ff9010, 0xf7ff9020): 0x2
[0xf7ff9020, 0xf7ff9030): 0x1
[0xf7ff9030, 0xf7ff9040): 0x2
[0xf7ff9040, 0xf7ff9050): 0x0
This is implemented using the QMemTags packet previously
added. We skip validating the number of tags in lldb and send
them on to lldb-server, which repeats them as needed.
Apart from the number of tags, all the other client side checks
remain. Tag values, memory range must be tagged, etc.
Reviewed By: omjavaid
Differential Revision: https://reviews.llvm.org/D105183
(cherry picked from commit 6eded00e0c6b4e06225df74292c078030556b8ce)
Added:
Modified:
lldb/source/Commands/CommandObjectMemoryTag.cpp
lldb/source/Commands/Options.td
lldb/test/API/linux/aarch64/mte_tag_access/TestAArch64LinuxMTEMemoryTagAccess.py
Removed:
diff --git a/lldb/source/Commands/CommandObjectMemoryTag.cpp
b/lldb/source/Commands/CommandObjectMemoryTag.cpp
index 7c244befe0da4..76296bf4b49af 100644
--- a/lldb/source/Commands/CommandObjectMemoryTag.cpp
+++ b/lldb/source/Commands/CommandObjectMemoryTag.cpp
@@ -7,8 +7,11 @@
//===--===//
#include "CommandObjectMemoryTag.h"
+#include "lldb/Host/OptionParser.h"
#include "lldb/Interpreter/CommandReturnObject.h"
#include "lldb/Interpreter/OptionArgParser.h"
+#include "lldb/Interpreter/OptionGroupFormat.h"
+#include "lldb/Interpreter/OptionValueString.h"
#include "lldb/Target/Process.h"
using namespace lldb;
@@ -120,23 +123,64 @@ class CommandObjectMemoryTagRead : public
CommandObjectParsed {
class CommandObjectMemoryTagWrite : public CommandObjectParsed {
public:
+ class OptionGroupTagWrite : public OptionGroup {
+ public:
+OptionGroupTagWrite() : OptionGroup(), m_end_addr(LLDB_INVALID_ADDRESS) {}
+
+~OptionGroupTagWrite() override = default;
+
+llvm::ArrayRef GetDefinitions() override {
+ return llvm::makeArrayRef(g_memory_tag_write_options);
+}
+
+Status SetOptionValue(uint32_t option_idx, llvm::StringRef option_value,
+ ExecutionContext *execution_context) override {
+ Status status;
+ const int short_option =
+ g_memory_tag_write_options[option_idx].short_option;
+
+ switch (short_option) {
+ case 'e':
+m_end_addr = OptionArgParser::ToAddress(execution_context,
option_value,
+LLDB_INVALID_ADDRESS, &status);
+break;
+ default:
+llvm_unreachable("Unimplemented option");
+ }
+
+ return status;
+}
+
+void OptionParsingStarting(ExecutionContext *execution_context) override {
+ m_end_addr = LLDB_INVALID_ADDRESS;
+}
+
+lldb::addr_t m_end_addr;
+ };
+
CommandObjectMemoryTagWrite(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "tag",
"Write memory tags starting from the granule that "
"contains the given address.",
nullptr,
eCommandRequiresTarget | eCommandRequiresProcess |
-eCommandProcessMustBePaused) {
+eCommandProcessMustBePaused),
+m_option_group(), m_tag_write_options() {
// Address
m_arguments.push_back(
CommandArgumentEntry{CommandArgumentData(eArgTypeAddressOrExpression)});
// One or more tag values
m_arguments.push_back(CommandArgumentEntry{
CommandArgumentData(eArgTypeValue, eArgRepeatPlus)});
+
+m_option_group.Append(&m_tag_write_options);
+m_option_group.Finalize();
}
~CommandObjectMemoryTagWrite() override = default;
+ Options *GetOptions() override { return &m_option_group; }
+
protected:
bool DoExecute(Args &command, CommandReturnObject &result) override {
if (command.GetArgumentCount() < 2) {
@@
[llvm-branch-commits] [lldb] bc0cc10 - [lldb][AArch64] Annotate synchronous tag faults
Author: David Spickett
Date: 2021-08-03T12:25:35Z
New Revision: bc0cc109dfa744090f7ba35fe91d5967f0ed4fc7
URL:
https://github.com/llvm/llvm-project/commit/bc0cc109dfa744090f7ba35fe91d5967f0ed4fc7
DIFF:
https://github.com/llvm/llvm-project/commit/bc0cc109dfa744090f7ba35fe91d5967f0ed4fc7.diff
LOG: [lldb][AArch64] Annotate synchronous tag faults
In the latest Linux kernels synchronous tag faults
include the tag bits in their address.
This change adds logical and allocation tags to the
description of synchronous tag faults.
(asynchronous faults have no address)
Process 1626 stopped
* thread #1, name = 'a.out', stop reason = signal SIGSEGV: sync tag check fault
(fault address: 0x900f7ff9010 logical tag: 0x9 allocation tag: 0x0)
This extends the existing description and will
show as much as it can on the rare occasion something
fails.
This change supports AArch64 MTE only but other
architectures could be added by extending the
switch at the start of AnnotateSyncTagCheckFault.
The rest of the function is generic code.
Tests have been added for synchronous and asynchronous
MTE faults.
Reviewed By: omjavaid
Differential Revision: https://reviews.llvm.org/D105178
(cherry picked from commit d510b5f199d6e7a3062b5a6ea43181c4cc00a605)
Added:
lldb/test/API/linux/aarch64/mte_tag_faults/Makefile
lldb/test/API/linux/aarch64/mte_tag_faults/TestAArch64LinuxMTEMemoryTagFaults.py
lldb/test/API/linux/aarch64/mte_tag_faults/main.c
Modified:
lldb/source/Plugins/Process/Linux/NativeThreadLinux.cpp
lldb/source/Plugins/Process/Linux/NativeThreadLinux.h
Removed:
diff --git a/lldb/source/Plugins/Process/Linux/NativeThreadLinux.cpp
b/lldb/source/Plugins/Process/Linux/NativeThreadLinux.cpp
index d8ba5415a983e..a7e4e9b13ff0a 100644
--- a/lldb/source/Plugins/Process/Linux/NativeThreadLinux.cpp
+++ b/lldb/source/Plugins/Process/Linux/NativeThreadLinux.cpp
@@ -26,6 +26,7 @@
#include "llvm/ADT/SmallString.h"
#include "Plugins/Process/POSIX/CrashReason.h"
+#include "Plugins/Process/Utility/MemoryTagManagerAArch64MTE.h"
#include
// Try to define a macro to encapsulate the tgkill syscall
@@ -299,11 +300,69 @@ void NativeThreadLinux::SetStoppedBySignal(uint32_t signo,
? CrashReason::eInvalidAddress
: GetCrashReason(*info);
m_stop_description = GetCrashReasonString(reason, *info);
+
+ if (reason == CrashReason::eSyncTagCheckFault) {
+AnnotateSyncTagCheckFault(info);
+ }
+
break;
}
}
}
+void NativeThreadLinux::AnnotateSyncTagCheckFault(const siginfo_t *info) {
+ int32_t allocation_tag_type = 0;
+ switch (GetProcess().GetArchitecture().GetMachine()) {
+ // aarch64_32 deliberately not here because there's no 32 bit MTE
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+allocation_tag_type = MemoryTagManagerAArch64MTE::eMTE_allocation;
+break;
+ default:
+return;
+ }
+
+ auto details =
+ GetRegisterContext().GetMemoryTaggingDetails(allocation_tag_type);
+ if (!details) {
+llvm::consumeError(details.takeError());
+return;
+ }
+
+ // We assume that the stop description is currently:
+ // signal SIGSEGV: sync tag check fault (fault address: )
+ // Remove the closing )
+ m_stop_description.pop_back();
+
+ std::stringstream ss;
+ lldb::addr_t fault_addr = reinterpret_cast(info->si_addr);
+ std::unique_ptr manager(std::move(details->manager));
+
+ ss << " logical tag: 0x" << std::hex << manager->GetLogicalTag(fault_addr);
+
+ std::vector allocation_tag_data;
+ // The fault address may not be granule aligned. ReadMemoryTags will granule
+ // align any range you give it, potentially making it larger.
+ // To prevent this set len to 1. This always results in a range that is at
+ // most 1 granule in size and includes fault_addr.
+ Status status = GetProcess().ReadMemoryTags(allocation_tag_type, fault_addr,
+ 1, allocation_tag_data);
+
+ if (status.Success()) {
+llvm::Expected> allocation_tag =
+manager->UnpackTagsData(allocation_tag_data, 1);
+if (allocation_tag) {
+ ss << " allocation tag: 0x" << std::hex << allocation_tag->front() <<
")";
+} else {
+ llvm::consumeError(allocation_tag.takeError());
+ ss << ")";
+}
+ } else
+ss << ")";
+
+ m_stop_description += ss.str();
+}
+
bool NativeThreadLinux::IsStopped(int *signo) {
if (!StateIsStoppedState(m_state, false))
return false;
diff --git a/lldb/source/Plugins/Process/Linux/NativeThreadLinux.h
b/lldb/source/Plugins/Process/Linux/NativeThreadLinux.h
index f03de755c7bf3..c18665b0107ef 100644
--- a/lldb/source/Plugins/Process/Linux/NativeThreadLinux.h
+++ b/lldb/source/Plugins/Process/Linux/NativeThreadLinux.h
@@ -102,6 +102,11 @@ class NativeThreadLinux : public NativeThreadProtocol {
void SetStopped();
+ /// Extend m_stop_descri
[llvm-branch-commits] [lldb] c47d79b - [lldb] Correct format of qMemTags type field
Author: David Spickett
Date: 2021-08-03T12:25:36Z
New Revision: c47d79b3b7a7cab21020ec69b66b5f48823f513f
URL:
https://github.com/llvm/llvm-project/commit/c47d79b3b7a7cab21020ec69b66b5f48823f513f
DIFF:
https://github.com/llvm/llvm-project/commit/c47d79b3b7a7cab21020ec69b66b5f48823f513f.diff
LOG: [lldb] Correct format of qMemTags type field
The type field is a signed integer.
(https://sourceware.org/gdb/current/onlinedocs/gdb/General-Query-Packets.html)
However it's not packed in the packet in the way
you might think. For example the type -1 should be:
qMemTags:<address>,<length>:ffffffff
Instead of:
qMemTags:<address>,<length>:-1
This change makes lldb-server's parsing more strict
and adds more tests to check that we handle negative types
correctly in lldb and lldb-server.
We only support one tag type value at this point,
for AArch64 MTE, which is positive. So this doesn't change
any of those interactions. It just brings us in line with GDB.
Also check that the test target has MTE. Previously
we just checked that we were AArch64 with a toolchain
that supports MTE.
Finally, update the tag type check for QMemTags to use
the same conversion steps that qMemTags now does.
Using static_cast can invoke UB and though we do do a limit
check to avoid this, I think it's clearer with the new method.
Reviewed By: omjavaid
Differential Revision: https://reviews.llvm.org/D104914
(cherry picked from commit 555cd03193c9c098d787bec93eadfe43b179db9c)
Added:
Modified:
lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
lldb/test/API/tools/lldb-server/memory-tagging/TestGdbRemoteMemoryTagging.py
lldb/unittests/Process/gdb-remote/GDBRemoteCommunicationClientTest.cpp
Removed:
diff --git
a/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
b/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
index 5e69b5793f9f0..8e1f6bc29a6f8 100644
---
a/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
+++
b/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
@@ -3474,15 +3474,31 @@ GDBRemoteCommunicationServerLLGS::Handle_qMemTags(
if (packet.GetBytesLeft() < 1 || packet.GetChar() != ':')
return SendIllFormedResponse(packet, invalid_type_err);
- int32_t type =
- packet.GetS32(std::numeric_limits::max(), /*base=*/16);
- if (type == std::numeric_limits::max() ||
+ // Type is a signed integer but packed into the packet as its raw bytes.
+ // However, our GetU64 uses strtoull which allows +/-. We do not want this.
+ const char *first_type_char = packet.Peek();
+ if (first_type_char && (*first_type_char == '+' || *first_type_char == '-'))
+return SendIllFormedResponse(packet, invalid_type_err);
+
+ // Extract type as unsigned then cast to signed.
+ // Using a uint64_t here so that we have some value outside of the 32 bit
+ // range to use as the invalid return value.
+ uint64_t raw_type =
+ packet.GetU64(std::numeric_limits::max(), /*base=*/16);
+
+ if ( // Make sure the cast below would be valid
+ raw_type > std::numeric_limits::max() ||
// To catch inputs like "123aardvark" that will parse but clearly aren't
// valid in this case.
packet.GetBytesLeft()) {
return SendIllFormedResponse(packet, invalid_type_err);
}
+ // First narrow to 32 bits otherwise the copy into type would take
+ // the wrong 4 bytes on big endian.
+ uint32_t raw_type_32 = raw_type;
+ int32_t type = reinterpret_cast(raw_type_32);
+
StreamGDBRemote response;
std::vector tags;
Status error = m_current_process->ReadMemoryTags(type, addr, length, tags);
@@ -3552,7 +3568,11 @@ GDBRemoteCommunicationServerLLGS::Handle_QMemTags(
packet.GetU64(std::numeric_limits::max(), /*base=*/16);
if (raw_type > std::numeric_limits::max())
return SendIllFormedResponse(packet, invalid_type_err);
- int32_t type = static_cast(raw_type);
+
+ // First narrow to 32 bits. Otherwise the copy below would get the wrong
+ // 4 bytes on big endian.
+ uint32_t raw_type_32 = raw_type;
+ int32_t type = reinterpret_cast(raw_type_32);
// Tag data
if (packet.GetBytesLeft() < 1 || packet.GetChar() != ':')
diff --git
a/lldb/test/API/tools/lldb-server/memory-tagging/TestGdbRemoteMemoryTagging.py
b/lldb/test/API/tools/lldb-server/memory-tagging/TestGdbRemoteMemoryTagging.py
index d292f20993c66..983e1aa54b327 100644
---
a/lldb/test/API/tools/lldb-server/memory-tagging/TestGdbRemoteMemoryTagging.py
+++
b/lldb/test/API/tools/lldb-server/memory-tagging/TestGdbRemoteMemoryTagging.py
@@ -105,13 +105,20 @@ def test_qMemTags_packets(self):
self.check_tag_read("{:x},10:".format(buf_address), "E03")
# Types we don't support
self.check_tag_read("{:x},10:FF".format(buf_address), "E01")
+# Types can also be negative, -1 in this case.
+# So this is E01 for not
[llvm-branch-commits] [lldb] 0b8dc91 - [lldb][AArch64] Mark mismatched tags in tag read output
Author: David Spickett
Date: 2021-08-03T12:25:36Z
New Revision: 0b8dc914e1bd02ed34b4b50d3de81f2162c4402f
URL:
https://github.com/llvm/llvm-project/commit/0b8dc914e1bd02ed34b4b50d3de81f2162c4402f
DIFF:
https://github.com/llvm/llvm-project/commit/0b8dc914e1bd02ed34b4b50d3de81f2162c4402f.diff
LOG: [lldb][AArch64] Mark mismatched tags in tag read output
The "memory tag read" command will now tell you
when the allocation tag read does not match the logical
tag.
(lldb) memory tag read mte_buf+(8*16) mte_buf+(8*16)+48
Logical tag: 0x9
Allocation tags:
[0xf7ff7080, 0xf7ff7090): 0x8 (mismatch)
[0xf7ff7090, 0xf7ff70a0): 0x9
[0xf7ff70a0, 0xf7ff70b0): 0xa (mismatch)
The logical tag will be taken from the start address
so the end could have a different tag. You could for example
read from ptr_to_array_1 to ptr_to_array_2. Where the latter
is tagged differently to prevent buffer overflow.
The existing command will read 1 granule if you leave
off the end address. So you can also use it as a quick way
to check a single location.
(lldb) memory tag read mte_buf
Logical tag: 0x9
Allocation tags:
[0xf7ff7000, 0xf7ff7010): 0x0 (mismatch)
This avoids the need for a separate "memory tag check" command.
Reviewed By: omjavaid
Differential Revision: https://reviews.llvm.org/D106880
(cherry picked from commit 98b5659b53ff93f3b68e48ea28ec54081196ae3b)
Added:
Modified:
lldb/source/Commands/CommandObjectMemoryTag.cpp
lldb/test/API/linux/aarch64/mte_tag_access/TestAArch64LinuxMTEMemoryTagAccess.py
Removed:
diff --git a/lldb/source/Commands/CommandObjectMemoryTag.cpp
b/lldb/source/Commands/CommandObjectMemoryTag.cpp
index 76296bf4b49af..840f81719d7dc 100644
--- a/lldb/source/Commands/CommandObjectMemoryTag.cpp
+++ b/lldb/source/Commands/CommandObjectMemoryTag.cpp
@@ -24,7 +24,8 @@ class CommandObjectMemoryTagRead : public CommandObjectParsed
{
public:
CommandObjectMemoryTagRead(CommandInterpreter &interpreter)
: CommandObjectParsed(interpreter, "tag",
-"Read memory tags for the given range of memory.",
+"Read memory tags for the given range of memory."
+" Mismatched tags will be marked.",
nullptr,
eCommandRequiresTarget | eCommandRequiresProcess |
eCommandProcessMustBePaused) {
@@ -100,16 +101,17 @@ class CommandObjectMemoryTagRead : public
CommandObjectParsed {
return false;
}
-result.AppendMessageWithFormatv("Logical tag: {0:x}",
-tag_manager->GetLogicalTag(start_addr));
+lldb::addr_t logical_tag = tag_manager->GetLogicalTag(start_addr);
+result.AppendMessageWithFormatv("Logical tag: {0:x}", logical_tag);
result.AppendMessage("Allocation tags:");
addr_t addr = tagged_range->GetRangeBase();
for (auto tag : *tags) {
addr_t next_addr = addr + tag_manager->GetGranuleSize();
// Showing tagged adresses here until we have non address bit handling
- result.AppendMessageWithFormatv("[{0:x}, {1:x}): {2:x}", addr, next_addr,
- tag);
+ result.AppendMessageWithFormatv("[{0:x}, {1:x}): {2:x}{3}", addr,
+ next_addr, tag,
+ logical_tag == tag ? "" : " (mismatch)");
addr = next_addr;
}
diff --git
a/lldb/test/API/linux/aarch64/mte_tag_access/TestAArch64LinuxMTEMemoryTagAccess.py
b/lldb/test/API/linux/aarch64/mte_tag_access/TestAArch64LinuxMTEMemoryTagAccess.py
index 58ceb9c7348a9..b0ce9c1f55c44 100644
---
a/lldb/test/API/linux/aarch64/mte_tag_access/TestAArch64LinuxMTEMemoryTagAccess.py
+++
b/lldb/test/API/linux/aarch64/mte_tag_access/TestAArch64LinuxMTEMemoryTagAccess.py
@@ -81,20 +81,20 @@ def test_mte_tag_read(self):
self.expect("memory tag read mte_buf",
patterns=["Logical tag: 0x9\n"
"Allocation tags:\n"
- "\[0x[0-9A-Fa-f]+00, 0x[0-9A-Fa-f]+10\): 0x0$"])
+ "\[0x[0-9A-Fa-f]+00, 0x[0-9A-Fa-f]+10\): 0x0 \(mismatch\)$"])
# Range of <1 granule is rounded up to 1 granule
self.expect("memory tag read mte_buf mte_buf+8",
patterns=["Logical tag: 0x9\n"
"Allocation tags:\n"
- "\[0x[0-9A-Fa-f]+00, 0x[0-9A-Fa-f]+10\): 0x0$"])
+ "\[0x[0-9A-Fa-f]+00, 0x[0-9A-Fa-f]+10\): 0x0 \(mismatch\)$"])
# Start address is aligned down, end aligned up
self.expect("memory tag read mte_buf+8 mte_buf+24",
patterns=["Logical tag: 0x9\n"
"Allocation tags:\n"
- "\[0x[0-9A-Fa-f]+00, 0x[0-9A-Fa-f]+10\): 0x0\n"
-
[llvm-branch-commits] [llvm] 11a0a68 - [llvm][Release notes] Add memory tagging support to lldb changes
Author: David Spickett Date: 2021-08-03T12:25:36Z New Revision: 11a0a68f2eb8f419f2faab380c8af73dbea4035c URL: https://github.com/llvm/llvm-project/commit/11a0a68f2eb8f419f2faab380c8af73dbea4035c DIFF: https://github.com/llvm/llvm-project/commit/11a0a68f2eb8f419f2faab380c8af73dbea4035c.diff LOG: [llvm][Release notes] Add memory tagging support to lldb changes Added: Modified: llvm/docs/ReleaseNotes.rst Removed: diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst index ddcdb322386ee..73c0450485d01 100644 --- a/llvm/docs/ReleaseNotes.rst +++ b/llvm/docs/ReleaseNotes.rst @@ -243,6 +243,12 @@ Changes to the LLVM tools Changes to LLDB - +* LLDB now supports debugging programs on AArch64 Linux that use memory tagging (MTE). +* Added ``memory tag read`` and ``memory tag write`` commands. +* The ``memory region`` command will note when a region has memory tagging enabled. +* Synchronous and asynchronous tag faults are recognised. +* Synchronous tag faults have memory tag annotations in addition to the usual fault address. + Changes to Sanitizers - ___ llvm-branch-commits mailing list [email protected] https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
[llvm-branch-commits] [llvm] 9413df3 - [llvm][Release notes] Add AArch64 SVE, PAC and LLDB prebuilt binary
Author: Muhammad Omair Javaid Date: 2021-08-03T20:20:07+05:00 New Revision: 9413df3891f43eb25a9aeb3483a3157e4eeb803a URL: https://github.com/llvm/llvm-project/commit/9413df3891f43eb25a9aeb3483a3157e4eeb803a DIFF: https://github.com/llvm/llvm-project/commit/9413df3891f43eb25a9aeb3483a3157e4eeb803a.diff LOG: [llvm][Release notes] Add AArch64 SVE, PAC and LLDB prebuilt binary This patch updates LLVM release notes to add a announcement about AArch64 SVE, PAC and LLDB prebuilt binary. Added: Modified: llvm/docs/ReleaseNotes.rst Removed: diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst index 73c0450485d0..9f0befb33249 100644 --- a/llvm/docs/ReleaseNotes.rst +++ b/llvm/docs/ReleaseNotes.rst @@ -243,6 +243,12 @@ Changes to the LLVM tools Changes to LLDB - +* LLDB executable is now included in pre-built LLVM binaries. + +* LLDB now includes full featured support for AArch64 SVE register access. + +* LLDB now supports AArch64 Pointer Authentication, allowing stack unwind with signed return address. + * LLDB now supports debugging programs on AArch64 Linux that use memory tagging (MTE). * Added ``memory tag read`` and ``memory tag write`` commands. * The ``memory region`` command will note when a region has memory tagging enabled. ___ llvm-branch-commits mailing list [email protected] https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
[llvm-branch-commits] [llvm] eff062e - [ReleaseNotes] Add scalable matrix extension support to AArch64 changes
Author: Cullen Rhodes Date: 2021-08-03T15:24:36Z New Revision: eff062e5206a5751c51f03f6a6dc35937d1c95c7 URL: https://github.com/llvm/llvm-project/commit/eff062e5206a5751c51f03f6a6dc35937d1c95c7 DIFF: https://github.com/llvm/llvm-project/commit/eff062e5206a5751c51f03f6a6dc35937d1c95c7.diff LOG: [ReleaseNotes] Add scalable matrix extension support to AArch64 changes Reviewed By: sdesmalen Differential Revision: https://reviews.llvm.org/D106853 Added: Modified: llvm/docs/ReleaseNotes.rst Removed: diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst index 9f0befb33249..de442580a149 100644 --- a/llvm/docs/ReleaseNotes.rst +++ b/llvm/docs/ReleaseNotes.rst @@ -77,7 +77,8 @@ Changes to TableGen Changes to the AArch64 Backend -- -* Introduced support for Armv9-A's Realm Management Extension. +* Introduced assembly support for Armv9-A's Realm Management Extension (RME) + and Scalable Matrix Extension (SME). Changes to the ARM Backend -- ___ llvm-branch-commits mailing list [email protected] https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
[llvm-branch-commits] [clang] 7cbe047 - COFF/ELF: Place llvm.global_ctors elements in llvm.used if comdat is used
Author: Fangrui Song
Date: 2021-08-03T17:17:28-07:00
New Revision: 7cbe047bafe8951702011d030b055dfa2ec62ea2
URL:
https://github.com/llvm/llvm-project/commit/7cbe047bafe8951702011d030b055dfa2ec62ea2
DIFF:
https://github.com/llvm/llvm-project/commit/7cbe047bafe8951702011d030b055dfa2ec62ea2.diff
LOG: COFF/ELF: Place llvm.global_ctors elements in llvm.used if comdat is used
On ELF, an SHT_INIT_ARRAY outside a section group is a GC root. The current
codegen abuses SHT_INIT_ARRAY in a section group to mean a GC root.
On PE/COFF, the dynamic initialization for `__declspec(selectany)` in a comdat
can be garbage collected by `-opt:ref`.
Call `addUsedGlobal` for the two cases to fix the abuse/bug.
Reviewed By: rnk
Differential Revision: https://reviews.llvm.org/D106925
(cherry picked from commit 828767f325b5dd0356c5fd90e40a1c047010853e)
Added:
Modified:
clang/lib/CodeGen/CGDeclCXX.cpp
clang/test/CodeGenCXX/microsoft-abi-template-static-init.cpp
clang/test/CodeGenCXX/static-member-variable-explicit-specialization.cpp
Removed:
diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp
index d43fb99550a85..553fedebfe56b 100644
--- a/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -555,7 +555,8 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl
*D,
PrioritizedCXXGlobalInits.size());
PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
} else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
- getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR) {
+ getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR ||
+ D->hasAttr<SelectAnyAttr>()) {
// C++ [basic.start.init]p2:
// Definitions of explicitly specialized class template static data
// members have ordered initialization. Other class template static data
@@ -568,17 +569,18 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl
*D,
// group with the global being initialized. On most platforms, this is a
// minor startup time optimization. In the MS C++ ABI, there are no guard
// variables, so this COMDAT key is required for correctness.
-AddGlobalCtor(Fn, 65535, COMDATKey);
-if (getTarget().getCXXABI().isMicrosoft() && COMDATKey) {
- // In The MS C++, MS add template static data member in the linker
- // drective.
- addUsedGlobal(COMDATKey);
-}
- } else if (D->hasAttr<SelectAnyAttr>()) {
+//
// SelectAny globals will be comdat-folded. Put the initializer into a
// COMDAT group associated with the global, so the initializers get folded
// too.
+
AddGlobalCtor(Fn, 65535, COMDATKey);
+if (COMDATKey && (getTriple().isOSBinFormatELF() ||
+ getTarget().getCXXABI().isMicrosoft())) {
+ // When COMDAT is used on ELF or in the MS C++ ABI, the key must be in
+ // llvm.used to prevent linker GC.
+ addUsedGlobal(COMDATKey);
+}
} else {
I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
if (I == DelayedCXXInitPosition.end()) {
diff --git a/clang/test/CodeGenCXX/microsoft-abi-template-static-init.cpp
b/clang/test/CodeGenCXX/microsoft-abi-template-static-init.cpp
index 3b419c18c0e23..d8d0ed3950803 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-template-static-init.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-template-static-init.cpp
@@ -88,5 +88,4 @@ int foo();
inline int zoo = foo();
inline static int boo = foo();
-
-// CHECK: @llvm.used = appending global [7 x i8*] [i8* bitcast (i32*
@"?x1@selectany_init@@3HA" to i8*), i8* bitcast (i32*
@"?x@?$A@H@explicit_template_instantiation@@2HA" to i8*), i8* bitcast (i32*
@"?ioo@?$X_@H@@2HA" to i8*), i8* getelementptr inbounds (%struct.A, %struct.A*
@"?aoo@S1@@2UA@@A", i32 0, i32 0), i8* bitcast (i32* @"?zoo@@3HA" to i8*), i8*
getelementptr inbounds (%struct.S, %struct.S*
@"?s@?$ExportedTemplate@H@@2US@@A", i32 0, i32 0), i8* bitcast (i32*
@"?x@?$A@H@implicit_template_instantiation@@2HA" to i8*)], section
"llvm.metadata"
+// CHECK: @llvm.used = appending global [8 x i8*] [i8* bitcast (i32*
@"?x@selectany_init@@3HA" to i8*), i8* bitcast (i32* @"?x1@selectany_init@@3HA"
to i8*), i8* bitcast (i32* @"?x@?$A@H@explicit_template_instantiation@@2HA" to
i8*), i8* bitcast (i32* @"?ioo@?$X_@H@@2HA" to i8*), i8* getelementptr inbounds
(%struct.A, %struct.A* @"?aoo@S1@@2UA@@A", i32 0, i32 0), i8* bitcast (i32*
@"?zoo@@3HA" to i8*), i8* getelementptr inbounds (%struct.S, %struct.S*
@"?s@?$ExportedTemplate@H@@2US@@A", i32 0, i32 0), i8* bitcast (i32*
@"?x@?$A@H@implicit_template_instantiation@@2HA" to i8*)], section
"llvm.metadata"
diff --git
a/clang/test/CodeGenCXX/static-member-variable-explicit-specialization.cpp
b/clang/test/CodeGenCXX/static-member-variable-explicit-specializatio
[llvm-branch-commits] [mlir] 143edec - [mlir][tosa] Shape inference for a few remaining easy cases:
Author: Rob Suderman
Date: 2021-08-03T17:20:32-07:00
New Revision: 143edeca6dfe5d9c006cdadb5be1ec4afd483ca3
URL:
https://github.com/llvm/llvm-project/commit/143edeca6dfe5d9c006cdadb5be1ec4afd483ca3
DIFF:
https://github.com/llvm/llvm-project/commit/143edeca6dfe5d9c006cdadb5be1ec4afd483ca3.diff
LOG: [mlir][tosa] Shape inference for a few remaining easy cases:
Handles shape inference for identity, cast, and rescale. These were missed
during the initialy elementwise work. This includes resize shape propagation
which includes both attribute and input type based propagation.
Reviewed By: jpienaar
Differential Revision: https://reviews.llvm.org/D105845
Added:
Modified:
mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
Removed:
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
index f17e13c66a449..7a29350467814 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
@@ -1582,7 +1582,10 @@ def Tosa_ScatterOp : Tosa_Op<"scatter", [
//===--===//
// Operator: resize
//===--===//
-def Tosa_ResizeOp : Tosa_Op<"resize", [NoSideEffect]> {
+def Tosa_ResizeOp : Tosa_Op<"resize", [
+ DeclareOpInterfaceMethods<InferShapedTypeOpInterface,
+   ["inferReturnTypeComponents"]>,
+ NoSideEffect]> {
let summary = "Resize operation, supports various resize/upsample modes";
@@ -1617,7 +1620,9 @@ def Tosa_ResizeOp : Tosa_Op<"resize", [NoSideEffect]> {
//===--===//
// Operator: cast
//===--===//
-def Tosa_CastOp: Tosa_Op<"cast", [NoSideEffect]> {
+def Tosa_CastOp: Tosa_Op<"cast", [NoSideEffect,
+ DeclareOpInterfaceMethods<InferShapedTypeOpInterface,
+   ["inferReturnTypeComponents"]>]> {
let summary = "Cast operation";
@@ -1655,7 +1660,9 @@ def Tosa_CastOp: Tosa_Op<"cast", [NoSideEffect]> {
//===--===//
// Operator: rescale
//===--===//
-def Tosa_RescaleOp: Tosa_Op<"rescale", [NoSideEffect]> {
+def Tosa_RescaleOp: Tosa_Op<"rescale", [NoSideEffect,
+ DeclareOpInterfaceMethods<InferShapedTypeOpInterface,
+   ["inferReturnTypeComponents"]>]> {
let summary = "Tosa rescale operator";
let description = [{
@@ -1723,7 +1730,9 @@ def Tosa_ConstOp : Tosa_Op<"const", [ConstantLike,
NoSideEffect,
//===--===//
// Operator: identity
//===--===//
-def Tosa_IdentityOp: Tosa_Op<"identity", [NoSideEffect]> {
+def Tosa_IdentityOp: Tosa_Op<"identity", [NoSideEffect,
+ DeclareOpInterfaceMethods<InferShapedTypeOpInterface,
+   ["inferReturnTypeComponents"]>]> {
let summary = "Identity operator";
let description = [{
Returns a tensor with the same shape, size, type
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index cfc9220c97501..9ae2e95d146ae 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -345,6 +345,12 @@ static void getI64Values(ArrayAttr arrayAttr,
SmallVector<int64_t> &values) {
}
}
+static void getF64Values(ArrayAttr arrayAttr, SmallVector<double> &values) {
+ for (auto it : arrayAttr) {
+values.push_back(it.cast<FloatAttr>().getValueAsDouble());
+ }
+}
+
LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
@@ -386,13 +392,13 @@ LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
// Copy the Operand's rank.
if (!hasRankedInput)
- outputShape.resize(operandTy.getRank(), -1);
+ outputShape.resize(operandTy.getRank(), ShapedType::kDynamicSize);
// Copy shapes until the dim is non-dynamic.
for (int i = 0, s = operandTy.getRank(); i < s; i++) {
if (i == axis || operandTy.isDynamicDim(i))
continue;
- if (outputShape[i] == -1)
+ if (outputShape[i] == ShapedType::kDynamicSize)
outputShape[i] = operandTy.getDimSize(i);
if (outputShape[i] != operandTy.getDimSize(i))
return failure();
@@ -414,7 +420,7 @@ LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
// We need to know the length of the concatenation axis of all inputs to
// determine the dimension size of the output shape.
if (!operandTy.hasRank() || operandTy.isDynamicDim(axis)) {
- concatDimSize = -1;
+ concatDimSize = ShapedType::kDynamicSize;
break;
}
@@ -437,7 +443,7 @@ LogicalResult
tosa::FullyConnectedOp::inferReturnTypeComponents(
// All shapes are dynamic.
SmallVector<int64_t> outShape;
-
