================
@@ -1089,6 +1089,116 @@ int64_t ValueObject::GetValueAsSigned(int64_t fail_value, bool *success) {
   return fail_value;
 }
 
+llvm::APSInt ValueObject::GetValueAsAPSInt() {
+  lldb::TargetSP target = GetTargetSP();
+  uint64_t byte_size = 0;
+  if (auto temp = GetCompilerType().GetByteSize(target.get()))
+    byte_size = temp.value();
+
+  unsigned bit_width = static_cast<unsigned>(byte_size * CHAR_BIT);
+  bool success = true;
+  uint64_t fail_value = 0;
+  uint64_t ret_val = GetValueAsUnsigned(fail_value, &success);
+  uint64_t new_value = fail_value;
+  if (success)
+    new_value = ret_val;
+  bool is_signed = GetCompilerType().IsSigned();
+
+  return llvm::APSInt(llvm::APInt(bit_width, new_value, is_signed),
+                      !is_signed);
+}
+
+llvm::APFloat ValueObject::GetValueAsFloat() {
+  lldb::BasicType basic_type =
+      GetCompilerType().GetCanonicalType().GetBasicTypeEnumeration();
+  lldb::DataExtractorSP data_sp(new DataExtractor());
+  Status error;
+
+  switch (basic_type) {
+  case lldb::eBasicTypeFloat: {
+    float v = 0;
----------------
clayborg wrote:

We really don't want to use native float types here; we want to use real APFloat
values that are correctly encoded. For instance, if we are debugging from
x86_64, we don't want to create a 10-byte Intel float and do the math with a
native float; we want to use the value from the Scalar that is already set
correctly. If we are cross-debugging to an iPhone, we want the floating point
values to match exactly, which requires us to use the APFloat rather than
manually creating a native float and converting it. This all already works for
value objects when they get the float as a string, and it will be similar here:
we resolve the Scalar and then can just use Scalar::m_float directly.
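
To illustrate the distinction being made here: the idea is to build the
llvm::APFloat from the target's raw bit pattern with the type's float
semantics (which the Scalar already holds), rather than reading into a host
float/double and converting. A rough sketch using only LLVM's APFloat/APInt
APIs; FloatFromRawBits is a hypothetical helper for illustration, not the
code in this patch:

#include <cassert>
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"

// Hypothetical helper: reinterpret the target's raw bits with the correct
// float semantics instead of converting through a host float/double, so the
// encoding is preserved exactly even when cross-debugging.
static llvm::APFloat FloatFromRawBits(const llvm::fltSemantics &semantics,
                                      const llvm::APInt &raw_bits) {
  assert(raw_bits.getBitWidth() == llvm::APFloat::getSizeInBits(semantics));
  return llvm::APFloat(semantics, raw_bits);
}

// For example, a 10-byte Intel long double read from target memory keeps its
// exact encoding (words is two uint64_t holding the 80-bit pattern):
//   llvm::APFloat f = FloatFromRawBits(
//       llvm::APFloat::x87DoubleExtended(),
//       llvm::APInt(80, llvm::ArrayRef<uint64_t>(words)));

In the approach suggested above, those bits would come from the
already-resolved Scalar (its m_float is already encoded correctly), so no
host float ever enters the picture.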

https://github.com/llvm/llvm-project/pull/87197
_______________________________________________
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits
