[NFC][Py Reformat] Reformat python files in lldb

This is an ongoing series of commits that are reformatting our Python
code. Reformatting is done with `black` (23.1.0).
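
For anyone reproducing the formatting locally, a minimal sketch (assuming `pip` is available and the command is run from the monorepo root; the target path is illustrative):

  $ pip install black==23.1.0
  $ black lldb/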

If you end up having problems merging this commit because you have made
changes to a Python file, the best way to handle that is to run `git
checkout --ours <yourfile>` and then reformat it with `black`.
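
Concretely, once the merge stops on a conflicted Python file, the recovery described above looks roughly like this (the path is a placeholder, not a real file in the tree):

  $ git checkout --ours path/to/yourfile.py
  $ black path/to/yourfile.py
  $ git add path/to/yourfile.py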

RFC: https://discourse.llvm.org/t/rfc-document-and-standardize-python-code-style

Differential revision: https://reviews.llvm.org/D151460
Author: Jonas Devlieghere
Date: 2023-05-25 08:48:57 -07:00
Commit: 2238dcc393 (parent: daeee56798)
1282 changed files with 53068 additions and 39383 deletions


@@ -5,5 +5,5 @@ import types
# This package acts as a mock implementation of the native _lldb module so
# that generating the LLDB documentation doesn't actually require building all
# of LLDB.
module_name = '_lldb'
module_name = "_lldb"
sys.modules[module_name] = Mock()


@@ -15,7 +15,7 @@ from __future__ import print_function
import sys, os, re, shutil
from datetime import date
building_man_page = tags.has('builder-man')
building_man_page = tags.has("builder-man")
# For the website we need to setup the path to the generated LLDB module that
# we can generate documentation for its API.
@@ -33,43 +33,43 @@ if not building_man_page:
# Put the generated Python API documentation in the 'python_api' folder. This
# also defines the URL these files will have in the generated website.
automodapi_toctreedirnm = 'python_api'
automodapi_toctreedirnm = "python_api"
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.intersphinx']
extensions = ["sphinx.ext.todo", "sphinx.ext.mathjax", "sphinx.ext.intersphinx"]
autodoc_default_options = {
'special-members': '__int__, __len__, __hex__, __oct__, __iter__',
"special-members": "__int__, __len__, __hex__, __oct__, __iter__",
}
# Unless we only generate the basic manpage we need the plugin for generating
# the Python API documentation.
if not building_man_page:
extensions.append('sphinx_automodapi.automodapi')
extensions.append("sphinx_automodapi.automodapi")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = {
'.rst': 'restructuredtext',
".rst": "restructuredtext",
}
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
master_doc = "index"
# General information about the project.
project = u'LLDB'
copyright = u'2007-%d, The LLDB Team' % date.today().year
project = "LLDB"
copyright = "2007-%d, The LLDB Team" % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -78,23 +78,23 @@ copyright = u'2007-%d, The LLDB Team' % date.today().year
# everytime a new release comes out.
#
# The short version.
#version = '0'
# version = '0'
# The full version, including alpha/beta/rc tags.
#release = '0'
# release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'analyzer']
exclude_patterns = ["_build", "analyzer"]
# Ignore the generated Python documentation that is only used on the website.
# Without this we will get a lot of warnings about doc pages that aren't
# included by any doctree (as the manpage ignores those pages but they are
@@ -103,116 +103,114 @@ if building_man_page:
exclude_patterns.append(automodapi_toctreedirnm)
# Use the recommended 'any' rule so that referencing SB API classes is possible
# by just writing `SBData`.
default_role = 'any'
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
pygments_style = "friendly"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'font_size': '11pt',
"font_size": "11pt",
# Don't generate any links to GitHub.
'github_button' : 'false',
"github_button": "false",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'The LLDB Debugger'
html_title = "The LLDB Debugger"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_static_path = ["_static"]
html_context = {
'css_files': [
'_static/lldb.css'
],
}
"css_files": ["_static/lldb.css"],
}
html_extra_path = ['.htaccess']
html_extra_path = [".htaccess"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'LLDBdoc'
htmlhelp_basename = "LLDBdoc"
# If true, the reST sources are included in the HTML build as
# _sources/name. The default is True.
@@ -221,54 +219,52 @@ html_copy_source = False
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'LLDB.tex', u'LLDB Documentation',
u'The LLDB Team', 'manual'),
("index", "LLDB.tex", "LLDB Documentation", "The LLDB Team", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('man/lldb', 'lldb', u'LLDB Documentation', [u'LLVM project'], 1),
('man/lldb-server', 'lldb-server', u'LLDB Documentation', [u'LLVM project'], 1),
]
man_pages = [
("man/lldb", "lldb", "LLDB Documentation", ["LLVM project"], 1),
("man/lldb-server", "lldb-server", "LLDB Documentation", ["LLVM project"], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
@@ -276,28 +272,39 @@ man_pages = [('man/lldb', 'lldb', u'LLDB Documentation', [u'LLVM project'], 1),
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'LLDB', u'LLDB Documentation',
u'The LLDB Team', 'LLDB', 'One line description of project.',
'Miscellaneous'),
(
"index",
"LLDB",
"LLDB Documentation",
"The LLDB Team",
"LLDB",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# texinfo_show_urls = 'footnote'
empty_attr_summary = re.compile(
r"\.\. rubric:: Attributes Summary\s*\.\. autosummary::\s*\.\. rubric::"
)
empty_attr_documentation = re.compile(
r"\.\. rubric:: Attributes Documentation\s*\.\. rubric::"
)
empty_attr_summary = re.compile(r'\.\. rubric:: Attributes Summary\s*\.\. autosummary::\s*\.\. rubric::')
empty_attr_documentation = re.compile(r'\.\. rubric:: Attributes Documentation\s*\.\. rubric::')
def preprocess_source(app, docname, source):
""" Preprocesses source files generated by automodapi. """
"""Preprocesses source files generated by automodapi."""
# Don't cleanup anything beside automodapi-generated sources.
if not automodapi_toctreedirnm in docname:
return
return
processed = source[0]
# Don't show the list of inheritance info as there is no inheritance in the
@@ -307,25 +314,27 @@ def preprocess_source(app, docname, source):
processed = processed.replace(":show-inheritance:", "")
# Remove the SWIG generated 'thisown' attribute. It just bloats the generated
# documentation and users shouldn't fiddle with the value anyway.
processed = re.sub(r'~SB[a-zA-Z]+\.thisown', "", processed)
processed = re.sub(r"~SB[a-zA-Z]+\.thisown", "", processed)
processed = processed.replace(" .. autoattribute:: thisown", "")
# After removing 'thisown', many objects don't have any attributes left.
# Remove all now empty attribute summary/documentation sections with
# some rather ugly regex.
processed = empty_attr_summary.sub('.. rubric::', processed)
processed = empty_attr_documentation.sub('.. rubric::', processed)
processed = empty_attr_summary.sub(".. rubric::", processed)
processed = empty_attr_documentation.sub(".. rubric::", processed)
# Replace the original source with the processed one (source is a single
# element list).
source[0] = processed
def cleanup_source(app, exception):
""" Remove files generated by automodapi in the source tree. """
if hasattr(app.config, 'automodapi_toctreedirnm'):
api_source_dir = os.path.join(app.srcdir, app.config.automodapi_toctreedirnm)
shutil.rmtree(api_source_dir, ignore_errors=True)
"""Remove files generated by automodapi in the source tree."""
if hasattr(app.config, "automodapi_toctreedirnm"):
api_source_dir = os.path.join(app.srcdir, app.config.automodapi_toctreedirnm)
shutil.rmtree(api_source_dir, ignore_errors=True)
def setup(app):
app.connect('source-read', preprocess_source)
app.connect('build-finished', cleanup_source)
app.connect("source-read", preprocess_source)
app.connect("build-finished", cleanup_source)


@@ -1,4 +1,5 @@
"Collection of tools for displaying bit representation of numbers."""
"Collection of tools for displaying bit representation of numbers." ""
def binary(n, width=None):
"""
@@ -26,7 +27,7 @@ def twos_complement(n, width):
Return a list of (0|1)'s for the binary representation of a width-bit two's
complement numeral system of an integer n which may be negative.
"""
val = 2**(width - 1)
val = 2 ** (width - 1)
if n >= 0:
if n > (val - 1):
return None
@@ -39,6 +40,7 @@ def twos_complement(n, width):
# It is safe to represent n (a negative int) with width-bits.
return binary(val * 2 - abs(n))
# print binary(0xABCD)
# [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1]
# print binary(0x1F, 8)
@@ -57,7 +59,7 @@ def positions(width):
"""Helper function returning a list describing the bit positions.
Bit positions greater than 99 are truncated to 2 digits, for example,
100 -> 00 and 127 -> 27."""
return ['{0:2}'.format(i)[-2:] for i in reversed(range(width))]
return ["{0:2}".format(i)[-2:] for i in reversed(range(width))]
def utob(debugger, command_line, result, dict):
@@ -88,8 +90,8 @@ def utob(debugger, command_line, result, dict):
return
if verbose and width > 0:
pos = positions(width)
print(' ' + ' '.join(pos))
print(' %s' % str(bits))
print(" " + " ".join(pos))
print(" %s" % str(bits))
def itob(debugger, command_line, result, dict):
@@ -118,5 +120,5 @@ def itob(debugger, command_line, result, dict):
return
if verbose and width > 0:
pos = positions(width)
print(' ' + ' '.join(pos))
print(' %s' % str(bits))
print(" " + " ".join(pos))
print(" %s" % str(bits))


@@ -14,9 +14,9 @@ def ensure_has_dir_in_path(dirname):
def do_import(debugger, modname):
if (len(modname) > 4 and modname[-4:] == '.pyc'):
if len(modname) > 4 and modname[-4:] == ".pyc":
modname = modname[:-4]
if (len(modname) > 3 and modname[-3:] == '.py'):
if len(modname) > 3 and modname[-3:] == ".py":
modname = modname[:-3]
debugger.HandleCommand("script import " + modname)
@@ -28,10 +28,10 @@ def pyimport_cmd(debugger, args, result, dict):
return "no module path given"
if not (os.sep in args):
modname = args
ensure_has_dir_in_path('.')
ensure_has_dir_in_path(".")
else:
endofdir = args.rfind(os.sep)
modname = args[endofdir + 1:]
modname = args[endofdir + 1 :]
args = args[0:endofdir]
ensure_has_dir_in_path(args)
do_import(debugger, modname)


@@ -9,6 +9,7 @@ import subprocess
class Holder:
"""Holds the _prev_dir_ class attribute for chdir() function."""
_prev_dir_ = None
@classmethod
@@ -25,8 +26,8 @@ def chdir(debugger, args, result, dict):
You can also issue 'cd -' to change to the previous working directory."""
new_dir = args.strip()
if not new_dir:
new_dir = os.path.expanduser('~')
elif new_dir == '-':
new_dir = os.path.expanduser("~")
elif new_dir == "-":
if not Holder.prev_dir():
# Bad directory, not changing.
print("bad directory, not changing")
@@ -42,10 +43,7 @@ def chdir(debugger, args, result, dict):
def system(debugger, command_line, result, dict):
"""Execute the command (a string) in a subshell."""
args = shlex.split(command_line)
process = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
retcode = process.poll()
if output and error:

File diff suppressed because it is too large.


@@ -1,5 +1,5 @@
#!/usr/bin/env python
#===-- armv7_cortex_m_target_definition.py.py ------------------*- C++ -*-===//
# ===-- armv7_cortex_m_target_definition.py.py ------------------*- C++ -*-===//
#
# The LLVM Compiler Infrastructure
#
@@ -7,134 +7,252 @@
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===//
# ===----------------------------------------------------------------------===//
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
# DESCRIPTION
#
# This file can be used with the following setting:
# plugin.process.gdb-remote.target-definition-file
# This setting should be used when you are trying to connect to a
# This setting should be used when you are trying to connect to a
# remote GDB server that doesn't support any of the register discovery
# packets that LLDB normally uses.
# packets that LLDB normally uses.
#
# Why is this necessary? LLDB doesn't require a new build of LLDB that
# targets each new architecture you will debug with. Instead, all
# architectures are supported and LLDB relies on extra GDB server
# architectures are supported and LLDB relies on extra GDB server
# packets to discover the target we are connecting to so that is can
# show the right registers for each target. This allows the GDB server
# to change and add new registers without requiring a new LLDB build
# just so we can see new registers.
#
# This file implements the x86_64 registers for the darwin version of
# GDB and allows you to connect to servers that use this register set.
#
# GDB and allows you to connect to servers that use this register set.
#
# USAGE
#
# (lldb) settings set plugin.process.gdb-remote.target-definition-file /path/to/armv7_cortex_m_target_definition.py
# (lldb) gdb-remote other.baz.com:1234
#
# The target definition file will get used if and only if the
# The target definition file will get used if and only if the
# qRegisterInfo packets are not supported when connecting to a remote
# GDB server.
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
from lldb import *
# DWARF register numbers
name_to_dwarf_regnum = {
'r0' : 0 ,
'r1' : 1 ,
'r2' : 2 ,
'r3' : 3 ,
'r4' : 4 ,
'r5' : 5 ,
'r6' : 6 ,
'r7' : 7 ,
'r9' : 8 ,
'r10' : 9 ,
'r11' : 10,
'r12' : 11,
'sp' : 12,
'lr' : 13,
'pc' : 14,
'r15' : 15,
'xpsr' : 16,
};
"r0": 0,
"r1": 1,
"r2": 2,
"r3": 3,
"r4": 4,
"r5": 5,
"r6": 6,
"r7": 7,
"r9": 8,
"r10": 9,
"r11": 10,
"r12": 11,
"sp": 12,
"lr": 13,
"pc": 14,
"r15": 15,
"xpsr": 16,
}
name_to_generic_regnum = {
'pc' : LLDB_REGNUM_GENERIC_PC,
'sp' : LLDB_REGNUM_GENERIC_SP,
'r7' : LLDB_REGNUM_GENERIC_FP,
'lr' : LLDB_REGNUM_GENERIC_RA,
'r0' : LLDB_REGNUM_GENERIC_ARG1,
'r1' : LLDB_REGNUM_GENERIC_ARG2,
'r2' : LLDB_REGNUM_GENERIC_ARG3,
'r3' : LLDB_REGNUM_GENERIC_ARG4
};
"pc": LLDB_REGNUM_GENERIC_PC,
"sp": LLDB_REGNUM_GENERIC_SP,
"r7": LLDB_REGNUM_GENERIC_FP,
"lr": LLDB_REGNUM_GENERIC_RA,
"r0": LLDB_REGNUM_GENERIC_ARG1,
"r1": LLDB_REGNUM_GENERIC_ARG2,
"r2": LLDB_REGNUM_GENERIC_ARG3,
"r3": LLDB_REGNUM_GENERIC_ARG4,
}
def get_reg_num (reg_num_dict, reg_name):
def get_reg_num(reg_num_dict, reg_name):
if reg_name in reg_num_dict:
return reg_num_dict[reg_name]
return LLDB_INVALID_REGNUM
def get_reg_num (reg_num_dict, reg_name):
def get_reg_num(reg_num_dict, reg_name):
if reg_name in reg_num_dict:
return reg_num_dict[reg_name]
return LLDB_INVALID_REGNUM
armv7_register_infos = [
{ 'name':'r0' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'arg1' },
{ 'name':'r1' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'arg2' },
{ 'name':'r2' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'arg3' },
{ 'name':'r3' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'arg4' },
{ 'name':'r4' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'r5' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'r6' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'r7' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'fp' },
{ 'name':'r8' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'r9' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'r10' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'r11' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'r12' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo },
{ 'name':'sp' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'r13' },
{ 'name':'lr' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'r14' },
{ 'name':'pc' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'r15' },
{ 'name':'xpsr' , 'set':0, 'bitsize':32 , 'encoding':eEncodingUint , 'format':eFormatAddressInfo, 'alt-name':'cpsr' },
];
{
"name": "r0",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
"alt-name": "arg1",
},
{
"name": "r1",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
"alt-name": "arg2",
},
{
"name": "r2",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
"alt-name": "arg3",
},
{
"name": "r3",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
"alt-name": "arg4",
},
{
"name": "r4",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
},
{
"name": "r5",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
},
{
"name": "r6",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
},
{
"name": "r7",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
"alt-name": "fp",
},
{
"name": "r8",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
},
{
"name": "r9",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
},
{
"name": "r10",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
},
{
"name": "r11",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
},
{
"name": "r12",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
},
{
"name": "sp",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
"alt-name": "r13",
},
{
"name": "lr",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
"alt-name": "r14",
},
{
"name": "pc",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
"alt-name": "r15",
},
{
"name": "xpsr",
"set": 0,
"bitsize": 32,
"encoding": eEncodingUint,
"format": eFormatAddressInfo,
"alt-name": "cpsr",
},
]
g_target_definition = None
def get_target_definition ():
def get_target_definition():
global g_target_definition
if g_target_definition == None:
g_target_definition = {}
offset = 0
for reg_info in armv7_register_infos:
reg_name = reg_info['name']
reg_name = reg_info["name"]
if 'slice' not in reg_info and 'composite' not in reg_info:
reg_info['offset'] = offset
offset += reg_info['bitsize'] / 8
if "slice" not in reg_info and "composite" not in reg_info:
reg_info["offset"] = offset
offset += reg_info["bitsize"] / 8
# Set the DWARF/eh_frame register number for this register if it has one
# Set the DWARF/eh_frame register number for this register if it has one
reg_num = get_reg_num(name_to_dwarf_regnum, reg_name)
if reg_num != LLDB_INVALID_REGNUM:
reg_info['gcc'] = reg_num
reg_info['ehframe'] = reg_num
reg_info["gcc"] = reg_num
reg_info["ehframe"] = reg_num
# Set the generic register number for this register if it has one
# Set the generic register number for this register if it has one
reg_num = get_reg_num(name_to_generic_regnum, reg_name)
if reg_num != LLDB_INVALID_REGNUM:
reg_info['generic'] = reg_num
reg_info["generic"] = reg_num
g_target_definition['sets'] = ['General Purpose Registers']
g_target_definition['registers'] = armv7_register_infos
g_target_definition['host-info'] = { 'triple' : 'armv7em--', 'endian': eByteOrderLittle }
g_target_definition['g-packet-size'] = offset
g_target_definition["sets"] = ["General Purpose Registers"]
g_target_definition["registers"] = armv7_register_infos
g_target_definition["host-info"] = {
"triple": "armv7em--",
"endian": eByteOrderLittle,
}
g_target_definition["g-packet-size"] = offset
return g_target_definition
def get_dynamic_setting(target, setting_name):
if setting_name == 'gdb-server-target-definition':
if setting_name == "gdb-server-target-definition":
return get_target_definition()


@@ -14,20 +14,18 @@ AR_EFMT1 = "#1/"
def memdump(src, bytes_per_line=16, address=0):
FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.'
for x in range(256)])
FILTER = "".join([(len(repr(chr(x))) == 3) and chr(x) or "." for x in range(256)])
for i in range(0, len(src), bytes_per_line):
s = src[i:i+bytes_per_line]
hex_bytes = ' '.join(["%02x" % (ord(x)) for x in s])
s = src[i : i + bytes_per_line]
hex_bytes = " ".join(["%02x" % (ord(x)) for x in s])
ascii = s.translate(FILTER)
print("%#08.8x: %-*s %s" % (address+i, bytes_per_line*3, hex_bytes,
ascii))
print("%#08.8x: %-*s %s" % (address + i, bytes_per_line * 3, hex_bytes, ascii))
class Object(object):
def __init__(self, file):
def read_str(file, str_len):
return file.read(str_len).rstrip('\0 ')
return file.read(str_len).rstrip("\0 ")
def read_int(file, str_len, base):
return int(read_str(file, str_len), base)
@@ -41,12 +39,11 @@ class Object(object):
self.mode = read_int(file, 8, 8)
self.size = read_int(file, 10, 10)
if file.read(2) != ARFMAG:
raise ValueError('invalid BSD object at offset %#08.8x' % (
self.offset))
raise ValueError("invalid BSD object at offset %#08.8x" % (self.offset))
# If we have an extended name read it. Extended names start with
name_len = 0
if self.name.startswith(AR_EFMT1):
name_len = int(self.name[len(AR_EFMT1):], 10)
name_len = int(self.name[len(AR_EFMT1) :], 10)
self.name = read_str(file, name_len)
self.obj_offset = file.tell()
self.obj_size = self.size - name_len
@@ -54,20 +51,29 @@ class Object(object):
def dump(self, f=sys.stdout, flat=True):
if flat:
f.write('%#08.8x: %#08.8x %5u %5u %6o %#08.8x %s\n' % (self.offset,
self.date, self.uid, self.gid, self.mode, self.size,
self.name))
f.write(
"%#08.8x: %#08.8x %5u %5u %6o %#08.8x %s\n"
% (
self.offset,
self.date,
self.uid,
self.gid,
self.mode,
self.size,
self.name,
)
)
else:
f.write('%#08.8x: \n' % self.offset)
f.write("%#08.8x: \n" % self.offset)
f.write(' name = "%s"\n' % self.name)
f.write(' date = %#08.8x\n' % self.date)
f.write(' uid = %i\n' % self.uid)
f.write(' gid = %i\n' % self.gid)
f.write(' mode = %o\n' % self.mode)
f.write(' size = %#08.8x\n' % (self.size))
f.write(" date = %#08.8x\n" % self.date)
f.write(" uid = %i\n" % self.uid)
f.write(" gid = %i\n" % self.gid)
f.write(" mode = %o\n" % self.mode)
f.write(" size = %#08.8x\n" % (self.size))
self.file.seek(self.obj_offset, 0)
first_bytes = self.file.read(4)
f.write('bytes = ')
f.write("bytes = ")
memdump(first_bytes)
def get_bytes(self):
@@ -78,11 +84,11 @@ class Object(object):
return bytes
def save(self, path=None, overwrite=False):
'''
Save the contents of the object to disk using 'path' argument as
the path, or save it to the current working directory using the
object name.
'''
"""
Save the contents of the object to disk using 'path' argument as
the path, or save it to the current working directory using the
object name.
"""
if path is None:
path = self.name
@@ -90,7 +96,7 @@ class Object(object):
print('error: outfile "%s" already exists' % (path))
return
print('Saving "%s" to "%s"...' % (self.name, path))
with open(path, 'w') as f:
with open(path, "w") as f:
f.write(self.get_bytes())
@@ -102,13 +108,13 @@ class StringTable(object):
length = len(self.bytes)
if offset >= length:
return None
return self.bytes[offset:self.bytes.find('\0', offset)]
return self.bytes[offset : self.bytes.find("\0", offset)]
class Archive(object):
def __init__(self, path):
self.path = path
self.file = open(path, 'r')
self.file = open(path, "r")
self.objects = []
self.offset_to_object = {}
if self.file.read(SARMAG) != ARMAG:
@@ -129,12 +135,12 @@ class Archive(object):
return None
def find(self, name, mtime=None, f=sys.stdout):
'''
Find an object(s) by name with optional modification time. There
can be multple objects with the same name inside and possibly with
the same modification time within a BSD archive so clients must be
prepared to get multiple results.
'''
"""
Find an object(s) by name with optional modification time. There
can be multple objects with the same name inside and possibly with
the same modification time within a BSD archive so clients must be
prepared to get multiple results.
"""
matches = []
for obj in self.objects:
if obj.name == name and (mtime is None or mtime == obj.date):
@@ -143,14 +149,15 @@ class Archive(object):
@classmethod
def dump_header(self, f=sys.stdout):
f.write(' DATE UID GID MODE SIZE NAME\n')
f.write(' ---------- ----- ----- ------ ---------- '
'--------------\n')
f.write(" DATE UID GID MODE SIZE NAME\n")
f.write(
" ---------- ----- ----- ------ ---------- " "--------------\n"
)
def get_symdef(self):
def get_uint32(file):
'''Extract a uint32_t from the current file position.'''
v, = struct.unpack('=I', file.read(4))
"""Extract a uint32_t from the current file position."""
(v,) = struct.unpack("=I", file.read(4))
return v
for obj in self.objects:
@@ -158,7 +165,7 @@ class Archive(object):
if obj.name.startswith("__.SYMDEF"):
self.file.seek(obj.obj_offset, 0)
ranlib_byte_size = get_uint32(self.file)
num_ranlib_structs = ranlib_byte_size/8
num_ranlib_structs = ranlib_byte_size / 8
str_offset_pairs = []
for _ in range(num_ranlib_structs):
strx = get_uint32(self.file)
@@ -171,62 +178,66 @@ class Archive(object):
return symdef
def get_object_dicts(self):
'''
Returns an array of object dictionaries that contain they following
keys:
'object': the actual bsd.Object instance
'symdefs': an array of symbol names that the object contains
as found in the "__.SYMDEF" item in the archive
'''
"""
Returns an array of object dictionaries that contain they following
keys:
'object': the actual bsd.Object instance
'symdefs': an array of symbol names that the object contains
as found in the "__.SYMDEF" item in the archive
"""
symdefs = self.get_symdef()
symdef_dict = {}
if symdefs:
for (name, offset) in symdefs:
for name, offset in symdefs:
if offset in symdef_dict:
object_dict = symdef_dict[offset]
else:
object_dict = {
'object': self.get_object_at_offset(offset),
'symdefs': []
"object": self.get_object_at_offset(offset),
"symdefs": [],
}
symdef_dict[offset] = object_dict
object_dict['symdefs'].append(name)
object_dict["symdefs"].append(name)
object_dicts = []
for offset in sorted(symdef_dict):
object_dicts.append(symdef_dict[offset])
return object_dicts
def dump(self, f=sys.stdout, flat=True):
f.write('%s:\n' % self.path)
f.write("%s:\n" % self.path)
if flat:
self.dump_header(f=f)
for obj in self.objects:
obj.dump(f=f, flat=flat)
class Interactive(cmd.Cmd):
'''Interactive prompt for exploring contents of BSD archive files, type
"help" to see a list of supported commands.'''
"""Interactive prompt for exploring contents of BSD archive files, type
"help" to see a list of supported commands."""
image_option_parser = None
def __init__(self, archives):
cmd.Cmd.__init__(self)
self.use_rawinput = False
self.intro = ('Interactive BSD archive prompt, type "help" to see a '
'list of supported commands.')
self.intro = (
'Interactive BSD archive prompt, type "help" to see a '
"list of supported commands."
)
self.archives = archives
self.prompt = '% '
self.prompt = "% "
def default(self, line):
'''Catch all for unknown command, which will exit the interpreter.'''
"""Catch all for unknown command, which will exit the interpreter."""
print("unknown command: %s" % line)
return True
def do_q(self, line):
'''Quit command'''
"""Quit command"""
return True
def do_quit(self, line):
'''Quit command'''
"""Quit command"""
return True
def do_extract(self, line):
@@ -241,10 +252,9 @@ class Interactive(cmd.Cmd):
object.save(overwrite=False)
extracted = True
if not extracted:
print('error: no object matches "%s" in any archives' % (
object_name))
print('error: no object matches "%s" in any archives' % (object_name))
else:
print('error: must specify the name of an object to extract')
print("error: must specify the name of an object to extract")
def do_ls(self, line):
args = shlex.split(line)
@@ -256,78 +266,103 @@ class Interactive(cmd.Cmd):
for object in matches:
object.dump(flat=False)
else:
print('error: no object matches "%s" in "%s"' % (
object_name, archive.path))
print(
'error: no object matches "%s" in "%s"'
% (object_name, archive.path)
)
else:
for archive in self.archives:
archive.dump(flat=True)
print('')
print("")
def main():
parser = optparse.OptionParser(
prog='bsd',
description='Utility for BSD archives')
parser = optparse.OptionParser(prog="bsd", description="Utility for BSD archives")
parser.add_option(
'--object',
type='string',
dest='object_name',
"--object",
type="string",
dest="object_name",
default=None,
help=('Specify the name of a object within the BSD archive to get '
'information on'))
help=(
"Specify the name of a object within the BSD archive to get "
"information on"
),
)
parser.add_option(
'-s', '--symbol',
type='string',
dest='find_symbol',
"-s",
"--symbol",
type="string",
dest="find_symbol",
default=None,
help=('Specify the name of a symbol within the BSD archive to get '
'information on from SYMDEF'))
help=(
"Specify the name of a symbol within the BSD archive to get "
"information on from SYMDEF"
),
)
parser.add_option(
'--symdef',
action='store_true',
dest='symdef',
"--symdef",
action="store_true",
dest="symdef",
default=False,
help=('Dump the information in the SYMDEF.'))
help=("Dump the information in the SYMDEF."),
)
parser.add_option(
'-v', '--verbose',
action='store_true',
dest='verbose',
"-v",
"--verbose",
action="store_true",
dest="verbose",
default=False,
help='Enable verbose output')
help="Enable verbose output",
)
parser.add_option(
'-e', '--extract',
action='store_true',
dest='extract',
"-e",
"--extract",
action="store_true",
dest="extract",
default=False,
help=('Specify this to extract the object specified with the --object '
'option. There must be only one object with a matching name or '
'the --mtime option must be specified to uniquely identify a '
'single object.'))
help=(
"Specify this to extract the object specified with the --object "
"option. There must be only one object with a matching name or "
"the --mtime option must be specified to uniquely identify a "
"single object."
),
)
parser.add_option(
'-m', '--mtime',
type='int',
dest='mtime',
"-m",
"--mtime",
type="int",
dest="mtime",
default=None,
help=('Specify the modification time of the object an object. This '
'option is used with either the --object or --extract options.'))
help=(
"Specify the modification time of the object an object. This "
"option is used with either the --object or --extract options."
),
)
parser.add_option(
'-o', '--outfile',
type='string',
dest='outfile',
"-o",
"--outfile",
type="string",
dest="outfile",
default=None,
help=('Specify a different name or path for the file to extract when '
'using the --extract option. If this option isn\'t specified, '
'then the extracted object file will be extracted into the '
'current working directory if a file doesn\'t already exist '
'with that name.'))
help=(
"Specify a different name or path for the file to extract when "
"using the --extract option. If this option isn't specified, "
"then the extracted object file will be extracted into the "
"current working directory if a file doesn't already exist "
"with that name."
),
)
parser.add_option(
'-i', '--interactive',
action='store_true',
dest='interactive',
"-i",
"--interactive",
action="store_true",
dest="interactive",
default=False,
help=('Enter an interactive shell that allows users to interactively '
'explore contents of .a files.'))
help=(
"Enter an interactive shell that allows users to interactively "
"explore contents of .a files."
),
)
(options, args) = parser.parse_args(sys.argv[1:])
@@ -342,7 +377,7 @@ def main():
for path in args:
archive = Archive(path)
if options.object_name:
print('%s:\n' % (path))
print("%s:\n" % (path))
matches = archive.find(options.object_name, options.mtime)
if matches:
dump_all = True
@@ -351,58 +386,63 @@ def main():
dump_all = False
matches[0].save(path=options.outfile, overwrite=False)
else:
print('error: multiple objects match "%s". Specify '
'the modification time using --mtime.' % (
options.object_name))
print(
'error: multiple objects match "%s". Specify '
"the modification time using --mtime."
% (options.object_name)
)
if dump_all:
for obj in matches:
obj.dump(flat=False)
else:
print('error: object "%s" not found in archive' % (
options.object_name))
print('error: object "%s" not found in archive' % (options.object_name))
elif options.find_symbol:
symdefs = archive.get_symdef()
if symdefs:
success = False
for (name, offset) in symdefs:
for name, offset in symdefs:
obj = archive.get_object_at_offset(offset)
if name == options.find_symbol:
print('Found "%s" in:' % (options.find_symbol))
obj.dump(flat=False)
success = True
if not success:
print('Didn\'t find "%s" in any objects' % (
options.find_symbol))
print('Didn\'t find "%s" in any objects' % (options.find_symbol))
else:
print("error: no __.SYMDEF was found")
elif options.symdef:
object_dicts = archive.get_object_dicts()
for object_dict in object_dicts:
object_dict['object'].dump(flat=False)
object_dict["object"].dump(flat=False)
print("symbols:")
for name in object_dict['symdefs']:
for name in object_dict["symdefs"]:
print(" %s" % (name))
else:
archive.dump(flat=not options.verbose)
if __name__ == '__main__':
if __name__ == "__main__":
main()
def print_mtime_error(result, dmap_mtime, actual_mtime):
print("error: modification time in debug map (%#08.8x) doesn't "
"match the .o file modification time (%#08.8x)" % (
dmap_mtime, actual_mtime), file=result)
print(
"error: modification time in debug map (%#08.8x) doesn't "
"match the .o file modification time (%#08.8x)" % (dmap_mtime, actual_mtime),
file=result,
)
def print_file_missing_error(result, path):
print("error: file \"%s\" doesn't exist" % (path), file=result)
print('error: file "%s" doesn\'t exist' % (path), file=result)
def print_multiple_object_matches(result, object_name, mtime, matches):
print("error: multiple matches for object '%s' with with "
"modification time %#08.8x:" % (object_name, mtime), file=result)
print(
"error: multiple matches for object '%s' with with "
"modification time %#08.8x:" % (object_name, mtime),
file=result,
)
Archive.dump_header(f=result)
for match in matches:
match.dump(f=result, flat=True)
@@ -411,15 +451,18 @@ def print_multiple_object_matches(result, object_name, mtime, matches):
def print_archive_object_error(result, object_name, mtime, archive):
matches = archive.find(object_name, f=result)
if len(matches) > 0:
print("error: no objects have a modification time that "
"matches %#08.8x for '%s'. Potential matches:" % (
mtime, object_name), file=result)
print(
"error: no objects have a modification time that "
"matches %#08.8x for '%s'. Potential matches:" % (mtime, object_name),
file=result,
)
Archive.dump_header(f=result)
for match in matches:
match.dump(f=result, flat=True)
else:
print("error: no object named \"%s\" found in archive:" % (
object_name), file=result)
print(
'error: no object named "%s" found in archive:' % (object_name), file=result
)
Archive.dump_header(f=result)
for match in archive.objects:
match.dump(f=result, flat=True)
@@ -431,21 +474,21 @@ class VerifyDebugMapCommand:
def create_options(self):
usage = "usage: %prog [options]"
description = '''This command reports any .o files that are missing
or whose modification times don't match in the debug map of an executable.'''
description = """This command reports any .o files that are missing
or whose modification times don't match in the debug map of an executable."""
self.parser = optparse.OptionParser(
description=description,
prog=self.name,
usage=usage,
add_help_option=False)
description=description, prog=self.name, usage=usage, add_help_option=False
)
self.parser.add_option(
'-e', '--errors',
action='store_true',
dest='errors',
"-e",
"--errors",
action="store_true",
dest="errors",
default=False,
help="Only show errors")
help="Only show errors",
)
def get_short_help(self):
return "Verify debug map object files."
@@ -459,6 +502,7 @@ or whose modification times don't match in the debug map of an executable.'''
def __call__(self, debugger, command, exe_ctx, result):
import lldb
# Use the Shell Lexer to properly parse up command options just like a
# shell would
command_args = shlex.split(command)
@@ -478,9 +522,11 @@ or whose modification times don't match in the debug map of an executable.'''
for module_spec in args:
module = target.module[module_spec]
if not (module and module.IsValid()):
result.SetError('error: invalid module specification: "%s". '
'Specify the full path, basename, or UUID of '
'a module ' % (module_spec))
result.SetError(
'error: invalid module specification: "%s". '
"Specify the full path, basename, or UUID of "
"a module " % (module_spec)
)
return
num_symbols = module.GetNumSymbols()
num_errors = 0
@@ -493,30 +539,28 @@ or whose modification times don't match in the debug map of an executable.'''
continue
# Extract the value of the symbol by dumping the
# symbol. The value is the mod time.
dmap_mtime = int(str(symbol).split('value = ')
[1].split(',')[0], 16)
dmap_mtime = int(str(symbol).split("value = ")[1].split(",")[0], 16)
if not options.errors:
print('%s' % (path), file=result)
print("%s" % (path), file=result)
if os.path.exists(path):
actual_mtime = int(os.stat(path).st_mtime)
if dmap_mtime != actual_mtime:
num_errors += 1
if options.errors:
print('%s' % (path), end=' ', file=result)
print_mtime_error(result, dmap_mtime,
actual_mtime)
elif path[-1] == ')':
(archive_path, object_name) = path[0:-1].split('(')
print("%s" % (path), end=" ", file=result)
print_mtime_error(result, dmap_mtime, actual_mtime)
elif path[-1] == ")":
(archive_path, object_name) = path[0:-1].split("(")
if not archive_path and not object_name:
num_errors += 1
if options.errors:
print('%s' % (path), end=' ', file=result)
print("%s" % (path), end=" ", file=result)
print_file_missing_error(path)
continue
if not os.path.exists(archive_path):
num_errors += 1
if options.errors:
print('%s' % (path), end=' ', file=result)
print("%s" % (path), end=" ", file=result)
print_file_missing_error(archive_path)
continue
if archive_path in archives:
@@ -527,26 +571,27 @@ or whose modification times don't match in the debug map of an executable.'''
matches = archive.find(object_name, dmap_mtime)
num_matches = len(matches)
if num_matches == 1:
print('1 match', file=result)
print("1 match", file=result)
obj = matches[0]
if obj.date != dmap_mtime:
num_errors += 1
if options.errors:
print('%s' % (path), end=' ', file=result)
print("%s" % (path), end=" ", file=result)
print_mtime_error(result, dmap_mtime, obj.date)
elif num_matches == 0:
num_errors += 1
if options.errors:
print('%s' % (path), end=' ', file=result)
print_archive_object_error(result, object_name,
dmap_mtime, archive)
print("%s" % (path), end=" ", file=result)
print_archive_object_error(
result, object_name, dmap_mtime, archive
)
elif num_matches > 1:
num_errors += 1
if options.errors:
print('%s' % (path), end=' ', file=result)
print_multiple_object_matches(result,
object_name,
dmap_mtime, matches)
print("%s" % (path), end=" ", file=result)
print_multiple_object_matches(
result, object_name, dmap_mtime, matches
)
if num_errors > 0:
print("%u errors found" % (num_errors), file=result)
else:
@@ -558,7 +603,10 @@ def __lldb_init_module(debugger, dict):
# interpreter.
# Add any commands contained in this module to LLDB
debugger.HandleCommand(
'command script add -o -c %s.VerifyDebugMapCommand %s' % (
__name__, VerifyDebugMapCommand.name))
print('The "%s" command has been installed, type "help %s" for detailed '
'help.' % (VerifyDebugMapCommand.name, VerifyDebugMapCommand.name))
"command script add -o -c %s.VerifyDebugMapCommand %s"
% (__name__, VerifyDebugMapCommand.name)
)
print(
'The "%s" command has been installed, type "help %s" for detailed '
"help." % (VerifyDebugMapCommand.name, VerifyDebugMapCommand.name)
)


@@ -17,31 +17,36 @@ import sys
class FrameStatCommand:
program = 'framestats'
program = "framestats"
@classmethod
def register_lldb_command(cls, debugger, module_name):
parser = cls.create_options()
cls.__doc__ = parser.format_help()
# Add any commands contained in this module to LLDB
command = 'command script add -o -c %s.%s %s' % (module_name,
cls.__name__,
cls.program)
command = "command script add -o -c %s.%s %s" % (
module_name,
cls.__name__,
cls.program,
)
debugger.HandleCommand(command)
print('The "{0}" command has been installed, type "help {0}" or "{0} '
'--help" for detailed help.'.format(cls.program))
print(
'The "{0}" command has been installed, type "help {0}" or "{0} '
'--help" for detailed help.'.format(cls.program)
)
@classmethod
def create_options(cls):
usage = "usage: %prog [options]"
description = ('This command is meant to be an example of how to make '
'an LLDB command that does something useful, follows '
'best practices, and exploits the SB API. '
'Specifically, this command computes the aggregate '
'and average size of the variables in the current '
'frame and allows you to tweak exactly which variables '
'are to be accounted in the computation.')
description = (
"This command is meant to be an example of how to make "
"an LLDB command that does something useful, follows "
"best practices, and exploits the SB API. "
"Specifically, this command computes the aggregate "
"and average size of the variables in the current "
"frame and allows you to tweak exactly which variables "
"are to be accounted in the computation."
)
# Pass add_help_option = False, since this keeps the command in line
# with lldb commands, and we wire up "help command" to work by
@@ -50,39 +55,44 @@ class FrameStatCommand:
description=description,
prog=cls.program,
usage=usage,
add_help_option=False)
add_help_option=False,
)
parser.add_option(
'-i',
'--in-scope',
action='store_true',
dest='inscope',
help='in_scope_only = True',
default=True)
"-i",
"--in-scope",
action="store_true",
dest="inscope",
help="in_scope_only = True",
default=True,
)
parser.add_option(
'-a',
'--arguments',
action='store_true',
dest='arguments',
help='arguments = True',
default=True)
"-a",
"--arguments",
action="store_true",
dest="arguments",
help="arguments = True",
default=True,
)
parser.add_option(
'-l',
'--locals',
action='store_true',
dest='locals',
help='locals = True',
default=True)
"-l",
"--locals",
action="store_true",
dest="locals",
help="locals = True",
default=True,
)
parser.add_option(
'-s',
'--statics',
action='store_true',
dest='statics',
help='statics = True',
default=True)
"-s",
"--statics",
action="store_true",
dest="statics",
help="statics = True",
default=True,
)
return parser
@@ -118,10 +128,8 @@ class FrameStatCommand:
return
variables_list = frame.GetVariables(
options.arguments,
options.locals,
options.statics,
options.inscope)
options.arguments, options.locals, options.statics, options.inscope
)
variables_count = variables_list.GetSize()
if variables_count == 0:
print("no variables here", file=result)
@@ -132,16 +140,19 @@ class FrameStatCommand:
variable_type = variable.GetType()
total_size = total_size + variable_type.GetByteSize()
average_size = float(total_size) / variables_count
print("Your frame has %d variables. Their total size "
"is %d bytes. The average size is %f bytes" % (
variables_count, total_size, average_size), file=result)
print(
"Your frame has %d variables. Their total size "
"is %d bytes. The average size is %f bytes"
% (variables_count, total_size, average_size),
file=result,
)
# not returning anything is akin to returning success
def __lldb_init_module(debugger, dict):
# Register all classes that have a register_lldb_command method
for _name, cls in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(cls) and callable(getattr(cls,
"register_lldb_command",
None)):
if inspect.isclass(cls) and callable(
getattr(cls, "register_lldb_command", None)
):
cls.register_lldb_command(debugger, __name__)

File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
#!/usr/bin/env python
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
# This module will enable GDB remote packet logging when the
# 'start_gdb_log' command is called with a filename to log to. When the
# 'stop_gdb_log' command is called, it will disable the logging and
@@ -14,7 +14,7 @@
# (lldb) command script import /path/to/gdbremote.py
# Or it can be added to your ~/.lldbinit file so this module is always
# available.
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
import optparse
import os
@@ -24,14 +24,14 @@ import tempfile
def start_gdb_log(debugger, command, result, dict):
'''Start logging GDB remote packets by enabling logging with timestamps and
"""Start logging GDB remote packets by enabling logging with timestamps and
thread safe logging. Follow a call to this function with a call to "stop_gdb_log"
in order to dump out the commands.'''
in order to dump out the commands."""
global log_file
if log_file:
result.PutCString(
'error: logging is already in progress with file "%s"',
log_file)
'error: logging is already in progress with file "%s"', log_file
)
else:
args_len = len(args)
if args_len == 0:
@@ -41,14 +41,16 @@ def start_gdb_log(debugger, command, result, dict):
if log_file:
debugger.HandleCommand(
'log enable --threadsafe --timestamp --file "%s" gdb-remote packets' %
log_file)
'log enable --threadsafe --timestamp --file "%s" gdb-remote packets'
% log_file
)
result.PutCString(
"GDB packet logging enable with log file '%s'\nUse the 'stop_gdb_log' command to stop logging and show packet statistics." %
log_file)
"GDB packet logging enable with log file '%s'\nUse the 'stop_gdb_log' command to stop logging and show packet statistics."
% log_file
)
return
result.PutCString('error: invalid log file path')
result.PutCString("error: invalid log file path")
result.PutCString(usage)
@@ -61,18 +63,18 @@ def parse_time_log(debugger, command, result, dict):
def parse_time_log_args(command_args):
usage = "usage: parse_time_log [options] [<LOGFILEPATH>]"
description = '''Parse a log file that contains timestamps and convert the timestamps to delta times between log lines.'''
description = """Parse a log file that contains timestamps and convert the timestamps to delta times between log lines."""
parser = optparse.OptionParser(
description=description,
prog='parse_time_log',
usage=usage)
description=description, prog="parse_time_log", usage=usage
)
parser.add_option(
'-v',
'--verbose',
action='store_true',
dest='verbose',
help='display verbose debug info',
default=False)
"-v",
"--verbose",
action="store_true",
dest="verbose",
help="display verbose debug info",
default=False,
)
try:
(options, args) = parser.parse_args(command_args)
except:
@@ -82,7 +84,7 @@ def parse_time_log_args(command_args):
def parse_log_file(file, options):
'''Parse a log file that was contains timestamps. These logs are typically
"""Parse a log file that was contains timestamps. These logs are typically
generated using:
(lldb) log enable --threadsafe --timestamp --file <FILE> ....
@@ -91,13 +93,13 @@ def parse_log_file(file, options):
show delta times between log lines and also keep track of how long it takes
for GDB remote commands to make a send/receive round trip. This can be
handy when trying to figure out why some operation in the debugger is taking
a long time during a preset set of debugger commands.'''
a long time during a preset set of debugger commands."""
print('#----------------------------------------------------------------------')
print("#----------------------------------------------------------------------")
print("# Log file: '%s'" % file)
print('#----------------------------------------------------------------------')
print("#----------------------------------------------------------------------")
timestamp_regex = re.compile('(\s*)([1-9][0-9]+\.[0-9]+)([^0-9].*)$')
timestamp_regex = re.compile("(\s*)([1-9][0-9]+\.[0-9]+)([^0-9].*)$")
base_time = 0.0
last_time = 0.0
@@ -113,20 +115,27 @@ def parse_log_file(file, options):
else:
base_time = curr_time
print('%s%.6f %+.6f%s' % (match.group(1), curr_time - base_time, delta, match.group(3)))
print(
"%s%.6f %+.6f%s"
% (match.group(1), curr_time - base_time, delta, match.group(3))
)
last_time = curr_time
else:
print(line)
if __name__ == '__main__':
if __name__ == "__main__":
import sys
parse_time_log_args(sys.argv[1:])
def __lldb_init_module(debugger, internal_dict):
# This initializer is being run from LLDB in the embedded command interpreter
# Add any commands contained in this module to LLDB
debugger.HandleCommand(
'command script add -o -f delta.parse_time_log parse_time_log')
print('The "parse_time_log" command is now installed and ready for use, type "parse_time_log --help" for more information')
# This initializer is being run from LLDB in the embedded command interpreter
# Add any commands contained in this module to LLDB
debugger.HandleCommand(
"command script add -o -f delta.parse_time_log parse_time_log"
)
print(
'The "parse_time_log" command is now installed and ready for use, type "parse_time_log --help" for more information'
)


@@ -19,13 +19,13 @@ def read_memory(process, location, size):
data = data + "0x%x" % byte
if byte == 0:
data = data + "(\\0)"
elif byte == 0xa:
elif byte == 0xA:
data = data + "(\\a)"
elif byte == 0xb:
elif byte == 0xB:
data = data + "(\\b)"
elif byte == 0xc:
elif byte == 0xC:
data = data + "(\\c)"
elif byte == '\n':
elif byte == "\n":
data = data + "(\\n)"
else:
data = data + "(%s)" % chr(byte)
@@ -105,22 +105,37 @@ struct $__lldb__CFString {\
dumped = target.EvaluateExpression(expression, options)
print(str(dumped), file=result)
little_endian = (target.byte_order == lldb.eByteOrderLittle)
little_endian = target.byte_order == lldb.eByteOrderLittle
ptr_size = target.addr_size
info_bits = dumped.GetChildMemberWithName("_cfinfo").GetChildAtIndex(
0 if little_endian else 3).GetValueAsUnsigned(0)
info_bits = (
dumped.GetChildMemberWithName("_cfinfo")
.GetChildAtIndex(0 if little_endian else 3)
.GetValueAsUnsigned(0)
)
is_mutable = (info_bits & 1) == 1
is_inline = (info_bits & 0x60) == 0
has_explicit_length = (info_bits & (1 | 4)) != 4
is_unicode = (info_bits & 0x10) == 0x10
is_special = (
nsstring.GetDynamicValue(
lldb.eDynamicCanRunTarget).GetTypeName() == "NSPathStore2")
nsstring.GetDynamicValue(lldb.eDynamicCanRunTarget).GetTypeName()
== "NSPathStore2"
)
has_null = (info_bits & 8) == 8
print("\nInfo=%d\nMutable=%s\nInline=%s\nExplicit=%s\nUnicode=%s\nSpecial=%s\nNull=%s\n" % \
(info_bits, "yes" if is_mutable else "no", "yes" if is_inline else "no", "yes" if has_explicit_length else "no", "yes" if is_unicode else "no", "yes" if is_special else "no", "yes" if has_null else "no"), file=result)
print(
"\nInfo=%d\nMutable=%s\nInline=%s\nExplicit=%s\nUnicode=%s\nSpecial=%s\nNull=%s\n"
% (
info_bits,
"yes" if is_mutable else "no",
"yes" if is_inline else "no",
"yes" if has_explicit_length else "no",
"yes" if is_unicode else "no",
"yes" if is_special else "no",
"yes" if has_null else "no",
),
file=result,
)
explicit_length_offset = 0
if not has_null and has_explicit_length and not is_special:
@@ -139,20 +154,33 @@ struct $__lldb__CFString {\
else:
explicit_length_offset = nsstring_address + explicit_length_offset
explicit_length = process.ReadUnsignedFromMemory(
explicit_length_offset, 4, error)
print("Explicit length location is at 0x%x - read value is %d\n" % (
explicit_length_offset, explicit_length), file=result)
explicit_length_offset, 4, error
)
print(
"Explicit length location is at 0x%x - read value is %d\n"
% (explicit_length_offset, explicit_length),
file=result,
)
if is_mutable:
location = 2 * ptr_size + nsstring_address
location = process.ReadPointerFromMemory(location, error)
elif is_inline and has_explicit_length and not is_unicode and not is_special and not is_mutable:
elif (
is_inline
and has_explicit_length
and not is_unicode
and not is_special
and not is_mutable
):
location = 3 * ptr_size + nsstring_address
elif is_unicode:
location = 2 * ptr_size + nsstring_address
if is_inline:
if not has_explicit_length:
print("Unicode & Inline & !Explicit is a new combo - no formula for it", file=result)
print(
"Unicode & Inline & !Explicit is a new combo - no formula for it",
file=result,
)
else:
location += ptr_size
else:
@@ -167,17 +195,26 @@ struct $__lldb__CFString {\
location = 2 * ptr_size + nsstring_address
location = process.ReadPointerFromMemory(location, error)
print("Expected data location: 0x%x\n" % (location), file=result)
print("1K of data around location: %s\n" % read_memory(
process, location, 1024), file=result)
print("5K of data around string pointer: %s\n" % read_memory(
process, nsstring_address, 1024 * 5), file=result)
print(
"1K of data around location: %s\n" % read_memory(process, location, 1024),
file=result,
)
print(
"5K of data around string pointer: %s\n"
% read_memory(process, nsstring_address, 1024 * 5),
file=result,
)
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand(
"command script add -o -f %s.diagnose_nsstring_Command_Impl diagnose-nsstring" %
__name__)
print('The "diagnose-nsstring" command has been installed, type "help diagnose-nsstring" for detailed help.')
"command script add -o -f %s.diagnose_nsstring_Command_Impl diagnose-nsstring"
% __name__
)
print(
'The "diagnose-nsstring" command has been installed, type "help diagnose-nsstring" for detailed help.'
)
__lldb_init_module(lldb.debugger, None)
__lldb_init_module = None
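
A hedged sketch of driving the installed command from LLDB's script interpreter; the importing file name (nsstring.py) and the NSString variable name are assumptions.

# "debugger" is an lldb.SBDebugger, e.g. lldb.debugger inside the interpreter.
debugger.HandleCommand("command script import nsstring.py")
debugger.HandleCommand("diagnose-nsstring my_nsstring")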


@@ -36,22 +36,48 @@ def backtrace_print_frame(target, frame_num, addr, fp):
if module_filename is None:
module_filename = ""
if module_uuid_str != "" or module_filename != "":
module_description = '%s %s' % (
module_filename, module_uuid_str)
module_description = "%s %s" % (module_filename, module_uuid_str)
except Exception:
print('%2d: pc==0x%-*x fp==0x%-*x' % (frame_num, addr_width, addr_for_printing, addr_width, fp))
print(
"%2d: pc==0x%-*x fp==0x%-*x"
% (frame_num, addr_width, addr_for_printing, addr_width, fp)
)
return
sym_ctx = target.ResolveSymbolContextForAddress(
sbaddr, lldb.eSymbolContextEverything)
sbaddr, lldb.eSymbolContextEverything
)
if sym_ctx.IsValid() and sym_ctx.GetSymbol().IsValid():
function_start = sym_ctx.GetSymbol().GetStartAddress().GetLoadAddress(target)
offset = addr - function_start
print('%2d: pc==0x%-*x fp==0x%-*x %s %s + %d' % (frame_num, addr_width, addr_for_printing, addr_width, fp, module_description, sym_ctx.GetSymbol().GetName(), offset))
print(
"%2d: pc==0x%-*x fp==0x%-*x %s %s + %d"
% (
frame_num,
addr_width,
addr_for_printing,
addr_width,
fp,
module_description,
sym_ctx.GetSymbol().GetName(),
offset,
)
)
else:
print('%2d: pc==0x%-*x fp==0x%-*x %s' % (frame_num, addr_width, addr_for_printing, addr_width, fp, module_description))
print(
"%2d: pc==0x%-*x fp==0x%-*x %s"
% (
frame_num,
addr_width,
addr_for_printing,
addr_width,
fp,
module_description,
)
)
return sbaddr.GetModule()
# A simple stack walk algorithm that follows the frame chain.
# Returns a two-element list; the first element is a list of modules
# seen and the second element is a list of addresses seen during the backtrace.
@@ -76,7 +102,8 @@ def simple_backtrace(debugger):
module_list = []
address_list = [cur_thread.GetFrameAtIndex(0).GetPC()]
this_module = backtrace_print_frame(
target, 0, cur_thread.GetFrameAtIndex(0).GetPC(), initial_fp)
target, 0, cur_thread.GetFrameAtIndex(0).GetPC(), initial_fp
)
print_stack_frame(process, initial_fp)
print("")
if this_module is not None:
@@ -86,11 +113,17 @@ def simple_backtrace(debugger):
cur_fp = process.ReadPointerFromMemory(initial_fp, lldb.SBError())
cur_pc = process.ReadPointerFromMemory(
initial_fp + process.GetAddressByteSize(), lldb.SBError())
initial_fp + process.GetAddressByteSize(), lldb.SBError()
)
frame_num = 1
while cur_pc != 0 and cur_fp != 0 and cur_pc != lldb.LLDB_INVALID_ADDRESS and cur_fp != lldb.LLDB_INVALID_ADDRESS:
while (
cur_pc != 0
and cur_fp != 0
and cur_pc != lldb.LLDB_INVALID_ADDRESS
and cur_fp != lldb.LLDB_INVALID_ADDRESS
):
address_list.append(cur_pc)
this_module = backtrace_print_frame(target, frame_num, cur_pc, cur_fp)
print_stack_frame(process, cur_fp)
@@ -100,13 +133,15 @@ def simple_backtrace(debugger):
frame_num = frame_num + 1
next_pc = 0
next_fp = 0
if target.triple[
0:6] == "x86_64" or target.triple[
0:4] == "i386" or target.triple[
0:3] == "arm":
if (
target.triple[0:6] == "x86_64"
or target.triple[0:4] == "i386"
or target.triple[0:3] == "arm"
):
error = lldb.SBError()
next_pc = process.ReadPointerFromMemory(
cur_fp + process.GetAddressByteSize(), error)
cur_fp + process.GetAddressByteSize(), error
)
if not error.Success():
next_pc = 0
next_fp = process.ReadPointerFromMemory(cur_fp, error)
@@ -135,8 +170,7 @@ def print_stack_frame(process, fp):
error = lldb.SBError()
try:
while i < 5 and error.Success():
address = process.ReadPointerFromMemory(
addr + (i * addr_size), error)
address = process.ReadPointerFromMemory(addr + (i * addr_size), error)
outline += " 0x%x" % address
i += 1
print(outline)
@@ -146,11 +180,11 @@ def print_stack_frame(process, fp):
def diagnose_unwind(debugger, command, result, dict):
"""
Gather diagnostic information to help debug incorrect unwind (backtrace)
behavior in lldb. When there is a backtrace that doesn't look
correct, run this command with the correct thread selected and a
large amount of diagnostic information will be printed, it is likely
to be helpful when reporting the problem.
Gather diagnostic information to help debug incorrect unwind (backtrace)
behavior in lldb. When there is a backtrace that doesn't look
correct, run this command with the correct thread selected and a
large amount of diagnostic information will be printed, it is likely
to be helpful when reporting the problem.
"""
command_args = shlex.split(command)
@@ -166,34 +200,44 @@ def diagnose_unwind(debugger, command, result, dict):
thread = process.GetSelectedThread()
if thread:
lldb_versions_match = re.search(
r'[lL][lL][dD][bB]-(\d+)([.](\d+))?([.](\d+))?',
debugger.GetVersionString())
r"[lL][lL][dD][bB]-(\d+)([.](\d+))?([.](\d+))?",
debugger.GetVersionString(),
)
lldb_version = 0
lldb_minor = 0
if len(lldb_versions_match.groups()
) >= 1 and lldb_versions_match.groups()[0]:
if (
len(lldb_versions_match.groups()) >= 1
and lldb_versions_match.groups()[0]
):
lldb_major = int(lldb_versions_match.groups()[0])
if len(lldb_versions_match.groups()
) >= 5 and lldb_versions_match.groups()[4]:
if (
len(lldb_versions_match.groups()) >= 5
and lldb_versions_match.groups()[4]
):
lldb_minor = int(lldb_versions_match.groups()[4])
modules_seen = []
addresses_seen = []
print('LLDB version %s' % debugger.GetVersionString())
print('Unwind diagnostics for thread %d' % thread.GetIndexID())
print("LLDB version %s" % debugger.GetVersionString())
print("Unwind diagnostics for thread %d" % thread.GetIndexID())
print("")
print("=============================================================================================")
print(
"============================================================================================="
)
print("")
print("OS plugin setting:")
debugger.HandleCommand(
"settings show target.process.python-os-plugin-path")
"settings show target.process.python-os-plugin-path"
)
print("")
print("Live register context:")
thread.SetSelectedFrame(0)
debugger.HandleCommand("register read")
print("")
print("=============================================================================================")
print(
"============================================================================================="
)
print("")
print("lldb's unwind algorithm:")
print("")
@@ -201,7 +245,8 @@ def diagnose_unwind(debugger, command, result, dict):
for frame in thread.frames:
if not frame.IsInlined():
this_module = backtrace_print_frame(
target, frame_num, frame.GetPC(), frame.GetFP())
target, frame_num, frame.GetPC(), frame.GetFP()
)
print_stack_frame(process, frame.GetFP())
print("")
if this_module is not None:
@@ -209,7 +254,9 @@ def diagnose_unwind(debugger, command, result, dict):
addresses_seen.append(frame.GetPC())
frame_num = frame_num + 1
print("")
print("=============================================================================================")
print(
"============================================================================================="
)
print("")
print("Simple stack walk algorithm:")
print("")
@@ -221,94 +268,122 @@ def diagnose_unwind(debugger, command, result, dict):
addresses_seen.update(set(address_list))
print("")
print("=============================================================================================")
print(
"============================================================================================="
)
print("")
print("Modules seen in stack walks:")
print("")
modules_already_seen = set()
for module in modules_seen:
if module is not None and module.GetFileSpec().GetFilename() is not None:
if not module.GetFileSpec().GetFilename() in modules_already_seen:
if (
module is not None
and module.GetFileSpec().GetFilename() is not None
):
if (
not module.GetFileSpec().GetFilename()
in modules_already_seen
):
debugger.HandleCommand(
'image list %s' %
module.GetFileSpec().GetFilename())
modules_already_seen.add(
module.GetFileSpec().GetFilename())
"image list %s" % module.GetFileSpec().GetFilename()
)
modules_already_seen.add(module.GetFileSpec().GetFilename())
print("")
print("=============================================================================================")
print(
"============================================================================================="
)
print("")
print("Disassembly ofaddresses seen in stack walks:")
print("")
additional_addresses_to_disassemble = addresses_seen
for frame in thread.frames:
if not frame.IsInlined():
print("--------------------------------------------------------------------------------------")
print(
"--------------------------------------------------------------------------------------"
)
print("")
print("Disassembly of %s, frame %d, address 0x%x" % (frame.GetFunctionName(), frame.GetFrameID(), frame.GetPC()))
print(
"Disassembly of %s, frame %d, address 0x%x"
% (
frame.GetFunctionName(),
frame.GetFrameID(),
frame.GetPC(),
)
)
print("")
if target.triple[
0:6] == "x86_64" or target.triple[
0:4] == "i386":
if (
target.triple[0:6] == "x86_64"
or target.triple[0:4] == "i386"
):
debugger.HandleCommand(
'disassemble -F att -a 0x%x' % frame.GetPC())
"disassemble -F att -a 0x%x" % frame.GetPC()
)
else:
debugger.HandleCommand(
'disassemble -a 0x%x' %
frame.GetPC())
"disassemble -a 0x%x" % frame.GetPC()
)
if frame.GetPC() in additional_addresses_to_disassemble:
additional_addresses_to_disassemble.remove(
frame.GetPC())
additional_addresses_to_disassemble.remove(frame.GetPC())
for address in list(additional_addresses_to_disassemble):
print("--------------------------------------------------------------------------------------")
print(
"--------------------------------------------------------------------------------------"
)
print("")
print("Disassembly of 0x%x" % address)
print("")
if target.triple[
0:6] == "x86_64" or target.triple[
0:4] == "i386":
debugger.HandleCommand(
'disassemble -F att -a 0x%x' % address)
if target.triple[0:6] == "x86_64" or target.triple[0:4] == "i386":
debugger.HandleCommand("disassemble -F att -a 0x%x" % address)
else:
debugger.HandleCommand('disassemble -a 0x%x' % address)
debugger.HandleCommand("disassemble -a 0x%x" % address)
print("")
print("=============================================================================================")
print(
"============================================================================================="
)
print("")
additional_addresses_to_show_unwind = addresses_seen
for frame in thread.frames:
if not frame.IsInlined():
print("--------------------------------------------------------------------------------------")
print(
"--------------------------------------------------------------------------------------"
)
print("")
print("Unwind instructions for %s, frame %d" % (frame.GetFunctionName(), frame.GetFrameID()))
print(
"Unwind instructions for %s, frame %d"
% (frame.GetFunctionName(), frame.GetFrameID())
)
print("")
debugger.HandleCommand(
'image show-unwind -a "0x%x"' % frame.GetPC())
'image show-unwind -a "0x%x"' % frame.GetPC()
)
if frame.GetPC() in additional_addresses_to_show_unwind:
additional_addresses_to_show_unwind.remove(
frame.GetPC())
additional_addresses_to_show_unwind.remove(frame.GetPC())
for address in list(additional_addresses_to_show_unwind):
print("--------------------------------------------------------------------------------------")
print(
"--------------------------------------------------------------------------------------"
)
print("")
print("Unwind instructions for 0x%x" % address)
print("")
debugger.HandleCommand(
'image show-unwind -a "0x%x"' % address)
debugger.HandleCommand('image show-unwind -a "0x%x"' % address)
def create_diagnose_unwind_options():
usage = "usage: %prog"
description = '''Print diagnostic information about a thread backtrace which will help to debug unwind problems'''
description = """Print diagnostic information about a thread backtrace which will help to debug unwind problems"""
parser = optparse.OptionParser(
description=description,
prog='diagnose_unwind',
usage=usage)
description=description, prog="diagnose_unwind", usage=usage
)
return parser
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand(
'command script add -o -f %s.diagnose_unwind diagnose-unwind' %
__name__)
print('The "diagnose-unwind" command has been installed, type "help diagnose-unwind" for detailed help.')
"command script add -o -f %s.diagnose_unwind diagnose-unwind" % __name__
)
print(
'The "diagnose-unwind" command has been installed, type "help diagnose-unwind" for detailed help.'
)
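
As the docstring above says, the command should run with the suspect thread selected; a hedged sketch follows (the module file name and thread index are illustrative).

debugger.HandleCommand("command script import diagnose_unwind.py")
debugger.HandleCommand("thread select 2")      # the thread whose backtrace looks wrong
debugger.HandleCommand("diagnose-unwind")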


@@ -1,4 +1,3 @@
class LookupDictionary(dict):
"""
a dictionary which can lookup value by key, or keys by value
@@ -30,7 +29,6 @@ class LookupDictionary(dict):
class Enum(LookupDictionary):
def __init__(self, initial_value=0, items=[]):
"""items can be a list of pair_lists or a dictionary"""
LookupDictionary.__init__(self, items)
@@ -38,7 +36,7 @@ class Enum(LookupDictionary):
def set_value(self, v):
v_typename = typeof(v).__name__
if v_typename == 'str':
if v_typename == "str":
if str in self:
v = self[v]
else:


@@ -8,51 +8,60 @@ import sys
import time
parser = argparse.ArgumentParser(
description="Run an exhaustive test of the LLDB disassembler for a specific architecture.")
description="Run an exhaustive test of the LLDB disassembler for a specific architecture."
)
parser.add_argument(
'--arch',
"--arch",
required=True,
action='store',
help='The architecture whose disassembler is to be tested')
action="store",
help="The architecture whose disassembler is to be tested",
)
parser.add_argument(
'--bytes',
"--bytes",
required=True,
action='store',
action="store",
type=int,
help='The byte width of instructions for that architecture')
help="The byte width of instructions for that architecture",
)
parser.add_argument(
'--random',
"--random",
required=False,
action='store_true',
help='Enables non-sequential testing')
action="store_true",
help="Enables non-sequential testing",
)
parser.add_argument(
'--start',
"--start",
required=False,
action='store',
action="store",
type=int,
help='The first instruction value to test')
help="The first instruction value to test",
)
parser.add_argument(
'--skip',
"--skip",
required=False,
action='store',
action="store",
type=int,
help='The interval between instructions to test')
help="The interval between instructions to test",
)
parser.add_argument(
'--log',
"--log",
required=False,
action='store',
help='A log file to write the most recent instruction being tested')
action="store",
help="A log file to write the most recent instruction being tested",
)
parser.add_argument(
'--time',
"--time",
required=False,
action='store_true',
help='Every 100,000 instructions, print an ETA to standard out')
action="store_true",
help="Every 100,000 instructions, print an ETA to standard out",
)
parser.add_argument(
'--lldb',
"--lldb",
required=False,
action='store',
help='The path to LLDB.framework, if LLDB should be overridden')
action="store",
help="The path to LLDB.framework, if LLDB should be overridden",
)
arguments = sys.argv[1:]
@@ -76,6 +85,7 @@ def AddLLDBToSysPathOnMacOSX():
sys.path.append(lldb_framework_path + "/Resources/Python")
if arg_ns.lldb is None:
AddLLDBToSysPathOnMacOSX()
else:
@@ -103,12 +113,11 @@ def ResetLogFile(log_file):
def PrintByteArray(log_file, byte_array):
for byte in byte_array:
print(hex(byte) + " ", end=' ', file=log_file)
print(hex(byte) + " ", end=" ", file=log_file)
print(file=log_file)
class SequentialInstructionProvider:
def __init__(self, byte_width, log_file, start=0, skip=1):
self.m_byte_width = byte_width
self.m_log_file = log_file
@@ -146,11 +155,10 @@ class SequentialInstructionProvider:
class RandomInstructionProvider:
def __init__(self, byte_width, log_file):
self.m_byte_width = byte_width
self.m_log_file = log_file
self.m_random_file = open("/dev/random", 'r')
self.m_random_file = open("/dev/random", "r")
def PrintCurrentState(self, ret):
ResetLogFile(self.m_log_file)
@@ -172,13 +180,14 @@ class RandomInstructionProvider:
raise StopIteration
return ret
log_file = None
def GetProviderWithArguments(args):
global log_file
if args.log is not None:
log_file = open(args.log, 'w')
log_file = open(args.log, "w")
else:
log_file = sys.stdout
instruction_provider = None
@@ -192,9 +201,11 @@ def GetProviderWithArguments(args):
if args.skip is not None:
skip = args.skip
instruction_provider = SequentialInstructionProvider(
args.bytes, log_file, start, skip)
args.bytes, log_file, start, skip
)
return instruction_provider
instruction_provider = GetProviderWithArguments(arg_ns)
fake_address = lldb.SBAddress()
@@ -208,13 +219,12 @@ if actually_time:
for inst_bytes in instruction_provider:
if actually_time:
if (num_instructions_logged != 0) and (
num_instructions_logged % 100000 == 0):
if (num_instructions_logged != 0) and (num_instructions_logged % 100000 == 0):
curr_time = time.time()
elapsed_time = curr_time - start_time
remaining_time = float(
total_num_instructions - num_instructions_logged) * (
float(elapsed_time) / float(num_instructions_logged))
remaining_time = float(total_num_instructions - num_instructions_logged) * (
float(elapsed_time) / float(num_instructions_logged)
)
print(str(datetime.timedelta(seconds=remaining_time)))
num_instructions_logged = num_instructions_logged + 1
inst_list = target.GetInstructions(fake_address, inst_bytes)
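
Given the argparse options above, a typical invocation might look like the following sketch; the architecture, byte width, and script location are illustrative.

import subprocess, sys

# Feed random 4-byte patterns to an assumed arm64 disassembler, printing an ETA
# every 100,000 instructions (all values are examples).
subprocess.run([sys.executable, "disasm-stress-test.py",
                "--arch", "arm64", "--bytes", "4", "--random", "--time"])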


@@ -1,12 +1,12 @@
#!/usr/bin/env python
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Be sure to add the python path that points to the LLDB shared library.
# On MacOSX csh, tcsh:
# setenv PYTHONPATH /Developer/Library/PrivateFrameworks/LLDB.framework/Resources/Python
# On MacOSX sh, bash:
# export PYTHONPATH=/Developer/Library/PrivateFrameworks/LLDB.framework/Resources/Python
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
import lldb
import os
@@ -23,11 +23,12 @@ def usage():
print(" By default, it breaks at and disassembles the 'main' function.")
sys.exit(0)
if len(sys.argv) == 2:
fname = 'main'
fname = "main"
exe = sys.argv[1]
elif len(sys.argv) == 4:
if sys.argv[1] != '-n':
if sys.argv[1] != "-n":
usage()
else:
fname = sys.argv[2]
@@ -49,8 +50,7 @@ target = debugger.CreateTargetWithFileAndArch(exe, lldb.LLDB_ARCH_DEFAULT)
if target:
# If the target is valid set a breakpoint at main
main_bp = target.BreakpointCreateByName(
fname, target.GetExecutable().GetFilename())
main_bp = target.BreakpointCreateByName(fname, target.GetExecutable().GetFilename())
print(main_bp)
@@ -98,16 +98,26 @@ if target:
disassemble_instructions(insts)
registerList = frame.GetRegisters()
print("Frame registers (size of register set = %d):" % registerList.GetSize())
print(
"Frame registers (size of register set = %d):"
% registerList.GetSize()
)
for value in registerList:
# print value
print("%s (number of children = %d):" % (value.GetName(), value.GetNumChildren()))
print(
"%s (number of children = %d):"
% (value.GetName(), value.GetNumChildren())
)
for child in value:
print("Name: ", child.GetName(), " Value: ", child.GetValue())
print(
"Name: ", child.GetName(), " Value: ", child.GetValue()
)
print("Hit the breakpoint at main, enter to continue and wait for program to exit or 'Ctrl-D'/'quit' to terminate the program")
print(
"Hit the breakpoint at main, enter to continue and wait for program to exit or 'Ctrl-D'/'quit' to terminate the program"
)
next = sys.stdin.readline()
if not next or next.rstrip('\n') == 'quit':
if not next or next.rstrip("\n") == "quit":
print("Terminating the inferior process...")
process.Kill()
else:
@@ -119,7 +129,10 @@ if target:
elif state == lldb.eStateExited:
print("Didn't hit the breakpoint at main, program has exited...")
else:
print("Unexpected process state: %s, killing process..." % debugger.StateAsCString(state))
print(
"Unexpected process state: %s, killing process..."
% debugger.StateAsCString(state)
)
process.Kill()


@@ -1,41 +1,42 @@
""" Adds the 'toggle-disassembly' command to switch you into a disassembly only mode """
import lldb
class DisassemblyMode:
def __init__(self, debugger, unused):
self.dbg = debugger
self.interp = debugger.GetCommandInterpreter()
self.store_state()
self.mode_off = True
def store_state(self):
self.dis_count = self.get_string_value("stop-disassembly-count")
self.dis_display = self.get_string_value("stop-disassembly-display")
self.before_count = self.get_string_value("stop-line-count-before")
self.after_count = self.get_string_value("stop-line-count-after")
def get_string_value(self, setting):
result = lldb.SBCommandReturnObject()
self.interp.HandleCommand("settings show " + setting, result)
value = result.GetOutput().split(" = ")[1].rstrip("\n")
return value
def set_value(self, setting, value):
result = lldb.SBCommandReturnObject()
self.interp.HandleCommand("settings set " + setting + " " + value, result)
def __call__(self, debugger, command, exe_ctx, result):
if self.mode_off:
self.mode_off = False
self.store_state()
self.set_value("stop-disassembly-display","always")
self.set_value("stop-disassembly-display", "always")
self.set_value("stop-disassembly-count", "8")
self.set_value("stop-line-count-before", "0")
self.set_value("stop-line-count-after", "0")
result.AppendMessage("Disassembly mode on.")
else:
self.mode_off = True
self.set_value("stop-disassembly-display",self.dis_display)
self.set_value("stop-disassembly-display", self.dis_display)
self.set_value("stop-disassembly-count", self.dis_count)
self.set_value("stop-line-count-before", self.before_count)
self.set_value("stop-line-count-after", self.after_count)
@@ -44,5 +45,8 @@ class DisassemblyMode:
def get_short_help(self):
return "Toggles between a disassembly only mode and normal source mode\n"
def __lldb_init_module(debugger, unused):
debugger.HandleCommand("command script add -o -c disassembly_mode.DisassemblyMode toggle-disassembly")
debugger.HandleCommand(
"command script add -o -c disassembly_mode.DisassemblyMode toggle-disassembly"
)
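
A short sketch of toggling the mode, assuming the file is imported as disassembly_mode.py; the class stores the current settings on the first toggle and restores them on the second.

debugger.HandleCommand("command script import disassembly_mode.py")
debugger.HandleCommand("toggle-disassembly")   # disassembly-only stops (8 instructions)
debugger.HandleCommand("toggle-disassembly")   # restore the saved source-mode settings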


@@ -6,10 +6,10 @@ import sys
class FileExtract:
'''Decode binary data from a file'''
"""Decode binary data from a file"""
def __init__(self, f, b='='):
'''Initialize with an open binary file and optional byte order'''
def __init__(self, f, b="="):
"""Initialize with an open binary file and optional byte order"""
self.file = f
self.byte_order = b
@@ -17,16 +17,16 @@ class FileExtract:
def set_byte_order(self, b):
'''Set the byte order, valid values are "big", "little", "swap", "native", "<", ">", "@", "="'''
if b == 'big':
self.byte_order = '>'
elif b == 'little':
self.byte_order = '<'
elif b == 'swap':
if b == "big":
self.byte_order = ">"
elif b == "little":
self.byte_order = "<"
elif b == "swap":
# swap what ever the current byte order is
self.byte_order = swap_unpack_char()
elif b == 'native':
self.byte_order = '='
elif b == '<' or b == '>' or b == '@' or b == '=':
elif b == "native":
self.byte_order = "="
elif b == "<" or b == ">" or b == "@" or b == "=":
self.byte_order = b
else:
print("error: invalid byte order specified: '%s'" % b)
@@ -56,91 +56,89 @@ class FileExtract:
self.file.seek(offset, 0)
def pop_offset_and_seek(self):
'''Pop a previously pushed file offset, or do nothing if there were no previously pushed offsets'''
"""Pop a previously pushed file offset, or do nothing if there were no previously pushed offsets"""
if len(self.offsets) > 0:
self.file.seek(self.offsets.pop())
def get_sint8(self, fail_value=0):
'''Extract a single int8_t from the binary file at the current file position, returns a single integer'''
"""Extract a single int8_t from the binary file at the current file position, returns a single integer"""
s = self.read_size(1)
if s:
v, = struct.unpack(self.byte_order + 'b', s)
(v,) = struct.unpack(self.byte_order + "b", s)
return v
else:
return fail_value
def get_uint8(self, fail_value=0):
'''Extract a single uint8_t from the binary file at the current file position, returns a single integer'''
"""Extract a single uint8_t from the binary file at the current file position, returns a single integer"""
s = self.read_size(1)
if s:
v, = struct.unpack(self.byte_order + 'B', s)
(v,) = struct.unpack(self.byte_order + "B", s)
return v
else:
return fail_value
def get_sint16(self, fail_value=0):
'''Extract a single int16_t from the binary file at the current file position, returns a single integer'''
"""Extract a single int16_t from the binary file at the current file position, returns a single integer"""
s = self.read_size(2)
if s:
v, = struct.unpack(self.byte_order + 'h', s)
(v,) = struct.unpack(self.byte_order + "h", s)
return v
else:
return fail_value
def get_uint16(self, fail_value=0):
'''Extract a single uint16_t from the binary file at the current file position, returns a single integer'''
"""Extract a single uint16_t from the binary file at the current file position, returns a single integer"""
s = self.read_size(2)
if s:
v, = struct.unpack(self.byte_order + 'H', s)
(v,) = struct.unpack(self.byte_order + "H", s)
return v
else:
return fail_value
def get_sint32(self, fail_value=0):
'''Extract a single int32_t from the binary file at the current file position, returns a single integer'''
"""Extract a single int32_t from the binary file at the current file position, returns a single integer"""
s = self.read_size(4)
if s:
v, = struct.unpack(self.byte_order + 'i', s)
(v,) = struct.unpack(self.byte_order + "i", s)
return v
else:
return fail_value
def get_uint32(self, fail_value=0):
'''Extract a single uint32_t from the binary file at the current file position, returns a single integer'''
"""Extract a single uint32_t from the binary file at the current file position, returns a single integer"""
s = self.read_size(4)
if s:
v, = struct.unpack(self.byte_order + 'I', s)
(v,) = struct.unpack(self.byte_order + "I", s)
return v
else:
return fail_value
def get_sint64(self, fail_value=0):
'''Extract a single int64_t from the binary file at the current file position, returns a single integer'''
"""Extract a single int64_t from the binary file at the current file position, returns a single integer"""
s = self.read_size(8)
if s:
v, = struct.unpack(self.byte_order + 'q', s)
(v,) = struct.unpack(self.byte_order + "q", s)
return v
else:
return fail_value
def get_uint64(self, fail_value=0):
'''Extract a single uint64_t from the binary file at the current file position, returns a single integer'''
"""Extract a single uint64_t from the binary file at the current file position, returns a single integer"""
s = self.read_size(8)
if s:
v, = struct.unpack(self.byte_order + 'Q', s)
(v,) = struct.unpack(self.byte_order + "Q", s)
return v
else:
return fail_value
def get_fixed_length_c_string(
self,
n,
fail_value='',
isprint_only_with_space_padding=False):
'''Extract a single fixed length C string from the binary file at the current file position, returns a single C string'''
self, n, fail_value="", isprint_only_with_space_padding=False
):
"""Extract a single fixed length C string from the binary file at the current file position, returns a single C string"""
s = self.read_size(n)
if s:
cstr, = struct.unpack(self.byte_order + ("%i" % n) + 's', s)
(cstr,) = struct.unpack(self.byte_order + ("%i" % n) + "s", s)
# Strip trailing NULLs
cstr = string.strip(cstr, "\0")
if isprint_only_with_space_padding:
@@ -153,8 +151,8 @@ class FileExtract:
return fail_value
def get_c_string(self):
'''Extract a single NULL terminated C string from the binary file at the current file position, returns a single C string'''
cstr = ''
"""Extract a single NULL terminated C string from the binary file at the current file position, returns a single C string"""
cstr = ""
byte = self.get_uint8()
while byte != 0:
cstr += "%c" % byte
@@ -162,65 +160,65 @@ class FileExtract:
return cstr
def get_n_sint8(self, n, fail_value=0):
'''Extract "n" int8_t integers from the binary file at the current file position, returns a list of integers'''
"""Extract "n" int8_t integers from the binary file at the current file position, returns a list of integers"""
s = self.read_size(n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + 'b', s)
return struct.unpack(self.byte_order + ("%u" % n) + "b", s)
else:
return (fail_value,) * n
def get_n_uint8(self, n, fail_value=0):
'''Extract "n" uint8_t integers from the binary file at the current file position, returns a list of integers'''
"""Extract "n" uint8_t integers from the binary file at the current file position, returns a list of integers"""
s = self.read_size(n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + 'B', s)
return struct.unpack(self.byte_order + ("%u" % n) + "B", s)
else:
return (fail_value,) * n
def get_n_sint16(self, n, fail_value=0):
'''Extract "n" int16_t integers from the binary file at the current file position, returns a list of integers'''
"""Extract "n" int16_t integers from the binary file at the current file position, returns a list of integers"""
s = self.read_size(2 * n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + 'h', s)
return struct.unpack(self.byte_order + ("%u" % n) + "h", s)
else:
return (fail_value,) * n
def get_n_uint16(self, n, fail_value=0):
'''Extract "n" uint16_t integers from the binary file at the current file position, returns a list of integers'''
"""Extract "n" uint16_t integers from the binary file at the current file position, returns a list of integers"""
s = self.read_size(2 * n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + 'H', s)
return struct.unpack(self.byte_order + ("%u" % n) + "H", s)
else:
return (fail_value,) * n
def get_n_sint32(self, n, fail_value=0):
'''Extract "n" int32_t integers from the binary file at the current file position, returns a list of integers'''
"""Extract "n" int32_t integers from the binary file at the current file position, returns a list of integers"""
s = self.read_size(4 * n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + 'i', s)
return struct.unpack(self.byte_order + ("%u" % n) + "i", s)
else:
return (fail_value,) * n
def get_n_uint32(self, n, fail_value=0):
'''Extract "n" uint32_t integers from the binary file at the current file position, returns a list of integers'''
"""Extract "n" uint32_t integers from the binary file at the current file position, returns a list of integers"""
s = self.read_size(4 * n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + 'I', s)
return struct.unpack(self.byte_order + ("%u" % n) + "I", s)
else:
return (fail_value,) * n
def get_n_sint64(self, n, fail_value=0):
'''Extract "n" int64_t integers from the binary file at the current file position, returns a list of integers'''
"""Extract "n" int64_t integers from the binary file at the current file position, returns a list of integers"""
s = self.read_size(8 * n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + 'q', s)
return struct.unpack(self.byte_order + ("%u" % n) + "q", s)
else:
return (fail_value,) * n
def get_n_uint64(self, n, fail_value=0):
'''Extract "n" uint64_t integers from the binary file at the current file position, returns a list of integers'''
"""Extract "n" uint64_t integers from the binary file at the current file position, returns a list of integers"""
s = self.read_size(8 * n)
if s:
return struct.unpack(self.byte_order + ("%u" % n) + 'Q', s)
return struct.unpack(self.byte_order + ("%u" % n) + "Q", s)
else:
return (fail_value,) * n
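
A minimal sketch of the FileExtract API above; the import path, file name, and record layout are made up.

from file_extract import FileExtract   # import path assumed

with open("sample.bin", "rb") as f:
    ex = FileExtract(f, "<")            # explicit little-endian byte order
    magic = ex.get_uint32()             # 4-byte unsigned value
    count = ex.get_uint16()             # 2-byte unsigned value
    flags = ex.get_n_uint8(2)           # tuple of two bytes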


@@ -16,12 +16,20 @@ def disassemble(debugger, command, result, dict):
inst_offset = inst_addr - start_addr
comment = inst.comment
if comment:
print("<%s + %-4u> 0x%x %8s %s ; %s" % (name, inst_offset, inst_addr, inst.mnemonic, inst.operands, comment))
print(
"<%s + %-4u> 0x%x %8s %s ; %s"
% (name, inst_offset, inst_addr, inst.mnemonic, inst.operands, comment)
)
else:
print("<%s + %-4u> 0x%x %8s %s" % (name, inst_offset, inst_addr, inst.mnemonic, inst.operands))
print(
"<%s + %-4u> 0x%x %8s %s"
% (name, inst_offset, inst_addr, inst.mnemonic, inst.operands)
)
# Install the command when the module gets imported
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand(
'command script add -o -f gdb_disassemble.disassemble gdb-disassemble')
"command script add -o -f gdb_disassemble.disassemble gdb-disassemble"
)
print('Installed "gdb-disassemble" command for disassembly')
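
A hedged sketch of using the installed command; the importing file name is an assumption, and the command is shown with no arguments, operating on the currently selected frame.

debugger.HandleCommand("command script import gdb_disassemble.py")
debugger.HandleCommand("gdb-disassemble")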

File diff suppressed because it is too large


@@ -1,12 +1,12 @@
#!/usr/bin/env python
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
# For the shells csh, tcsh:
# ( setenv PYTHONPATH /Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python ; ./globals.py <path> [<path> ...])
#
# For the shells sh, bash:
# PYTHONPATH=/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python ./globals.py <path> [<path> ...]
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
import lldb
import optparse
@@ -21,7 +21,8 @@ def get_globals(raw_path, options):
path = os.path.expanduser(raw_path)
# Create a target using path + options
target = lldb.debugger.CreateTarget(
path, options.arch, options.platform, False, error)
path, options.arch, options.platform, False, error
)
if target:
# Get the executable module
module = target.module[target.executable.basename]
@@ -40,58 +41,63 @@ def get_globals(raw_path, options):
global_names.append(global_name)
# Find all global variables by name
global_variable_list = module.FindGlobalVariables(
target, global_name, lldb.UINT32_MAX)
target, global_name, lldb.UINT32_MAX
)
if global_variable_list:
# Print results for anything that matched
for global_variable in global_variable_list:
# returns the global variable name as a string
print('name = %s' % global_variable.name)
print("name = %s" % global_variable.name)
# Returns the variable value as a string
print('value = %s' % global_variable.value)
print('type = %s' % global_variable.type) # Returns an lldb.SBType object
print("value = %s" % global_variable.value)
print(
"type = %s" % global_variable.type
) # Returns an lldb.SBType object
# Returns an lldb.SBAddress (section offset
# address) for this global
print('addr = %s' % global_variable.addr)
print("addr = %s" % global_variable.addr)
# Returns the file virtual address for this
# global
print('file_addr = 0x%x' % global_variable.addr.file_addr)
print(
"file_addr = 0x%x" % global_variable.addr.file_addr
)
# returns the global variable value as a string
print('location = %s' % global_variable.location)
print("location = %s" % global_variable.location)
# Returns the size in bytes of this global
# variable
print('size = %s' % global_variable.size)
print("size = %s" % global_variable.size)
print()
def globals(command_args):
'''Extract all globals from any arguments which must be paths to object files.'''
"""Extract all globals from any arguments which must be paths to object files."""
usage = "usage: %prog [options] <PATH> [PATH ...]"
description = '''This command will find all globals in the specified object file and return an list() of lldb.SBValue objects (which might be empty).'''
parser = optparse.OptionParser(
description=description,
prog='globals',
usage=usage)
description = """This command will find all globals in the specified object file and return an list() of lldb.SBValue objects (which might be empty)."""
parser = optparse.OptionParser(description=description, prog="globals", usage=usage)
parser.add_option(
'-v',
'--verbose',
action='store_true',
dest='verbose',
help='display verbose debug info',
default=False)
"-v",
"--verbose",
action="store_true",
dest="verbose",
help="display verbose debug info",
default=False,
)
parser.add_option(
'-a',
'--arch',
type='string',
metavar='arch',
dest='arch',
help='Specify an architecture (or triple) to use when extracting from a file.')
"-a",
"--arch",
type="string",
metavar="arch",
dest="arch",
help="Specify an architecture (or triple) to use when extracting from a file.",
)
parser.add_option(
'-p',
'--platform',
type='string',
metavar='platform',
dest='platform',
help='Specify the platform to use when creating the debug target. Valid values include "localhost", "darwin-kernel", "ios-simulator", "remote-freebsd", "remote-macosx", "remote-ios", "remote-linux".')
"-p",
"--platform",
type="string",
metavar="platform",
dest="platform",
help='Specify the platform to use when creating the debug target. Valid values include "localhost", "darwin-kernel", "ios-simulator", "remote-freebsd", "remote-macosx", "remote-ios", "remote-linux".',
)
try:
(options, args) = parser.parse_args(command_args)
except:
@@ -100,6 +106,7 @@ def globals(command_args):
for path in args:
get_globals(path, options)
if __name__ == '__main__':
if __name__ == "__main__":
lldb.debugger = lldb.SBDebugger.Create()
globals(sys.argv[1:])
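
Mirroring the __main__ block above, a standalone sketch; it assumes it runs in this module's namespace, and the path and architecture are illustrative.

lldb.debugger = lldb.SBDebugger.Create()
globals(["--arch", "x86_64", "/tmp/a.out"])   # dump all globals from the given binary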


@@ -2,23 +2,23 @@
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand(
f'command alias in_call_stack breakpoint command add --python-function {__name__}.in_call_stack -k name -v %1'
)
debugger.HandleCommand(
f"command alias in_call_stack breakpoint command add --python-function {__name__}.in_call_stack -k name -v %1"
)
def in_call_stack(frame, bp_loc, arg_dict, _):
"""Only break if the given name is in the current call stack."""
name = arg_dict.GetValueForKey('name').GetStringValue(1000)
thread = frame.GetThread()
found = False
for frame in thread.frames:
# Check the symbol.
symbol = frame.GetSymbol()
if symbol and name in frame.GetSymbol().GetName():
return True
# Check the function.
function = frame.GetFunction()
if function and name in function.GetName():
return True
return False
"""Only break if the given name is in the current call stack."""
name = arg_dict.GetValueForKey("name").GetStringValue(1000)
thread = frame.GetThread()
found = False
for frame in thread.frames:
# Check the symbol.
symbol = frame.GetSymbol()
if symbol and name in frame.GetSymbol().GetName():
return True
# Check the function.
function = frame.GetFunction()
if function and name in function.GetName():
return True
return False
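
A usage sketch for the alias installed above: it attaches the in_call_stack predicate to the most recently created breakpoint. The symbol names and importing file name are illustrative.

debugger.HandleCommand("command script import in_call_stack.py")
debugger.HandleCommand("breakpoint set --name malloc")
debugger.HandleCommand("in_call_stack CFStringCreateWithCString")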


@@ -5,91 +5,97 @@ import re
def parse_linespec(linespec, frame, result):
"""Handles a subset of GDB-style linespecs. Specifically:
number - A line in the current file
+offset - The line /offset/ lines after this line
-offset - The line /offset/ lines before this line
filename:number - Line /number/ in file /filename/
function - The start of /function/
*address - The pointer target of /address/, which must be a literal (but see `` in LLDB)
number - A line in the current file
+offset - The line /offset/ lines after this line
-offset - The line /offset/ lines before this line
filename:number - Line /number/ in file /filename/
function - The start of /function/
*address - The pointer target of /address/, which must be a literal (but see `` in LLDB)
We explicitly do not handle filename:function because it is ambiguous in Objective-C.
We explicitly do not handle filename:function because it is ambiguous in Objective-C.
This function returns a list of addresses."""
This function returns a list of addresses."""
breakpoint = None
target = frame.GetThread().GetProcess().GetTarget()
matched = False
if (not matched):
if not matched:
mo = re.match("^([0-9]+)$", linespec)
if (mo is not None):
if mo is not None:
matched = True
# print "Matched <linenum>"
line_number = int(mo.group(1))
line_entry = frame.GetLineEntry()
if not line_entry.IsValid():
result.AppendMessage(
"Specified a line in the current file, but the current frame doesn't have line table information.")
"Specified a line in the current file, but the current frame doesn't have line table information."
)
return
breakpoint = target.BreakpointCreateByLocation(
line_entry.GetFileSpec(), line_number)
line_entry.GetFileSpec(), line_number
)
if (not matched):
if not matched:
mo = re.match("^\+([0-9]+)$", linespec)
if (mo is not None):
if mo is not None:
matched = True
# print "Matched +<count>"
line_number = int(mo.group(1))
line_entry = frame.GetLineEntry()
if not line_entry.IsValid():
result.AppendMessage(
"Specified a line in the current file, but the current frame doesn't have line table information.")
"Specified a line in the current file, but the current frame doesn't have line table information."
)
return
breakpoint = target.BreakpointCreateByLocation(
line_entry.GetFileSpec(), (line_entry.GetLine() + line_number))
line_entry.GetFileSpec(), (line_entry.GetLine() + line_number)
)
if (not matched):
if not matched:
mo = re.match("^\-([0-9]+)$", linespec)
if (mo is not None):
if mo is not None:
matched = True
# print "Matched -<count>"
line_number = int(mo.group(1))
line_entry = frame.GetLineEntry()
if not line_entry.IsValid():
result.AppendMessage(
"Specified a line in the current file, but the current frame doesn't have line table information.")
"Specified a line in the current file, but the current frame doesn't have line table information."
)
return
breakpoint = target.BreakpointCreateByLocation(
line_entry.GetFileSpec(), (line_entry.GetLine() - line_number))
line_entry.GetFileSpec(), (line_entry.GetLine() - line_number)
)
if (not matched):
if not matched:
mo = re.match("^(.*):([0-9]+)$", linespec)
if (mo is not None):
if mo is not None:
matched = True
# print "Matched <filename>:<linenum>"
file_name = mo.group(1)
line_number = int(mo.group(2))
breakpoint = target.BreakpointCreateByLocation(
file_name, line_number)
breakpoint = target.BreakpointCreateByLocation(file_name, line_number)
if (not matched):
if not matched:
mo = re.match("\*((0x)?([0-9a-f]+))$", linespec)
if (mo is not None):
if mo is not None:
matched = True
# print "Matched <address-expression>"
address = int(mo.group(1), base=0)
breakpoint = target.BreakpointCreateByAddress(address)
if (not matched):
if not matched:
# print "Trying <function-name>"
breakpoint = target.BreakpointCreateByName(linespec)
num_locations = breakpoint.GetNumLocations()
if (num_locations == 0):
if num_locations == 0:
result.AppendMessage(
"The line specification provided doesn't resolve to any addresses.")
"The line specification provided doesn't resolve to any addresses."
)
addr_list = []
@@ -119,7 +125,7 @@ Command Options Usage:
def jump(debugger, command, result, internal_dict):
if (command == ""):
if command == "":
result.AppendMessage(usage_string())
args = command.split()
@@ -164,33 +170,31 @@ def jump(debugger, command, result, internal_dict):
desired_address = addresses[desired_index]
else:
result.AppendMessage(
"Desired index " +
args[1] +
" is not one of the options.")
"Desired index " + args[1] + " is not one of the options."
)
return
else:
index = 0
result.AppendMessage(
"The specified location resolves to multiple targets.")
result.AppendMessage("The specified location resolves to multiple targets.")
for address in addresses:
stream.Clear()
address.GetDescription(stream)
result.AppendMessage(
" Location ID " +
str(index) +
": " +
stream.GetData())
" Location ID " + str(index) + ": " + stream.GetData()
)
index = index + 1
result.AppendMessage(
"Please type 'jump " +
command +
" <location-id>' to choose one.")
"Please type 'jump " + command + " <location-id>' to choose one."
)
return
frame.SetPC(desired_address.GetLoadAddress(target))
def __lldb_init_module(debugger, internal_dict):
# Module is being run inside the LLDB interpreter
jump.__doc__ = usage_string()
debugger.HandleCommand('command script add -o -f jump.jump jump')
print('The "jump" command has been installed, type "help jump" or "jump <ENTER>" for detailed help.')
debugger.HandleCommand("command script add -o -f jump.jump jump")
print(
'The "jump" command has been installed, type "help jump" or "jump <ENTER>" for detailed help.'
)
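
A sketch of the linespec forms parse_linespec() documents above; the file, function, and address values are illustrative.

debugger.HandleCommand("command script import jump.py")
debugger.HandleCommand("jump 42")            # line 42 of the current file
debugger.HandleCommand("jump +3")            # three lines after the current line
debugger.HandleCommand("jump main.c:17")     # file:line
debugger.HandleCommand("jump my_function")   # start of a function
debugger.HandleCommand("jump *0x100003f40")  # a literal address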


@@ -9,22 +9,25 @@ import sys
class DumpLineTables:
command_name = "dump-line-tables"
short_description = "Dumps full paths to compile unit files and optionally all line table files."
description = 'Dumps all line tables from all compile units for any modules specified as arguments. Specifying the --verbose flag will output address ranges for each line entry.'
short_description = (
"Dumps full paths to compile unit files and optionally all line table files."
)
description = "Dumps all line tables from all compile units for any modules specified as arguments. Specifying the --verbose flag will output address ranges for each line entry."
usage = "usage: %prog [options] MODULE1 [MODULE2 ...]"
def create_options(self):
self.parser = optparse.OptionParser(
description=self.description,
prog=self.command_name,
usage=self.usage)
description=self.description, prog=self.command_name, usage=self.usage
)
self.parser.add_option(
'-v',
'--verbose',
action='store_true',
dest='verbose',
help='Display verbose output.',
default=False)
"-v",
"--verbose",
action="store_true",
dest="verbose",
help="Display verbose output.",
default=False,
)
def get_short_help(self):
return self.short_description
@@ -61,14 +64,14 @@ class DumpLineTables:
result.SetError('no module found that matches "%s".' % (module_path))
return
num_cus = module.GetNumCompileUnits()
print('Module: "%s"' % (module.file.fullpath), end=' ', file=result)
print('Module: "%s"' % (module.file.fullpath), end=" ", file=result)
if num_cus == 0:
print('no debug info.', file=result)
print("no debug info.", file=result)
continue
print('has %u compile units:' % (num_cus), file=result)
print("has %u compile units:" % (num_cus), file=result)
for cu_idx in range(num_cus):
cu = module.GetCompileUnitAtIndex(cu_idx)
print(' Compile Unit: %s' % (cu.file.fullpath), file=result)
print(" Compile Unit: %s" % (cu.file.fullpath), file=result)
for line_idx in range(cu.GetNumLineEntries()):
line_entry = cu.GetLineEntryAtIndex(line_idx)
start_file_addr = line_entry.addr.file_addr
@@ -78,25 +81,27 @@ class DumpLineTables:
if options.verbose:
if start_file_addr != end_file_addr:
result.PutCString(
' [%#x - %#x): %s' %
(start_file_addr, end_file_addr, line_entry))
" [%#x - %#x): %s"
% (start_file_addr, end_file_addr, line_entry)
)
else:
if start_file_addr == end_file_addr:
result.PutCString(' %#x: END' %
(start_file_addr))
result.PutCString(" %#x: END" % (start_file_addr))
else:
result.PutCString(
' %#x: %s' %
(start_file_addr, line_entry))
" %#x: %s" % (start_file_addr, line_entry)
)
if start_file_addr == end_file_addr:
result.PutCString("\n")
class DumpFiles:
command_name = "dump-files"
short_description = "Dumps full paths to compile unit files and optionally all line table files."
short_description = (
"Dumps full paths to compile unit files and optionally all line table files."
)
usage = "usage: %prog [options] MODULE1 [MODULE2 ...]"
description = '''This class adds a dump-files command to the LLDB interpreter.
description = """This class adds a dump-files command to the LLDB interpreter.
This command will dump all compile unit file paths found for each source file
for the binaries specified as arguments in the current target. Specify the
@@ -106,23 +111,26 @@ working in IDEs that specify full paths to source files when setting file and
line breakpoints. Sometimes symlinks cause the debug info to contain the symlink
path and an IDE will resolve the path to the actual file and use the resolved
path when setting breakpoints.
'''
"""
def create_options(self):
# Pass add_help_option = False, since this keeps the command in line with lldb commands,
# and we wire up "help command" to work by providing the long & short help methods below.
self.parser = optparse.OptionParser(
description = self.description,
prog = self.command_name,
usage = self.usage,
add_help_option = False)
description=self.description,
prog=self.command_name,
usage=self.usage,
add_help_option=False,
)
self.parser.add_option(
'-s',
'--support-files',
action = 'store_true',
dest = 'support_files',
help = 'Dumps full paths to all files used in a compile unit.',
default = False)
"-s",
"--support-files",
action="store_true",
dest="support_files",
help="Dumps full paths to all files used in a compile unit.",
default=False,
)
def get_short_help(self):
return self.short_description
@@ -163,19 +171,19 @@ path when setting breakpoints.
result.SetError('no module found that matches "%s".' % (module_path))
return
num_cus = module.GetNumCompileUnits()
print('Module: "%s"' % (module.file.fullpath), end=' ', file=result)
print('Module: "%s"' % (module.file.fullpath), end=" ", file=result)
if num_cus == 0:
print('no debug info.', file=result)
print("no debug info.", file=result)
continue
print('has %u compile units:' % (num_cus), file=result)
print("has %u compile units:" % (num_cus), file=result)
for i in range(num_cus):
cu = module.GetCompileUnitAtIndex(i)
print(' Compile Unit: %s' % (cu.file.fullpath), file=result)
print(" Compile Unit: %s" % (cu.file.fullpath), file=result)
if options.support_files:
num_support_files = cu.GetNumSupportFiles()
for j in range(num_support_files):
path = cu.GetSupportFileAtIndex(j).fullpath
print(' file[%u]: %s' % (j, path), file=result)
print(" file[%u]: %s" % (j, path), file=result)
def __lldb_init_module(debugger, dict):
@@ -183,9 +191,13 @@ def __lldb_init_module(debugger, dict):
# Add any commands contained in this module to LLDB
debugger.HandleCommand(
'command script add -o -c %s.DumpLineTables %s' % (__name__,
DumpLineTables.command_name))
"command script add -o -c %s.DumpLineTables %s"
% (__name__, DumpLineTables.command_name)
)
debugger.HandleCommand(
'command script add -o -c %s.DumpFiles %s' % (__name__, DumpFiles.command_name))
print('The "%s" and "%s" commands have been installed.' % (DumpLineTables.command_name,
DumpFiles.command_name))
"command script add -o -c %s.DumpFiles %s" % (__name__, DumpFiles.command_name)
)
print(
'The "%s" and "%s" commands have been installed.'
% (DumpLineTables.command_name, DumpFiles.command_name)
)
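
A hedged sketch of the two commands installed above; the importing file name and the module name passed as an argument are assumptions.

debugger.HandleCommand("command script import lldb_module_utils.py")
debugger.HandleCommand("dump-line-tables --verbose a.out")
debugger.HandleCommand("dump-files --support-files a.out")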


@@ -3,6 +3,7 @@
import lldb
import shlex
import sys
try:
from tkinter import *
import tkinter.ttk as ttk
@@ -12,62 +13,58 @@ except ImportError:
class ValueTreeItemDelegate(object):
def __init__(self, value):
self.value = value
def get_item_dictionary(self):
name = self.value.name
if name is None:
name = ''
name = ""
typename = self.value.type
if typename is None:
typename = ''
typename = ""
value = self.value.value
if value is None:
value = ''
value = ""
summary = self.value.summary
if summary is None:
summary = ''
summary = ""
has_children = self.value.MightHaveChildren()
return {'#0': name,
'typename': typename,
'value': value,
'summary': summary,
'children': has_children,
'tree-item-delegate': self}
return {
"#0": name,
"typename": typename,
"value": value,
"summary": summary,
"children": has_children,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
for i in range(self.value.num_children):
item_delegate = ValueTreeItemDelegate(
self.value.GetChildAtIndex(i))
item_delegate = ValueTreeItemDelegate(self.value.GetChildAtIndex(i))
item_dicts.append(item_delegate.get_item_dictionary())
return item_dicts
class FrameTreeItemDelegate(object):
def __init__(self, frame):
self.frame = frame
def get_item_dictionary(self):
id = self.frame.GetFrameID()
name = 'frame #%u' % (id)
value = '0x%16.16x' % (self.frame.GetPC())
name = "frame #%u" % (id)
value = "0x%16.16x" % (self.frame.GetPC())
stream = lldb.SBStream()
self.frame.GetDescription(stream)
summary = stream.GetData().split("`")[1]
return {
'#0': name,
'value': value,
'summary': summary,
'children': self.frame.GetVariables(
True,
True,
True,
True).GetSize() > 0,
'tree-item-delegate': self}
"#0": name,
"value": value,
"summary": summary,
"children": self.frame.GetVariables(True, True, True, True).GetSize() > 0,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
@@ -80,20 +77,21 @@ class FrameTreeItemDelegate(object):
class ThreadTreeItemDelegate(object):
def __init__(self, thread):
self.thread = thread
def get_item_dictionary(self):
num_frames = self.thread.GetNumFrames()
name = 'thread #%u' % (self.thread.GetIndexID())
value = '0x%x' % (self.thread.GetThreadID())
summary = '%u frames' % (num_frames)
return {'#0': name,
'value': value,
'summary': summary,
'children': num_frames > 0,
'tree-item-delegate': self}
name = "thread #%u" % (self.thread.GetIndexID())
value = "0x%x" % (self.thread.GetThreadID())
summary = "%u frames" % (num_frames)
return {
"#0": name,
"value": value,
"summary": summary,
"children": num_frames > 0,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
@@ -104,7 +102,6 @@ class ThreadTreeItemDelegate(object):
class ProcessTreeItemDelegate(object):
def __init__(self, process):
self.process = process
@@ -113,11 +110,13 @@ class ProcessTreeItemDelegate(object):
num_threads = self.process.GetNumThreads()
value = str(self.process.GetProcessID())
summary = self.process.target.executable.fullpath
return {'#0': 'process',
'value': value,
'summary': summary,
'children': num_threads > 0,
'tree-item-delegate': self}
return {
"#0": "process",
"value": value,
"summary": summary,
"children": num_threads > 0,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
@@ -128,18 +127,19 @@ class ProcessTreeItemDelegate(object):
class TargetTreeItemDelegate(object):
def __init__(self, target):
self.target = target
def get_item_dictionary(self):
value = str(self.target.triple)
summary = self.target.executable.fullpath
return {'#0': 'target',
'value': value,
'summary': summary,
'children': True,
'tree-item-delegate': self}
return {
"#0": "target",
"value": value,
"summary": summary,
"children": True,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
@@ -149,7 +149,6 @@ class TargetTreeItemDelegate(object):
class TargetImagesTreeItemDelegate(object):
def __init__(self, target):
self.target = target
@@ -157,70 +156,74 @@ class TargetImagesTreeItemDelegate(object):
value = str(self.target.triple)
summary = self.target.executable.fullpath
num_modules = self.target.GetNumModules()
return {'#0': 'images',
'value': '',
'summary': '%u images' % num_modules,
'children': num_modules > 0,
'tree-item-delegate': self}
return {
"#0": "images",
"value": "",
"summary": "%u images" % num_modules,
"children": num_modules > 0,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
for i in range(self.target.GetNumModules()):
module = self.target.GetModuleAtIndex(i)
image_item_delegate = ModuleTreeItemDelegate(
self.target, module, i)
image_item_delegate = ModuleTreeItemDelegate(self.target, module, i)
item_dicts.append(image_item_delegate.get_item_dictionary())
return item_dicts
class ModuleTreeItemDelegate(object):
def __init__(self, target, module, index):
self.target = target
self.module = module
self.index = index
def get_item_dictionary(self):
name = 'module %u' % (self.index)
name = "module %u" % (self.index)
value = self.module.file.basename
summary = self.module.file.dirname
return {'#0': name,
'value': value,
'summary': summary,
'children': True,
'tree-item-delegate': self}
return {
"#0": name,
"value": value,
"summary": summary,
"children": True,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
sections_item_delegate = ModuleSectionsTreeItemDelegate(
self.target, self.module)
self.target, self.module
)
item_dicts.append(sections_item_delegate.get_item_dictionary())
symbols_item_delegate = ModuleSymbolsTreeItemDelegate(
self.target, self.module)
symbols_item_delegate = ModuleSymbolsTreeItemDelegate(self.target, self.module)
item_dicts.append(symbols_item_delegate.get_item_dictionary())
comp_units_item_delegate = ModuleCompileUnitsTreeItemDelegate(
self.target, self.module)
self.target, self.module
)
item_dicts.append(comp_units_item_delegate.get_item_dictionary())
return item_dicts
class ModuleSectionsTreeItemDelegate(object):
def __init__(self, target, module):
self.target = target
self.module = module
def get_item_dictionary(self):
name = 'sections'
value = ''
summary = '%u sections' % (self.module.GetNumSections())
return {'#0': name,
'value': value,
'summary': summary,
'children': True,
'tree-item-delegate': self}
name = "sections"
value = ""
summary = "%u sections" % (self.module.GetNumSections())
return {
"#0": name,
"value": value,
"summary": summary,
"children": True,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
@@ -233,7 +236,6 @@ class ModuleSectionsTreeItemDelegate(object):
class SectionTreeItemDelegate(object):
def __init__(self, target, section):
self.target = target
self.section = section
@@ -242,15 +244,17 @@ class SectionTreeItemDelegate(object):
name = self.section.name
section_load_addr = self.section.GetLoadAddress(self.target)
if section_load_addr != lldb.LLDB_INVALID_ADDRESS:
value = '0x%16.16x' % (section_load_addr)
value = "0x%16.16x" % (section_load_addr)
else:
value = '0x%16.16x *' % (self.section.file_addr)
summary = ''
return {'#0': name,
'value': value,
'summary': summary,
'children': self.section.GetNumSubSections() > 0,
'tree-item-delegate': self}
value = "0x%16.16x *" % (self.section.file_addr)
summary = ""
return {
"#0": name,
"value": value,
"summary": summary,
"children": self.section.GetNumSubSections() > 0,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
@@ -263,20 +267,21 @@ class SectionTreeItemDelegate(object):
class ModuleCompileUnitsTreeItemDelegate(object):
def __init__(self, target, module):
self.target = target
self.module = module
def get_item_dictionary(self):
name = 'compile units'
value = ''
summary = '%u compile units' % (self.module.GetNumSections())
return {'#0': name,
'value': value,
'summary': summary,
'children': self.module.GetNumCompileUnits() > 0,
'tree-item-delegate': self}
name = "compile units"
value = ""
summary = "%u compile units" % (self.module.GetNumSections())
return {
"#0": name,
"value": value,
"summary": summary,
"children": self.module.GetNumCompileUnits() > 0,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
@@ -289,21 +294,22 @@ class ModuleCompileUnitsTreeItemDelegate(object):
class CompileUnitTreeItemDelegate(object):
def __init__(self, target, cu):
self.target = target
self.cu = cu
def get_item_dictionary(self):
name = self.cu.GetFileSpec().basename
value = ''
value = ""
num_lines = self.cu.GetNumLineEntries()
summary = ''
return {'#0': name,
'value': value,
'summary': summary,
'children': num_lines > 0,
'tree-item-delegate': self}
summary = ""
return {
"#0": name,
"value": value,
"summary": summary,
"children": num_lines > 0,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
@@ -313,35 +319,34 @@ class CompileUnitTreeItemDelegate(object):
class LineTableTreeItemDelegate(object):
def __init__(self, target, cu):
self.target = target
self.cu = cu
def get_item_dictionary(self):
name = 'line table'
value = ''
name = "line table"
value = ""
num_lines = self.cu.GetNumLineEntries()
summary = '%u line entries' % (num_lines)
return {'#0': name,
'value': value,
'summary': summary,
'children': num_lines > 0,
'tree-item-delegate': self}
summary = "%u line entries" % (num_lines)
return {
"#0": name,
"value": value,
"summary": summary,
"children": num_lines > 0,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
num_lines = self.cu.GetNumLineEntries()
for i in range(num_lines):
line_entry = self.cu.GetLineEntryAtIndex(i)
item_delegate = LineEntryTreeItemDelegate(
self.target, line_entry, i)
item_delegate = LineEntryTreeItemDelegate(self.target, line_entry, i)
item_dicts.append(item_delegate.get_item_dictionary())
return item_dicts
class LineEntryTreeItemDelegate(object):
def __init__(self, target, line_entry, index):
self.target = target
self.line_entry = line_entry
@@ -352,16 +357,19 @@ class LineEntryTreeItemDelegate(object):
address = self.line_entry.GetStartAddress()
load_addr = address.GetLoadAddress(self.target)
if load_addr != lldb.LLDB_INVALID_ADDRESS:
value = '0x%16.16x' % (load_addr)
value = "0x%16.16x" % (load_addr)
else:
value = '0x%16.16x *' % (address.file_addr)
summary = self.line_entry.GetFileSpec().fullpath + ':' + \
str(self.line_entry.line)
return {'#0': name,
'value': value,
'summary': summary,
'children': False,
'tree-item-delegate': self}
value = "0x%16.16x *" % (address.file_addr)
summary = (
self.line_entry.GetFileSpec().fullpath + ":" + str(self.line_entry.line)
)
return {
"#0": name,
"value": value,
"summary": summary,
"children": False,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
@@ -369,7 +377,6 @@ class LineEntryTreeItemDelegate(object):
class InstructionTreeItemDelegate(object):
def __init__(self, target, instr):
self.target = target
self.instr = instr
@@ -378,48 +385,52 @@ class InstructionTreeItemDelegate(object):
address = self.instr.GetAddress()
load_addr = address.GetLoadAddress(self.target)
if load_addr != lldb.LLDB_INVALID_ADDRESS:
name = '0x%16.16x' % (load_addr)
name = "0x%16.16x" % (load_addr)
else:
name = '0x%16.16x *' % (address.file_addr)
value = self.instr.GetMnemonic(
self.target) + ' ' + self.instr.GetOperands(self.target)
name = "0x%16.16x *" % (address.file_addr)
value = (
self.instr.GetMnemonic(self.target)
+ " "
+ self.instr.GetOperands(self.target)
)
summary = self.instr.GetComment(self.target)
return {'#0': name,
'value': value,
'summary': summary,
'children': False,
'tree-item-delegate': self}
return {
"#0": name,
"value": value,
"summary": summary,
"children": False,
"tree-item-delegate": self,
}
class ModuleSymbolsTreeItemDelegate(object):
def __init__(self, target, module):
self.target = target
self.module = module
def get_item_dictionary(self):
name = 'symbols'
value = ''
summary = '%u symbols' % (self.module.GetNumSymbols())
return {'#0': name,
'value': value,
'summary': summary,
'children': True,
'tree-item-delegate': self}
name = "symbols"
value = ""
summary = "%u symbols" % (self.module.GetNumSymbols())
return {
"#0": name,
"value": value,
"summary": summary,
"children": True,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
num_symbols = self.module.GetNumSymbols()
for i in range(num_symbols):
symbol = self.module.GetSymbolAtIndex(i)
image_item_delegate = SymbolTreeItemDelegate(
self.target, symbol, i)
image_item_delegate = SymbolTreeItemDelegate(self.target, symbol, i)
item_dicts.append(image_item_delegate.get_item_dictionary())
return item_dicts
class SymbolTreeItemDelegate(object):
def __init__(self, target, symbol, index):
self.target = target
self.symbol = symbol
@@ -427,18 +438,20 @@ class SymbolTreeItemDelegate(object):
def get_item_dictionary(self):
address = self.symbol.GetStartAddress()
name = '[%u]' % self.index
name = "[%u]" % self.index
symbol_load_addr = address.GetLoadAddress(self.target)
if symbol_load_addr != lldb.LLDB_INVALID_ADDRESS:
value = '0x%16.16x' % (symbol_load_addr)
value = "0x%16.16x" % (symbol_load_addr)
else:
value = '0x%16.16x *' % (address.file_addr)
value = "0x%16.16x *" % (address.file_addr)
summary = self.symbol.name
return {'#0': name,
'value': value,
'summary': summary,
'children': False,
'tree-item-delegate': self}
return {
"#0": name,
"value": value,
"summary": summary,
"children": False,
"tree-item-delegate": self,
}
def get_child_item_dictionaries(self):
item_dicts = list()
@@ -446,7 +459,6 @@ class SymbolTreeItemDelegate(object):
class DelegateTree(ttk.Frame):
def __init__(self, column_dicts, delegate, title, name):
ttk.Frame.__init__(self, name=name)
self.pack(expand=Y, fill=BOTH)
@@ -465,25 +477,23 @@ class DelegateTree(ttk.Frame):
column_ids = list()
for i in range(1, len(self.columns_dicts)):
column_ids.append(self.columns_dicts[i]['id'])
column_ids.append(self.columns_dicts[i]["id"])
# create the tree and scrollbars
self.tree = ttk.Treeview(columns=column_ids)
scroll_bar_v = ttk.Scrollbar(orient=VERTICAL, command=self.tree.yview)
scroll_bar_h = ttk.Scrollbar(
orient=HORIZONTAL, command=self.tree.xview)
self.tree['yscroll'] = scroll_bar_v.set
self.tree['xscroll'] = scroll_bar_h.set
scroll_bar_h = ttk.Scrollbar(orient=HORIZONTAL, command=self.tree.xview)
self.tree["yscroll"] = scroll_bar_v.set
self.tree["xscroll"] = scroll_bar_h.set
# setup column headings and columns properties
for columns_dict in self.columns_dicts:
self.tree.heading(
columns_dict['id'],
text=columns_dict['text'],
anchor=columns_dict['anchor'])
self.tree.column(
columns_dict['id'],
stretch=columns_dict['stretch'])
columns_dict["id"],
text=columns_dict["text"],
anchor=columns_dict["anchor"],
)
self.tree.column(columns_dict["id"], stretch=columns_dict["stretch"])
# add tree and scrollbars to frame
self.tree.grid(in_=frame, row=0, column=0, sticky=NSEW)
@@ -495,7 +505,7 @@ class DelegateTree(ttk.Frame):
frame.columnconfigure(0, weight=1)
# action to perform when a node is expanded
self.tree.bind('<<TreeviewOpen>>', self._update_tree)
self.tree.bind("<<TreeviewOpen>>", self._update_tree)
def insert_items(self, parent_id, item_dicts):
for item_dict in item_dicts:
@@ -504,43 +514,43 @@ class DelegateTree(ttk.Frame):
first = True
for columns_dict in self.columns_dicts:
if first:
name = item_dict[columns_dict['id']]
name = item_dict[columns_dict["id"]]
first = False
else:
values.append(item_dict[columns_dict['id']])
item_id = self.tree.insert(parent_id, # root item has an empty name
END,
text=name,
values=values)
values.append(item_dict[columns_dict["id"]])
item_id = self.tree.insert(
parent_id, END, text=name, values=values # root item has an empty name
)
self.item_id_to_item_dict[item_id] = item_dict
if item_dict['children']:
self.tree.insert(item_id, END, text='dummy')
if item_dict["children"]:
self.tree.insert(item_id, END, text="dummy")
def _populate_root(self):
# use current directory as root node
self.insert_items('', self.delegate.get_child_item_dictionaries())
self.insert_items("", self.delegate.get_child_item_dictionaries())
def _update_tree(self, event):
# user expanded a node - build the related directory
item_id = self.tree.focus() # the id of the expanded node
item_id = self.tree.focus() # the id of the expanded node
children = self.tree.get_children(item_id)
if len(children):
first_child = children[0]
# if the node only has a 'dummy' child, remove it and
# build new directory; skip if the node is already
# populated
if self.tree.item(first_child, option='text') == 'dummy':
if self.tree.item(first_child, option="text") == "dummy":
self.tree.delete(first_child)
item_dict = self.item_id_to_item_dict[item_id]
item_dicts = item_dict[
'tree-item-delegate'].get_child_item_dictionaries()
"tree-item-delegate"
].get_child_item_dictionaries()
self.insert_items(item_id, item_dicts)
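The "<<TreeviewOpen>>" handler and the "dummy" placeholder used above are the standard lazy-population idiom for ttk.Treeview: every expandable row gets a single placeholder child so its expander arrow is drawn, and the real children are only inserted the first time the row is opened. A standalone sketch of just that mechanism (requires a Tk display; the node labels are made up):

import tkinter as tk
from tkinter import ttk

root = tk.Tk()
tree = ttk.Treeview(root)
tree.pack(expand=True, fill=tk.BOTH)

# Insert a parent with a single placeholder so it shows as expandable.
parent = tree.insert("", tk.END, text="parent")
tree.insert(parent, tk.END, text="dummy")

def on_open(event):
    item = tree.focus()
    children = tree.get_children(item)
    # Replace the placeholder with the real children on first expansion.
    if children and tree.item(children[0], option="text") == "dummy":
        tree.delete(children[0])
        for i in range(3):
            tree.insert(item, tk.END, text="child %u" % i)

tree.bind("<<TreeviewOpen>>", on_open)
root.mainloop()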
@lldb.command("tk-variables")
def tk_variable_display(debugger, command, result, dict):
# needed for tree creation in TK library as it uses sys.argv...
sys.argv = ['tk-variables']
sys.argv = ["tk-variables"]
target = debugger.GetSelectedTarget()
if not target:
print("invalid target", file=result)
@@ -559,22 +569,22 @@ def tk_variable_display(debugger, command, result, dict):
return
# Parse command line args
command_args = shlex.split(command)
column_dicts = [{'id': '#0', 'text': 'Name', 'anchor': W, 'stretch': 0},
{'id': 'typename', 'text': 'Type', 'anchor': W, 'stretch': 0},
{'id': 'value', 'text': 'Value', 'anchor': W, 'stretch': 0},
{'id': 'summary', 'text': 'Summary', 'anchor': W, 'stretch': 1}]
column_dicts = [
{"id": "#0", "text": "Name", "anchor": W, "stretch": 0},
{"id": "typename", "text": "Type", "anchor": W, "stretch": 0},
{"id": "value", "text": "Value", "anchor": W, "stretch": 0},
{"id": "summary", "text": "Summary", "anchor": W, "stretch": 1},
]
tree = DelegateTree(
column_dicts,
FrameTreeItemDelegate(frame),
'Variables',
'lldb-tk-variables')
column_dicts, FrameTreeItemDelegate(frame), "Variables", "lldb-tk-variables"
)
tree.mainloop()
@lldb.command("tk-process")
def tk_process_display(debugger, command, result, dict):
# needed for tree creation in TK library as it uses sys.argv...
sys.argv = ['tk-process']
sys.argv = ["tk-process"]
target = debugger.GetSelectedTarget()
if not target:
print("invalid target", file=result)
@@ -584,34 +594,34 @@ def tk_process_display(debugger, command, result, dict):
print("invalid process", file=result)
return
# Parse command line args
columnd_dicts = [{'id': '#0', 'text': 'Name', 'anchor': W, 'stretch': 0},
{'id': 'value', 'text': 'Value', 'anchor': W, 'stretch': 0},
{'id': 'summary', 'text': 'Summary', 'anchor': W, 'stretch': 1}]
columnd_dicts = [
{"id": "#0", "text": "Name", "anchor": W, "stretch": 0},
{"id": "value", "text": "Value", "anchor": W, "stretch": 0},
{"id": "summary", "text": "Summary", "anchor": W, "stretch": 1},
]
command_args = shlex.split(command)
tree = DelegateTree(
columnd_dicts,
ProcessTreeItemDelegate(process),
'Process',
'lldb-tk-process')
columnd_dicts, ProcessTreeItemDelegate(process), "Process", "lldb-tk-process"
)
tree.mainloop()
@lldb.command("tk-target")
def tk_target_display(debugger, command, result, dict):
# needed for tree creation in TK library as it uses sys.argv...
sys.argv = ['tk-target']
sys.argv = ["tk-target"]
target = debugger.GetSelectedTarget()
if not target:
print("invalid target", file=result)
return
# Parse command line args
columnd_dicts = [{'id': '#0', 'text': 'Name', 'anchor': W, 'stretch': 0},
{'id': 'value', 'text': 'Value', 'anchor': W, 'stretch': 0},
{'id': 'summary', 'text': 'Summary', 'anchor': W, 'stretch': 1}]
columnd_dicts = [
{"id": "#0", "text": "Name", "anchor": W, "stretch": 0},
{"id": "value", "text": "Value", "anchor": W, "stretch": 0},
{"id": "summary", "text": "Summary", "anchor": W, "stretch": 1},
]
command_args = shlex.split(command)
tree = DelegateTree(
columnd_dicts,
TargetTreeItemDelegate(target),
'Target',
'lldb-tk-target')
columnd_dicts, TargetTreeItemDelegate(target), "Target", "lldb-tk-target"
)
tree.mainloop()

File diff suppressed because it is too large

View File

@@ -1,13 +1,13 @@
#!/usr/bin/env python
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Be sure to add the python path that points to the LLDB shared library.
#
# # To use this in the embedded python interpreter using "lldb" just
# import it with the full path using the "command script import"
# command
# (lldb) command script import /path/to/cmdtemplate.py
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
import platform
import os
@@ -22,18 +22,21 @@ except ImportError:
lldb_python_dirs = list()
# lldb is not in the PYTHONPATH, try some defaults for the current platform
platform_system = platform.system()
if platform_system == 'Darwin':
if platform_system == "Darwin":
# On Darwin, try the currently selected Xcode directory
xcode_dir = subprocess.check_output("xcode-select --print-path", shell=True)
if xcode_dir:
lldb_python_dirs.append(
os.path.realpath(
xcode_dir +
'/../SharedFrameworks/LLDB.framework/Resources/Python'))
xcode_dir + "/../SharedFrameworks/LLDB.framework/Resources/Python"
)
)
lldb_python_dirs.append(
xcode_dir + '/Library/PrivateFrameworks/LLDB.framework/Resources/Python')
xcode_dir + "/Library/PrivateFrameworks/LLDB.framework/Resources/Python"
)
lldb_python_dirs.append(
'/System/Library/PrivateFrameworks/LLDB.framework/Resources/Python')
"/System/Library/PrivateFrameworks/LLDB.framework/Resources/Python"
)
success = False
for lldb_python_dir in lldb_python_dirs:
if os.path.exists(lldb_python_dir):
@@ -48,7 +51,9 @@ except ImportError:
success = True
break
if not success:
print("error: couldn't locate the 'lldb' module, please set PYTHONPATH correctly")
print(
"error: couldn't locate the 'lldb' module, please set PYTHONPATH correctly"
)
sys.exit(1)
import optparse
@@ -61,120 +66,126 @@ import time
def append_data_callback(option, opt_str, value, parser):
if opt_str == "--uint8":
int8 = int(value, 0)
parser.values.data += struct.pack('1B', int8)
parser.values.data += struct.pack("1B", int8)
if opt_str == "--uint16":
int16 = int(value, 0)
parser.values.data += struct.pack('1H', int16)
parser.values.data += struct.pack("1H", int16)
if opt_str == "--uint32":
int32 = int(value, 0)
parser.values.data += struct.pack('1I', int32)
parser.values.data += struct.pack("1I", int32)
if opt_str == "--uint64":
int64 = int(value, 0)
parser.values.data += struct.pack('1Q', int64)
parser.values.data += struct.pack("1Q", int64)
if opt_str == "--int8":
int8 = int(value, 0)
parser.values.data += struct.pack('1b', int8)
parser.values.data += struct.pack("1b", int8)
if opt_str == "--int16":
int16 = int(value, 0)
parser.values.data += struct.pack('1h', int16)
parser.values.data += struct.pack("1h", int16)
if opt_str == "--int32":
int32 = int(value, 0)
parser.values.data += struct.pack('1i', int32)
parser.values.data += struct.pack("1i", int32)
if opt_str == "--int64":
int64 = int(value, 0)
parser.values.data += struct.pack('1q', int64)
parser.values.data += struct.pack("1q", int64)
def create_memfind_options():
usage = "usage: %prog [options] STARTADDR [ENDADDR]"
description = '''This command can find data in a specified address range.
description = """This command can find data in a specified address range.
Options are used to specify the data that is to be looked for and the options
can be specified multiple times to look for longer streams of data.
'''
parser = optparse.OptionParser(
description=description,
prog='memfind',
usage=usage)
"""
parser = optparse.OptionParser(description=description, prog="memfind", usage=usage)
parser.add_option(
'-s',
'--size',
type='int',
metavar='BYTESIZE',
dest='size',
help='Specify the byte size to search.',
default=0)
"-s",
"--size",
type="int",
metavar="BYTESIZE",
dest="size",
help="Specify the byte size to search.",
default=0,
)
parser.add_option(
'--int8',
"--int8",
action="callback",
callback=append_data_callback,
type='string',
metavar='INT',
dest='data',
help='Specify an 8 bit signed integer value to search for in memory.',
default='')
type="string",
metavar="INT",
dest="data",
help="Specify a 8 bit signed integer value to search for in memory.",
default="",
)
parser.add_option(
'--int16',
"--int16",
action="callback",
callback=append_data_callback,
type='string',
metavar='INT',
dest='data',
help='Specify a 16 bit signed integer value to search for in memory.',
default='')
type="string",
metavar="INT",
dest="data",
help="Specify a 16 bit signed integer value to search for in memory.",
default="",
)
parser.add_option(
'--int32',
"--int32",
action="callback",
callback=append_data_callback,
type='string',
metavar='INT',
dest='data',
help='Specify a 32 bit signed integer value to search for in memory.',
default='')
type="string",
metavar="INT",
dest="data",
help="Specify a 32 bit signed integer value to search for in memory.",
default="",
)
parser.add_option(
'--int64',
"--int64",
action="callback",
callback=append_data_callback,
type='string',
metavar='INT',
dest='data',
help='Specify a 64 bit signed integer value to search for in memory.',
default='')
type="string",
metavar="INT",
dest="data",
help="Specify a 64 bit signed integer value to search for in memory.",
default="",
)
parser.add_option(
'--uint8',
"--uint8",
action="callback",
callback=append_data_callback,
type='string',
metavar='INT',
dest='data',
help='Specify an 8 bit unsigned integer value to search for in memory.',
default='')
type="string",
metavar="INT",
dest="data",
help="Specify a 8 bit unsigned integer value to search for in memory.",
default="",
)
parser.add_option(
'--uint16',
"--uint16",
action="callback",
callback=append_data_callback,
type='string',
metavar='INT',
dest='data',
help='Specify a 16 bit unsigned integer value to search for in memory.',
default='')
type="string",
metavar="INT",
dest="data",
help="Specify a 16 bit unsigned integer value to search for in memory.",
default="",
)
parser.add_option(
'--uint32',
"--uint32",
action="callback",
callback=append_data_callback,
type='string',
metavar='INT',
dest='data',
help='Specify a 32 bit unsigned integer value to search for in memory.',
default='')
type="string",
metavar="INT",
dest="data",
help="Specify a 32 bit unsigned integer value to search for in memory.",
default="",
)
parser.add_option(
'--uint64',
"--uint64",
action="callback",
callback=append_data_callback,
type='string',
metavar='INT',
dest='data',
help='Specify a 64 bit unsigned integer value to search for in memory.',
default='')
type="string",
metavar="INT",
dest="data",
help="Specify a 64 bit unsigned integer value to search for in memory.",
default="",
)
return parser
@@ -209,7 +220,8 @@ def memfind(target, options, args, result):
print_error(
"error: --size must be specified if there is no ENDADDR argument",
True,
result)
result,
)
return
start_addr = int(args[0], 0)
elif num_args == 2:
@@ -217,14 +229,17 @@ def memfind(target, options, args, result):
print_error(
"error: --size can't be specified with an ENDADDR argument",
True,
result)
result,
)
return
start_addr = int(args[0], 0)
end_addr = int(args[1], 0)
if start_addr >= end_addr:
print_error(
"error: inavlid memory range [%#x - %#x)" %
(start_addr, end_addr), True, result)
"error: inavlid memory range [%#x - %#x)" % (start_addr, end_addr),
True,
result,
)
return
options.size = end_addr - start_addr
else:
@@ -232,45 +247,52 @@ def memfind(target, options, args, result):
return
if not options.data:
print('error: no data specified to search for', file=result)
print("error: no data specified to search for", file=result)
return
if not target:
print('error: invalid target', file=result)
print("error: invalid target", file=result)
return
process = target.process
if not process:
print('error: invalid process', file=result)
print("error: invalid process", file=result)
return
error = lldb.SBError()
bytes = process.ReadMemory(start_addr, options.size, error)
if error.Success():
num_matches = 0
print("Searching memory range [%#x - %#x) for" % (
start_addr, end_addr), end=' ', file=result)
print(
"Searching memory range [%#x - %#x) for" % (start_addr, end_addr),
end=" ",
file=result,
)
for byte in options.data:
print('%2.2x' % ord(byte), end=' ', file=result)
print("%2.2x" % ord(byte), end=" ", file=result)
print(file=result)
match_index = string.find(bytes, options.data)
while match_index != -1:
num_matches = num_matches + 1
print('%#x: %#x + %u' % (start_addr +
match_index, start_addr, match_index), file=result)
print(
"%#x: %#x + %u" % (start_addr + match_index, start_addr, match_index),
file=result,
)
match_index = string.find(bytes, options.data, match_index + 1)
if num_matches == 0:
print("error: no matches found", file=result)
else:
print('error: %s' % (error.GetCString()), file=result)
print("error: %s" % (error.GetCString()), file=result)
if __name__ == '__main__':
print('error: this script is designed to be used within the embedded script interpreter in LLDB')
if __name__ == "__main__":
print(
"error: this script is designed to be used within the embedded script interpreter in LLDB"
)
def __lldb_init_module(debugger, internal_dict):
memfind_command.__doc__ = create_memfind_options().format_help()
debugger.HandleCommand(
'command script add -o -f memory.memfind_command memfind')
debugger.HandleCommand("command script add -o -f memory.memfind_command memfind")
print('"memfind" command installed, use the "--help" option for detailed help')

View File

@@ -8,10 +8,10 @@ class OperatingSystemPlugIn(object):
"""Class that provides data for an instance of a LLDB 'OperatingSystemPython' plug-in class"""
def __init__(self, process):
'''Initialization needs a valid SBProcess object.
"""Initialization needs a valid SBProcess object.
This plug-in will get created after a live process is valid and has stopped for the
first time.'''
first time."""
self.process = None
self.registers = None
self.threads = None
@@ -28,11 +28,12 @@ class OperatingSystemPlugIn(object):
def create_thread(self, tid, context):
if tid == 0x444444444:
thread_info = {
'tid': tid,
'name': 'four',
'queue': 'queue4',
'state': 'stopped',
'stop_reason': 'none'}
"tid": tid,
"name": "four",
"queue": "queue4",
"state": "stopped",
"stop_reason": "none",
}
self.threads.append(thread_info)
return thread_info
return None
@@ -56,22 +57,30 @@ class OperatingSystemPlugIn(object):
# in memory. Don't specify this if your register layout in memory doesn't match the layout
# described by the dictionary returned from a call to the
# get_register_info() method.
self.threads = [{'tid': 0x111111111,
'name': 'one',
'queue': 'queue1',
'state': 'stopped',
'stop_reason': 'breakpoint'},
{'tid': 0x222222222,
'name': 'two',
'queue': 'queue2',
'state': 'stopped',
'stop_reason': 'none'},
{'tid': 0x333333333,
'name': 'three',
'queue': 'queue3',
'state': 'stopped',
'stop_reason': 'trace',
'register_data_addr': 0x100000000}]
self.threads = [
{
"tid": 0x111111111,
"name": "one",
"queue": "queue1",
"state": "stopped",
"stop_reason": "breakpoint",
},
{
"tid": 0x222222222,
"name": "two",
"queue": "queue2",
"state": "stopped",
"stop_reason": "none",
},
{
"tid": 0x333333333,
"name": "three",
"queue": "queue3",
"state": "stopped",
"stop_reason": "trace",
"register_data_addr": 0x100000000,
},
]
return self.threads
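These dictionaries are the whole interface between the plug-in and LLDB's thread list. A stripped-down single entry, with placeholder values, looks like the following sketch (not part of this commit; "register_data_addr" is optional and only valid when the registers really are laid out in memory as described by get_register_info()):

thread_info = {
    "tid": 0x1000,            # placeholder thread id
    "name": "worker",         # placeholder thread name
    "queue": "queue0",
    "state": "stopped",
    "stop_reason": "none",
    # "register_data_addr": 0x100000000,  # optional, see the comment above
}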
def get_register_info(self):
@@ -79,38 +88,239 @@ class OperatingSystemPlugIn(object):
self.registers = dict()
triple = self.process.target.triple
if triple:
arch = triple.split('-')[0]
if arch == 'x86_64':
self.registers['sets'] = ['GPR', 'FPU', 'EXC']
self.registers['registers'] = [
{'name': 'rax', 'bitsize': 64, 'offset': 0, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 0, 'dwarf': 0},
{'name': 'rbx', 'bitsize': 64, 'offset': 8, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 3, 'dwarf': 3},
{'name': 'rcx', 'bitsize': 64, 'offset': 16, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 2, 'dwarf': 2, 'generic': 'arg4', 'alt-name': 'arg4', },
{'name': 'rdx', 'bitsize': 64, 'offset': 24, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 1, 'dwarf': 1, 'generic': 'arg3', 'alt-name': 'arg3', },
{'name': 'rdi', 'bitsize': 64, 'offset': 32, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 5, 'dwarf': 5, 'generic': 'arg1', 'alt-name': 'arg1', },
{'name': 'rsi', 'bitsize': 64, 'offset': 40, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 4, 'dwarf': 4, 'generic': 'arg2', 'alt-name': 'arg2', },
{'name': 'rbp', 'bitsize': 64, 'offset': 48, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 6, 'dwarf': 6, 'generic': 'fp', 'alt-name': 'fp', },
{'name': 'rsp', 'bitsize': 64, 'offset': 56, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 7, 'dwarf': 7, 'generic': 'sp', 'alt-name': 'sp', },
{'name': 'r8', 'bitsize': 64, 'offset': 64, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 8, 'dwarf': 8, 'generic': 'arg5', 'alt-name': 'arg5', },
{'name': 'r9', 'bitsize': 64, 'offset': 72, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 9, 'dwarf': 9, 'generic': 'arg6', 'alt-name': 'arg6', },
{'name': 'r10', 'bitsize': 64, 'offset': 80, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 10, 'dwarf': 10},
{'name': 'r11', 'bitsize': 64, 'offset': 88, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 11, 'dwarf': 11},
{'name': 'r12', 'bitsize': 64, 'offset': 96, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 12, 'dwarf': 12},
{'name': 'r13', 'bitsize': 64, 'offset': 104, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 13, 'dwarf': 13},
{'name': 'r14', 'bitsize': 64, 'offset': 112, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 14, 'dwarf': 14},
{'name': 'r15', 'bitsize': 64, 'offset': 120, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 15, 'dwarf': 15},
{'name': 'rip', 'bitsize': 64, 'offset': 128, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 16, 'dwarf': 16, 'generic': 'pc', 'alt-name': 'pc'},
{'name': 'rflags', 'bitsize': 64, 'offset': 136, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'generic': 'flags', 'alt-name': 'flags'},
{'name': 'cs', 'bitsize': 64, 'offset': 144, 'encoding': 'uint', 'format': 'hex', 'set': 0},
{'name': 'fs', 'bitsize': 64, 'offset': 152, 'encoding': 'uint', 'format': 'hex', 'set': 0},
{'name': 'gs', 'bitsize': 64, 'offset': 160, 'encoding': 'uint', 'format': 'hex', 'set': 0},
arch = triple.split("-")[0]
if arch == "x86_64":
self.registers["sets"] = ["GPR", "FPU", "EXC"]
self.registers["registers"] = [
{
"name": "rax",
"bitsize": 64,
"offset": 0,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 0,
"dwarf": 0,
},
{
"name": "rbx",
"bitsize": 64,
"offset": 8,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 3,
"dwarf": 3,
},
{
"name": "rcx",
"bitsize": 64,
"offset": 16,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 2,
"dwarf": 2,
"generic": "arg4",
"alt-name": "arg4",
},
{
"name": "rdx",
"bitsize": 64,
"offset": 24,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 1,
"dwarf": 1,
"generic": "arg3",
"alt-name": "arg3",
},
{
"name": "rdi",
"bitsize": 64,
"offset": 32,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 5,
"dwarf": 5,
"generic": "arg1",
"alt-name": "arg1",
},
{
"name": "rsi",
"bitsize": 64,
"offset": 40,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 4,
"dwarf": 4,
"generic": "arg2",
"alt-name": "arg2",
},
{
"name": "rbp",
"bitsize": 64,
"offset": 48,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 6,
"dwarf": 6,
"generic": "fp",
"alt-name": "fp",
},
{
"name": "rsp",
"bitsize": 64,
"offset": 56,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 7,
"dwarf": 7,
"generic": "sp",
"alt-name": "sp",
},
{
"name": "r8",
"bitsize": 64,
"offset": 64,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 8,
"dwarf": 8,
"generic": "arg5",
"alt-name": "arg5",
},
{
"name": "r9",
"bitsize": 64,
"offset": 72,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 9,
"dwarf": 9,
"generic": "arg6",
"alt-name": "arg6",
},
{
"name": "r10",
"bitsize": 64,
"offset": 80,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 10,
"dwarf": 10,
},
{
"name": "r11",
"bitsize": 64,
"offset": 88,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 11,
"dwarf": 11,
},
{
"name": "r12",
"bitsize": 64,
"offset": 96,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 12,
"dwarf": 12,
},
{
"name": "r13",
"bitsize": 64,
"offset": 104,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 13,
"dwarf": 13,
},
{
"name": "r14",
"bitsize": 64,
"offset": 112,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 14,
"dwarf": 14,
},
{
"name": "r15",
"bitsize": 64,
"offset": 120,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 15,
"dwarf": 15,
},
{
"name": "rip",
"bitsize": 64,
"offset": 128,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 16,
"dwarf": 16,
"generic": "pc",
"alt-name": "pc",
},
{
"name": "rflags",
"bitsize": 64,
"offset": 136,
"encoding": "uint",
"format": "hex",
"set": 0,
"generic": "flags",
"alt-name": "flags",
},
{
"name": "cs",
"bitsize": 64,
"offset": 144,
"encoding": "uint",
"format": "hex",
"set": 0,
},
{
"name": "fs",
"bitsize": 64,
"offset": 152,
"encoding": "uint",
"format": "hex",
"set": 0,
},
{
"name": "gs",
"bitsize": 64,
"offset": 160,
"encoding": "uint",
"format": "hex",
"set": 0,
},
]
return self.registers
def get_register_data(self, tid):
if tid == 0x111111111:
return struct.pack(
'21Q',
"21Q",
1,
2,
3,
@@ -131,10 +341,11 @@ class OperatingSystemPlugIn(object):
18,
19,
20,
21)
21,
)
elif tid == 0x222222222:
return struct.pack(
'21Q',
"21Q",
11,
12,
13,
@@ -155,10 +366,11 @@ class OperatingSystemPlugIn(object):
118,
119,
120,
121)
121,
)
elif tid == 0x333333333:
return struct.pack(
'21Q',
"21Q",
21,
22,
23,
@@ -179,10 +391,11 @@ class OperatingSystemPlugIn(object):
218,
219,
220,
221)
221,
)
elif tid == 0x444444444:
return struct.pack(
'21Q',
"21Q",
31,
32,
33,
@@ -203,10 +416,11 @@ class OperatingSystemPlugIn(object):
318,
319,
320,
321)
321,
)
else:
return struct.pack(
'21Q',
"21Q",
41,
42,
43,
@@ -227,5 +441,6 @@ class OperatingSystemPlugIn(object):
418,
419,
420,
421)
421,
)
return None
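As a quick consistency check on the x86_64 layout above: get_register_info() describes 21 registers at 8-byte offsets 0 through 160, and every get_register_data() branch packs exactly 21 unsigned 64-bit values, i.e. 168 bytes. A short verification sketch (not part of the commit):

import struct

offsets = list(range(0, 168, 8))        # rax at 0 ... gs at 160
assert len(offsets) == 21
assert struct.calcsize("21Q") == 168
assert len(struct.pack("21Q", *range(21))) == 168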

View File

@@ -1,12 +1,12 @@
#!/usr/bin/env python
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Be sure to add the python path that points to the LLDB shared library.
# On MacOSX csh, tcsh:
# setenv PYTHONPATH /Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python
# On MacOSX sh, bash:
# export PYTHONPATH=/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
import optparse
import os
@@ -18,9 +18,9 @@ import subprocess
import time
import types
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Code that auto imports LLDB
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
try:
# Just try for LLDB in case PYTHONPATH is already correctly setup
import lldb
@@ -28,18 +28,21 @@ except ImportError:
lldb_python_dirs = list()
# lldb is not in the PYTHONPATH, try some defaults for the current platform
platform_system = platform.system()
if platform_system == 'Darwin':
if platform_system == "Darwin":
# On Darwin, try the currently selected Xcode directory
xcode_dir = subprocess.check_output("xcode-select --print-path", shell=True)
if xcode_dir:
lldb_python_dirs.append(
os.path.realpath(
xcode_dir +
'/../SharedFrameworks/LLDB.framework/Resources/Python'))
xcode_dir + "/../SharedFrameworks/LLDB.framework/Resources/Python"
)
)
lldb_python_dirs.append(
xcode_dir + '/Library/PrivateFrameworks/LLDB.framework/Resources/Python')
xcode_dir + "/Library/PrivateFrameworks/LLDB.framework/Resources/Python"
)
lldb_python_dirs.append(
'/System/Library/PrivateFrameworks/LLDB.framework/Resources/Python')
"/System/Library/PrivateFrameworks/LLDB.framework/Resources/Python"
)
success = False
for lldb_python_dir in lldb_python_dirs:
if os.path.exists(lldb_python_dir):
@@ -54,12 +57,13 @@ except ImportError:
success = True
break
if not success:
print("error: couldn't locate the 'lldb' module, please set PYTHONPATH correctly")
print(
"error: couldn't locate the 'lldb' module, please set PYTHONPATH correctly"
)
sys.exit(1)
class Timer:
def __enter__(self):
self.start = time.clock()
return self
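One caveat worth noting while touching this file: Timer relies on time.clock(), which was removed in Python 3.8. This commit is NFC and does not change behavior, but an equivalent context-manager timer on current Python would typically use time.perf_counter(), e.g. (a sketch, not part of the commit):

import time

class Timer:
    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, *exc):
        self.interval = time.perf_counter() - self.start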
@@ -77,11 +81,12 @@ class Action(object):
self.callback_owner = callback_owner
def ThreadStopped(self, thread):
assert False, "performance.Action.ThreadStopped(self, thread) must be overridden in a subclass"
assert (
False
), "performance.Action.ThreadStopped(self, thread) must be overridden in a subclass"
class PlanCompleteAction (Action):
class PlanCompleteAction(Action):
def __init__(self, callback=None, callback_owner=None):
Action.__init__(self, callback, callback_owner)
@@ -96,17 +101,17 @@ class PlanCompleteAction (Action):
return False
class BreakpointAction (Action):
class BreakpointAction(Action):
def __init__(
self,
callback=None,
callback_owner=None,
name=None,
module=None,
file=None,
line=None,
breakpoint=None):
self,
callback=None,
callback_owner=None,
name=None,
module=None,
file=None,
line=None,
breakpoint=None,
):
Action.__init__(self, callback, callback_owner)
self.modules = lldb.SBFileSpecList()
self.files = lldb.SBFileSpecList()
@@ -118,8 +123,7 @@ class BreakpointAction (Action):
if module:
if isinstance(module, types.ListType):
for module_path in module:
self.modules.Append(
lldb.SBFileSpec(module_path, False))
self.modules.Append(lldb.SBFileSpec(module_path, False))
elif isinstance(module, types.StringTypes):
self.modules.Append(lldb.SBFileSpec(module, False))
if name:
@@ -132,12 +136,12 @@ class BreakpointAction (Action):
elif isinstance(file, types.StringTypes):
self.files.Append(lldb.SBFileSpec(file, False))
self.breakpoints.append(
self.target.BreakpointCreateByName(
name, self.modules, self.files))
self.target.BreakpointCreateByName(name, self.modules, self.files)
)
elif file and line:
self.breakpoints.append(
self.target.BreakpointCreateByLocation(
file, line))
self.target.BreakpointCreateByLocation(file, line)
)
def ThreadStopped(self, thread):
if thread.GetStopReason() == lldb.eStopReasonBreakpoint:
@@ -181,8 +185,11 @@ class TestCase:
if not error.Success():
print("error: %s" % error.GetCString())
if self.process:
self.process.GetBroadcaster().AddListener(self.listener,
lldb.SBProcess.eBroadcastBitStateChanged | lldb.SBProcess.eBroadcastBitInterrupt)
self.process.GetBroadcaster().AddListener(
self.listener,
lldb.SBProcess.eBroadcastBitStateChanged
| lldb.SBProcess.eBroadcastBitInterrupt,
)
return True
return False
@@ -197,10 +204,23 @@ class TestCase:
print("event = %s" % (lldb.SBDebugger.StateAsCString(state)))
if lldb.SBProcess.GetRestartedFromEvent(process_event):
continue
if state == lldb.eStateInvalid or state == lldb.eStateDetached or state == lldb.eStateCrashed or state == lldb.eStateUnloaded or state == lldb.eStateExited:
if (
state == lldb.eStateInvalid
or state == lldb.eStateDetached
or state == lldb.eStateCrashed
or state == lldb.eStateUnloaded
or state == lldb.eStateExited
):
event = process_event
self.done = True
elif state == lldb.eStateConnected or state == lldb.eStateAttaching or state == lldb.eStateLaunching or state == lldb.eStateRunning or state == lldb.eStateStepping or state == lldb.eStateSuspended:
elif (
state == lldb.eStateConnected
or state == lldb.eStateAttaching
or state == lldb.eStateLaunching
or state == lldb.eStateRunning
or state == lldb.eStateStepping
or state == lldb.eStateSuspended
):
continue
elif state == lldb.eStateStopped:
event = process_event
@@ -213,7 +233,11 @@ class TestCase:
stop_reason = thread.GetStopReason()
if self.verbose:
print("tid = %#x pc = %#x " % (thread.GetThreadID(), frame.GetPC()), end=' ')
print(
"tid = %#x pc = %#x "
% (thread.GetThreadID(), frame.GetPC()),
end=" ",
)
if stop_reason == lldb.eStopReasonNone:
if self.verbose:
print("none")
@@ -248,25 +272,36 @@ class TestCase:
elif stop_reason == lldb.eStopReasonWatchpoint:
select_thread = True
if self.verbose:
print("watchpoint id = %d" % (thread.GetStopReasonDataAtIndex(0)))
print(
"watchpoint id = %d"
% (thread.GetStopReasonDataAtIndex(0))
)
elif stop_reason == lldb.eStopReasonSignal:
select_thread = True
if self.verbose:
print("signal %d" % (thread.GetStopReasonDataAtIndex(0)))
print(
"signal %d"
% (thread.GetStopReasonDataAtIndex(0))
)
elif stop_reason == lldb.eStopReasonFork:
if self.verbose:
print("fork pid = %d" % (thread.GetStopReasonDataAtIndex(0)))
print(
"fork pid = %d"
% (thread.GetStopReasonDataAtIndex(0))
)
elif stop_reason == lldb.eStopReasonVFork:
if self.verbose:
print("vfork pid = %d" % (thread.GetStopReasonDataAtIndex(0)))
print(
"vfork pid = %d"
% (thread.GetStopReasonDataAtIndex(0))
)
elif stop_reason == lldb.eStopReasonVForkDone:
if self.verbose:
print("vfork done")
if select_thread and not selected_thread:
self.thread = thread
selected_thread = self.process.SetSelectedThread(
thread)
selected_thread = self.process.SetSelectedThread(thread)
for action in self.user_actions:
action.ThreadStopped(thread)
@@ -279,7 +314,7 @@ class TestCase:
class Measurement:
'''A class that encapsulates a measurement'''
"""A class that encapsulates a measurement"""
def __init__(self):
object.__init__(self)
@@ -289,7 +324,7 @@ class Measurement:
class MemoryMeasurement(Measurement):
'''A class that can measure memory statistics for a process.'''
"""A class that can measure memory statistics for a process."""
def __init__(self, pid):
Measurement.__init__(self)
@@ -304,33 +339,33 @@ class MemoryMeasurement(Measurement):
"kshrd",
"faults",
"cow",
"pageins"]
self.command = "top -l 1 -pid %u -stats %s" % (
self.pid, ",".join(self.stats))
"pageins",
]
self.command = "top -l 1 -pid %u -stats %s" % (self.pid, ",".join(self.stats))
self.value = dict()
def Measure(self):
output = subprocess.getoutput(self.command).split("\n")[-1]
values = re.split('[-+\s]+', output)
for (idx, stat) in enumerate(values):
values = re.split("[-+\s]+", output)
for idx, stat in enumerate(values):
multiplier = 1
if stat:
if stat[-1] == 'K':
if stat[-1] == "K":
multiplier = 1024
stat = stat[:-1]
elif stat[-1] == 'M':
elif stat[-1] == "M":
multiplier = 1024 * 1024
stat = stat[:-1]
elif stat[-1] == 'G':
elif stat[-1] == "G":
multiplier = 1024 * 1024 * 1024
elif stat[-1] == 'T':
elif stat[-1] == "T":
multiplier = 1024 * 1024 * 1024 * 1024
stat = stat[:-1]
self.value[self.stats[idx]] = int(stat) * multiplier
def __str__(self):
'''Dump the MemoryMeasurement current value'''
s = ''
"""Dump the MemoryMeasurement current value"""
s = ""
for key in self.value.keys():
if s:
s += "\n"
@@ -339,7 +374,6 @@ class MemoryMeasurement(Measurement):
class TesterTestCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.verbose = True
@@ -348,7 +382,10 @@ class TesterTestCase(TestCase):
def BreakpointHit(self, thread):
bp_id = thread.GetStopReasonDataAtIndex(0)
loc_id = thread.GetStopReasonDataAtIndex(1)
print("Breakpoint %i.%i hit: %s" % (bp_id, loc_id, thread.process.target.FindBreakpointByID(bp_id)))
print(
"Breakpoint %i.%i hit: %s"
% (bp_id, loc_id, thread.process.target.FindBreakpointByID(bp_id))
)
thread.StepOver()
def PlanComplete(self, thread):
@@ -365,19 +402,20 @@ class TesterTestCase(TestCase):
if self.target:
with Timer() as breakpoint_timer:
bp = self.target.BreakpointCreateByName("main")
print(
'Breakpoint time = %.03f sec.' %
breakpoint_timer.interval)
print("Breakpoint time = %.03f sec." % breakpoint_timer.interval)
self.user_actions.append(
BreakpointAction(
breakpoint=bp,
callback=TesterTestCase.BreakpointHit,
callback_owner=self))
callback_owner=self,
)
)
self.user_actions.append(
PlanCompleteAction(
callback=TesterTestCase.PlanComplete,
callback_owner=self))
callback=TesterTestCase.PlanComplete, callback_owner=self
)
)
if self.Launch():
while not self.done:
@@ -386,10 +424,10 @@ class TesterTestCase(TestCase):
print("error: failed to launch process")
else:
print("error: failed to create target with '%s'" % (args[0]))
print('Total time = %.03f sec.' % total_time.interval)
print("Total time = %.03f sec." % total_time.interval)
if __name__ == '__main__':
if __name__ == "__main__":
lldb.SBDebugger.Initialize()
test = TesterTestCase()
test.Run(sys.argv[1:])
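Both this script and process_events.py later in this commit follow the same asynchronous pattern: listen for process state-change events, loop on WaitForEvent, and dispatch on the state carried by each event. Stripped of the bookkeeping, the core loop looks roughly like this sketch (assumes debugger and process are already set up; the timeout value is arbitrary):

import lldb

def wait_for_process_events(debugger, process, timeout_secs=5):
    listener = debugger.GetListener()
    event = lldb.SBEvent()
    while True:
        if not listener.WaitForEvent(timeout_secs, event):
            break  # timed out waiting for the next event
        state = lldb.SBProcess.GetStateFromEvent(event)
        if state == lldb.eStateStopped:
            process.Continue()
        elif state in (lldb.eStateExited, lldb.eStateCrashed, lldb.eStateDetached):
            break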

View File

@@ -1,12 +1,12 @@
#!/usr/bin/env python
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Be sure to add the python path that points to the LLDB shared library.
# On MacOSX csh, tcsh:
# setenv PYTHONPATH /Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python
# On MacOSX sh, bash:
# export PYTHONPATH=/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
import optparse
import os
@@ -14,9 +14,9 @@ import platform
import sys
import subprocess
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Code that auto imports LLDB
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
try:
# Just try for LLDB in case PYTHONPATH is already correctly setup
import lldb
@@ -24,18 +24,21 @@ except ImportError:
lldb_python_dirs = list()
# lldb is not in the PYTHONPATH, try some defaults for the current platform
platform_system = platform.system()
if platform_system == 'Darwin':
if platform_system == "Darwin":
# On Darwin, try the currently selected Xcode directory
xcode_dir = subprocess.check_output("xcode-select --print-path", shell=True)
if xcode_dir:
lldb_python_dirs.append(
os.path.realpath(
xcode_dir +
'/../SharedFrameworks/LLDB.framework/Resources/Python'))
xcode_dir + "/../SharedFrameworks/LLDB.framework/Resources/Python"
)
)
lldb_python_dirs.append(
xcode_dir + '/Library/PrivateFrameworks/LLDB.framework/Resources/Python')
xcode_dir + "/Library/PrivateFrameworks/LLDB.framework/Resources/Python"
)
lldb_python_dirs.append(
'/System/Library/PrivateFrameworks/LLDB.framework/Resources/Python')
"/System/Library/PrivateFrameworks/LLDB.framework/Resources/Python"
)
success = False
for lldb_python_dir in lldb_python_dirs:
if os.path.exists(lldb_python_dir):
@@ -50,14 +53,16 @@ except ImportError:
success = True
break
if not success:
print("error: couldn't locate the 'lldb' module, please set PYTHONPATH correctly")
print(
"error: couldn't locate the 'lldb' module, please set PYTHONPATH correctly"
)
sys.exit(1)
def print_threads(process, options):
if options.show_threads:
for thread in process:
print('%s %s' % (thread, thread.GetFrameAtIndex(0)))
print("%s %s" % (thread, thread.GetFrameAtIndex(0)))
def run_commands(command_interpreter, commands):
@@ -73,8 +78,8 @@ def run_commands(command_interpreter, commands):
def main(argv):
description = '''Debugs a program using the LLDB python API and uses asynchronous broadcast events to watch for process state changes.'''
epilog = '''Examples:
description = """Debugs a program using the LLDB python API and uses asynchronous broadcast events to watch for process state changes."""
epilog = """Examples:
#----------------------------------------------------------------------
# Run "/bin/ls" with the arguments "-lAF /tmp/", and set a breakpoint
@@ -82,146 +87,164 @@ def main(argv):
#----------------------------------------------------------------------
% ./process_events.py --breakpoint malloc --stop-command bt --stop-command 'register read' -- /bin/ls -lAF /tmp/
'''
"""
optparse.OptionParser.format_epilog = lambda self, formatter: self.epilog
parser = optparse.OptionParser(
description=description,
prog='process_events',
usage='usage: process_events [options] program [arg1 arg2]',
epilog=epilog)
prog="process_events",
usage="usage: process_events [options] program [arg1 arg2]",
epilog=epilog,
)
parser.add_option(
'-v',
'--verbose',
action='store_true',
dest='verbose',
"-v",
"--verbose",
action="store_true",
dest="verbose",
help="Enable verbose logging.",
default=False)
default=False,
)
parser.add_option(
'-b',
'--breakpoint',
action='append',
type='string',
metavar='BPEXPR',
dest='breakpoints',
help='Breakpoint commands to create after the target has been created, the values will be sent to the "_regexp-break" command which supports breakpoints by name, file:line, and address.')
"-b",
"--breakpoint",
action="append",
type="string",
metavar="BPEXPR",
dest="breakpoints",
help='Breakpoint commands to create after the target has been created, the values will be sent to the "_regexp-break" command which supports breakpoints by name, file:line, and address.',
)
parser.add_option(
'-a',
'--arch',
type='string',
dest='arch',
help='The architecture to use when creating the debug target.',
default=None)
"-a",
"--arch",
type="string",
dest="arch",
help="The architecture to use when creating the debug target.",
default=None,
)
parser.add_option(
'--platform',
type='string',
metavar='platform',
dest='platform',
"--platform",
type="string",
metavar="platform",
dest="platform",
help='Specify the platform to use when creating the debug target. Valid values include "localhost", "darwin-kernel", "ios-simulator", "remote-freebsd", "remote-macosx", "remote-ios", "remote-linux".',
default=None)
default=None,
)
parser.add_option(
'-l',
'--launch-command',
action='append',
type='string',
metavar='CMD',
dest='launch_commands',
help='LLDB command interpreter commands to run once after the process has launched. This option can be specified more than once.',
default=[])
"-l",
"--launch-command",
action="append",
type="string",
metavar="CMD",
dest="launch_commands",
help="LLDB command interpreter commands to run once after the process has launched. This option can be specified more than once.",
default=[],
)
parser.add_option(
'-s',
'--stop-command',
action='append',
type='string',
metavar='CMD',
dest='stop_commands',
help='LLDB command interpreter commands to run each time the process stops. This option can be specified more than once.',
default=[])
"-s",
"--stop-command",
action="append",
type="string",
metavar="CMD",
dest="stop_commands",
help="LLDB command interpreter commands to run each time the process stops. This option can be specified more than once.",
default=[],
)
parser.add_option(
'-c',
'--crash-command',
action='append',
type='string',
metavar='CMD',
dest='crash_commands',
help='LLDB command interpreter commands to run in case the process crashes. This option can be specified more than once.',
default=[])
"-c",
"--crash-command",
action="append",
type="string",
metavar="CMD",
dest="crash_commands",
help="LLDB command interpreter commands to run in case the process crashes. This option can be specified more than once.",
default=[],
)
parser.add_option(
'-x',
'--exit-command',
action='append',
type='string',
metavar='CMD',
dest='exit_commands',
help='LLDB command interpreter commands to run once after the process has exited. This option can be specified more than once.',
default=[])
"-x",
"--exit-command",
action="append",
type="string",
metavar="CMD",
dest="exit_commands",
help="LLDB command interpreter commands to run once after the process has exited. This option can be specified more than once.",
default=[],
)
parser.add_option(
'-T',
'--no-threads',
action='store_false',
dest='show_threads',
"-T",
"--no-threads",
action="store_false",
dest="show_threads",
help="Don't show threads when process stops.",
default=True)
default=True,
)
parser.add_option(
'--ignore-errors',
action='store_false',
dest='stop_on_error',
"--ignore-errors",
action="store_false",
dest="stop_on_error",
help="Don't stop executing LLDB commands if the command returns an error. This applies to all of the LLDB command interpreter commands that get run for launch, stop, crash and exit.",
default=True)
default=True,
)
parser.add_option(
'-n',
'--run-count',
type='int',
dest='run_count',
metavar='N',
help='How many times to run the process in case the process exits.',
default=1)
"-n",
"--run-count",
type="int",
dest="run_count",
metavar="N",
help="How many times to run the process in case the process exits.",
default=1,
)
parser.add_option(
'-t',
'--event-timeout',
type='int',
dest='event_timeout',
metavar='SEC',
help='Specify the timeout in seconds to wait for process state change events.',
default=lldb.UINT32_MAX)
"-t",
"--event-timeout",
type="int",
dest="event_timeout",
metavar="SEC",
help="Specify the timeout in seconds to wait for process state change events.",
default=lldb.UINT32_MAX,
)
parser.add_option(
'-e',
'--environment',
action='append',
type='string',
metavar='ENV',
dest='env_vars',
help='Environment variables to set in the inferior process when launching a process.')
"-e",
"--environment",
action="append",
type="string",
metavar="ENV",
dest="env_vars",
help="Environment variables to set in the inferior process when launching a process.",
)
parser.add_option(
'-d',
'--working-dir',
type='string',
metavar='DIR',
dest='working_dir',
help='The current working directory when launching a process.',
default=None)
"-d",
"--working-dir",
type="string",
metavar="DIR",
dest="working_dir",
help="The current working directory when launching a process.",
default=None,
)
parser.add_option(
'-p',
'--attach-pid',
type='int',
dest='attach_pid',
metavar='PID',
help='Specify a process to attach to by process ID.',
default=-1)
"-p",
"--attach-pid",
type="int",
dest="attach_pid",
metavar="PID",
help="Specify a process to attach to by process ID.",
default=-1,
)
parser.add_option(
'-P',
'--attach-name',
type='string',
dest='attach_name',
metavar='PROCESSNAME',
help='Specify a process to attach to by name.',
default=None)
"-P",
"--attach-name",
type="string",
dest="attach_name",
metavar="PROCESSNAME",
help="Specify a process to attach to by name.",
default=None,
)
parser.add_option(
'-w',
'--attach-wait',
action='store_true',
dest='attach_wait',
help='Wait for the next process to launch when attaching to a process by name.',
default=False)
"-w",
"--attach-wait",
action="store_true",
dest="attach_wait",
help="Wait for the next process to launch when attaching to a process by name.",
default=False,
)
try:
(options, args) = parser.parse_args(argv)
except:
@@ -245,13 +268,14 @@ def main(argv):
sys.exit(1)
elif not options.attach_name is None:
if options.run_count == 1:
attach_info = lldb.SBAttachInfo(
options.attach_name, options.attach_wait)
attach_info = lldb.SBAttachInfo(options.attach_name, options.attach_wait)
else:
print("error: --run-count can't be used with the --attach-name option")
sys.exit(1)
else:
print('error: a program path for a program to debug and its arguments are required')
print(
"error: a program path for a program to debug and its arguments are required"
)
sys.exit(1)
# Create a new debugger instance
@@ -263,18 +287,16 @@ def main(argv):
if exe:
print("Creating a target for '%s'" % exe)
error = lldb.SBError()
target = debugger.CreateTarget(
exe, options.arch, options.platform, True, error)
target = debugger.CreateTarget(exe, options.arch, options.platform, True, error)
if target:
# Set any breakpoints that were specified in the args if we are launching. We use the
# command line command to take advantage of the shorthand breakpoint
# creation
if launch_info and options.breakpoints:
for bp in options.breakpoints:
debugger.HandleCommand("_regexp-break %s" % (bp))
run_commands(command_interpreter, ['breakpoint list'])
run_commands(command_interpreter, ["breakpoint list"])
for run_idx in range(options.run_count):
# Launch the process. Since we specified synchronous mode, we won't return
@@ -285,24 +307,32 @@ def main(argv):
if options.run_count == 1:
print('Launching "%s"...' % (exe))
else:
print('Launching "%s"... (launch %u of %u)' % (exe, run_idx + 1, options.run_count))
print(
'Launching "%s"... (launch %u of %u)'
% (exe, run_idx + 1, options.run_count)
)
process = target.Launch(launch_info, error)
else:
if options.attach_pid != -1:
print('Attaching to process %i...' % (options.attach_pid))
print("Attaching to process %i..." % (options.attach_pid))
else:
if options.attach_wait:
print('Waiting for next process named "%s" to launch...' % (options.attach_name))
print(
'Waiting for next process named "%s" to launch...'
% (options.attach_name)
)
else:
print('Attaching to existing process named "%s"...' % (options.attach_name))
print(
'Attaching to existing process named "%s"...'
% (options.attach_name)
)
process = target.Attach(attach_info, error)
# Make sure the launch went ok
if process and process.GetProcessID() != lldb.LLDB_INVALID_PROCESS_ID:
pid = process.GetProcessID()
print('Process is %i' % (pid))
print("Process is %i" % (pid))
if attach_info:
# continue process if we attached as we won't get an
# initial event
@@ -319,15 +349,19 @@ def main(argv):
state = lldb.SBProcess.GetStateFromEvent(event)
if state == lldb.eStateInvalid:
# Not a state event
print('process event = %s' % (event))
print("process event = %s" % (event))
else:
print("process state changed event: %s" % (lldb.SBDebugger.StateAsCString(state)))
print(
"process state changed event: %s"
% (lldb.SBDebugger.StateAsCString(state))
)
if state == lldb.eStateStopped:
if stop_idx == 0:
if launch_info:
print("process %u launched" % (pid))
run_commands(
command_interpreter, ['breakpoint list'])
command_interpreter, ["breakpoint list"]
)
else:
print("attached to process %u" % (pid))
for m in target.modules:
@@ -335,16 +369,21 @@ def main(argv):
if options.breakpoints:
for bp in options.breakpoints:
debugger.HandleCommand(
"_regexp-break %s" % (bp))
"_regexp-break %s" % (bp)
)
run_commands(
command_interpreter, ['breakpoint list'])
command_interpreter,
["breakpoint list"],
)
run_commands(
command_interpreter, options.launch_commands)
command_interpreter, options.launch_commands
)
else:
if options.verbose:
print("process %u stopped" % (pid))
run_commands(
command_interpreter, options.stop_commands)
command_interpreter, options.stop_commands
)
stop_idx += 1
print_threads(process, options)
print("continuing process %u" % (pid))
@@ -352,17 +391,25 @@ def main(argv):
elif state == lldb.eStateExited:
exit_desc = process.GetExitDescription()
if exit_desc:
print("process %u exited with status %u: %s" % (pid, process.GetExitStatus(), exit_desc))
print(
"process %u exited with status %u: %s"
% (pid, process.GetExitStatus(), exit_desc)
)
else:
print("process %u exited with status %u" % (pid, process.GetExitStatus()))
print(
"process %u exited with status %u"
% (pid, process.GetExitStatus())
)
run_commands(
command_interpreter, options.exit_commands)
command_interpreter, options.exit_commands
)
done = True
elif state == lldb.eStateCrashed:
print("process %u crashed" % (pid))
print_threads(process, options)
run_commands(
command_interpreter, options.crash_commands)
command_interpreter, options.crash_commands
)
done = True
elif state == lldb.eStateDetached:
print("process %u detached" % (pid))
@@ -374,7 +421,10 @@ def main(argv):
if options.verbose:
print("process %u resumed" % (pid))
elif state == lldb.eStateUnloaded:
print("process %u unloaded, this shouldn't happen" % (pid))
print(
"process %u unloaded, this shouldn't happen"
% (pid)
)
done = True
elif state == lldb.eStateConnected:
print("process connected")
@@ -383,10 +433,13 @@ def main(argv):
elif state == lldb.eStateLaunching:
print("process launching")
else:
print('event = %s' % (event))
print("event = %s" % (event))
else:
# timeout waiting for an event
print("no process event for %u seconds, killing the process..." % (options.event_timeout))
print(
"no process event for %u seconds, killing the process..."
% (options.event_timeout)
)
done = True
# Now that we are done dump the stdout and stderr
process_stdout = process.GetSTDOUT(1024)
@@ -407,11 +460,12 @@ def main(argv):
print(error)
else:
if launch_info:
print('error: launch failed')
print("error: launch failed")
else:
print('error: attach failed')
print("error: attach failed")
lldb.SBDebugger.Terminate()
if __name__ == '__main__':
if __name__ == "__main__":
main(sys.argv[1:])
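The loop above drives the debugger in asynchronous mode: it launches or attaches, then repeatedly waits on the listener for process state change events and reacts to each state. A minimal standalone sketch of that event-wait pattern follows; the executable path and the five-second timeout are placeholders, not values taken from the script.

import lldb

lldb.SBDebugger.Initialize()
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(True)  # events are delivered to a listener instead of blocking calls
target = debugger.CreateTarget("/path/to/exe")  # placeholder path
process = target.LaunchSimple(None, None, None)
listener = debugger.GetListener()
event = lldb.SBEvent()
done = False
while not done:
    # Wait up to 5 seconds for a process event, as the script does with --event-timeout.
    if listener.WaitForEvent(5, event):
        state = lldb.SBProcess.GetStateFromEvent(event)
        if state == lldb.eStateStopped:
            process.Continue()
        elif state in (lldb.eStateExited, lldb.eStateCrashed, lldb.eStateDetached):
            done = True
    else:
        done = True  # timed out waiting for an event, give up
lldb.SBDebugger.Terminate()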

View File

@@ -4,7 +4,6 @@ from collections import OrderedDict
class TracebackFancy:
def __init__(self, traceback):
self.t = traceback
@@ -20,13 +19,11 @@ class TracebackFancy:
def __str__(self):
if self.t is None:
return ""
str_self = "%s @ %s" % (
self.getFrame().getName(), self.getLineNumber())
str_self = "%s @ %s" % (self.getFrame().getName(), self.getLineNumber())
return str_self + "\n" + self.getNext().__str__()
class ExceptionFancy:
def __init__(self, frame):
self.etraceback = frame.f_exc_traceback
self.etype = frame.exc_type
@@ -41,7 +38,11 @@ class ExceptionFancy:
return TracebackFancy(self.etraceback)
def __nonzero__(self):
return self.etraceback is not None or self.etype is not None or self.evalue is not None
return (
self.etraceback is not None
or self.etype is not None
or self.evalue is not None
)
def getType(self):
return str(self.etype)
@@ -51,7 +52,6 @@ class ExceptionFancy:
class CodeFancy:
def __init__(self, code):
self.c = code
@@ -72,7 +72,6 @@ class CodeFancy:
class ArgsFancy:
def __init__(self, frame, arginfo):
self.f = frame
self.a = arginfo
@@ -127,7 +126,6 @@ class ArgsFancy:
class FrameFancy:
def __init__(self, frame):
self.f = frame
@@ -153,13 +151,14 @@ class FrameFancy:
return self.f.f_locals if self.f is not None else {}
def getArgumentInfo(self):
return ArgsFancy(
self.f, inspect.getargvalues(
self.f)) if self.f is not None else None
return (
ArgsFancy(self.f, inspect.getargvalues(self.f))
if self.f is not None
else None
)
class TracerClass:
def callEvent(self, frame):
pass
@@ -181,6 +180,7 @@ class TracerClass:
def cExceptionEvent(self, frame, cfunct):
pass
tracer_impl = TracerClass()
@@ -202,7 +202,8 @@ def the_tracer_entrypoint(frame, event, args):
elif event == "exception":
exty, exva, extb = args
exception_retval = tracer_impl.exceptionEvent(
FrameFancy(frame), ExceptionFancy(extb, exty, exva))
FrameFancy(frame), ExceptionFancy(extb, exty, exva)
)
if not exception_retval:
return None
return the_tracer_entrypoint
@@ -227,40 +228,78 @@ def disable():
class LoggingTracer:
def callEvent(self, frame):
print("call " + frame.getName() + " from " + frame.getCaller().getName() + " @ " + str(frame.getCaller().getLineNumber()) + " args are " + str(frame.getArgumentInfo()))
print(
"call "
+ frame.getName()
+ " from "
+ frame.getCaller().getName()
+ " @ "
+ str(frame.getCaller().getLineNumber())
+ " args are "
+ str(frame.getArgumentInfo())
)
def lineEvent(self, frame):
print("running " + frame.getName() + " @ " + str(frame.getLineNumber()) + " locals are " + str(frame.getLocals()) + " in " + frame.getFileName())
print(
"running "
+ frame.getName()
+ " @ "
+ str(frame.getLineNumber())
+ " locals are "
+ str(frame.getLocals())
+ " in "
+ frame.getFileName()
)
def returnEvent(self, frame, retval):
print("return from " + frame.getName() + " value is " + str(retval) + " locals are " + str(frame.getLocals()))
print(
"return from "
+ frame.getName()
+ " value is "
+ str(retval)
+ " locals are "
+ str(frame.getLocals())
)
def exceptionEvent(self, frame, exception):
print("exception %s %s raised from %s @ %s" % (exception.getType(), str(exception.getValue()), frame.getName(), frame.getLineNumber()))
print(
"exception %s %s raised from %s @ %s"
% (
exception.getType(),
str(exception.getValue()),
frame.getName(),
frame.getLineNumber(),
)
)
print("tb: " + str(exception.getTraceback()))
# the same functionality as LoggingTracer, but with a little more
# lldb-specific smarts
class LLDBAwareTracer:
def callEvent(self, frame):
if frame.getName() == "<module>":
return
if frame.getName() == "run_one_line":
print("call run_one_line(%s)" % (frame.getArgumentInfo().getArgs()["input_string"]))
print(
"call run_one_line(%s)"
% (frame.getArgumentInfo().getArgs()["input_string"])
)
return
if "Python.framework" in frame.getFileName():
print("call into Python at " + frame.getName())
return
if frame.getName() == "__init__" and frame.getCaller().getName(
) == "run_one_line" and frame.getCaller().getLineNumber() == 101:
if (
frame.getName() == "__init__"
and frame.getCaller().getName() == "run_one_line"
and frame.getCaller().getLineNumber() == 101
):
return False
strout = "call " + frame.getName()
if (frame.getCaller().getFileName() == ""):
if frame.getCaller().getFileName() == "":
strout += " from LLDB - args are "
args = frame.getArgumentInfo().getArgs()
for arg in args:
@@ -268,22 +307,44 @@ class LLDBAwareTracer:
continue
strout = strout + ("%s = %s " % (arg, args[arg]))
else:
strout += " from " + frame.getCaller().getName() + " @ " + \
str(frame.getCaller().getLineNumber()) + " args are " + str(frame.getArgumentInfo())
strout += (
" from "
+ frame.getCaller().getName()
+ " @ "
+ str(frame.getCaller().getLineNumber())
+ " args are "
+ str(frame.getArgumentInfo())
)
print(strout)
def lineEvent(self, frame):
if frame.getName() == "<module>":
return
if frame.getName() == "run_one_line":
print("running run_one_line(%s) @ %s" % (frame.getArgumentInfo().getArgs()["input_string"], frame.getLineNumber()))
print(
"running run_one_line(%s) @ %s"
% (
frame.getArgumentInfo().getArgs()["input_string"],
frame.getLineNumber(),
)
)
return
if "Python.framework" in frame.getFileName():
print("running into Python at " + frame.getName() + " @ " + str(frame.getLineNumber()))
print(
"running into Python at "
+ frame.getName()
+ " @ "
+ str(frame.getLineNumber())
)
return
strout = "running " + frame.getName() + " @ " + str(frame.getLineNumber()) + \
" locals are "
if (frame.getCaller().getFileName() == ""):
strout = (
"running "
+ frame.getName()
+ " @ "
+ str(frame.getLineNumber())
+ " locals are "
)
if frame.getCaller().getFileName() == "":
locals = frame.getLocals()
for local in locals:
if local == "dict" or local == "internal_dict":
@@ -298,14 +359,27 @@ class LLDBAwareTracer:
if frame.getName() == "<module>":
return
if frame.getName() == "run_one_line":
print("return from run_one_line(%s) return value is %s" % (frame.getArgumentInfo().getArgs()["input_string"], retval))
print(
"return from run_one_line(%s) return value is %s"
% (frame.getArgumentInfo().getArgs()["input_string"], retval)
)
return
if "Python.framework" in frame.getFileName():
print("return from Python at " + frame.getName() + " return value is " + str(retval))
print(
"return from Python at "
+ frame.getName()
+ " return value is "
+ str(retval)
)
return
strout = "return from " + frame.getName() + " return value is " + \
str(retval) + " locals are "
if (frame.getCaller().getFileName() == ""):
strout = (
"return from "
+ frame.getName()
+ " return value is "
+ str(retval)
+ " locals are "
)
if frame.getCaller().getFileName() == "":
locals = frame.getLocals()
for local in locals:
if local == "dict" or local == "internal_dict":
@@ -319,7 +393,15 @@ class LLDBAwareTracer:
def exceptionEvent(self, frame, exception):
if frame.getName() == "<module>":
return
print("exception %s %s raised from %s @ %s" % (exception.getType(), str(exception.getValue()), frame.getName(), frame.getLineNumber()))
print(
"exception %s %s raised from %s @ %s"
% (
exception.getType(),
str(exception.getValue()),
frame.getName(),
frame.getLineNumber(),
)
)
print("tb: " + str(exception.getTraceback()))
@@ -347,6 +429,7 @@ def total(initial=5, *numbers, **keywords):
count += keywords[key]
return count
if __name__ == "__main__":
enable(LoggingTracer())
f(5)
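The tracer above is presumably installed through Python's standard sys.settrace hook (enable() wiring the_tracer_entrypoint into it). A self-contained sketch of that hook, without the lldb-specific wrappers, shows the call/line/return events the TracerClass methods correspond to.

import sys

def trace(frame, event, arg):
    # Mirror the events LoggingTracer handles: call, line and return.
    name = frame.f_code.co_name
    if event == "call":
        print("call %s at line %d" % (name, frame.f_lineno))
    elif event == "line":
        print("running %s @ %d" % (name, frame.f_lineno))
    elif event == "return":
        print("return from %s -> %r" % (name, arg))
    return trace  # keep receiving line/return events inside this frame

def f(x):
    y = x + 1
    return y

sys.settrace(trace)
f(5)
sys.settrace(None)  # equivalent in spirit to disable() above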

View File

@@ -4,7 +4,7 @@ import lldb
class value(object):
'''A class that wraps an lldb.SBValue object and returns an object that
"""A class that wraps an lldb.SBValue object and returns an object that
can be used as an object with attributes:\n
argv = a.value(lldb.frame.FindVariable('argv'))\n
argv.name - return the name of the value that this object contains\n
@@ -24,7 +24,7 @@ class value(object):
argv.frame - return the lldb.SBFrame for this value
argv.num_children - return the number of children this value has
argv.children - return a list of sbvalue objects that represents all of the children of this value
'''
"""
def __init__(self, sbvalue):
self.sbvalue = sbvalue
@@ -40,55 +40,50 @@ class value(object):
def __getitem__(self, key):
if isinstance(key, int):
return value(
self.sbvalue.GetChildAtIndex(
key, lldb.eNoDynamicValues, True))
return value(self.sbvalue.GetChildAtIndex(key, lldb.eNoDynamicValues, True))
raise TypeError
def __getattr__(self, name):
if name == 'name':
if name == "name":
return self.sbvalue.GetName()
if name == 'type':
if name == "type":
return self.sbvalue.GetType()
if name == 'type_name':
if name == "type_name":
return self.sbvalue.GetTypeName()
if name == 'size':
if name == "size":
return self.sbvalue.GetByteSize()
if name == 'is_in_scope':
if name == "is_in_scope":
return self.sbvalue.IsInScope()
if name == 'is_pointer':
if name == "is_pointer":
return self.sbvalue.TypeIsPointerType()
if name == 'format':
if name == "format":
return self.sbvalue.GetFormat()
if name == 'value':
if name == "value":
return self.sbvalue.GetValue()
if name == 'summary':
if name == "summary":
return self.sbvalue.GetSummary()
if name == 'description':
if name == "description":
return self.sbvalue.GetObjectDescription()
if name == 'location':
if name == "location":
return self.sbvalue.GetLocation()
if name == 'target':
if name == "target":
return self.sbvalue.GetTarget()
if name == 'process':
if name == "process":
return self.sbvalue.GetProcess()
if name == 'thread':
if name == "thread":
return self.sbvalue.GetThread()
if name == 'frame':
if name == "frame":
return self.sbvalue.GetFrame()
if name == 'num_children':
if name == "num_children":
return self.sbvalue.GetNumChildren()
if name == 'children':
if name == "children":
# Returns an array of sbvalue objects, one for each child of
# the value for the lldb.SBValue
children = []
for i in range(self.sbvalue.GetNumChildren()):
children.append(
value(
self.sbvalue.GetChildAtIndex(
i,
lldb.eNoDynamicValues,
True)))
value(self.sbvalue.GetChildAtIndex(i, lldb.eNoDynamicValues, True))
)
return children
raise AttributeError
@@ -113,10 +108,7 @@ class variable(object):
def __getitem__(self, key):
# Allow array access if this value has children...
if isinstance(key, int):
return variable(
self.sbvalue.GetValueForExpressionPath(
"[%i]" %
key))
return variable(self.sbvalue.GetValueForExpressionPath("[%i]" % key))
raise TypeError
def __getattr__(self, name):
@@ -262,7 +254,7 @@ class variable(object):
return float(self.sbvalue.GetValueAsSigned())
def __oct__(self):
return '0%o' % self.sbvalue.GetValueAsSigned()
return "0%o" % self.sbvalue.GetValueAsSigned()
def __hex__(self):
return '0x%x' % self.sbvalue.GetValueAsSigned()
return "0x%x" % self.sbvalue.GetValueAsSigned()
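A short usage sketch for the wrapper classes above, following the docstring's own example; it assumes the module is importable (the module name used here is hypothetical) and that a stopped frame with an argv variable is available.

import lldb
import sbvalue  # hypothetical module name for the file above

def dump_argv(frame):
    argv = sbvalue.value(frame.FindVariable("argv"))
    # Attribute access is routed through __getattr__ to the SBValue API.
    print(argv.name, argv.type_name, argv.num_children)
    for child in argv.children:  # each child is wrapped in another 'value'
        print("  ", child.name, child.value)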

View File

@@ -6,7 +6,8 @@ import lldb
from lldb.plugins.scripted_process import ScriptedProcess
from lldb.plugins.scripted_process import ScriptedThread
from lldb.macosx.crashlog import CrashLog,CrashLogParser
from lldb.macosx.crashlog import CrashLog, CrashLogParser
class CrashLogScriptedProcess(ScriptedProcess):
def set_crashlog(self, crashlog):
@@ -23,9 +24,9 @@ class CrashLogScriptedProcess(ScriptedProcess):
self.loaded_images = []
self.exception = self.crashlog.exception
self.app_specific_thread = None
if hasattr(self.crashlog, 'asi'):
self.metadata['asi'] = self.crashlog.asi
if hasattr(self.crashlog, 'asb'):
if hasattr(self.crashlog, "asi"):
self.metadata["asi"] = self.crashlog.asi
if hasattr(self.crashlog, "asb"):
self.extended_thread_info = self.crashlog.asb
if self.load_all_images:
@@ -51,7 +52,10 @@ class CrashLogScriptedProcess(ScriptedProcess):
self.loaded_images.append(image)
for thread in self.crashlog.threads:
if hasattr(thread, 'app_specific_backtrace') and thread.app_specific_backtrace:
if (
hasattr(thread, "app_specific_backtrace")
and thread.app_specific_backtrace
):
# We don't want to include the Application Specific Backtrace
# Thread into the Scripted Process' Thread list.
# Instead, we will try to extract the stackframe pcs from the
@@ -61,14 +65,12 @@ class CrashLogScriptedProcess(ScriptedProcess):
self.threads[thread.index] = CrashLogScriptedThread(self, None, thread)
if self.app_specific_thread:
self.extended_thread_info = \
CrashLogScriptedThread.resolve_stackframes(self.app_specific_thread,
self.addr_mask,
self.target)
self.extended_thread_info = CrashLogScriptedThread.resolve_stackframes(
self.app_specific_thread, self.addr_mask, self.target
)
def __init__(self, exe_ctx: lldb.SBExecutionContext, args : lldb.SBStructuredData):
def __init__(self, exe_ctx: lldb.SBExecutionContext, args: lldb.SBStructuredData):
super().__init__(exe_ctx, args)
if not self.target or not self.target.IsValid():
@@ -99,7 +101,9 @@ class CrashLogScriptedProcess(ScriptedProcess):
self.exception = None
self.extended_thread_info = None
def read_memory_at_address(self, addr: int, size: int, error: lldb.SBError) -> lldb.SBData:
def read_memory_at_address(
self, addr: int, size: int, error: lldb.SBError
) -> lldb.SBData:
# NOTE: CrashLogs don't contain any memory.
return lldb.SBData()
@@ -120,16 +124,21 @@ class CrashLogScriptedProcess(ScriptedProcess):
def get_process_metadata(self):
return self.metadata
class CrashLogScriptedThread(ScriptedThread):
def create_register_ctx(self):
if not self.has_crashed:
return dict.fromkeys([*map(lambda reg: reg['name'], self.register_info['registers'])] , 0)
return dict.fromkeys(
[*map(lambda reg: reg["name"], self.register_info["registers"])], 0
)
if not self.backing_thread or not len(self.backing_thread.registers):
return dict.fromkeys([*map(lambda reg: reg['name'], self.register_info['registers'])] , 0)
return dict.fromkeys(
[*map(lambda reg: reg["name"], self.register_info["registers"])], 0
)
for reg in self.register_info['registers']:
reg_name = reg['name']
for reg in self.register_info["registers"]:
reg_name = reg["name"]
if reg_name in self.backing_thread.registers:
self.register_ctx[reg_name] = self.backing_thread.registers[reg_name]
else:
@@ -141,7 +150,7 @@ class CrashLogScriptedThread(ScriptedThread):
frames = []
for frame in thread.frames:
frame_pc = frame.pc & addr_mask
pc = frame_pc if frame.index == 0 or frame_pc == 0 else frame_pc - 1
pc = frame_pc if frame.index == 0 or frame_pc == 0 else frame_pc - 1
sym_addr = lldb.SBAddress()
sym_addr.SetLoadAddress(pc, target)
if not sym_addr.IsValid():
@@ -149,7 +158,6 @@ class CrashLogScriptedThread(ScriptedThread):
frames.append({"idx": frame.index, "pc": pc})
return frames
def create_stackframes(self):
if not (self.scripted_process.load_all_images or self.has_crashed):
return None
@@ -157,9 +165,9 @@ class CrashLogScriptedThread(ScriptedThread):
if not self.backing_thread or not len(self.backing_thread.frames):
return None
self.frames = CrashLogScriptedThread.resolve_stackframes(self.backing_thread,
self.scripted_process.addr_mask,
self.target)
self.frames = CrashLogScriptedThread.resolve_stackframes(
self.backing_thread, self.scripted_process.addr_mask, self.target
)
return self.frames
@@ -174,7 +182,7 @@ class CrashLogScriptedThread(ScriptedThread):
else:
self.name = self.backing_thread.name
self.queue = self.backing_thread.queue
self.has_crashed = (self.scripted_process.crashed_thread_idx == self.idx)
self.has_crashed = self.scripted_process.crashed_thread_idx == self.idx
self.create_stackframes()
def get_state(self):
@@ -184,21 +192,22 @@ class CrashLogScriptedThread(ScriptedThread):
def get_stop_reason(self) -> Dict[str, Any]:
if not self.has_crashed:
return { "type": lldb.eStopReasonNone }
return {"type": lldb.eStopReasonNone}
# TODO: Investigate what stop reason should be reported when crashed
stop_reason = { "type": lldb.eStopReasonException, "data": { }}
stop_reason = {"type": lldb.eStopReasonException, "data": {}}
if self.scripted_process.exception:
stop_reason['data']['mach_exception'] = self.scripted_process.exception
stop_reason["data"]["mach_exception"] = self.scripted_process.exception
return stop_reason
def get_register_context(self) -> str:
if not self.register_ctx:
self.register_ctx = self.create_register_ctx()
return struct.pack("{}Q".format(len(self.register_ctx)), *self.register_ctx.values())
return struct.pack(
"{}Q".format(len(self.register_ctx)), *self.register_ctx.values()
)
def get_extended_info(self):
if (self.has_crashed):
if self.has_crashed:
self.extended_info = self.scripted_process.extended_thread_info
return self.extended_info
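resolve_stackframes above masks each frame's pc and backs every frame except the youngest up by one byte, so return addresses symbolicate inside the call site rather than just past it. A standalone sketch of just that rule:

def adjust_pcs(frame_pcs, addr_mask):
    adjusted = []
    for idx, pc in enumerate(frame_pcs):
        masked = pc & addr_mask
        # Frame 0 is the faulting pc; deeper frames hold return addresses.
        adjusted.append(masked if idx == 0 or masked == 0 else masked - 1)
    return adjusted

print(adjust_pcs([0x1000, 0x2004, 0x3008], 0xFFFFFFFFFFFFFFFF))
# [4096, 8195, 12295]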

View File

@@ -2,6 +2,7 @@ from abc import ABCMeta, abstractmethod
import lldb
class ScriptedPlatform(metaclass=ABCMeta):
"""
@@ -18,7 +19,7 @@ class ScriptedPlatform(metaclass=ABCMeta):
@abstractmethod
def __init__(self, exe_ctx, args):
""" Construct a scripted platform.
"""Construct a scripted platform.
Args:
exe_ctx (lldb.SBExecutionContext): The execution context for the scripted platform
@@ -29,7 +30,7 @@ class ScriptedPlatform(metaclass=ABCMeta):
@abstractmethod
def list_processes(self):
""" Get a list of processes that are running or that can be attached to on the platform.
"""Get a list of processes that are running or that can be attached to on the platform.
processes = {
420: {
@@ -51,7 +52,7 @@ class ScriptedPlatform(metaclass=ABCMeta):
pass
def get_process_info(self, pid):
""" Get the dictionary describing the process.
"""Get the dictionary describing the process.
Returns:
Dict: The dictionary of process info that matched process ID.
@@ -61,7 +62,7 @@ class ScriptedPlatform(metaclass=ABCMeta):
@abstractmethod
def attach_to_process(self, attach_info):
""" Attach to a process.
"""Attach to a process.
Args:
attach_info (lldb.SBAttachInfo): The information related to attach to a process.
@@ -73,7 +74,7 @@ class ScriptedPlatform(metaclass=ABCMeta):
@abstractmethod
def launch_process(self, launch_info):
""" Launch a process.
"""Launch a process.
Args:
launch_info (lldb.SBLaunchInfo): The information related to the process launch.
@@ -85,7 +86,7 @@ class ScriptedPlatform(metaclass=ABCMeta):
@abstractmethod
def kill_process(self, pid):
""" Kill a process.
"""Kill a process.
Args:
pid (int): Process ID for the process to be killed.
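A minimal subclass sketch for the interface above. The import path mirrors the scripted_process import used elsewhere in this commit but is an assumption here, and the fields in the process dictionary are illustrative rather than a documented schema.

import lldb
from lldb.plugins.scripted_platform import ScriptedPlatform  # assumed import path

class MyScriptedPlatform(ScriptedPlatform):
    def __init__(self, exe_ctx, args):
        super().__init__(exe_ctx, args)
        # Hypothetical single entry keyed by pid.
        self.processes = {420: {"name": "a.out", "arch": "arm64-apple-macosx"}}

    def list_processes(self):
        return self.processes

    def get_process_info(self, pid):
        return self.processes.get(pid, None)

    def attach_to_process(self, attach_info):
        return lldb.SBError()

    def launch_process(self, launch_info):
        return lldb.SBError()

    def kill_process(self, pid):
        return lldb.SBError()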

View File

@@ -2,6 +2,7 @@ from abc import ABCMeta, abstractmethod
import lldb
class ScriptedProcess(metaclass=ABCMeta):
"""
@@ -22,7 +23,7 @@ class ScriptedProcess(metaclass=ABCMeta):
@abstractmethod
def __init__(self, exe_ctx, args):
""" Construct a scripted process.
"""Construct a scripted process.
Args:
exe_ctx (lldb.SBExecutionContext): The execution context for the scripted process.
@@ -39,7 +40,7 @@ class ScriptedProcess(metaclass=ABCMeta):
self.target = target
triple = self.target.triple
if triple:
self.arch = triple.split('-')[0]
self.arch = triple.split("-")[0]
self.dbg = target.GetDebugger()
if isinstance(args, lldb.SBStructuredData) and args.IsValid():
self.args = args
@@ -50,7 +51,7 @@ class ScriptedProcess(metaclass=ABCMeta):
self.pid = 42
def get_capabilities(self):
""" Get a dictionary containing the process capabilities.
"""Get a dictionary containing the process capabilities.
Returns:
Dict[str:bool]: The dictionary of capability, with the capability
@@ -60,7 +61,7 @@ class ScriptedProcess(metaclass=ABCMeta):
return self.capabilities
def get_memory_region_containing_address(self, addr):
""" Get the memory region for the scripted process, containing a
"""Get the memory region for the scripted process, containing a
specific address.
Args:
@@ -74,7 +75,7 @@ class ScriptedProcess(metaclass=ABCMeta):
return None
def get_threads_info(self):
""" Get the dictionary describing the process' Scripted Threads.
"""Get the dictionary describing the process' Scripted Threads.
Returns:
Dict: The dictionary of threads, with the thread ID as the key and
@@ -85,7 +86,7 @@ class ScriptedProcess(metaclass=ABCMeta):
@abstractmethod
def read_memory_at_address(self, addr, size, error):
""" Get a memory buffer from the scripted process at a certain address,
"""Get a memory buffer from the scripted process at a certain address,
of a certain size.
Args:
@@ -100,7 +101,7 @@ class ScriptedProcess(metaclass=ABCMeta):
pass
def write_memory_at_address(self, addr, data, error):
""" Write a buffer to the scripted process memory.
"""Write a buffer to the scripted process memory.
Args:
addr (int): Address at which we should start writing.
@@ -111,11 +112,13 @@ class ScriptedProcess(metaclass=ABCMeta):
Returns:
size (int): Number of bytes written.
"""
error.SetErrorString("%s doesn't support memory writes." % self.__class__.__name__)
error.SetErrorString(
"%s doesn't support memory writes." % self.__class__.__name__
)
return 0
def get_loaded_images(self):
""" Get the list of loaded images for the scripted process.
"""Get the list of loaded images for the scripted process.
```
scripted_image = {
@@ -134,7 +137,7 @@ class ScriptedProcess(metaclass=ABCMeta):
return self.loaded_images
def get_process_id(self):
""" Get the scripted process identifier.
"""Get the scripted process identifier.
Returns:
int: The scripted process identifier.
@@ -142,7 +145,7 @@ class ScriptedProcess(metaclass=ABCMeta):
return self.pid
def launch(self):
""" Simulate the scripted process launch.
"""Simulate the scripted process launch.
Returns:
lldb.SBError: An `lldb.SBError` with error code 0.
@@ -150,7 +153,7 @@ class ScriptedProcess(metaclass=ABCMeta):
return lldb.SBError()
def attach(self, attach_info):
""" Simulate the scripted process attach.
"""Simulate the scripted process attach.
Args:
attach_info (lldb.SBAttachInfo): The information related to the
@@ -162,7 +165,7 @@ class ScriptedProcess(metaclass=ABCMeta):
return lldb.SBError()
def resume(self, should_stop=True):
""" Simulate the scripted process resume.
"""Simulate the scripted process resume.
Args:
should_stop (bool): If True, resume will also force the process
@@ -177,14 +180,14 @@ class ScriptedProcess(metaclass=ABCMeta):
error.SetErrorString("Invalid process.")
return error
process.ForceScriptedState(lldb.eStateRunning);
if (should_stop):
process.ForceScriptedState(lldb.eStateStopped);
process.ForceScriptedState(lldb.eStateRunning)
if should_stop:
process.ForceScriptedState(lldb.eStateStopped)
return lldb.SBError()
@abstractmethod
def is_alive(self):
""" Check if the scripted process is alive.
"""Check if the scripted process is alive.
Returns:
bool: True if scripted process is alive. False otherwise.
@@ -193,7 +196,7 @@ class ScriptedProcess(metaclass=ABCMeta):
@abstractmethod
def get_scripted_thread_plugin(self):
""" Get scripted thread plugin name.
"""Get scripted thread plugin name.
Returns:
str: Name of the scripted thread plugin.
@@ -201,7 +204,7 @@ class ScriptedProcess(metaclass=ABCMeta):
return None
def get_process_metadata(self):
""" Get some metadata for the scripted process.
"""Get some metadata for the scripted process.
Returns:
Dict: A dictionary containing metadata for the scripted process.
@@ -210,7 +213,7 @@ class ScriptedProcess(metaclass=ABCMeta):
return self.metadata
def create_breakpoint(self, addr, error):
""" Create a breakpoint in the scripted process from an address.
"""Create a breakpoint in the scripted process from an address.
This is mainly used with interactive scripted process debugging.
Args:
@@ -221,8 +224,9 @@ class ScriptedProcess(metaclass=ABCMeta):
SBBreakpoint: A valid breakpoint object that was created at the specified
address. None if the breakpoint creation failed.
"""
error.SetErrorString("%s doesn't support creating breakpoints."
% self.__class__.__name__)
error.SetErrorString(
"%s doesn't support creating breakpoints." % self.__class__.__name__
)
return False
@@ -240,7 +244,7 @@ class ScriptedThread(metaclass=ABCMeta):
@abstractmethod
def __init__(self, scripted_process, args):
""" Construct a scripted thread.
"""Construct a scripted thread.
Args:
process (ScriptedProcess): The scripted process owning this thread.
@@ -270,7 +274,7 @@ class ScriptedThread(metaclass=ABCMeta):
self.get_register_info()
def get_thread_idx(self):
""" Get the scripted thread index.
"""Get the scripted thread index.
Returns:
int: The index of the scripted thread in the scripted process.
@@ -278,7 +282,7 @@ class ScriptedThread(metaclass=ABCMeta):
return self.idx
def get_thread_id(self):
""" Get the scripted thread identifier.
"""Get the scripted thread identifier.
Returns:
int: The identifier of the scripted thread.
@@ -286,7 +290,7 @@ class ScriptedThread(metaclass=ABCMeta):
return self.tid
def get_name(self):
""" Get the scripted thread name.
"""Get the scripted thread name.
Returns:
str: The name of the scripted thread.
@@ -294,7 +298,7 @@ class ScriptedThread(metaclass=ABCMeta):
return self.name
def get_state(self):
""" Get the scripted thread state type.
"""Get the scripted thread state type.
eStateStopped, ///< Process or thread is stopped and can be examined.
eStateRunning, ///< Process or thread is running and can't be examined.
@@ -309,7 +313,7 @@ class ScriptedThread(metaclass=ABCMeta):
return lldb.eStateStopped
def get_queue(self):
""" Get the scripted thread associated queue name.
"""Get the scripted thread associated queue name.
This method is optional.
Returns:
@@ -319,7 +323,7 @@ class ScriptedThread(metaclass=ABCMeta):
@abstractmethod
def get_stop_reason(self):
""" Get the dictionary describing the stop reason type with some data.
"""Get the dictionary describing the stop reason type with some data.
This method is optional.
Returns:
@@ -329,7 +333,7 @@ class ScriptedThread(metaclass=ABCMeta):
pass
def get_stackframes(self):
""" Get the list of stack frames for the scripted thread.
"""Get the list of stack frames for the scripted thread.
```
scripted_frame = {
@@ -349,18 +353,19 @@ class ScriptedThread(metaclass=ABCMeta):
def get_register_info(self):
if self.register_info is None:
self.register_info = dict()
if self.scripted_process.arch == 'x86_64':
self.register_info['sets'] = ['General Purpose Registers']
self.register_info['registers'] = INTEL64_GPR
elif 'arm64' in self.scripted_process.arch:
self.register_info['sets'] = ['General Purpose Registers']
self.register_info['registers'] = ARM64_GPR
else: raise ValueError('Unknown architecture', self.scripted_process.arch)
if self.scripted_process.arch == "x86_64":
self.register_info["sets"] = ["General Purpose Registers"]
self.register_info["registers"] = INTEL64_GPR
elif "arm64" in self.scripted_process.arch:
self.register_info["sets"] = ["General Purpose Registers"]
self.register_info["registers"] = ARM64_GPR
else:
raise ValueError("Unknown architecture", self.scripted_process.arch)
return self.register_info
@abstractmethod
def get_register_context(self):
""" Get the scripted thread register context
"""Get the scripted thread register context
Returns:
str: A bytes buffer packed with the values of all the registers.
@@ -368,7 +373,7 @@ class ScriptedThread(metaclass=ABCMeta):
pass
def get_extended_info(self):
""" Get scripted thread extended information.
"""Get scripted thread extended information.
Returns:
List: A list containing the extended information for the scripted process.
@@ -376,63 +381,595 @@ class ScriptedThread(metaclass=ABCMeta):
"""
return self.extended_info
ARM64_GPR = [ {'name': 'x0', 'bitsize': 64, 'offset': 0, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 0, 'dwarf': 0, 'generic': 'arg0', 'alt-name': 'arg0'},
{'name': 'x1', 'bitsize': 64, 'offset': 8, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 1, 'dwarf': 1, 'generic': 'arg1', 'alt-name': 'arg1'},
{'name': 'x2', 'bitsize': 64, 'offset': 16, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 2, 'dwarf': 2, 'generic': 'arg2', 'alt-name': 'arg2'},
{'name': 'x3', 'bitsize': 64, 'offset': 24, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 3, 'dwarf': 3, 'generic': 'arg3', 'alt-name': 'arg3'},
{'name': 'x4', 'bitsize': 64, 'offset': 32, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 4, 'dwarf': 4, 'generic': 'arg4', 'alt-name': 'arg4'},
{'name': 'x5', 'bitsize': 64, 'offset': 40, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 5, 'dwarf': 5, 'generic': 'arg5', 'alt-name': 'arg5'},
{'name': 'x6', 'bitsize': 64, 'offset': 48, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 6, 'dwarf': 6, 'generic': 'arg6', 'alt-name': 'arg6'},
{'name': 'x7', 'bitsize': 64, 'offset': 56, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 7, 'dwarf': 7, 'generic': 'arg7', 'alt-name': 'arg7'},
{'name': 'x8', 'bitsize': 64, 'offset': 64, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 8, 'dwarf': 8 },
{'name': 'x9', 'bitsize': 64, 'offset': 72, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 9, 'dwarf': 9 },
{'name': 'x10', 'bitsize': 64, 'offset': 80, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 10, 'dwarf': 10},
{'name': 'x11', 'bitsize': 64, 'offset': 88, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 11, 'dwarf': 11},
{'name': 'x12', 'bitsize': 64, 'offset': 96, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 12, 'dwarf': 12},
{'name': 'x13', 'bitsize': 64, 'offset': 104, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 13, 'dwarf': 13},
{'name': 'x14', 'bitsize': 64, 'offset': 112, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 14, 'dwarf': 14},
{'name': 'x15', 'bitsize': 64, 'offset': 120, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 15, 'dwarf': 15},
{'name': 'x16', 'bitsize': 64, 'offset': 128, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 16, 'dwarf': 16},
{'name': 'x17', 'bitsize': 64, 'offset': 136, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 17, 'dwarf': 17},
{'name': 'x18', 'bitsize': 64, 'offset': 144, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 18, 'dwarf': 18},
{'name': 'x19', 'bitsize': 64, 'offset': 152, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 19, 'dwarf': 19},
{'name': 'x20', 'bitsize': 64, 'offset': 160, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 20, 'dwarf': 20},
{'name': 'x21', 'bitsize': 64, 'offset': 168, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 21, 'dwarf': 21},
{'name': 'x22', 'bitsize': 64, 'offset': 176, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 22, 'dwarf': 22},
{'name': 'x23', 'bitsize': 64, 'offset': 184, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 23, 'dwarf': 23},
{'name': 'x24', 'bitsize': 64, 'offset': 192, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 24, 'dwarf': 24},
{'name': 'x25', 'bitsize': 64, 'offset': 200, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 25, 'dwarf': 25},
{'name': 'x26', 'bitsize': 64, 'offset': 208, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 26, 'dwarf': 26},
{'name': 'x27', 'bitsize': 64, 'offset': 216, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 27, 'dwarf': 27},
{'name': 'x28', 'bitsize': 64, 'offset': 224, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 28, 'dwarf': 28},
{'name': 'x29', 'bitsize': 64, 'offset': 232, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 29, 'dwarf': 29, 'generic': 'fp', 'alt-name': 'fp'},
{'name': 'x30', 'bitsize': 64, 'offset': 240, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 30, 'dwarf': 30, 'generic': 'lr', 'alt-name': 'lr'},
{'name': 'sp', 'bitsize': 64, 'offset': 248, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 31, 'dwarf': 31, 'generic': 'sp', 'alt-name': 'sp'},
{'name': 'pc', 'bitsize': 64, 'offset': 256, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 32, 'dwarf': 32, 'generic': 'pc', 'alt-name': 'pc'},
{'name': 'cpsr', 'bitsize': 32, 'offset': 264, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 33, 'dwarf': 33}
]
INTEL64_GPR = [ {'name': 'rax', 'bitsize': 64, 'offset': 0, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 0, 'dwarf': 0},
{'name': 'rbx', 'bitsize': 64, 'offset': 8, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 3, 'dwarf': 3},
{'name': 'rcx', 'bitsize': 64, 'offset': 16, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 2, 'dwarf': 2, 'generic': 'arg4', 'alt-name': 'arg4'},
{'name': 'rdx', 'bitsize': 64, 'offset': 24, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 1, 'dwarf': 1, 'generic': 'arg3', 'alt-name': 'arg3'},
{'name': 'rdi', 'bitsize': 64, 'offset': 32, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 5, 'dwarf': 5, 'generic': 'arg1', 'alt-name': 'arg1'},
{'name': 'rsi', 'bitsize': 64, 'offset': 40, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 4, 'dwarf': 4, 'generic': 'arg2', 'alt-name': 'arg2'},
{'name': 'rbp', 'bitsize': 64, 'offset': 48, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 6, 'dwarf': 6, 'generic': 'fp', 'alt-name': 'fp'},
{'name': 'rsp', 'bitsize': 64, 'offset': 56, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 7, 'dwarf': 7, 'generic': 'sp', 'alt-name': 'sp'},
{'name': 'r8', 'bitsize': 64, 'offset': 64, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 8, 'dwarf': 8, 'generic': 'arg5', 'alt-name': 'arg5'},
{'name': 'r9', 'bitsize': 64, 'offset': 72, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 9, 'dwarf': 9, 'generic': 'arg6', 'alt-name': 'arg6'},
{'name': 'r10', 'bitsize': 64, 'offset': 80, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 10, 'dwarf': 10},
{'name': 'r11', 'bitsize': 64, 'offset': 88, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 11, 'dwarf': 11},
{'name': 'r12', 'bitsize': 64, 'offset': 96, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 12, 'dwarf': 12},
{'name': 'r13', 'bitsize': 64, 'offset': 104, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 13, 'dwarf': 13},
{'name': 'r14', 'bitsize': 64, 'offset': 112, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 14, 'dwarf': 14},
{'name': 'r15', 'bitsize': 64, 'offset': 120, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 15, 'dwarf': 15},
{'name': 'rip', 'bitsize': 64, 'offset': 128, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 16, 'dwarf': 16, 'generic': 'pc', 'alt-name': 'pc'},
{'name': 'rflags', 'bitsize': 64, 'offset': 136, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'generic': 'flags', 'alt-name': 'flags'},
{'name': 'cs', 'bitsize': 64, 'offset': 144, 'encoding': 'uint', 'format': 'hex', 'set': 0},
{'name': 'fs', 'bitsize': 64, 'offset': 152, 'encoding': 'uint', 'format': 'hex', 'set': 0},
{'name': 'gs', 'bitsize': 64, 'offset': 160, 'encoding': 'uint', 'format': 'hex', 'set': 0}
]
ARM64_GPR = [
{
"name": "x0",
"bitsize": 64,
"offset": 0,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 0,
"dwarf": 0,
"generic": "arg0",
"alt-name": "arg0",
},
{
"name": "x1",
"bitsize": 64,
"offset": 8,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 1,
"dwarf": 1,
"generic": "arg1",
"alt-name": "arg1",
},
{
"name": "x2",
"bitsize": 64,
"offset": 16,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 2,
"dwarf": 2,
"generic": "arg2",
"alt-name": "arg2",
},
{
"name": "x3",
"bitsize": 64,
"offset": 24,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 3,
"dwarf": 3,
"generic": "arg3",
"alt-name": "arg3",
},
{
"name": "x4",
"bitsize": 64,
"offset": 32,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 4,
"dwarf": 4,
"generic": "arg4",
"alt-name": "arg4",
},
{
"name": "x5",
"bitsize": 64,
"offset": 40,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 5,
"dwarf": 5,
"generic": "arg5",
"alt-name": "arg5",
},
{
"name": "x6",
"bitsize": 64,
"offset": 48,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 6,
"dwarf": 6,
"generic": "arg6",
"alt-name": "arg6",
},
{
"name": "x7",
"bitsize": 64,
"offset": 56,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 7,
"dwarf": 7,
"generic": "arg7",
"alt-name": "arg7",
},
{
"name": "x8",
"bitsize": 64,
"offset": 64,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 8,
"dwarf": 8,
},
{
"name": "x9",
"bitsize": 64,
"offset": 72,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 9,
"dwarf": 9,
},
{
"name": "x10",
"bitsize": 64,
"offset": 80,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 10,
"dwarf": 10,
},
{
"name": "x11",
"bitsize": 64,
"offset": 88,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 11,
"dwarf": 11,
},
{
"name": "x12",
"bitsize": 64,
"offset": 96,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 12,
"dwarf": 12,
},
{
"name": "x13",
"bitsize": 64,
"offset": 104,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 13,
"dwarf": 13,
},
{
"name": "x14",
"bitsize": 64,
"offset": 112,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 14,
"dwarf": 14,
},
{
"name": "x15",
"bitsize": 64,
"offset": 120,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 15,
"dwarf": 15,
},
{
"name": "x16",
"bitsize": 64,
"offset": 128,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 16,
"dwarf": 16,
},
{
"name": "x17",
"bitsize": 64,
"offset": 136,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 17,
"dwarf": 17,
},
{
"name": "x18",
"bitsize": 64,
"offset": 144,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 18,
"dwarf": 18,
},
{
"name": "x19",
"bitsize": 64,
"offset": 152,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 19,
"dwarf": 19,
},
{
"name": "x20",
"bitsize": 64,
"offset": 160,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 20,
"dwarf": 20,
},
{
"name": "x21",
"bitsize": 64,
"offset": 168,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 21,
"dwarf": 21,
},
{
"name": "x22",
"bitsize": 64,
"offset": 176,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 22,
"dwarf": 22,
},
{
"name": "x23",
"bitsize": 64,
"offset": 184,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 23,
"dwarf": 23,
},
{
"name": "x24",
"bitsize": 64,
"offset": 192,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 24,
"dwarf": 24,
},
{
"name": "x25",
"bitsize": 64,
"offset": 200,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 25,
"dwarf": 25,
},
{
"name": "x26",
"bitsize": 64,
"offset": 208,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 26,
"dwarf": 26,
},
{
"name": "x27",
"bitsize": 64,
"offset": 216,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 27,
"dwarf": 27,
},
{
"name": "x28",
"bitsize": 64,
"offset": 224,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 28,
"dwarf": 28,
},
{
"name": "x29",
"bitsize": 64,
"offset": 232,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 29,
"dwarf": 29,
"generic": "fp",
"alt-name": "fp",
},
{
"name": "x30",
"bitsize": 64,
"offset": 240,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 30,
"dwarf": 30,
"generic": "lr",
"alt-name": "lr",
},
{
"name": "sp",
"bitsize": 64,
"offset": 248,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 31,
"dwarf": 31,
"generic": "sp",
"alt-name": "sp",
},
{
"name": "pc",
"bitsize": 64,
"offset": 256,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 32,
"dwarf": 32,
"generic": "pc",
"alt-name": "pc",
},
{
"name": "cpsr",
"bitsize": 32,
"offset": 264,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 33,
"dwarf": 33,
},
]
INTEL64_GPR = [
{
"name": "rax",
"bitsize": 64,
"offset": 0,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 0,
"dwarf": 0,
},
{
"name": "rbx",
"bitsize": 64,
"offset": 8,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 3,
"dwarf": 3,
},
{
"name": "rcx",
"bitsize": 64,
"offset": 16,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 2,
"dwarf": 2,
"generic": "arg4",
"alt-name": "arg4",
},
{
"name": "rdx",
"bitsize": 64,
"offset": 24,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 1,
"dwarf": 1,
"generic": "arg3",
"alt-name": "arg3",
},
{
"name": "rdi",
"bitsize": 64,
"offset": 32,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 5,
"dwarf": 5,
"generic": "arg1",
"alt-name": "arg1",
},
{
"name": "rsi",
"bitsize": 64,
"offset": 40,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 4,
"dwarf": 4,
"generic": "arg2",
"alt-name": "arg2",
},
{
"name": "rbp",
"bitsize": 64,
"offset": 48,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 6,
"dwarf": 6,
"generic": "fp",
"alt-name": "fp",
},
{
"name": "rsp",
"bitsize": 64,
"offset": 56,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 7,
"dwarf": 7,
"generic": "sp",
"alt-name": "sp",
},
{
"name": "r8",
"bitsize": 64,
"offset": 64,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 8,
"dwarf": 8,
"generic": "arg5",
"alt-name": "arg5",
},
{
"name": "r9",
"bitsize": 64,
"offset": 72,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 9,
"dwarf": 9,
"generic": "arg6",
"alt-name": "arg6",
},
{
"name": "r10",
"bitsize": 64,
"offset": 80,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 10,
"dwarf": 10,
},
{
"name": "r11",
"bitsize": 64,
"offset": 88,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 11,
"dwarf": 11,
},
{
"name": "r12",
"bitsize": 64,
"offset": 96,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 12,
"dwarf": 12,
},
{
"name": "r13",
"bitsize": 64,
"offset": 104,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 13,
"dwarf": 13,
},
{
"name": "r14",
"bitsize": 64,
"offset": 112,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 14,
"dwarf": 14,
},
{
"name": "r15",
"bitsize": 64,
"offset": 120,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 15,
"dwarf": 15,
},
{
"name": "rip",
"bitsize": 64,
"offset": 128,
"encoding": "uint",
"format": "hex",
"set": 0,
"gcc": 16,
"dwarf": 16,
"generic": "pc",
"alt-name": "pc",
},
{
"name": "rflags",
"bitsize": 64,
"offset": 136,
"encoding": "uint",
"format": "hex",
"set": 0,
"generic": "flags",
"alt-name": "flags",
},
{
"name": "cs",
"bitsize": 64,
"offset": 144,
"encoding": "uint",
"format": "hex",
"set": 0,
},
{
"name": "fs",
"bitsize": 64,
"offset": 152,
"encoding": "uint",
"format": "hex",
"set": 0,
},
{
"name": "gs",
"bitsize": 64,
"offset": 160,
"encoding": "uint",
"format": "hex",
"set": 0,
},
]
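The register tables above feed ScriptedThread.get_register_info, and a thread's get_register_context is expected to return one packed value per listed register, as CrashLogScriptedThread does earlier in this commit. A small sketch of that zero-filled packing, with a two-entry table standing in for INTEL64_GPR:

import struct

def pack_zero_register_context(register_info):
    names = [reg["name"] for reg in register_info["registers"]]
    register_ctx = dict.fromkeys(names, 0)  # every register zero-filled
    # One unsigned 64-bit slot per register, in table order.
    return struct.pack("{}Q".format(len(register_ctx)), *register_ctx.values())

demo_info = {
    "sets": ["General Purpose Registers"],
    "registers": [
        {"name": "rax", "bitsize": 64, "offset": 0},
        {"name": "rbx", "bitsize": 64, "offset": 8},
    ],
}
print(len(pack_zero_register_context(demo_info)))  # 16 bytes: two uint64 slots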

View File

@@ -101,7 +101,6 @@ import lldb
class SimpleStep:
def __init__(self, thread_plan, dict):
self.thread_plan = thread_plan
self.start_address = thread_plan.GetThread().GetFrameAtIndex(0).GetPC()
@@ -129,13 +128,14 @@ class SimpleStep:
def stop_description(self, stream):
stream.Print("Simple step completed")
class StepWithPlan:
class StepWithPlan:
def __init__(self, thread_plan, dict):
self.thread_plan = thread_plan
self.start_address = thread_plan.GetThread().GetFrameAtIndex(0).GetPCAddress()
self.step_thread_plan = thread_plan.QueueThreadPlanForStepOverRange(
self.start_address, 20)
self.start_address, 20
)
def explains_stop(self, event):
# Since all I'm doing is running a plan, I will only ever get askedthis
@@ -155,13 +155,13 @@ class StepWithPlan:
def stop_description(self, stream):
self.step_thread_plan.GetDescription(stream, lldb.eDescriptionLevelBrief)
# Here's another example which does "step over" through the current function,
# and when it stops at each line, it checks some condition (in this example the
# value of a variable) and stops if that condition is true.
class StepCheckingCondition:
def __init__(self, thread_plan, dict):
self.thread_plan = thread_plan
self.start_frame = thread_plan.GetThread().GetFrameAtIndex(0)
@@ -174,7 +174,8 @@ class StepCheckingCondition:
end_address = cur_line_entry.GetEndAddress()
line_range = end_address.GetFileAddress() - start_address.GetFileAddress()
self.step_thread_plan = self.thread_plan.QueueThreadPlanForStepOverRange(
start_address, line_range)
start_address, line_range
)
def explains_stop(self, event):
# We are stepping, so if we stop for any other reason, it isn't
@@ -217,6 +218,7 @@ class StepCheckingCondition:
def stop_description(self, stream):
stream.Print(f"Stepped until a == 20")
# Here's an example that steps out of the current frame, gathers some information
# and then continues. The information in this case is rax. Currently the thread
# plans are not a safe place to call lldb command-line commands, so the information
@@ -224,11 +226,9 @@ class StepCheckingCondition:
class FinishPrintAndContinue:
def __init__(self, thread_plan, dict):
self.thread_plan = thread_plan
self.step_out_thread_plan = thread_plan.QueueThreadPlanForStepOut(
0, True)
self.step_out_thread_plan = thread_plan.QueueThreadPlanForStepOut(0, True)
self.thread = self.thread_plan.GetThread()
def is_stale(self):
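These thread plan classes are meant to be driven from the command line. A sketch of doing so from Python, assuming the file is importable as scripted_step and that the -C class option of thread step-scripted is used to name the plan class:

import lldb

def run_simple_step(debugger):
    # Import the module, then queue the scripted plan on the selected thread.
    debugger.HandleCommand("command script import scripted_step.py")
    debugger.HandleCommand("thread step-scripted -C scripted_step.SimpleStep")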

View File

@@ -6,12 +6,15 @@ import shlex
@lldb.command("shadow")
def check_shadow_command(debugger, command, exe_ctx, result, dict):
'''Check the currently selected stack frame for shadowed variables'''
"""Check the currently selected stack frame for shadowed variables"""
process = exe_ctx.GetProcess()
state = process.GetState()
if state != lldb.eStateStopped:
print("process must be stopped, state is %s" % lldb.SBDebugger.StateAsCString(
state), file=result)
print(
"process must be stopped, state is %s"
% lldb.SBDebugger.StateAsCString(state),
file=result,
)
return
frame = exe_ctx.GetFrame()
if not frame:
@@ -55,4 +58,4 @@ def check_shadow_command(debugger, command, exe_ctx, result, dict):
for shadow_var in shadow_vars:
print(str(shadow_var.GetDeclaration()), file=result)
if num_shadowed_variables == 0:
print('no variables are shadowed', file=result)
print("no variables are shadowed", file=result)
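The @lldb.command decorator registers "shadow" as soon as the module is imported, so a usage sketch only needs an import followed by the command itself (the file name below is an assumption):

import lldb

def install_and_run(debugger):
    debugger.HandleCommand("command script import shadow.py")  # hypothetical file name
    debugger.HandleCommand("shadow")  # inspects the currently selected frame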

View File

@@ -13,7 +13,7 @@ def dump_module_sources(module, result):
def info_sources(debugger, command, result, dict):
description = '''This command will dump all compile units in any modules that are listed as arguments, or for all modules if no arguments are supplied.'''
description = """This command will dump all compile units in any modules that are listed as arguments, or for all modules if no arguments are supplied."""
module_names = shlex.split(command)
target = debugger.GetSelectedTarget()
if module_names:
@@ -26,6 +26,7 @@ def info_sources(debugger, command, result, dict):
def __lldb_init_module(debugger, dict):
# Add any commands contained in this module to LLDB
debugger.HandleCommand(
'command script add -o -f sources.info_sources info_sources')
print('The "info_sources" command has been installed, type "help info_sources" or "info_sources --help" for detailed help.')
debugger.HandleCommand("command script add -o -f sources.info_sources info_sources")
print(
'The "info_sources" command has been installed, type "help info_sources" or "info_sources --help" for detailed help.'
)

View File

@@ -7,16 +7,16 @@ import shlex
def stack_frames(debugger, command, result, dict):
command_args = shlex.split(command)
usage = "usage: %prog [options] <PATH> [PATH ...]"
description = '''This command will enumerate all stack frames, print the stack size for each, and print an aggregation of which functions have the largest stack frame sizes at the end.'''
parser = optparse.OptionParser(
description=description, prog='ls', usage=usage)
description = """This command will enumerate all stack frames, print the stack size for each, and print an aggregation of which functions have the largest stack frame sizes at the end."""
parser = optparse.OptionParser(description=description, prog="ls", usage=usage)
parser.add_option(
'-v',
'--verbose',
action='store_true',
dest='verbose',
help='display verbose debug info',
default=False)
"-v",
"--verbose",
action="store_true",
dest="verbose",
help="display verbose debug info",
default=False,
)
try:
(options, args) = parser.parse_args(command_args)
except:
@@ -63,6 +63,7 @@ def stack_frames(debugger, command, result, dict):
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand(
"command script add -o -f stacks.stack_frames stack_frames")
print("A new command called 'stack_frames' was added, type 'stack_frames --help' for more information.")
debugger.HandleCommand("command script add -o -f stacks.stack_frames stack_frames")
print(
"A new command called 'stack_frames' was added, type 'stack_frames --help' for more information."
)
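LLDB hands a Python command its arguments as a single string, which is why the code above tokenizes with shlex and parses with optparse inside a try/except: parse_args reports bad input by raising SystemExit. A standalone sketch of that pattern:

import optparse
import shlex

def parse_stack_frames_args(command):
    usage = "usage: %prog [options] <PATH> [PATH ...]"
    parser = optparse.OptionParser(prog="stack_frames", usage=usage)
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      default=False, help="display verbose debug info")
    try:
        return parser.parse_args(shlex.split(command))
    except SystemExit:
        return None, None  # bad input; the real command prints usage and returns

print(parse_stack_frames_args("-v /tmp/a.out"))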

View File

@@ -1,6 +1,7 @@
""" Does a step-over then prints the local variables or only the ones passed in """
import lldb
class StepAndPrint:
def __init__(self, debugger, unused):
return
@@ -13,12 +14,15 @@ class StepAndPrint:
debugger.HandleCommand("thread step-over")
print("---------- Values: -------------------\n")
debugger.HandleCommand("frame variable %s"%(command))
debugger.HandleCommand("frame variable %s" % (command))
debugger.SetAsync(old_async)
def get_short_help(self):
return "Does a step-over then runs frame variable passing the command args to it\n"
return (
"Does a step-over then runs frame variable passing the command args to it\n"
)
def __lldb_init_module(debugger, unused):
debugger.HandleCommand("command script add -o -c step_and_print.StepAndPrint sap")

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Be sure to add the python path that points to the LLDB shared library.
#
# To use this in the embedded python interpreter using "lldb":
@@ -24,7 +24,7 @@
#
# On MacOSX sh, bash:
# PYTHONPATH=/path/to/LLDB.framework/Resources/Python ./crashlog.py ~/Library/Logs/DiagnosticReports/a.crash
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
import lldb
import optparse
@@ -52,7 +52,9 @@ class Address:
# Any original textual description of this address to be used as a
# backup in case symbolication fails
self.description = None
self.symbolication = None # The cached symbolicated string that describes this address
self.symbolication = (
None # The cached symbolicated string that describes this address
)
self.inlined = False
def __str__(self):
@@ -78,7 +80,8 @@ class Address:
sb_addr = self.resolve_addr()
if sb_addr:
self.sym_ctx = self.target.ResolveSymbolContextForAddress(
sb_addr, lldb.eSymbolContextEverything)
sb_addr, lldb.eSymbolContextEverything
)
else:
self.sym_ctx = lldb.SBSymbolContext()
return self.sym_ctx
@@ -94,7 +97,7 @@ class Address:
def symbolicate(self, verbose=False):
if self.symbolication is None:
self.symbolication = ''
self.symbolication = ""
self.inlined = False
sym_ctx = self.get_symbol_context()
if sym_ctx:
@@ -102,9 +105,9 @@ class Address:
if module:
# Print full source file path in verbose mode
if verbose:
self.symbolication += str(module.GetFileSpec()) + '`'
self.symbolication += str(module.GetFileSpec()) + "`"
else:
self.symbolication += module.GetFileSpec().GetFilename() + '`'
self.symbolication += module.GetFileSpec().GetFilename() + "`"
function_start_load_addr = -1
function = sym_ctx.GetFunction()
block = sym_ctx.GetBlock()
@@ -116,22 +119,30 @@ class Address:
if inlined_block:
self.inlined = True
self.symbolication += ' [inlined] ' + \
inlined_block.GetInlinedName()
block_range_idx = inlined_block.GetRangeIndexForBlockAddress(
self.so_addr)
self.symbolication += (
" [inlined] " + inlined_block.GetInlinedName()
)
block_range_idx = (
inlined_block.GetRangeIndexForBlockAddress(self.so_addr)
)
if block_range_idx < lldb.UINT32_MAX:
block_range_start_addr = inlined_block.GetRangeStartAddress(
block_range_idx)
function_start_load_addr = block_range_start_addr.GetLoadAddress(
self.target)
block_range_start_addr = (
inlined_block.GetRangeStartAddress(block_range_idx)
)
function_start_load_addr = (
block_range_start_addr.GetLoadAddress(self.target)
)
if function_start_load_addr == -1:
function_start_load_addr = function.GetStartAddress().GetLoadAddress(self.target)
function_start_load_addr = (
function.GetStartAddress().GetLoadAddress(self.target)
)
elif symbol:
self.symbolication += symbol.GetName()
function_start_load_addr = symbol.GetStartAddress().GetLoadAddress(self.target)
function_start_load_addr = (
symbol.GetStartAddress().GetLoadAddress(self.target)
)
else:
self.symbolication = ''
self.symbolication = ""
return False
# Dump the offset from the current function or symbol if it
@@ -140,29 +151,36 @@ class Address:
if function_offset > 0:
self.symbolication += " + %u" % (function_offset)
elif function_offset < 0:
self.symbolication += " %i (invalid negative offset, file a bug) " % function_offset
self.symbolication += (
" %i (invalid negative offset, file a bug) "
% function_offset
)
# Print out any line information if any is available
if line_entry.GetFileSpec():
# Print full source file path in verbose mode
if verbose:
self.symbolication += ' at %s' % line_entry.GetFileSpec()
self.symbolication += " at %s" % line_entry.GetFileSpec()
else:
self.symbolication += ' at %s' % line_entry.GetFileSpec().GetFilename()
self.symbolication += ':%u' % line_entry.GetLine()
self.symbolication += (
" at %s" % line_entry.GetFileSpec().GetFilename()
)
self.symbolication += ":%u" % line_entry.GetLine()
column = line_entry.GetColumn()
if column > 0:
self.symbolication += ':%u' % column
self.symbolication += ":%u" % column
return True
return False
class Section:
"""Class that represents a load address range"""
sect_info_regex = re.compile('(?P<name>[^=]+)=(?P<range>.*)')
addr_regex = re.compile('^\s*(?P<start>0x[0-9A-Fa-f]+)\s*$')
sect_info_regex = re.compile("(?P<name>[^=]+)=(?P<range>.*)")
addr_regex = re.compile("^\s*(?P<start>0x[0-9A-Fa-f]+)\s*$")
range_regex = re.compile(
'^\s*(?P<start>0x[0-9A-Fa-f]+)\s*(?P<op>[-+])\s*(?P<end>0x[0-9A-Fa-f]+)\s*$')
"^\s*(?P<start>0x[0-9A-Fa-f]+)\s*(?P<op>[-+])\s*(?P<end>0x[0-9A-Fa-f]+)\s*$"
)
def __init__(self, start_addr=None, end_addr=None, name=None):
self.start_addr = start_addr
@@ -173,11 +191,7 @@ class Section:
def InitWithSBTargetAndSBSection(cls, target, section):
sect_load_addr = section.GetLoadAddress(target)
if sect_load_addr != lldb.LLDB_INVALID_ADDRESS:
obj = cls(
sect_load_addr,
sect_load_addr +
section.size,
section.name)
obj = cls(sect_load_addr, sect_load_addr + section.size, section.name)
return obj
else:
return None
@@ -188,29 +202,35 @@ class Section:
def set_from_string(self, s):
match = self.sect_info_regex.match(s)
if match:
self.name = match.group('name')
range_str = match.group('range')
self.name = match.group("name")
range_str = match.group("range")
addr_match = self.addr_regex.match(range_str)
if addr_match:
self.start_addr = int(addr_match.group('start'), 16)
self.start_addr = int(addr_match.group("start"), 16)
self.end_addr = None
return True
range_match = self.range_regex.match(range_str)
if range_match:
self.start_addr = int(range_match.group('start'), 16)
self.end_addr = int(range_match.group('end'), 16)
op = range_match.group('op')
if op == '+':
self.start_addr = int(range_match.group("start"), 16)
self.end_addr = int(range_match.group("end"), 16)
op = range_match.group("op")
if op == "+":
self.end_addr += self.start_addr
return True
print('error: invalid section info string "%s"' % s)
print('Valid section info formats are:')
print('Format Example Description')
print('--------------------- -----------------------------------------------')
print('<name>=<base> __TEXT=0x123000 Section from base address only')
print('<name>=<base>-<end> __TEXT=0x123000-0x124000 Section from base address and end address')
print('<name>=<base>+<size> __TEXT=0x123000+0x1000 Section from base address and size')
print("Valid section info formats are:")
print("Format Example Description")
print("--------------------- -----------------------------------------------")
print(
"<name>=<base> __TEXT=0x123000 Section from base address only"
)
print(
"<name>=<base>-<end> __TEXT=0x123000-0x124000 Section from base address and end address"
)
print(
"<name>=<base>+<size> __TEXT=0x123000+0x1000 Section from base address and size"
)
return False
def __str__(self):
@@ -218,7 +238,10 @@ class Section:
if self.end_addr is not None:
if self.start_addr is not None:
return "%s=[0x%16.16x - 0x%16.16x)" % (
self.name, self.start_addr, self.end_addr)
self.name,
self.start_addr,
self.end_addr,
)
else:
if self.start_addr is not None:
return "%s=0x%16.16x" % (self.name, self.start_addr)
@@ -247,13 +270,12 @@ class Image:
@classmethod
def InitWithSBTargetAndSBModule(cls, target, module):
'''Initialize this Image object with a module from a target.'''
"""Initialize this Image object with a module from a target."""
obj = cls(module.file.fullpath, module.uuid)
obj.resolved_path = module.platform_file.fullpath
obj.resolved = True
for section in module.sections:
symb_section = Section.InitWithSBTargetAndSBSection(
target, section)
symb_section = Section.InitWithSBTargetAndSBSection(target, section)
if symb_section:
obj.section_infos.append(symb_section)
obj.arch = module.triple
@@ -268,19 +290,19 @@ class Image:
def debug_dump(self):
print('path = "%s"' % (self.path))
print('resolved_path = "%s"' % (self.resolved_path))
print('resolved = %i' % (self.resolved))
print('unavailable = %i' % (self.unavailable))
print('uuid = %s' % (self.uuid))
print('section_infos = %s' % (self.section_infos))
print("resolved = %i" % (self.resolved))
print("unavailable = %i" % (self.unavailable))
print("uuid = %s" % (self.uuid))
print("section_infos = %s" % (self.section_infos))
print('identifier = "%s"' % (self.identifier))
print('version = %s' % (self.version))
print('arch = %s' % (self.arch))
print('module = %s' % (self.module))
print("version = %s" % (self.version))
print("arch = %s" % (self.arch))
print("module = %s" % (self.module))
print('symfile = "%s"' % (self.symfile))
print('slide = %i (0x%x)' % (self.slide, self.slide))
print("slide = %i (0x%x)" % (self.slide, self.slide))
def __str__(self):
s = ''
s = ""
if self.uuid:
s += "%s " % (self.get_uuid())
if self.arch:
@@ -293,7 +315,7 @@ class Image:
for section_info in self.section_infos:
s += ", %s" % (section_info)
if self.slide is not None:
s += ', slide = 0x%16.16x' % self.slide
s += ", slide = 0x%16.16x" % self.slide
return s
def add_section(self, section):
@@ -339,37 +361,41 @@ class Image:
num_sections_loaded = 0
for section_info in self.section_infos:
if section_info.name:
section = self.module.FindSection(
section_info.name)
section = self.module.FindSection(section_info.name)
if section:
error = target.SetSectionLoadAddress(
section, section_info.start_addr)
section, section_info.start_addr
)
if error.Success():
num_sections_loaded += 1
else:
return 'error: %s' % error.GetCString()
return "error: %s" % error.GetCString()
else:
return 'error: unable to find the section named "%s"' % section_info.name
return (
'error: unable to find the section named "%s"'
% section_info.name
)
else:
return 'error: unable to find "%s" section in "%s"' % (
range.name, self.get_resolved_path())
range.name,
self.get_resolved_path(),
)
if num_sections_loaded == 0:
return 'error: no sections were successfully loaded'
return "error: no sections were successfully loaded"
else:
err = target.SetModuleLoadAddress(
self.module, self.slide)
err = target.SetModuleLoadAddress(self.module, self.slide)
if err.Fail():
return err.GetCString()
return None
else:
return 'error: invalid module'
return "error: invalid module"
else:
return 'error: invalid target'
return "error: invalid target"
else:
return 'error: no section infos'
return "error: no section infos"
def add_module(self, target, obj_dir=None):
'''Add the Image described in this object to "target" and load the sections if "load" is True.'''
"""Add the Image described in this object to "target" and load the sections if "load" is True."""
if target:
# Try and find using UUID only first so that paths need not match
# up
@@ -381,23 +407,26 @@ class Image:
if not self.unavailable:
resolved_path = self.get_resolved_path()
self.module = target.AddModule(
resolved_path, None, uuid_str, self.symfile)
resolved_path, None, uuid_str, self.symfile
)
if not self.module and self.section_infos:
name = os.path.basename(self.path)
if obj_dir and os.path.isdir(obj_dir):
data = {
'triple': target.triple,
'uuid': uuid_str,
'type': 'sharedlibrary',
'sections': list(),
'symbols': list()
"triple": target.triple,
"uuid": uuid_str,
"type": "sharedlibrary",
"sections": list(),
"symbols": list(),
}
for section in self.section_infos:
data['sections'].append({
'name' : section.name,
'size': section.end_addr - section.start_addr
})
data['symbols'] = list(self.symbols.values())
data["sections"].append(
{
"name": section.name,
"size": section.end_addr - section.start_addr,
}
)
data["symbols"] = list(self.symbols.values())
obj_file = os.path.join(obj_dir, name)
with open(obj_file, "w") as f:
f.write(json.dumps(data, indent=4))
@@ -409,13 +438,17 @@ class Image:
self.unavailable = False
if not self.module and not self.unavailable:
return 'error: unable to get module for (%s) "%s"' % (
self.arch, self.get_resolved_path())
self.arch,
self.get_resolved_path(),
)
if self.has_section_load_info():
return self.load_module(target)
else:
return None # No sections, the module was added to the target, so success
return (
None # No sections, the module was added to the target, so success
)
else:
return 'error: invalid target'
return "error: invalid target"
def locate_module_and_debug_symbols(self):
# By default, just use the paths that were supplied in:
@@ -438,7 +471,7 @@ class Image:
return None
def create_target(self, debugger):
'''Create a target using the information in this Image object.'''
"""Create a target using the information in this Image object."""
if self.unavailable:
return None
@@ -446,24 +479,28 @@ class Image:
resolved_path = self.get_resolved_path()
path_spec = lldb.SBFileSpec(resolved_path)
error = lldb.SBError()
target = debugger.CreateTarget(
resolved_path, self.arch, None, False, error)
target = debugger.CreateTarget(resolved_path, self.arch, None, False, error)
if target:
self.module = target.FindModule(path_spec)
if self.has_section_load_info():
err = self.load_module(target)
if err:
print('ERROR: ', err)
print("ERROR: ", err)
return target
else:
print('error: unable to create a valid target for (%s) "%s"' % (self.arch, self.path))
print(
'error: unable to create a valid target for (%s) "%s"'
% (self.arch, self.path)
)
else:
print('error: unable to locate main executable (%s) "%s"' % (self.arch, self.path))
print(
'error: unable to locate main executable (%s) "%s"'
% (self.arch, self.path)
)
return None
class Symbolicator:
def __init__(self, debugger=None, target=None, images=list()):
"""A class the represents the information needed to symbolicate
addresses in a program.
@@ -474,7 +511,7 @@ class Symbolicator:
self.debugger = debugger
self.target = target
self.images = images # a list of images to be used when symbolicating
self.addr_mask = 0xffffffffffffffff
self.addr_mask = 0xFFFFFFFFFFFFFFFF
@classmethod
def InitWithSBTarget(cls, target):
@@ -482,9 +519,9 @@ class Symbolicator:
obj = cls(target=target)
triple = target.triple
if triple:
arch = triple.split('-')[0]
arch = triple.split("-")[0]
if "arm" in arch:
obj.addr_mask = 0xfffffffffffffffe
obj.addr_mask = 0xFFFFFFFFFFFFFFFE
for module in target.modules:
image = Image.InitWithSBTargetAndSBModule(target, module)
@@ -507,7 +544,7 @@ class Symbolicator:
s += str(m) + "\n"
s += "Images:\n"
for image in self.images:
s += ' %s\n' % (image)
s += " %s\n" % (image)
return s
def find_images_with_identifier(self, identifier):
@@ -516,7 +553,7 @@ class Symbolicator:
if image.identifier == identifier:
images.append(image)
if len(images) == 0:
regex_text = '^.*\.%s$' % (re.escape(identifier))
regex_text = "^.*\.%s$" % (re.escape(identifier))
regex = re.compile(regex_text)
for image in self.images:
if regex.match(image.identifier):
@@ -540,9 +577,9 @@ class Symbolicator:
if self.target.GetAddressByteSize() == 4:
triple = self.target.triple
if triple:
arch = triple.split('-')[0]
arch = triple.split("-")[0]
if "arm" in arch:
self.addr_mask = 0xfffffffffffffffe
self.addr_mask = 0xFFFFFFFFFFFFFFFE
return self.target
return None
@@ -571,16 +608,20 @@ class Symbolicator:
# See if we were able to reconstruct anything?
while True:
inlined_parent_so_addr = lldb.SBAddress()
inlined_parent_sym_ctx = symbolicated_address.sym_ctx.GetParentOfInlinedScope(
symbolicated_address.so_addr, inlined_parent_so_addr)
inlined_parent_sym_ctx = (
symbolicated_address.sym_ctx.GetParentOfInlinedScope(
symbolicated_address.so_addr, inlined_parent_so_addr
)
)
if not inlined_parent_sym_ctx:
break
if not inlined_parent_so_addr:
break
symbolicated_address = Address(
self.target, inlined_parent_so_addr.GetLoadAddress(
self.target))
self.target,
inlined_parent_so_addr.GetLoadAddress(self.target),
)
symbolicated_address.sym_ctx = inlined_parent_sym_ctx
symbolicated_address.so_addr = inlined_parent_so_addr
symbolicated_address.symbolicate(verbose)
@@ -591,17 +632,13 @@ class Symbolicator:
if symbolicated_addresses:
return symbolicated_addresses
else:
print('error: no target in Symbolicator')
print("error: no target in Symbolicator")
return None
def disassemble_instructions(
target,
instructions,
pc,
insts_before_pc,
insts_after_pc,
non_zeroeth_frame):
target, instructions, pc, insts_before_pc, insts_after_pc, non_zeroeth_frame
):
lines = list()
pc_index = -1
comment_column = 50
@@ -616,7 +653,7 @@ def disassemble_instructions(
if comment:
line_len = len(lines[-1])
if line_len < comment_column:
lines[-1] += ' ' * (comment_column - line_len)
lines[-1] += " " * (comment_column - line_len)
lines[-1] += "; %s" % comment
if pc_index >= 0:
@@ -638,9 +675,9 @@ def disassemble_instructions(
end_idx = inst_idx
for i in range(start_idx, end_idx + 1):
if i == pc_index:
print(' -> ', lines[i])
print(" -> ", lines[i])
else:
print(' ', lines[i])
print(" ", lines[i])
def print_module_section_data(section):
@@ -657,8 +694,7 @@ def print_module_section(section, depth):
if depth > 0:
num_sub_sections = section.GetNumSubSections()
for sect_idx in range(num_sub_sections):
print_module_section(
section.GetSubSectionAtIndex(sect_idx), depth - 1)
print_module_section(section.GetSubSectionAtIndex(sect_idx), depth - 1)
def print_module_sections(module, depth):
@@ -672,55 +708,61 @@ def print_module_symbols(module):
def Symbolicate(debugger, command_args):
usage = "usage: %prog [options] <addr1> [addr2 ...]"
description = '''Symbolicate one or more addresses using LLDB's python scripting API..'''
description = (
"""Symbolicate one or more addresses using LLDB's python scripting API.."""
)
parser = optparse.OptionParser(
description=description,
prog='crashlog.py',
usage=usage)
description=description, prog="crashlog.py", usage=usage
)
parser.add_option(
'-v',
'--verbose',
action='store_true',
dest='verbose',
help='display verbose debug info',
default=False)
"-v",
"--verbose",
action="store_true",
dest="verbose",
help="display verbose debug info",
default=False,
)
parser.add_option(
'-p',
'--platform',
type='string',
metavar='platform',
dest='platform',
help='Specify the platform to use when creating the debug target. Valid values include "localhost", "darwin-kernel", "ios-simulator", "remote-freebsd", "remote-macosx", "remote-ios", "remote-linux".')
"-p",
"--platform",
type="string",
metavar="platform",
dest="platform",
help='Specify the platform to use when creating the debug target. Valid values include "localhost", "darwin-kernel", "ios-simulator", "remote-freebsd", "remote-macosx", "remote-ios", "remote-linux".',
)
parser.add_option(
'-f',
'--file',
type='string',
metavar='file',
dest='file',
help='Specify a file to use when symbolicating')
"-f",
"--file",
type="string",
metavar="file",
dest="file",
help="Specify a file to use when symbolicating",
)
parser.add_option(
'-a',
'--arch',
type='string',
metavar='arch',
dest='arch',
help='Specify a architecture to use when symbolicating')
"-a",
"--arch",
type="string",
metavar="arch",
dest="arch",
help="Specify a architecture to use when symbolicating",
)
parser.add_option(
'-s',
'--slide',
type='int',
metavar='slide',
dest='slide',
help='Specify the slide to use on the file specified with the --file option',
default=None)
"-s",
"--slide",
type="int",
metavar="slide",
dest="slide",
help="Specify the slide to use on the file specified with the --file option",
default=None,
)
parser.add_option(
'--section',
type='string',
action='append',
dest='section_strings',
help='specify <sect-name>=<start-addr> or <sect-name>=<start-addr>-<end-addr>')
"--section",
type="string",
action="append",
dest="section_strings",
help="specify <sect-name>=<start-addr> or <sect-name>=<start-addr>-<end-addr>",
)
try:
(options, args) = parser.parse_args(command_args)
except:
@@ -749,15 +791,15 @@ def Symbolicate(debugger, command_args):
if target:
for addr_str in args:
addr = int(addr_str, 0)
symbolicated_addrs = symbolicator.symbolicate(
addr, options.verbose)
symbolicated_addrs = symbolicator.symbolicate(addr, options.verbose)
for symbolicated_addr in symbolicated_addrs:
print(symbolicated_addr)
print()
else:
print('error: no target for %s' % (symbolicator))
print("error: no target for %s" % (symbolicator))
if __name__ == '__main__':
if __name__ == "__main__":
# Create a new debugger instance
debugger = lldb.SBDebugger.Create()
Symbolicate(debugger, sys.argv[1:])


@@ -1,13 +1,13 @@
#!/usr/bin/env python
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Be sure to add the python path that points to the LLDB shared library.
#
# # To use this in the embedded python interpreter using "lldb" just
# import it with the full path using the "command script import"
# command
# (lldb) command script import /path/to/cmdtemplate.py
#----------------------------------------------------------------------
# ----------------------------------------------------------------------
import platform
import os
@@ -23,18 +23,21 @@ except ImportError:
lldb_python_dirs = list()
# lldb is not in the PYTHONPATH, try some defaults for the current platform
platform_system = platform.system()
if platform_system == 'Darwin':
if platform_system == "Darwin":
# On Darwin, try the currently selected Xcode directory
xcode_dir = subprocess.check_output("xcode-select --print-path", shell=True)
if xcode_dir:
lldb_python_dirs.append(
os.path.realpath(
xcode_dir +
'/../SharedFrameworks/LLDB.framework/Resources/Python'))
xcode_dir + "/../SharedFrameworks/LLDB.framework/Resources/Python"
)
)
lldb_python_dirs.append(
xcode_dir + '/Library/PrivateFrameworks/LLDB.framework/Resources/Python')
xcode_dir + "/Library/PrivateFrameworks/LLDB.framework/Resources/Python"
)
lldb_python_dirs.append(
'/System/Library/PrivateFrameworks/LLDB.framework/Resources/Python')
"/System/Library/PrivateFrameworks/LLDB.framework/Resources/Python"
)
success = False
for lldb_python_dir in lldb_python_dirs:
if os.path.exists(lldb_python_dir):
@@ -49,7 +52,9 @@ except ImportError:
success = True
break
if not success:
print("error: couldn't locate the 'lldb' module, please set PYTHONPATH correctly")
print(
"error: couldn't locate the 'lldb' module, please set PYTHONPATH correctly"
)
sys.exit(1)
import optparse
@@ -59,7 +64,7 @@ import time
def regex_option_callback(option, opt_str, value, parser):
if opt_str == "--std":
value = '^std::'
value = "^std::"
regex = re.compile(value)
parser.values.skip_type_regexes.append(regex)
@@ -67,91 +72,98 @@ def regex_option_callback(option, opt_str, value, parser):
def create_types_options(for_lldb_command):
if for_lldb_command:
usage = "usage: %prog [options]"
description = '''This command will help check for padding in between
description = """This command will help check for padding in between
base classes and members in structs and classes. It will summarize the types
and how much padding was found. If no types are specified with the --types TYPENAME
option, all structure and class types will be verified. If no modules are
specified with the --module option, only the target's main executable will be
searched.
'''
"""
else:
usage = "usage: %prog [options] EXEPATH [EXEPATH ...]"
description = '''This command will help check for padding in between
description = """This command will help check for padding in between
base classes and members in structures and classes. It will summarize the types
and how much padding was found. One or more paths to executable files must be
specified and targets will be created with these modules. If no types are
specified with the --types TYPENAME option, all structure and class types will
be verified in all specified modules.
'''
"""
parser = optparse.OptionParser(
description=description,
prog='framestats',
usage=usage)
description=description, prog="framestats", usage=usage
)
if not for_lldb_command:
parser.add_option(
'-a',
'--arch',
type='string',
dest='arch',
help='The architecture to use when creating the debug target.',
default=None)
"-a",
"--arch",
type="string",
dest="arch",
help="The architecture to use when creating the debug target.",
default=None,
)
parser.add_option(
'-p',
'--platform',
type='string',
metavar='platform',
dest='platform',
help='Specify the platform to use when creating the debug target. Valid values include "localhost", "darwin-kernel", "ios-simulator", "remote-freebsd", "remote-macosx", "remote-ios", "remote-linux".')
"-p",
"--platform",
type="string",
metavar="platform",
dest="platform",
help='Specify the platform to use when creating the debug target. Valid values include "localhost", "darwin-kernel", "ios-simulator", "remote-freebsd", "remote-macosx", "remote-ios", "remote-linux".',
)
parser.add_option(
'-m',
'--module',
action='append',
type='string',
metavar='MODULE',
dest='modules',
help='Specify one or more modules which will be used to verify the types.',
default=[])
"-m",
"--module",
action="append",
type="string",
metavar="MODULE",
dest="modules",
help="Specify one or more modules which will be used to verify the types.",
default=[],
)
parser.add_option(
'-d',
'--debug',
action='store_true',
dest='debug',
help='Pause 10 seconds to wait for a debugger to attach.',
default=False)
"-d",
"--debug",
action="store_true",
dest="debug",
help="Pause 10 seconds to wait for a debugger to attach.",
default=False,
)
parser.add_option(
'-t',
'--type',
action='append',
type='string',
metavar='TYPENAME',
dest='typenames',
help='Specify one or more type names which should be verified. If no type names are specified, all class and struct types will be verified.',
default=[])
"-t",
"--type",
action="append",
type="string",
metavar="TYPENAME",
dest="typenames",
help="Specify one or more type names which should be verified. If no type names are specified, all class and struct types will be verified.",
default=[],
)
parser.add_option(
'-v',
'--verbose',
action='store_true',
dest='verbose',
help='Enable verbose logging and information.',
default=False)
"-v",
"--verbose",
action="store_true",
dest="verbose",
help="Enable verbose logging and information.",
default=False,
)
parser.add_option(
'-s',
'--skip-type-regex',
"-s",
"--skip-type-regex",
action="callback",
callback=regex_option_callback,
type='string',
metavar='REGEX',
dest='skip_type_regexes',
help='Regular expressions that, if they match the current member typename, will cause the type to no be recursively displayed.',
default=[])
type="string",
metavar="REGEX",
dest="skip_type_regexes",
help="Regular expressions that, if they match the current member typename, will cause the type to no be recursively displayed.",
default=[],
)
parser.add_option(
'--std',
"--std",
action="callback",
callback=regex_option_callback,
metavar='REGEX',
dest='skip_type_regexes',
metavar="REGEX",
dest="skip_type_regexes",
help="Don't' recurse into types in the std namespace.",
default=[])
default=[],
)
return parser
@@ -159,35 +171,35 @@ def verify_type(target, options, type):
print(type)
typename = type.GetName()
# print 'type: %s' % (typename)
(end_offset, padding) = verify_type_recursive(
target, options, type, None, 0, 0, 0)
(end_offset, padding) = verify_type_recursive(target, options, type, None, 0, 0, 0)
byte_size = type.GetByteSize()
# if end_offset < byte_size:
# last_member_padding = byte_size - end_offset
# print '%+4u <%u> padding' % (end_offset, last_member_padding)
# padding += last_member_padding
print('Total byte size: %u' % (byte_size))
print('Total pad bytes: %u' % (padding))
print("Total byte size: %u" % (byte_size))
print("Total pad bytes: %u" % (padding))
if padding > 0:
print('Padding percentage: %2.2f %%' % ((float(padding) / float(byte_size)) * 100.0))
print(
"Padding percentage: %2.2f %%"
% ((float(padding) / float(byte_size)) * 100.0)
)
print()
def verify_type_recursive(
target,
options,
type,
member_name,
depth,
base_offset,
padding):
target, options, type, member_name, depth, base_offset, padding
):
prev_end_offset = base_offset
typename = type.GetName()
byte_size = type.GetByteSize()
if member_name and member_name != typename:
print('%+4u <%3u> %s%s %s;' % (base_offset, byte_size, ' ' * depth, typename, member_name))
print(
"%+4u <%3u> %s%s %s;"
% (base_offset, byte_size, " " * depth, typename, member_name)
)
else:
print('%+4u {%3u} %s%s' % (base_offset, byte_size, ' ' * depth, typename))
print("%+4u {%3u} %s%s" % (base_offset, byte_size, " " * depth, typename))
for type_regex in options.skip_type_regexes:
match = type_regex.match(typename)
@@ -205,44 +217,82 @@ def verify_type_recursive(
member_total_offset = member_offset + base_offset
member_byte_size = member_type.GetByteSize()
member_is_class_or_struct = False
if member_type_class == lldb.eTypeClassStruct or member_type_class == lldb.eTypeClassClass:
if (
member_type_class == lldb.eTypeClassStruct
or member_type_class == lldb.eTypeClassClass
):
member_is_class_or_struct = True
if member_idx == 0 and member_offset == target.GetAddressByteSize(
) and type.IsPolymorphicClass():
if (
member_idx == 0
and member_offset == target.GetAddressByteSize()
and type.IsPolymorphicClass()
):
ptr_size = target.GetAddressByteSize()
print('%+4u <%3u> %s__vtbl_ptr_type * _vptr;' % (prev_end_offset, ptr_size, ' ' * (depth + 1)))
print(
"%+4u <%3u> %s__vtbl_ptr_type * _vptr;"
% (prev_end_offset, ptr_size, " " * (depth + 1))
)
prev_end_offset = ptr_size
else:
if prev_end_offset < member_total_offset:
member_padding = member_total_offset - prev_end_offset
padding = padding + member_padding
print('%+4u <%3u> %s<PADDING>' % (prev_end_offset, member_padding, ' ' * (depth + 1)))
print(
"%+4u <%3u> %s<PADDING>"
% (prev_end_offset, member_padding, " " * (depth + 1))
)
if member_is_class_or_struct:
(prev_end_offset,
padding) = verify_type_recursive(target,
options,
member_canonical_type,
member_name,
depth + 1,
member_total_offset,
padding)
(prev_end_offset, padding) = verify_type_recursive(
target,
options,
member_canonical_type,
member_name,
depth + 1,
member_total_offset,
padding,
)
else:
prev_end_offset = member_total_offset + member_byte_size
member_typename = member_type.GetName()
if member.IsBitfield():
print('%+4u <%3u> %s%s:%u %s;' % (member_total_offset, member_byte_size, ' ' * (depth + 1), member_typename, member.GetBitfieldSizeInBits(), member_name))
print(
"%+4u <%3u> %s%s:%u %s;"
% (
member_total_offset,
member_byte_size,
" " * (depth + 1),
member_typename,
member.GetBitfieldSizeInBits(),
member_name,
)
)
else:
print('%+4u <%3u> %s%s %s;' % (member_total_offset, member_byte_size, ' ' * (depth + 1), member_typename, member_name))
print(
"%+4u <%3u> %s%s %s;"
% (
member_total_offset,
member_byte_size,
" " * (depth + 1),
member_typename,
member_name,
)
)
if prev_end_offset < byte_size:
last_member_padding = byte_size - prev_end_offset
print('%+4u <%3u> %s<PADDING>' % (prev_end_offset, last_member_padding, ' ' * (depth + 1)))
print(
"%+4u <%3u> %s<PADDING>"
% (prev_end_offset, last_member_padding, " " * (depth + 1))
)
padding += last_member_padding
else:
if type.IsPolymorphicClass():
ptr_size = target.GetAddressByteSize()
print('%+4u <%3u> %s__vtbl_ptr_type * _vptr;' % (prev_end_offset, ptr_size, ' ' * (depth + 1)))
print(
"%+4u <%3u> %s__vtbl_ptr_type * _vptr;"
% (prev_end_offset, ptr_size, " " * (depth + 1))
)
prev_end_offset = ptr_size
prev_end_offset = base_offset + byte_size
@@ -281,9 +331,8 @@ def parse_all_struct_class_types(debugger, command, result, dict):
def verify_types(target, options):
if not target:
print('error: invalid target')
print("error: invalid target")
return
modules = list()
@@ -300,26 +349,32 @@ def verify_types(target, options):
if modules:
for module in modules:
print('module: %s' % (module.file))
print("module: %s" % (module.file))
if options.typenames:
for typename in options.typenames:
types = module.FindTypes(typename)
if types.GetSize():
print('Found %u types matching "%s" in "%s"' % (len(types), typename, module.file))
print(
'Found %u types matching "%s" in "%s"'
% (len(types), typename, module.file)
)
for type in types:
verify_type(target, options, type)
else:
print('error: no type matches "%s" in "%s"' % (typename, module.file))
print(
'error: no type matches "%s" in "%s"'
% (typename, module.file)
)
else:
types = module.GetTypes(
lldb.eTypeClassClass | lldb.eTypeClassStruct)
types = module.GetTypes(lldb.eTypeClassClass | lldb.eTypeClassStruct)
print('Found %u types in "%s"' % (len(types), module.file))
for type in types:
verify_type(target, options, type)
else:
print('error: no modules')
print("error: no modules")
if __name__ == '__main__':
if __name__ == "__main__":
debugger = lldb.SBDebugger.Create()
parser = create_types_options(False)
@@ -339,17 +394,19 @@ if __name__ == '__main__':
# this is the best practice to access those objects from within a
# command
error = lldb.SBError()
target = debugger.CreateTarget(path,
options.arch,
options.platform,
True,
error)
target = debugger.CreateTarget(
path, options.arch, options.platform, True, error
)
if error.Fail():
print(error.GetCString())
continue
verify_types(target, options)
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand(
'command script add -o -f types.check_padding_command check_padding')
print('"check_padding" command installed, use the "--help" option for detailed help')
"command script add -o -f types.check_padding_command check_padding"
)
print(
'"check_padding" command installed, use the "--help" option for detailed help'
)

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -47,7 +47,7 @@ def DFS(root, word, cur_path):
if root_word[0] == '"' and root_word[end] == '"':
root_word = root_word[1:end]
end = len(root_word) - 1
if root_word[0] == '\'' and root_word[end] == '\'':
if root_word[0] == "'" and root_word[end] == "'":
root_word = root_word[1:end]
# Main depth first search
@@ -55,7 +55,6 @@ def DFS(root, word, cur_path):
if root_word == word:
return cur_path
elif word < root_word:
# Check to see if left child is NULL
if left_child_ptr.GetValue() is None:
@@ -64,7 +63,6 @@ def DFS(root, word, cur_path):
cur_path = cur_path + "L"
return DFS(left_child_ptr, word, cur_path)
else:
# Check to see if right child is NULL
if right_child_ptr.GetValue() is None:
@@ -83,10 +81,10 @@ def tree_size(root):
the one defined in dictionary.c It uses LLDB API
functions to examine and traverse the tree nodes.
"""
if (root.GetValue is None):
if root.GetValue is None:
return 0
if (int(root.GetValue(), 16) == 0):
if int(root.GetValue(), 16) == 0:
return 0
left_size = tree_size(root.GetChildAtIndex(1))
@@ -107,11 +105,13 @@ def print_tree(root):
functions to examine and traverse the tree nodes.
"""
if (root.GetChildAtIndex(1).GetValue() is not None) and (
int(root.GetChildAtIndex(1).GetValue(), 16) != 0):
int(root.GetChildAtIndex(1).GetValue(), 16) != 0
):
print_tree(root.GetChildAtIndex(1))
print(root.GetChildAtIndex(0).GetSummary())
if (root.GetChildAtIndex(2).GetValue() is not None) and (
int(root.GetChildAtIndex(2).GetValue(), 16) != 0):
int(root.GetChildAtIndex(2).GetValue(), 16) != 0
):
print_tree(root.GetChildAtIndex(2))


@@ -14,10 +14,10 @@ import lldb.formatters.metrics
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# much less functional than the other two cases below
# just runs code to get to the count and then returns
@@ -25,7 +25,6 @@ statistics.add_metric('code_notrun')
class NSArrayKVC_SynthProvider:
def adjust_for_architecture(self):
pass
@@ -43,18 +42,19 @@ class NSArrayKVC_SynthProvider:
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
num_children_vo = self.valobj.CreateValueFromExpression(
"count", "(int)[" + stream.GetData() + " count]")
"count", "(int)[" + stream.GetData() + " count]"
)
if num_children_vo.IsValid():
return num_children_vo.GetValueAsUnsigned(0)
return "<variable is not NSArray>"
# much less functional than the other two cases below
# just runs code to get to the count and then returns
# no children
class NSArrayCF_SynthProvider:
def adjust_for_architecture(self):
pass
@@ -63,8 +63,9 @@ class NSArrayCF_SynthProvider:
self.valobj = valobj
self.sys_params = params
if not (self.sys_params.types_cache.ulong):
self.sys_params.types_cache.ulong = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.ulong = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeUnsignedLong
)
self.update()
def update(self):
@@ -74,12 +75,12 @@ class NSArrayCF_SynthProvider:
def num_children(self):
logger = lldb.formatters.Logger.Logger()
num_children_vo = self.valobj.CreateChildAtOffset(
"count", self.sys_params.cfruntime_size, self.sys_params.types_cache.ulong)
"count", self.sys_params.cfruntime_size, self.sys_params.types_cache.ulong
)
return num_children_vo.GetValueAsUnsigned(0)
class NSArrayI_SynthProvider:
def adjust_for_architecture(self):
pass
@@ -87,9 +88,10 @@ class NSArrayI_SynthProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.long):
self.sys_params.types_cache.long = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeLong)
if not (self.sys_params.types_cache.long):
self.sys_params.types_cache.long = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeLong
)
self.update()
def update(self):
@@ -100,14 +102,12 @@ class NSArrayI_SynthProvider:
def num_children(self):
logger = lldb.formatters.Logger.Logger()
count = self.valobj.CreateChildAtOffset(
"count",
self.sys_params.pointer_size,
self.sys_params.types_cache.long)
"count", self.sys_params.pointer_size, self.sys_params.types_cache.long
)
return count.GetValueAsUnsigned(0)
class NSArrayM_SynthProvider:
def adjust_for_architecture(self):
pass
@@ -115,9 +115,10 @@ class NSArrayM_SynthProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.long):
self.sys_params.types_cache.long = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeLong)
if not (self.sys_params.types_cache.long):
self.sys_params.types_cache.long = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeLong
)
self.update()
def update(self):
@@ -128,18 +129,17 @@ class NSArrayM_SynthProvider:
def num_children(self):
logger = lldb.formatters.Logger.Logger()
count = self.valobj.CreateChildAtOffset(
"count",
self.sys_params.pointer_size,
self.sys_params.types_cache.long)
"count", self.sys_params.pointer_size, self.sys_params.types_cache.long
)
return count.GetValueAsUnsigned(0)
# this is the actual synth provider, but is just a wrapper that checks
# whether valobj is an instance of __NSArrayI or __NSArrayM and sets up an
# appropriate backend layer to do the computations
class NSArray_SynthProvider:
def adjust_for_architecture(self):
pass
@@ -149,7 +149,7 @@ class NSArray_SynthProvider:
self.adjust_for_architecture()
self.error = False
self.wrapper = self.make_wrapper()
self.invalid = (self.wrapper is None)
self.invalid = self.wrapper is None
def num_children(self):
logger = lldb.formatters.Logger.Logger()
@@ -169,12 +169,15 @@ class NSArray_SynthProvider:
logger = lldb.formatters.Logger.Logger()
if self.valobj.GetValueAsUnsigned() == 0:
self.error = True
return lldb.runtime.objc.objc_runtime.InvalidPointer_Description(
True)
return lldb.runtime.objc.objc_runtime.InvalidPointer_Description(True)
else:
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
self.valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
self.valobj, statistics
)
if wrapper:
self.error = True
return wrapper
@@ -183,24 +186,20 @@ class NSArray_SynthProvider:
logger >> "Class name is " + str(name_string)
if name_string == '__NSArrayI':
wrapper = NSArrayI_SynthProvider(
self.valobj, dict, class_data.sys_params)
statistics.metric_hit('code_notrun', self.valobj.GetName())
elif name_string == '__NSArrayM':
wrapper = NSArrayM_SynthProvider(
self.valobj, dict, class_data.sys_params)
statistics.metric_hit('code_notrun', self.valobj.GetName())
elif name_string == '__NSCFArray':
wrapper = NSArrayCF_SynthProvider(
self.valobj, dict, class_data.sys_params)
statistics.metric_hit('code_notrun', self.valobj.GetName())
if name_string == "__NSArrayI":
wrapper = NSArrayI_SynthProvider(self.valobj, dict, class_data.sys_params)
statistics.metric_hit("code_notrun", self.valobj.GetName())
elif name_string == "__NSArrayM":
wrapper = NSArrayM_SynthProvider(self.valobj, dict, class_data.sys_params)
statistics.metric_hit("code_notrun", self.valobj.GetName())
elif name_string == "__NSCFArray":
wrapper = NSArrayCF_SynthProvider(self.valobj, dict, class_data.sys_params)
statistics.metric_hit("code_notrun", self.valobj.GetName())
else:
wrapper = NSArrayKVC_SynthProvider(
self.valobj, dict, class_data.sys_params)
wrapper = NSArrayKVC_SynthProvider(self.valobj, dict, class_data.sys_params)
statistics.metric_hit(
'unknown_class', str(
self.valobj.GetName()) + " seen as " + name_string)
"unknown_class", str(self.valobj.GetName()) + " seen as " + name_string
)
return wrapper
@@ -216,18 +215,20 @@ def CFArray_SummaryProvider(valobj, dict):
summary = None
logger >> "provider gave me " + str(summary)
if summary is None:
summary = '<variable is not NSArray>'
summary = "<variable is not NSArray>"
elif isinstance(summary, str):
pass
else:
# we format it like it were a CFString to make it look the same as
# the summary from Xcode
summary = '@"' + str(summary) + \
(" objects" if summary != 1 else " object") + '"'
summary = (
'@"' + str(summary) + (" objects" if summary != 1 else " object") + '"'
)
return summary
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F CFArray.CFArray_SummaryProvider NSArray CFArrayRef CFMutableArrayRef")
"type summary add -F CFArray.CFArray_SummaryProvider NSArray CFArrayRef CFMutableArrayRef"
)


@@ -14,10 +14,10 @@ import lldb.formatters.metrics
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# despite the similary to synthetic children providers, these classes are not
# trying to provide anything but the length for an CFBag, so they need not
@@ -25,7 +25,6 @@ statistics.add_metric('code_notrun')
class CFBagRef_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -33,13 +32,15 @@ class CFBagRef_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if not (self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
)
self.update()
def update(self):
@@ -59,12 +60,12 @@ class CFBagRef_SummaryProvider:
def length(self):
logger = lldb.formatters.Logger.Logger()
size = self.valobj.CreateChildAtOffset(
"count", self.offset(), self.sys_params.types_cache.NSUInteger)
"count", self.offset(), self.sys_params.types_cache.NSUInteger
)
return size.GetValueAsUnsigned(0)
class CFBagUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -83,7 +84,8 @@ class CFBagUnknown_SummaryProvider:
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
num_children_vo = self.valobj.CreateValueFromExpression(
"count", "(int)CFBagGetCount(" + stream.GetData() + " )")
"count", "(int)CFBagGetCount(" + stream.GetData() + " )"
)
if num_children_vo.IsValid():
return num_children_vo.GetValueAsUnsigned(0)
return "<variable is not CFBag>"
@@ -92,16 +94,21 @@ class CFBagUnknown_SummaryProvider:
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
name_string = class_data.class_name()
actual_name = name_string
logger >> "name string got was " + \
str(name_string) + " but actual name is " + str(actual_name)
logger >> "name string got was " + str(name_string) + " but actual name is " + str(
actual_name
)
if class_data.is_cftype():
# CFBag does not expose an actual NSWrapper type, so we have to check that this is
@@ -111,17 +118,12 @@ def GetSummary_Impl(valobj):
valobj_type = valobj_type.GetPointeeType()
if valobj_type.IsValid():
actual_name = valobj_type.GetName()
if actual_name == '__CFBag' or \
actual_name == 'const struct __CFBag':
if actual_name == "__CFBag" or actual_name == "const struct __CFBag":
wrapper = CFBagRef_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
statistics.metric_hit("code_notrun", valobj)
return wrapper
wrapper = CFBagUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
actual_name)
statistics.metric_hit("unknown_class", valobj.GetName() + " seen as " + actual_name)
return wrapper
@@ -130,8 +132,8 @@ def CFBag_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.length()
@@ -144,20 +146,21 @@ def CFBag_SummaryProvider(valobj, dict):
# (if counts start looking weird, then most probably
# the mask needs to be changed)
if summary is None:
summary = '<variable is not CFBag>'
summary = "<variable is not CFBag>"
elif isinstance(summary, str):
pass
else:
if provider.sys_params.is_64_bit:
summary = summary & ~0x1fff000000000000
summary = summary & ~0x1FFF000000000000
if summary == 1:
summary = '@"1 value"'
else:
summary = '@"' + str(summary) + ' values"'
return summary
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F CFBag.CFBag_SummaryProvider CFBagRef CFMutableBagRef")
"type summary add -F CFBag.CFBag_SummaryProvider CFBagRef CFMutableBagRef"
)


@@ -14,10 +14,10 @@ import lldb.formatters.metrics
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# despite the similary to synthetic children providers, these classes are not
# trying to provide anything but the length for an CFBinaryHeap, so they need not
@@ -25,7 +25,6 @@ statistics.add_metric('code_notrun')
class CFBinaryHeapRef_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -33,13 +32,15 @@ class CFBinaryHeapRef_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if not (self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
)
self.update()
def update(self):
@@ -56,12 +57,12 @@ class CFBinaryHeapRef_SummaryProvider:
def length(self):
logger = lldb.formatters.Logger.Logger()
size = self.valobj.CreateChildAtOffset(
"count", self.offset(), self.sys_params.types_cache.NSUInteger)
"count", self.offset(), self.sys_params.types_cache.NSUInteger
)
return size.GetValueAsUnsigned(0)
class CFBinaryHeapUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -80,25 +81,31 @@ class CFBinaryHeapUnknown_SummaryProvider:
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
num_children_vo = self.valobj.CreateValueFromExpression(
"count", "(int)CFBinaryHeapGetCount(" + stream.GetData() + " )")
"count", "(int)CFBinaryHeapGetCount(" + stream.GetData() + " )"
)
if num_children_vo.IsValid():
return num_children_vo.GetValueAsUnsigned(0)
return '<variable is not CFBinaryHeap>'
return "<variable is not CFBinaryHeap>"
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
name_string = class_data.class_name()
actual_name = class_data.class_name()
logger >> "name string got was " + \
str(name_string) + " but actual name is " + str(actual_name)
logger >> "name string got was " + str(name_string) + " but actual name is " + str(
actual_name
)
if class_data.is_cftype():
# CFBinaryHeap does not expose an actual NSWrapper type, so we have to check that this is
@@ -108,18 +115,12 @@ def GetSummary_Impl(valobj):
valobj_type = valobj_type.GetPointeeType()
if valobj_type.IsValid():
actual_name = valobj_type.GetName()
if actual_name == '__CFBinaryHeap':
wrapper = CFBinaryHeapRef_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
if actual_name == "__CFBinaryHeap":
wrapper = CFBinaryHeapRef_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit("code_notrun", valobj)
return wrapper
wrapper = CFBinaryHeapUnknown_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
wrapper = CFBinaryHeapUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit("unknown_class", valobj.GetName() + " seen as " + name_string)
return wrapper
@@ -128,8 +129,8 @@ def CFBinaryHeap_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.length()
@@ -142,20 +143,21 @@ def CFBinaryHeap_SummaryProvider(valobj, dict):
# (if counts start looking weird, then most probably
# the mask needs to be changed)
if summary is None:
summary = '<variable is not CFBinaryHeap>'
summary = "<variable is not CFBinaryHeap>"
elif isinstance(summary, str):
pass
else:
if provider.sys_params.is_64_bit:
summary = summary & ~0x1fff000000000000
summary = summary & ~0x1FFF000000000000
if summary == 1:
return '@"1 item"'
else:
summary = '@"' + str(summary) + ' items"'
return summary
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F CFBinaryHeap.CFBinaryHeap_SummaryProvider CFBinaryHeapRef")
"type summary add -F CFBinaryHeap.CFBinaryHeap_SummaryProvider CFBinaryHeapRef"
)


@@ -37,11 +37,12 @@ def grab_array_item_data(pointer, index):
logger = lldb.formatters.Logger.Logger()
return pointer.GetPointeeData(index, 1)
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# despite the similary to synthetic children providers, these classes are not
# trying to provide anything but a summary for a CF*BitVector, so they need not
@@ -49,7 +50,6 @@ statistics.add_metric('code_notrun')
class CFBitVectorKnown_SummaryProvider:
def adjust_for_architecture(self):
logger = lldb.formatters.Logger.Logger()
self.uiint_size = self.sys_params.types_cache.NSUInteger.GetByteSize()
@@ -59,16 +59,19 @@ class CFBitVectorKnown_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if not (self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
if not(self.sys_params.types_cache.charptr):
self.sys_params.types_cache.charptr = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeChar).GetPointerType()
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
)
if not (self.sys_params.types_cache.charptr):
self.sys_params.types_cache.charptr = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeChar).GetPointerType()
)
self.update()
def update(self):
@@ -85,17 +88,17 @@ class CFBitVectorKnown_SummaryProvider:
count_vo = self.valobj.CreateChildAtOffset(
"count",
self.sys_params.cfruntime_size,
self.sys_params.types_cache.NSUInteger)
self.sys_params.types_cache.NSUInteger,
)
count = count_vo.GetValueAsUnsigned(0)
if count == 0:
return '(empty)'
return "(empty)"
array_vo = self.valobj.CreateChildAtOffset(
"data",
self.sys_params.cfruntime_size +
2 *
self.uiint_size,
self.sys_params.types_cache.charptr)
self.sys_params.cfruntime_size + 2 * self.uiint_size,
self.sys_params.types_cache.charptr,
)
data_list = []
cur_byte_pos = None
@@ -113,16 +116,15 @@ class CFBitVectorKnown_SummaryProvider:
cur_byte_val = cur_byte.uint8[0]
bit = get_bit(cur_byte_val, bit_index(i))
if (i % 4) == 0:
data_list.append(' ')
data_list.append(" ")
if bit == 1:
data_list.append('1')
data_list.append("1")
else:
data_list.append('0')
return ''.join(data_list)
data_list.append("0")
return "".join(data_list)
class CFBitVectorUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -138,22 +140,27 @@ class CFBitVectorUnknown_SummaryProvider:
def contents(self):
logger = lldb.formatters.Logger.Logger()
return '<unable to summarize this CFBitVector>'
return "<unable to summarize this CFBitVector>"
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
name_string = class_data.class_name()
actual_name = name_string
logger >> "name string got was " + \
str(name_string) + " but actual name is " + str(actual_name)
logger >> "name string got was " + str(name_string) + " but actual name is " + str(
actual_name
)
if class_data.is_cftype():
# CFBitVectorRef does not expose an actual NSWrapper type, so we have to check that this is
@@ -163,23 +170,18 @@ def GetSummary_Impl(valobj):
valobj_type = valobj_type.GetPointeeType()
if valobj_type.IsValid():
actual_name = valobj_type.GetName()
if actual_name == '__CFBitVector' or actual_name == '__CFMutableBitVector':
wrapper = CFBitVectorKnown_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
if actual_name == "__CFBitVector" or actual_name == "__CFMutableBitVector":
wrapper = CFBitVectorKnown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit("code_notrun", valobj)
else:
wrapper = CFBitVectorUnknown_SummaryProvider(
valobj, class_data.sys_params)
wrapper = CFBitVectorUnknown_SummaryProvider(valobj, class_data.sys_params)
print(actual_name)
else:
wrapper = CFBitVectorUnknown_SummaryProvider(
valobj, class_data.sys_params)
wrapper = CFBitVectorUnknown_SummaryProvider(valobj, class_data.sys_params)
print(name_string)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
"unknown_class", valobj.GetName() + " seen as " + name_string
)
return wrapper
@@ -188,20 +190,21 @@ def CFBitVector_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.contents()
except:
summary = None
logger >> "summary got from provider: " + str(summary)
if summary is None or summary == '':
summary = '<variable is not CFBitVector>'
if summary is None or summary == "":
summary = "<variable is not CFBitVector>"
return summary
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F CFBitVector.CFBitVector_SummaryProvider CFBitVectorRef CFMutableBitVectorRef")
"type summary add -F CFBitVector.CFBitVector_SummaryProvider CFBitVectorRef CFMutableBitVectorRef"
)


@@ -14,10 +14,10 @@ import lldb.formatters.metrics
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# despite the similary to synthetic children providers, these classes are not
# trying to provide anything but the count for an NSDictionary, so they need not
@@ -25,7 +25,6 @@ statistics.add_metric('code_notrun')
class NSCFDictionary_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -33,13 +32,15 @@ class NSCFDictionary_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if not (self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
)
self.update()
def update(self):
@@ -60,12 +61,12 @@ class NSCFDictionary_SummaryProvider:
def num_children(self):
logger = lldb.formatters.Logger.Logger()
num_children_vo = self.valobj.CreateChildAtOffset(
"count", self.offset(), self.sys_params.types_cache.NSUInteger)
"count", self.offset(), self.sys_params.types_cache.NSUInteger
)
return num_children_vo.GetValueAsUnsigned(0)
class NSDictionaryI_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -73,13 +74,15 @@ class NSDictionaryI_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if not (self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
)
self.update()
def update(self):
@@ -94,7 +97,8 @@ class NSDictionaryI_SummaryProvider:
def num_children(self):
logger = lldb.formatters.Logger.Logger()
num_children_vo = self.valobj.CreateChildAtOffset(
"count", self.offset(), self.sys_params.types_cache.NSUInteger)
"count", self.offset(), self.sys_params.types_cache.NSUInteger
)
value = num_children_vo.GetValueAsUnsigned(0)
if value is not None:
# the MS6bits on immutable dictionaries seem to be taken by the LSB of capacity
@@ -108,7 +112,6 @@ class NSDictionaryI_SummaryProvider:
class NSDictionaryM_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -116,13 +119,15 @@ class NSDictionaryM_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if not (self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
)
self.update()
def update(self):
@@ -136,7 +141,8 @@ class NSDictionaryM_SummaryProvider:
def num_children(self):
logger = lldb.formatters.Logger.Logger()
num_children_vo = self.valobj.CreateChildAtOffset(
"count", self.offset(), self.sys_params.types_cache.NSUInteger)
"count", self.offset(), self.sys_params.types_cache.NSUInteger
)
value = num_children_vo.GetValueAsUnsigned(0)
if value is not None:
# the MS6bits on immutable dictionaries seem to be taken by the LSB of capacity
@@ -150,7 +156,6 @@ class NSDictionaryM_SummaryProvider:
class NSDictionaryUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -169,17 +174,22 @@ class NSDictionaryUnknown_SummaryProvider:
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
num_children_vo = self.valobj.CreateValueFromExpression(
"count", "(int)[" + stream.GetData() + " count]")
"count", "(int)[" + stream.GetData() + " count]"
)
if num_children_vo.IsValid():
return num_children_vo.GetValueAsUnsigned(0)
return '<variable is not NSDictionary>'
return "<variable is not NSDictionary>"
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
@@ -187,23 +197,20 @@ def GetSummary_Impl(valobj):
logger >> "class name is: " + str(name_string)
if name_string == '__NSCFDictionary':
if name_string == "__NSCFDictionary":
wrapper = NSCFDictionary_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
elif name_string == '__NSDictionaryI':
statistics.metric_hit("code_notrun", valobj)
elif name_string == "__NSDictionaryI":
wrapper = NSDictionaryI_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
elif name_string == '__NSDictionaryM':
statistics.metric_hit("code_notrun", valobj)
elif name_string == "__NSDictionaryM":
wrapper = NSDictionaryM_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
statistics.metric_hit("code_notrun", valobj)
else:
wrapper = NSDictionaryUnknown_SummaryProvider(
valobj, class_data.sys_params)
wrapper = NSDictionaryUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
"unknown_class", valobj.GetName() + " seen as " + name_string
)
return wrapper
@@ -212,8 +219,8 @@ def CFDictionary_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.num_children()
@@ -221,12 +228,13 @@ def CFDictionary_SummaryProvider(valobj, dict):
summary = None
logger >> "got summary " + str(summary)
if summary is None:
return '<variable is not NSDictionary>'
return "<variable is not NSDictionary>"
if isinstance(summary, str):
return summary
return str(summary) + (" key/value pairs" if summary !=
1 else " key/value pair")
return 'Summary Unavailable'
return str(summary) + (
" key/value pairs" if summary != 1 else " key/value pair"
)
return "Summary Unavailable"
def CFDictionary_SummaryProvider2(valobj, dict):
@@ -234,8 +242,8 @@ def CFDictionary_SummaryProvider2(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.num_children()
@@ -243,21 +251,22 @@ def CFDictionary_SummaryProvider2(valobj, dict):
summary = None
logger >> "got summary " + str(summary)
if summary is None:
summary = '<variable is not CFDictionary>'
summary = "<variable is not CFDictionary>"
if isinstance(summary, str):
return summary
else:
# needed on OSX Mountain Lion
if provider.sys_params.is_64_bit:
summary = summary & ~0x0f1f000000000000
summary = '@"' + str(summary) + \
(' entries"' if summary != 1 else ' entry"')
summary = summary & ~0x0F1F000000000000
summary = '@"' + str(summary) + (' entries"' if summary != 1 else ' entry"')
return summary
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F CFDictionary.CFDictionary_SummaryProvider NSDictionary")
"type summary add -F CFDictionary.CFDictionary_SummaryProvider NSDictionary"
)
debugger.HandleCommand(
"type summary add -F CFDictionary.CFDictionary_SummaryProvider2 CFDictionaryRef CFMutableDictionaryRef")
"type summary add -F CFDictionary.CFDictionary_SummaryProvider2 CFDictionaryRef CFMutableDictionaryRef"
)
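Usage sketch, not part of the diff: importing one of these formatter files through the script interpreter runs its __lldb_init_module hook, which issues the "type summary add -F ..." commands shown above. The file path below is a placeholder.

import lldb

debugger = lldb.SBDebugger.Create()
# command script import triggers CFDictionary.__lldb_init_module(debugger, ...)
debugger.HandleCommand("command script import /path/to/CFDictionary.py")
# afterwards, NSDictionary values printed by this debugger use the new summary
debugger.HandleCommand("type summary list NSDictionary")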

View File

@@ -16,13 +16,13 @@ try:
except NameError:
unichr = chr
def CFString_SummaryProvider(valobj, dict):
logger = lldb.formatters.Logger.Logger()
provider = CFStringSynthProvider(valobj, dict)
if not provider.invalid:
try:
summary = provider.get_child_at_index(
provider.get_child_index("content"))
summary = provider.get_child_at_index(provider.get_child_index("content"))
if isinstance(summary, lldb.SBValue):
summary = summary.GetSummary()
else:
@@ -30,45 +30,47 @@ def CFString_SummaryProvider(valobj, dict):
except:
summary = None
if summary is None:
summary = '<variable is not NSString>'
return '@' + summary
return ''
summary = "<variable is not NSString>"
return "@" + summary
return ""
def CFAttributedString_SummaryProvider(valobj, dict):
logger = lldb.formatters.Logger.Logger()
offset = valobj.GetTarget().GetProcess().GetAddressByteSize()
pointee = valobj.GetValueAsUnsigned(0)
summary = '<variable is not NSAttributedString>'
summary = "<variable is not NSAttributedString>"
if pointee is not None and pointee != 0:
pointee = pointee + offset
child_ptr = valobj.CreateValueFromAddress(
"string_ptr", pointee, valobj.GetType())
"string_ptr", pointee, valobj.GetType()
)
child = child_ptr.CreateValueFromAddress(
"string_data",
child_ptr.GetValueAsUnsigned(),
valobj.GetType()).AddressOf()
"string_data", child_ptr.GetValueAsUnsigned(), valobj.GetType()
).AddressOf()
provider = CFStringSynthProvider(child, dict)
if not provider.invalid:
try:
summary = provider.get_child_at_index(
provider.get_child_index("content")).GetSummary()
provider.get_child_index("content")
).GetSummary()
except:
summary = '<variable is not NSAttributedString>'
summary = "<variable is not NSAttributedString>"
if summary is None:
summary = '<variable is not NSAttributedString>'
return '@' + summary
summary = "<variable is not NSAttributedString>"
return "@" + summary
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F CFString.CFString_SummaryProvider NSString CFStringRef CFMutableStringRef")
"type summary add -F CFString.CFString_SummaryProvider NSString CFStringRef CFMutableStringRef"
)
debugger.HandleCommand(
"type summary add -F CFString.CFAttributedString_SummaryProvider NSAttributedString")
"type summary add -F CFString.CFAttributedString_SummaryProvider NSAttributedString"
)
class CFStringSynthProvider:
def __init__(self, valobj, dict):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
@@ -86,7 +88,7 @@ class CFStringSynthProvider:
logger = lldb.formatters.Logger.Logger()
process = self.valobj.GetTarget().GetProcess()
error = lldb.SBError()
pystr = u''
pystr = ""
# cannot do the read at once because the length value has
# a weird encoding. better play it safe here
while max_len > 0:
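As the comment above notes, read_unicode pulls the characters out of the inferior in small chunks rather than one large read. A minimal standalone sketch of chunked reading over the SB API; the helper name, the fixed two-byte chunk size, and the little-endian decode are illustrative assumptions, not code from CFString.py:

import lldb

def read_utf16_chunked(process, address, max_units):
    """Read up to max_units UTF-16 code units from the inferior, two bytes at a time."""
    error = lldb.SBError()
    raw = b""
    while max_units > 0:
        chunk = process.ReadMemory(address, 2, error)  # SBProcess.ReadMemory returns raw bytes
        if error.Fail():
            break
        raw += chunk
        address += 2
        max_units -= 1
    return raw.decode("utf-16-le", errors="replace")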
@@ -121,21 +123,22 @@ class CFStringSynthProvider:
pointer = self.valobj.GetValueAsUnsigned(0) + offset
pystr = self.read_unicode(pointer)
return self.valobj.CreateValueFromExpression(
"content", "(char*)\"" + pystr.encode('utf-8') + "\"")
"content", '(char*)"' + pystr.encode("utf-8") + '"'
)
# last resort call, use ObjC code to read; the final aim is to
# be able to strip this call away entirely and only do the read
# ourselves
def handle_unicode_string_safe(self):
return self.valobj.CreateValueFromExpression(
"content", "(char*)\"" + self.valobj.GetObjectDescription() + "\"")
"content", '(char*)"' + self.valobj.GetObjectDescription() + '"'
)
def handle_unicode_string(self):
logger = lldb.formatters.Logger.Logger()
# step 1: find offset
if self.inline:
pointer = self.valobj.GetValueAsUnsigned(
0) + self.size_of_cfruntime_base()
pointer = self.valobj.GetValueAsUnsigned(0) + self.size_of_cfruntime_base()
if not self.explicit:
# untested, use the safe code path
return self.handle_unicode_string_safe()
@@ -144,79 +147,93 @@ class CFStringSynthProvider:
# data
pointer = pointer + self.pointer_size
else:
pointer = self.valobj.GetValueAsUnsigned(
0) + self.size_of_cfruntime_base()
pointer = self.valobj.GetValueAsUnsigned(0) + self.size_of_cfruntime_base()
# read 8 bytes here and make an address out of them
try:
char_type = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeChar).GetPointerType()
char_type = (
self.valobj.GetType()
.GetBasicType(lldb.eBasicTypeChar)
.GetPointerType()
)
vopointer = self.valobj.CreateValueFromAddress(
"dummy", pointer, char_type)
"dummy", pointer, char_type
)
pointer = vopointer.GetValueAsUnsigned(0)
except:
return self.valobj.CreateValueFromExpression(
"content", '(char*)"@\"invalid NSString\""')
"content", '(char*)"@"invalid NSString""'
)
# step 2: read Unicode data at pointer
pystr = self.read_unicode(pointer)
# step 3: return it
return pystr.encode('utf-8')
return pystr.encode("utf-8")
def handle_inline_explicit(self):
logger = lldb.formatters.Logger.Logger()
offset = 3 * self.pointer_size
offset = offset + self.valobj.GetValueAsUnsigned(0)
return self.valobj.CreateValueFromExpression(
"content", "(char*)(" + str(offset) + ")")
"content", "(char*)(" + str(offset) + ")"
)
def handle_mutable_string(self):
logger = lldb.formatters.Logger.Logger()
offset = 2 * self.pointer_size
data = self.valobj.CreateChildAtOffset(
"content", offset, self.valobj.GetType().GetBasicType(
lldb.eBasicTypeChar).GetPointerType())
"content",
offset,
self.valobj.GetType().GetBasicType(lldb.eBasicTypeChar).GetPointerType(),
)
data_value = data.GetValueAsUnsigned(0)
if self.explicit and self.unicode:
return self.read_unicode(data_value).encode('utf-8')
return self.read_unicode(data_value).encode("utf-8")
else:
data_value = data_value + 1
return self.valobj.CreateValueFromExpression(
"content", "(char*)(" + str(data_value) + ")")
"content", "(char*)(" + str(data_value) + ")"
)
def handle_UTF8_inline(self):
logger = lldb.formatters.Logger.Logger()
offset = self.valobj.GetValueAsUnsigned(
0) + self.size_of_cfruntime_base()
offset = self.valobj.GetValueAsUnsigned(0) + self.size_of_cfruntime_base()
if not self.explicit:
offset = offset + 1
return self.valobj.CreateValueFromAddress(
"content", offset, self.valobj.GetType().GetBasicType(
lldb.eBasicTypeChar)).AddressOf()
"content", offset, self.valobj.GetType().GetBasicType(lldb.eBasicTypeChar)
).AddressOf()
def handle_UTF8_not_inline(self):
logger = lldb.formatters.Logger.Logger()
offset = self.size_of_cfruntime_base()
return self.valobj.CreateChildAtOffset(
"content", offset, self.valobj.GetType().GetBasicType(
lldb.eBasicTypeChar).GetPointerType())
"content",
offset,
self.valobj.GetType().GetBasicType(lldb.eBasicTypeChar).GetPointerType(),
)
def get_child_at_index(self, index):
logger = lldb.formatters.Logger.Logger()
logger >> "Querying for child [" + str(index) + "]"
if index == 0:
return self.valobj.CreateValueFromExpression(
"mutable", str(int(self.mutable)))
"mutable", str(int(self.mutable))
)
if index == 1:
return self.valobj.CreateValueFromExpression("inline",
str(int(self.inline)))
return self.valobj.CreateValueFromExpression(
"inline", str(int(self.inline))
)
if index == 2:
return self.valobj.CreateValueFromExpression(
"explicit", str(int(self.explicit)))
"explicit", str(int(self.explicit))
)
if index == 3:
return self.valobj.CreateValueFromExpression(
"unicode", str(int(self.unicode)))
"unicode", str(int(self.unicode))
)
if index == 4:
return self.valobj.CreateValueFromExpression(
"special", str(int(self.special)))
"special", str(int(self.special))
)
if index == 5:
# we are handling the several possible combinations of flags.
# for each known combination we have a function that knows how to
@@ -233,9 +250,13 @@ class CFStringSynthProvider:
# print 'special = ' + str(self.special)
if self.mutable:
return self.handle_mutable_string()
elif self.inline and self.explicit and \
self.unicode == False and self.special == False and \
self.mutable == False:
elif (
self.inline
and self.explicit
and self.unicode == False
and self.special == False
and self.mutable == False
):
return self.handle_inline_explicit()
elif self.unicode:
return self.handle_unicode_string()
@@ -287,8 +308,8 @@ class CFStringSynthProvider:
cfinfo = self.valobj.CreateChildAtOffset(
"cfinfo",
self.offset_of_info_bits(),
self.valobj.GetType().GetBasicType(
lldb.eBasicTypeChar))
self.valobj.GetType().GetBasicType(lldb.eBasicTypeChar),
)
cfinfo.SetFormat(11)
info = cfinfo.GetValue()
if info is not None:
@@ -333,8 +354,9 @@ class CFStringSynthProvider:
logger = lldb.formatters.Logger.Logger()
self.pointer_size = self.valobj.GetTarget().GetProcess().GetAddressByteSize()
self.is_64_bit = self.pointer_size == 8
self.is_little = self.valobj.GetTarget().GetProcess(
).GetByteOrder() == lldb.eByteOrderLittle
self.is_little = (
self.valobj.GetTarget().GetProcess().GetByteOrder() == lldb.eByteOrderLittle
)
# reading info bits out of the CFString and computing
# useful values to get at the real data

View File

@@ -12,11 +12,10 @@ import lldb.formatters.Logger
def Class_Summary(valobj, dict):
logger = lldb.formatters.Logger.Logger()
runtime = lldb.runtime.objc.objc_runtime.ObjCRuntime.runtime_from_isa(
valobj)
runtime = lldb.runtime.objc.objc_runtime.ObjCRuntime.runtime_from_isa(valobj)
if runtime is None or not runtime.is_valid():
return '<error: unknown Class>'
return "<error: unknown Class>"
class_data = runtime.read_class_data()
if class_data is None or not class_data.is_valid():
return '<error: unknown Class>'
return "<error: unknown Class>"
return class_data.class_name()

View File

@@ -4,7 +4,6 @@ import inspect
class NopLogger:
def __init__(self):
pass
@@ -19,7 +18,6 @@ class NopLogger:
class StdoutLogger:
def __init__(self):
pass
@@ -34,15 +32,14 @@ class StdoutLogger:
class FileLogger:
def __init__(self, name):
self.file = None
try:
name = os.path.abspath(name)
self.file = open(name, 'a')
self.file = open(name, "a")
except:
try:
self.file = open('formatters.log', 'a')
self.file = open("formatters.log", "a")
except:
pass
@@ -61,6 +58,7 @@ class FileLogger:
self.file.close()
self.file = None
# to enable logging:
# define lldb.formatters.Logger._lldb_formatters_debug_level to any number greater than 0
# if you define it to any value greater than 1, the log will be automatically flushed after each write (slower but should make sure most of the stuff makes it to the log even if we crash)
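A short sketch of what the comment above describes; the log path is an example only:

import lldb.formatters.Logger

# any value greater than 0 enables logging; greater than 1 also flushes after each write,
# and greater than 2 additionally records which function asked for the logger
lldb.formatters.Logger._lldb_formatters_debug_level = 2
# when set to a non-empty string, log lines go to this file instead of stdout
lldb.formatters.Logger._lldb_formatters_debug_filename = "/tmp/lldb_formatters.log"

logger = lldb.formatters.Logger.Logger()
logger >> "formatter logging enabled"  # the formatters write log lines with >>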
@@ -71,14 +69,13 @@ class FileLogger:
class Logger:
def __init__(self, autoflush=False, logcaller=False):
global _lldb_formatters_debug_level
global _lldb_formatters_debug_filename
self.autoflush = autoflush
want_log = False
try:
want_log = (_lldb_formatters_debug_level > 0)
want_log = _lldb_formatters_debug_level > 0
except:
pass
if not (want_log):
@@ -86,8 +83,11 @@ class Logger:
return
want_file = False
try:
want_file = (_lldb_formatters_debug_filename is not None and _lldb_formatters_debug_filename !=
'' and _lldb_formatters_debug_filename != 0)
want_file = (
_lldb_formatters_debug_filename is not None
and _lldb_formatters_debug_filename != ""
and _lldb_formatters_debug_filename != 0
)
except:
pass
if want_file:
@@ -95,12 +95,12 @@ class Logger:
else:
self.impl = StdoutLogger()
try:
self.autoflush = (_lldb_formatters_debug_level > 1)
self.autoflush = _lldb_formatters_debug_level > 1
except:
self.autoflush = autoflush
want_caller_info = False
try:
want_caller_info = (_lldb_formatters_debug_level > 2)
want_caller_info = _lldb_formatters_debug_level > 2
except:
pass
if want_caller_info:
@@ -110,10 +110,11 @@ class Logger:
caller = inspect.stack()[2]
try:
if caller is not None and len(caller) > 3:
self.write('Logging from function ' + str(caller))
self.write("Logging from function " + str(caller))
else:
self.write(
'Caller info not available - Required caller logging not possible')
"Caller info not available - Required caller logging not possible"
)
finally:
del caller # needed per Python docs to avoid keeping objects alive longer than we care

View File

@@ -15,10 +15,10 @@ import NSURL
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but a summary for an NSURL, so they need not
@@ -26,7 +26,6 @@ statistics.add_metric('code_notrun')
class NSBundleKnown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -34,9 +33,10 @@ class NSBundleKnown_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSString):
self.sys_params.types_cache.NSString = self.valobj.GetTarget(
).FindFirstType('NSString').GetPointerType()
if not (self.sys_params.types_cache.NSString):
self.sys_params.types_cache.NSString = (
self.valobj.GetTarget().FindFirstType("NSString").GetPointerType()
)
self.update()
def update(self):
@@ -54,21 +54,23 @@ class NSBundleKnown_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
global statistics
text = self.valobj.CreateChildAtOffset(
"text", self.offset(), self.sys_params.types_cache.NSString)
"text", self.offset(), self.sys_params.types_cache.NSString
)
my_string = text.GetSummary()
if (my_string is None) or (my_string == ''):
if (my_string is None) or (my_string == ""):
statistics.metric_hit(
'unknown_class', str(
self.valobj.GetName()) + " triggered unknown pointer location")
"unknown_class",
str(self.valobj.GetName()) + " triggered unknown pointer location",
)
return NSBundleUnknown_SummaryProvider(
self.valobj, self.sys_params).url_text()
self.valobj, self.sys_params
).url_text()
else:
statistics.metric_hit('code_notrun', self.valobj)
statistics.metric_hit("code_notrun", self.valobj)
return my_string
class NSBundleUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -90,34 +92,35 @@ class NSBundleUnknown_SummaryProvider:
url_text_vo = self.valobj.CreateValueFromExpression("path", expr)
if url_text_vo.IsValid():
return url_text_vo.GetSummary()
return '<variable is not NSBundle>'
return "<variable is not NSBundle>"
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSBundle':
if name_string == "NSBundle":
wrapper = NSBundleKnown_SummaryProvider(valobj, class_data.sys_params)
# [NSBundle mainBundle] does return an object that is
# not correctly filled out for our purposes, so we still
# end up having to run code in that case
# statistics.metric_hit('code_notrun',valobj)
else:
wrapper = NSBundleUnknown_SummaryProvider(
valobj, class_data.sys_params)
wrapper = NSBundleUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
"unknown_class", valobj.GetName() + " seen as " + name_string
)
return wrapper
@@ -126,20 +129,21 @@ def NSBundle_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.url_text()
except:
summary = None
logger >> "got summary " + str(summary)
if summary is None or summary == '':
summary = '<variable is not NSBundle>'
if summary is None or summary == "":
summary = "<variable is not NSBundle>"
return summary
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F NSBundle.NSBundle_SummaryProvider NSBundle")
"type summary add -F NSBundle.NSBundle_SummaryProvider NSBundle"
)

View File

@@ -14,10 +14,10 @@ import lldb.formatters.metrics
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but the length for an NSData, so they need not
@@ -25,7 +25,6 @@ statistics.add_metric('code_notrun')
class NSConcreteData_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -34,13 +33,15 @@ class NSConcreteData_SummaryProvider:
logger >> "NSConcreteData_SummaryProvider __init__"
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if not (self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
)
self.update()
def update(self):
@@ -58,14 +59,14 @@ class NSConcreteData_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
logger >> "NSConcreteData_SummaryProvider length"
size = self.valobj.CreateChildAtOffset(
"count", self.offset(), self.sys_params.types_cache.NSUInteger)
"count", self.offset(), self.sys_params.types_cache.NSUInteger
)
logger >> str(size)
logger >> str(size.GetValueAsUnsigned(0))
return size.GetValueAsUnsigned(0)
class NSDataUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -86,40 +87,46 @@ class NSDataUnknown_SummaryProvider:
self.valobj.GetExpressionPath(stream)
logger >> stream.GetData()
num_children_vo = self.valobj.CreateValueFromExpression(
"count", "(int)[" + stream.GetData() + " length]")
"count", "(int)[" + stream.GetData() + " length]"
)
logger >> "still in after expression: " + str(num_children_vo)
if num_children_vo.IsValid():
logger >> "wow - expr output is valid: " + \
str(num_children_vo.GetValueAsUnsigned())
logger >> "wow - expr output is valid: " + str(
num_children_vo.GetValueAsUnsigned()
)
return num_children_vo.GetValueAsUnsigned(0)
logger >> "invalid expr output - too bad"
return '<variable is not NSData>'
return "<variable is not NSData>"
def GetSummary_Impl(valobj):
global statistics
logger = lldb.formatters.Logger.Logger()
logger >> "NSData GetSummary_Impl"
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
logger >> "got a wrapper summary - using it"
return wrapper
name_string = class_data.class_name()
logger >> "class name: " + name_string
if name_string == 'NSConcreteData' or \
name_string == 'NSConcreteMutableData' or \
name_string == '__NSCFData':
if (
name_string == "NSConcreteData"
or name_string == "NSConcreteMutableData"
or name_string == "__NSCFData"
):
wrapper = NSConcreteData_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
statistics.metric_hit("code_notrun", valobj)
else:
wrapper = NSDataUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
"unknown_class", valobj.GetName() + " seen as " + name_string
)
return wrapper
@@ -135,16 +142,16 @@ def NSData_SummaryProvider(valobj, dict):
summary = None
logger >> "got a summary: it is " + str(summary)
if summary is None:
summary = '<variable is not NSData>'
summary = "<variable is not NSData>"
elif isinstance(summary, str):
pass
else:
if summary == 1:
summary = '1 byte'
summary = "1 byte"
else:
summary = str(summary) + ' bytes'
summary = str(summary) + " bytes"
return summary
return 'Summary Unavailable'
return "Summary Unavailable"
def NSData_SummaryProvider2(valobj, dict):
@@ -154,8 +161,8 @@ def NSData_SummaryProvider2(valobj, dict):
logger >> "found a summary provider, it is: " + str(provider)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.length()
@@ -163,7 +170,7 @@ def NSData_SummaryProvider2(valobj, dict):
summary = None
logger >> "got a summary: it is " + str(summary)
if summary is None:
summary = '<variable is not CFData>'
summary = "<variable is not CFData>"
elif isinstance(summary, str):
pass
else:
@@ -172,11 +179,11 @@ def NSData_SummaryProvider2(valobj, dict):
else:
summary = '@"' + str(summary) + ' bytes"'
return summary
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand("type summary add -F NSData.NSData_SummaryProvider NSData")
debugger.HandleCommand(
"type summary add -F NSData.NSData_SummaryProvider NSData")
debugger.HandleCommand(
"type summary add -F NSData.NSData_SummaryProvider2 CFDataRef CFMutableDataRef")
"type summary add -F NSData.NSData_SummaryProvider2 CFDataRef CFMutableDataRef"
)

View File

@@ -18,10 +18,10 @@ import CFString
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# Python promises to start counting time at midnight on Jan 1st of the epoch year
# hence, all we need to know is the epoch year
@@ -34,6 +34,7 @@ def mkgmtime(t):
logger = lldb.formatters.Logger.Logger()
return time.mktime(t) - time.timezone
osx_epoch = mkgmtime(osx_epoch)
@@ -44,12 +45,14 @@ def osx_to_python_time(osx):
else:
return osx - osx_epoch
# represent a struct_time as a string in the format used by Xcode
def xcode_format_time(X):
logger = lldb.formatters.Logger.Logger()
return time.strftime('%Y-%m-%d %H:%M:%S %Z', X)
return time.strftime("%Y-%m-%d %H:%M:%S %Z", X)
# represent a count-since-epoch as a string in the format used by Xcode
@@ -58,13 +61,13 @@ def xcode_format_count(X):
logger = lldb.formatters.Logger.Logger()
return xcode_format_time(time.localtime(X))
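Worked example of the conversion these helpers perform, as a standalone sketch with a made-up timestamp: Core Foundation absolute time counts seconds from Jan 1 2001 GMT, while Python's time module counts from the platform epoch (Jan 1 1970 on Unix), so a CFAbsoluteTime only needs the offset between the two epochs added before formatting.

import time

# offset between the Unix epoch (1970) and the CF epoch (2001): 978307200 seconds on Unix
osx_epoch = time.mktime((2001, 1, 1, 0, 0, 0, 0, 0, 0)) - time.timezone

cf_time = 370000000.0              # hypothetical CFAbsoluteTime pulled out of an NSDate
unix_time = cf_time + osx_epoch    # the same instant expressed on the Python epoch
print(time.strftime("%Y-%m-%d %H:%M:%S %Z", time.localtime(unix_time)))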
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but the summary for NSDate, so they need not
# obey the interface specification for synthetic children providers
class NSTaggedDate_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -75,7 +78,7 @@ class NSTaggedDate_SummaryProvider:
self.update()
# NSDate is not using its info_bits for info like NSNumber is
# so we need to regroup info_bits and data
self.data = ((data << 8) | (info_bits << 4))
self.data = (data << 8) | (info_bits << 4)
def update(self):
logger = lldb.formatters.Logger.Logger()
@@ -87,14 +90,13 @@ class NSTaggedDate_SummaryProvider:
        # unfortunately, it is stored as a time-delta after Jan 1 2001 midnight GMT
        # while all Python knows about is the "epoch", which is a platform-dependent
        # year (1970 for *nix) whose Jan 1 at midnight is taken as the reference
value_double = struct.unpack('d', struct.pack('Q', self.data))[0]
value_double = struct.unpack("d", struct.pack("Q", self.data))[0]
if value_double == -63114076800.0:
return '0001-12-30 00:00:00 +0000'
return "0001-12-30 00:00:00 +0000"
return xcode_format_count(osx_to_python_time(value_double))
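The struct.pack/struct.unpack pair above reinterprets the raw 64-bit payload as an IEEE-754 double instead of converting the integer arithmetically. Standalone illustration with a known bit pattern:

import struct

bits = 0x3FF0000000000000                         # bit pattern of the double 1.0
as_double = struct.unpack("d", struct.pack("Q", bits))[0]
print(as_double)                                  # 1.0, not 4607182418800017408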
class NSUntaggedDate_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -103,8 +105,9 @@ class NSUntaggedDate_SummaryProvider:
self.valobj = valobj
self.sys_params = params
if not (self.sys_params.types_cache.double):
self.sys_params.types_cache.double = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeDouble)
self.sys_params.types_cache.double = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeDouble
)
self.update()
def update(self):
@@ -118,17 +121,17 @@ class NSUntaggedDate_SummaryProvider:
def value(self):
logger = lldb.formatters.Logger.Logger()
value = self.valobj.CreateChildAtOffset(
"value", self.offset(), self.sys_params.types_cache.double)
value_double = struct.unpack(
'd', struct.pack(
'Q', value.GetData().uint64[0]))[0]
"value", self.offset(), self.sys_params.types_cache.double
)
value_double = struct.unpack("d", struct.pack("Q", value.GetData().uint64[0]))[
0
]
if value_double == -63114076800.0:
return '0001-12-30 00:00:00 +0000'
return "0001-12-30 00:00:00 +0000"
return xcode_format_count(osx_to_python_time(value_double))
class NSCalendarDate_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -137,8 +140,9 @@ class NSCalendarDate_SummaryProvider:
self.valobj = valobj
self.sys_params = params
if not (self.sys_params.types_cache.double):
self.sys_params.types_cache.double = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeDouble)
self.sys_params.types_cache.double = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeDouble
)
self.update()
def update(self):
@@ -152,15 +156,15 @@ class NSCalendarDate_SummaryProvider:
def value(self):
logger = lldb.formatters.Logger.Logger()
value = self.valobj.CreateChildAtOffset(
"value", self.offset(), self.sys_params.types_cache.double)
value_double = struct.unpack(
'd', struct.pack(
'Q', value.GetData().uint64[0]))[0]
"value", self.offset(), self.sys_params.types_cache.double
)
value_double = struct.unpack("d", struct.pack("Q", value.GetData().uint64[0]))[
0
]
return xcode_format_count(osx_to_python_time(value_double))
class NSTimeZoneClass_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -169,8 +173,9 @@ class NSTimeZoneClass_SummaryProvider:
self.valobj = valobj
self.sys_params = params
if not (self.sys_params.types_cache.voidptr):
self.sys_params.types_cache.voidptr = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeVoid).GetPointerType()
self.sys_params.types_cache.voidptr = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeVoid).GetPointerType()
)
self.update()
def update(self):
@@ -184,12 +189,12 @@ class NSTimeZoneClass_SummaryProvider:
def timezone(self):
logger = lldb.formatters.Logger.Logger()
tz_string = self.valobj.CreateChildAtOffset(
"tz_name", self.offset(), self.sys_params.types_cache.voidptr)
"tz_name", self.offset(), self.sys_params.types_cache.voidptr
)
return CFString.CFString_SummaryProvider(tz_string, None)
class NSUnknownDate_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -210,43 +215,51 @@ class NSUnknownDate_SummaryProvider:
num_children_vo = self.valobj.CreateValueFromExpression("str", expr)
if num_children_vo.IsValid():
return num_children_vo.GetSummary()
return '<variable is not NSDate>'
return "<variable is not NSDate>"
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSDate' or name_string == '__NSDate' or name_string == '__NSTaggedDate':
if (
name_string == "NSDate"
or name_string == "__NSDate"
or name_string == "__NSTaggedDate"
):
if class_data.is_tagged():
wrapper = NSTaggedDate_SummaryProvider(
valobj, class_data.info_bits(), class_data.value(), class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
valobj,
class_data.info_bits(),
class_data.value(),
class_data.sys_params,
)
statistics.metric_hit("code_notrun", valobj)
else:
wrapper = NSUntaggedDate_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
elif name_string == 'NSCalendarDate':
wrapper = NSUntaggedDate_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit("code_notrun", valobj)
elif name_string == "NSCalendarDate":
wrapper = NSCalendarDate_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
elif name_string == '__NSTimeZone':
wrapper = NSTimeZoneClass_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
statistics.metric_hit("code_notrun", valobj)
elif name_string == "__NSTimeZone":
wrapper = NSTimeZoneClass_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit("code_notrun", valobj)
else:
wrapper = NSUnknownDate_SummaryProvider(valobj)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
"unknown_class", valobj.GetName() + " seen as " + name_string
)
return wrapper
@@ -255,17 +268,17 @@ def NSDate_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.value()
except:
summary = None
if summary is None:
summary = '<variable is not NSDate>'
summary = "<variable is not NSDate>"
return str(summary)
return 'Summary Unavailable'
return "Summary Unavailable"
def NSTimeZone_SummaryProvider(valobj, dict):
@@ -273,8 +286,8 @@ def NSTimeZone_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.timezone()
@@ -282,26 +295,27 @@ def NSTimeZone_SummaryProvider(valobj, dict):
summary = None
logger >> "got summary " + str(summary)
if summary is None:
summary = '<variable is not NSTimeZone>'
summary = "<variable is not NSTimeZone>"
return str(summary)
return 'Summary Unavailable'
return "Summary Unavailable"
def CFAbsoluteTime_SummaryProvider(valobj, dict):
logger = lldb.formatters.Logger.Logger()
try:
value_double = struct.unpack(
'd', struct.pack(
'Q', valobj.GetData().uint64[0]))[0]
value_double = struct.unpack("d", struct.pack("Q", valobj.GetData().uint64[0]))[
0
]
return xcode_format_count(osx_to_python_time(value_double))
except:
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand("type summary add -F NSDate.NSDate_SummaryProvider NSDate")
debugger.HandleCommand(
"type summary add -F NSDate.NSDate_SummaryProvider NSDate")
"type summary add -F NSDate.CFAbsoluteTime_SummaryProvider CFAbsoluteTime"
)
debugger.HandleCommand(
"type summary add -F NSDate.CFAbsoluteTime_SummaryProvider CFAbsoluteTime")
debugger.HandleCommand(
"type summary add -F NSDate.NSTimeZone_SummaryProvider NSTimeZone CFTimeZoneRef")
"type summary add -F NSDate.NSTimeZone_SummaryProvider NSTimeZone CFTimeZoneRef"
)

View File

@@ -13,14 +13,13 @@ import lldb
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
class NSKnownException_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -29,8 +28,9 @@ class NSKnownException_SummaryProvider:
self.valobj = valobj
self.sys_params = params
if not (self.sys_params.types_cache.id):
self.sys_params.types_cache.id = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeObjCID)
self.sys_params.types_cache.id = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeObjCID
)
self.update()
def update(self):
@@ -48,15 +48,20 @@ class NSKnownException_SummaryProvider:
def description(self):
logger = lldb.formatters.Logger.Logger()
name_ptr = self.valobj.CreateChildAtOffset(
"name", self.offset_name(), self.sys_params.types_cache.id)
"name", self.offset_name(), self.sys_params.types_cache.id
)
reason_ptr = self.valobj.CreateChildAtOffset(
"reason", self.offset_reason(), self.sys_params.types_cache.id)
return 'name:' + CFString.CFString_SummaryProvider(
name_ptr, None) + ' reason:' + CFString.CFString_SummaryProvider(reason_ptr, None)
"reason", self.offset_reason(), self.sys_params.types_cache.id
)
return (
"name:"
+ CFString.CFString_SummaryProvider(name_ptr, None)
+ " reason:"
+ CFString.CFString_SummaryProvider(reason_ptr, None)
)
class NSUnknownException_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -75,38 +80,43 @@ class NSUnknownException_SummaryProvider:
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
name_vo = self.valobj.CreateValueFromExpression(
"name", "(NSString*)[" + stream.GetData() + " name]")
"name", "(NSString*)[" + stream.GetData() + " name]"
)
reason_vo = self.valobj.CreateValueFromExpression(
"reason", "(NSString*)[" + stream.GetData() + " reason]")
"reason", "(NSString*)[" + stream.GetData() + " reason]"
)
if name_vo.IsValid() and reason_vo.IsValid():
return CFString.CFString_SummaryProvider(
name_vo, None) + ' ' + CFString.CFString_SummaryProvider(reason_vo, None)
return '<variable is not NSException>'
return (
CFString.CFString_SummaryProvider(name_vo, None)
+ " "
+ CFString.CFString_SummaryProvider(reason_vo, None)
)
return "<variable is not NSException>"
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSException':
wrapper = NSKnownException_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
if name_string == "NSException":
wrapper = NSKnownException_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit("code_notrun", valobj)
else:
wrapper = NSUnknownException_SummaryProvider(
valobj, class_data.sys_params)
wrapper = NSUnknownException_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
"unknown_class", valobj.GetName() + " seen as " + name_string
)
return wrapper
@@ -115,8 +125,8 @@ def NSException_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.description()
@@ -124,11 +134,12 @@ def NSException_SummaryProvider(valobj, dict):
summary = None
logger >> "got summary " + str(summary)
if summary is None:
summary = '<variable is not NSException>'
summary = "<variable is not NSException>"
return str(summary)
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F NSException.NSException_SummaryProvider NSException")
"type summary add -F NSException.NSException_SummaryProvider NSException"
)

View File

@@ -14,10 +14,10 @@ import lldb.formatters.metrics
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but the count of values for an NSIndexSet, so they need not
@@ -25,7 +25,6 @@ statistics.add_metric('code_notrun')
class NSIndexSetClass_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -33,20 +32,25 @@ class NSIndexSetClass_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if not (self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.uint32 = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
)
self.sys_params.types_cache.uint32 = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeUnsignedInt
)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.uint32 = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
if not(self.sys_params.types_cache.uint32):
self.sys_params.types_cache.uint32 = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
)
self.sys_params.types_cache.uint32 = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeUnsignedInt
)
if not (self.sys_params.types_cache.uint32):
self.sys_params.types_cache.uint32 = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeUnsignedInt
)
self.update()
def update(self):
@@ -64,7 +68,8 @@ class NSIndexSetClass_SummaryProvider:
mode_chooser_vo = self.valobj.CreateChildAtOffset(
"mode_chooser",
self.sys_params.pointer_size,
self.sys_params.types_cache.uint32)
self.sys_params.types_cache.uint32,
)
mode_chooser = mode_chooser_vo.GetValueAsUnsigned(0)
if self.sys_params.is_64_bit:
mode_chooser = mode_chooser & 0x00000000FFFFFFFF
@@ -81,23 +86,23 @@ class NSIndexSetClass_SummaryProvider:
count_vo = self.valobj.CreateChildAtOffset(
"count",
3 * self.sys_params.pointer_size,
self.sys_params.types_cache.NSUInteger)
self.sys_params.types_cache.NSUInteger,
)
else:
count_ptr = self.valobj.CreateChildAtOffset(
"count_ptr",
2 * self.sys_params.pointer_size,
self.sys_params.types_cache.NSUInteger)
self.sys_params.types_cache.NSUInteger,
)
count_vo = self.valobj.CreateValueFromAddress(
"count",
count_ptr.GetValueAsUnsigned() +
2 *
self.sys_params.pointer_size,
self.sys_params.types_cache.NSUInteger)
count_ptr.GetValueAsUnsigned() + 2 * self.sys_params.pointer_size,
self.sys_params.types_cache.NSUInteger,
)
return count_vo.GetValueAsUnsigned(0)
class NSIndexSetUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -119,32 +124,32 @@ class NSIndexSetUnknown_SummaryProvider:
num_children_vo = self.valobj.CreateValueFromExpression("count", expr)
if num_children_vo.IsValid():
return num_children_vo.GetValueAsUnsigned(0)
return '<variable is not NSIndexSet>'
return "<variable is not NSIndexSet>"
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSIndexSet' or name_string == 'NSMutableIndexSet':
wrapper = NSIndexSetClass_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
if name_string == "NSIndexSet" or name_string == "NSMutableIndexSet":
wrapper = NSIndexSetClass_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit("code_notrun", valobj)
else:
wrapper = NSIndexSetUnknown_SummaryProvider(
valobj, class_data.sys_params)
wrapper = NSIndexSetUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
"unknown_class", valobj.GetName() + " seen as " + name_string
)
return wrapper
@@ -153,8 +158,8 @@ def NSIndexSet_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.count()
@@ -162,15 +167,16 @@ def NSIndexSet_SummaryProvider(valobj, dict):
summary = None
logger >> "got summary " + str(summary)
if summary is None:
summary = '<variable is not NSIndexSet>'
summary = "<variable is not NSIndexSet>"
if isinstance(summary, str):
return summary
else:
summary = str(summary) + (' indexes' if summary != 1 else ' index')
summary = str(summary) + (" indexes" if summary != 1 else " index")
return summary
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F NSIndexSet.NSIndexSet_SummaryProvider NSIndexSet NSMutableIndexSet")
"type summary add -F NSIndexSet.NSIndexSet_SummaryProvider NSIndexSet NSMutableIndexSet"
)

View File

@@ -14,10 +14,10 @@ import lldb.formatters.metrics
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but the port number of an NSMachPort, so they need not
@@ -25,7 +25,6 @@ statistics.add_metric('code_notrun')
class NSMachPortKnown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -33,13 +32,15 @@ class NSMachPortKnown_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if not (self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
)
self.update()
def update(self):
@@ -59,12 +60,12 @@ class NSMachPortKnown_SummaryProvider:
def port(self):
logger = lldb.formatters.Logger.Logger()
vport = self.valobj.CreateChildAtOffset(
"port", self.offset(), self.sys_params.types_cache.NSUInteger)
"port", self.offset(), self.sys_params.types_cache.NSUInteger
)
return vport.GetValueAsUnsigned(0)
class NSMachPortUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -83,35 +84,36 @@ class NSMachPortUnknown_SummaryProvider:
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
num_children_vo = self.valobj.CreateValueFromExpression(
"port", "(int)[" + stream.GetData() + " machPort]")
"port", "(int)[" + stream.GetData() + " machPort]"
)
if num_children_vo.IsValid():
return num_children_vo.GetValueAsUnsigned(0)
return '<variable is not NSMachPort>'
return "<variable is not NSMachPort>"
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSMachPort':
wrapper = NSMachPortKnown_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
if name_string == "NSMachPort":
wrapper = NSMachPortKnown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit("code_notrun", valobj)
else:
wrapper = NSMachPortUnknown_SummaryProvider(
valobj, class_data.sys_params)
wrapper = NSMachPortUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
"unknown_class", valobj.GetName() + " seen as " + name_string
)
return wrapper
@@ -120,8 +122,8 @@ def NSMachPort_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.port()
@@ -129,13 +131,14 @@ def NSMachPort_SummaryProvider(valobj, dict):
summary = None
logger >> "got summary " + str(summary)
if summary is None:
summary = '<variable is not NSMachPort>'
summary = "<variable is not NSMachPort>"
if isinstance(summary, str):
            return summary
return 'mach port: ' + str(summary)
return 'Summary Unavailable'
return "mach port: " + str(summary)
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F NSMachPort.NSMachPort_SummaryProvider NSMachPort")
"type summary add -F NSMachPort.NSMachPort_SummaryProvider NSMachPort"
)

View File

@@ -14,14 +14,13 @@ import lldb
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
class NSConcreteNotification_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -30,8 +29,9 @@ class NSConcreteNotification_SummaryProvider:
self.valobj = valobj
self.sys_params = params
if not (self.sys_params.types_cache.id):
self.sys_params.types_cache.id = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeObjCID)
self.sys_params.types_cache.id = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeObjCID
)
self.update()
def update(self):
@@ -46,12 +46,12 @@ class NSConcreteNotification_SummaryProvider:
def name(self):
logger = lldb.formatters.Logger.Logger()
string_ptr = self.valobj.CreateChildAtOffset(
"name", self.offset(), self.sys_params.types_cache.id)
"name", self.offset(), self.sys_params.types_cache.id
)
return CFString.CFString_SummaryProvider(string_ptr, None)
class NSNotificationUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -70,35 +70,36 @@ class NSNotificationUnknown_SummaryProvider:
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
name_vo = self.valobj.CreateValueFromExpression(
"name", "(NSString*)[" + stream.GetData() + " name]")
"name", "(NSString*)[" + stream.GetData() + " name]"
)
if name_vo.IsValid():
return CFString.CFString_SummaryProvider(name_vo, None)
return '<variable is not NSNotification>'
return "<variable is not NSNotification>"
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSConcreteNotification':
wrapper = NSConcreteNotification_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
if name_string == "NSConcreteNotification":
wrapper = NSConcreteNotification_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit("code_notrun", valobj)
else:
wrapper = NSNotificationUnknown_SummaryProvider(
valobj, class_data.sys_params)
wrapper = NSNotificationUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
"unknown_class", valobj.GetName() + " seen as " + name_string
)
return wrapper
@@ -107,8 +108,8 @@ def NSNotification_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.name()
@@ -116,11 +117,12 @@ def NSNotification_SummaryProvider(valobj, dict):
summary = None
logger >> "got summary " + str(summary)
if summary is None:
summary = '<variable is not NSNotification>'
summary = "<variable is not NSNotification>"
return str(summary)
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F NSNotification.NSNotification_SummaryProvider NSNotification")
"type summary add -F NSNotification.NSNotification_SummaryProvider NSNotification"
)

View File

@@ -16,10 +16,10 @@ import struct
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but the port number of an NSNumber, so they need not
@@ -27,7 +27,6 @@ statistics.add_metric('code_notrun')
class NSTaggedNumber_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -50,23 +49,26 @@ class NSTaggedNumber_SummaryProvider:
# unfortunately, the original type information appears to be lost
# so we try to at least recover the proper magnitude of the data
if self.info_bits == 0:
return '(char)' + \
str(ord(ctypes.c_char(chr(self.data % 256)).value))
return "(char)" + str(ord(ctypes.c_char(chr(self.data % 256)).value))
if self.info_bits == 4:
return '(short)' + \
str(ctypes.c_short(self.data % (256 * 256)).value)
return "(short)" + str(ctypes.c_short(self.data % (256 * 256)).value)
if self.info_bits == 8:
return '(int)' + str(ctypes.c_int(self.data %
(256 * 256 * 256 * 256)).value)
return "(int)" + str(
ctypes.c_int(self.data % (256 * 256 * 256 * 256)).value
)
if self.info_bits == 12:
return '(long)' + str(ctypes.c_long(self.data).value)
return "(long)" + str(ctypes.c_long(self.data).value)
else:
return 'unexpected value:(info=' + str(self.info_bits) + \
", value = " + str(self.data) + ')'
return (
"unexpected value:(info="
+ str(self.info_bits)
+ ", value = "
+ str(self.data)
+ ")"
)
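The ctypes casts above recover a signed value of the right width from the unsigned tagged-pointer payload. A quick standalone illustration with a made-up payload:

import ctypes

payload = 0xFFFE                                       # pretend low 16 bits of a tagged NSNumber
print(ctypes.c_short(payload % (256 * 256)).value)     # -2: reinterpreted as a signed short
print(ctypes.c_int(payload).value)                     # 65534: unchanged as a 32-bit int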
class NSUntaggedNumber_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -74,35 +76,46 @@ class NSUntaggedNumber_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.char):
self.sys_params.types_cache.char = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeChar)
if not(self.sys_params.types_cache.short):
self.sys_params.types_cache.short = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeShort)
if not(self.sys_params.types_cache.ushort):
self.sys_params.types_cache.ushort = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedShort)
if not(self.sys_params.types_cache.int):
self.sys_params.types_cache.int = self.valobj.GetType().GetBasicType(lldb.eBasicTypeInt)
if not(self.sys_params.types_cache.long):
self.sys_params.types_cache.long = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeLong)
if not(self.sys_params.types_cache.ulong):
self.sys_params.types_cache.ulong = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
if not(self.sys_params.types_cache.longlong):
self.sys_params.types_cache.longlong = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeLongLong)
if not(self.sys_params.types_cache.ulonglong):
self.sys_params.types_cache.ulonglong = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLongLong)
if not(self.sys_params.types_cache.float):
self.sys_params.types_cache.float = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeFloat)
if not(self.sys_params.types_cache.double):
self.sys_params.types_cache.double = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeDouble)
if not (self.sys_params.types_cache.char):
self.sys_params.types_cache.char = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeChar
)
if not (self.sys_params.types_cache.short):
self.sys_params.types_cache.short = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeShort
)
if not (self.sys_params.types_cache.ushort):
self.sys_params.types_cache.ushort = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeUnsignedShort
)
if not (self.sys_params.types_cache.int):
self.sys_params.types_cache.int = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeInt
)
if not (self.sys_params.types_cache.long):
self.sys_params.types_cache.long = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeLong
)
if not (self.sys_params.types_cache.ulong):
self.sys_params.types_cache.ulong = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeUnsignedLong
)
if not (self.sys_params.types_cache.longlong):
self.sys_params.types_cache.longlong = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeLongLong
)
if not (self.sys_params.types_cache.ulonglong):
self.sys_params.types_cache.ulonglong = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeUnsignedLongLong
)
if not (self.sys_params.types_cache.float):
self.sys_params.types_cache.float = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeFloat
)
if not (self.sys_params.types_cache.double):
self.sys_params.types_cache.double = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeDouble
)
self.update()
def update(self):
@@ -117,73 +130,81 @@ class NSUntaggedNumber_SummaryProvider:
# if we are fetching an int64 value, one more pointer must be skipped
# to get at our data
data_type_vo = self.valobj.CreateChildAtOffset(
"dt", self.sys_params.pointer_size, self.sys_params.types_cache.char)
data_type = ((data_type_vo.GetValueAsUnsigned(0) % 256) & 0x1F)
"dt", self.sys_params.pointer_size, self.sys_params.types_cache.char
)
data_type = (data_type_vo.GetValueAsUnsigned(0) % 256) & 0x1F
data_offset = 2 * self.sys_params.pointer_size
if data_type == 0B00001:
if data_type == 0b00001:
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.char)
statistics.metric_hit('code_notrun', self.valobj)
return '(char)' + \
str(ord(ctypes.c_char(chr(data_vo.GetValueAsUnsigned(0))).value))
elif data_type == 0B0010:
"data", data_offset, self.sys_params.types_cache.char
)
statistics.metric_hit("code_notrun", self.valobj)
return "(char)" + str(
ord(ctypes.c_char(chr(data_vo.GetValueAsUnsigned(0))).value)
)
elif data_type == 0b0010:
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.short)
statistics.metric_hit('code_notrun', self.valobj)
return '(short)' + str(
ctypes.c_short(
data_vo.GetValueAsUnsigned(0) %
(256 * 256)).value)
"data", data_offset, self.sys_params.types_cache.short
)
statistics.metric_hit("code_notrun", self.valobj)
return "(short)" + str(
ctypes.c_short(data_vo.GetValueAsUnsigned(0) % (256 * 256)).value
)
# IF tagged pointers are possible on 32bit+v2 runtime
# (of which the only existing instance should be iOS)
# then values of this type might be tagged
elif data_type == 0B0011:
elif data_type == 0b0011:
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.int)
statistics.metric_hit('code_notrun', self.valobj)
return '(int)' + str(ctypes.c_int(data_vo.GetValueAsUnsigned(0) %
(256 * 256 * 256 * 256)).value)
"data", data_offset, self.sys_params.types_cache.int
)
statistics.metric_hit("code_notrun", self.valobj)
return "(int)" + str(
ctypes.c_int(
data_vo.GetValueAsUnsigned(0) % (256 * 256 * 256 * 256)
).value
)
# apparently, on is_64_bit architectures, these are the only values that will ever
# be represented by a non tagged pointers
elif data_type == 0B10001:
elif data_type == 0b10001:
data_offset = data_offset + 8 # 8 is needed even if we are on 32bit
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.longlong)
statistics.metric_hit('code_notrun', self.valobj)
return '(long)' + \
str(ctypes.c_long(data_vo.GetValueAsUnsigned(0)).value)
elif data_type == 0B0100:
"data", data_offset, self.sys_params.types_cache.longlong
)
statistics.metric_hit("code_notrun", self.valobj)
return "(long)" + str(ctypes.c_long(data_vo.GetValueAsUnsigned(0)).value)
elif data_type == 0b0100:
if self.sys_params.is_64_bit:
data_offset = data_offset + self.sys_params.pointer_size
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.longlong)
statistics.metric_hit('code_notrun', self.valobj)
return '(long)' + \
str(ctypes.c_long(data_vo.GetValueAsUnsigned(0)).value)
elif data_type == 0B0101:
"data", data_offset, self.sys_params.types_cache.longlong
)
statistics.metric_hit("code_notrun", self.valobj)
return "(long)" + str(ctypes.c_long(data_vo.GetValueAsUnsigned(0)).value)
elif data_type == 0b0101:
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.longlong)
data_plain = int(
str(data_vo.GetValueAsUnsigned(0) & 0x00000000FFFFFFFF))
packed = struct.pack('I', data_plain)
data_float = struct.unpack('f', packed)[0]
statistics.metric_hit('code_notrun', self.valobj)
return '(float)' + str(data_float)
elif data_type == 0B0110:
"data", data_offset, self.sys_params.types_cache.longlong
)
data_plain = int(str(data_vo.GetValueAsUnsigned(0) & 0x00000000FFFFFFFF))
packed = struct.pack("I", data_plain)
data_float = struct.unpack("f", packed)[0]
statistics.metric_hit("code_notrun", self.valobj)
return "(float)" + str(data_float)
elif data_type == 0b0110:
data_vo = self.valobj.CreateChildAtOffset(
"data", data_offset, self.sys_params.types_cache.longlong)
"data", data_offset, self.sys_params.types_cache.longlong
)
data_plain = data_vo.GetValueAsUnsigned(0)
data_double = struct.unpack('d', struct.pack('Q', data_plain))[0]
statistics.metric_hit('code_notrun', self.valobj)
return '(double)' + str(data_double)
data_double = struct.unpack("d", struct.pack("Q", data_plain))[0]
statistics.metric_hit("code_notrun", self.valobj)
return "(double)" + str(data_double)
statistics.metric_hit(
'unknown_class', str(
valobj.GetName()) + " had unknown data_type " + str(data_type))
return 'unexpected: dt = ' + str(data_type)
"unknown_class",
str(valobj.GetName()) + " had unknown data_type " + str(data_type),
)
return "unexpected: dt = " + str(data_type)
class NSUnknownNumber_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -205,38 +226,42 @@ class NSUnknownNumber_SummaryProvider:
num_children_vo = self.valobj.CreateValueFromExpression("str", expr)
if num_children_vo.IsValid():
return num_children_vo.GetSummary()
return '<variable is not NSNumber>'
return "<variable is not NSNumber>"
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSNumber' or name_string == '__NSCFNumber':
if name_string == "NSNumber" or name_string == "__NSCFNumber":
if class_data.is_tagged():
wrapper = NSTaggedNumber_SummaryProvider(
valobj, class_data.info_bits(), class_data.value(), class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
valobj,
class_data.info_bits(),
class_data.value(),
class_data.sys_params,
)
statistics.metric_hit("code_notrun", valobj)
else:
# the wrapper might be unable to decipher what is into the NSNumber
# and then have to run code on it
wrapper = NSUntaggedNumber_SummaryProvider(
valobj, class_data.sys_params)
wrapper = NSUntaggedNumber_SummaryProvider(valobj, class_data.sys_params)
else:
wrapper = NSUnknownNumber_SummaryProvider(
valobj, class_data.sys_params)
wrapper = NSUnknownNumber_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
"unknown_class", valobj.GetName() + " seen as " + name_string
)
return wrapper
@@ -245,26 +270,29 @@ def NSNumber_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.value()
except Exception as foo:
print(foo)
# except:
# except:
summary = None
logger >> "got summary " + str(summary)
if summary is None:
summary = '<variable is not NSNumber>'
summary = "<variable is not NSNumber>"
return str(summary)
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F NSNumber.NSNumber_SummaryProvider NSNumber")
"type summary add -F NSNumber.NSNumber_SummaryProvider NSNumber"
)
debugger.HandleCommand(
"type summary add -F NSNumber.NSNumber_SummaryProvider __NSCFBoolean")
"type summary add -F NSNumber.NSNumber_SummaryProvider __NSCFBoolean"
)
debugger.HandleCommand(
"type summary add -F NSNumber.NSNumber_SummaryProvider __NSCFNumber")
"type summary add -F NSNumber.NSNumber_SummaryProvider __NSCFNumber"
)
View File
@@ -14,10 +14,10 @@ import CFBag
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but a summary for an NSSet, so they need not
@@ -25,7 +25,6 @@ statistics.add_metric('code_notrun')
class NSCFSet_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -33,13 +32,15 @@ class NSCFSet_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if not (self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
)
self.update()
def update(self):
@@ -59,12 +60,12 @@ class NSCFSet_SummaryProvider:
def count(self):
logger = lldb.formatters.Logger.Logger()
vcount = self.valobj.CreateChildAtOffset(
"count", self.offset(), self.sys_params.types_cache.NSUInteger)
"count", self.offset(), self.sys_params.types_cache.NSUInteger
)
return vcount.GetValueAsUnsigned(0)
class NSSetUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -86,11 +87,10 @@ class NSSetUnknown_SummaryProvider:
num_children_vo = self.valobj.CreateValueFromExpression("count", expr)
if num_children_vo.IsValid():
return num_children_vo.GetValueAsUnsigned(0)
return '<variable is not NSSet>'
return "<variable is not NSSet>"
class NSSetI_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -98,13 +98,15 @@ class NSSetI_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if not (self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
)
self.update()
def update(self):
@@ -119,7 +121,8 @@ class NSSetI_SummaryProvider:
def count(self):
logger = lldb.formatters.Logger.Logger()
num_children_vo = self.valobj.CreateChildAtOffset(
"count", self.offset(), self.sys_params.types_cache.NSUInteger)
"count", self.offset(), self.sys_params.types_cache.NSUInteger
)
value = num_children_vo.GetValueAsUnsigned(0)
if value is not None:
# the MSB on immutable sets seems to be taken by some other data
@@ -134,7 +137,6 @@ class NSSetI_SummaryProvider:
class NSSetM_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -142,13 +144,15 @@ class NSSetM_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if not (self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.sys_params.types_cache.NSUInteger = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
)
self.update()
def update(self):
@@ -163,12 +167,12 @@ class NSSetM_SummaryProvider:
def count(self):
logger = lldb.formatters.Logger.Logger()
num_children_vo = self.valobj.CreateChildAtOffset(
"count", self.offset(), self.sys_params.types_cache.NSUInteger)
"count", self.offset(), self.sys_params.types_cache.NSUInteger
)
return num_children_vo.GetValueAsUnsigned(0)
class NSCountedSet_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -177,8 +181,9 @@ class NSCountedSet_SummaryProvider:
self.valobj = valobj
self.sys_params = params
if not (self.sys_params.types_cache.voidptr):
self.sys_params.types_cache.voidptr = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeVoid).GetPointerType()
self.sys_params.types_cache.voidptr = (
self.valobj.GetType().GetBasicType(lldb.eBasicTypeVoid).GetPointerType()
)
self.update()
def update(self):
@@ -194,41 +199,43 @@ class NSCountedSet_SummaryProvider:
def count(self):
logger = lldb.formatters.Logger.Logger()
cfbag_vo = self.valobj.CreateChildAtOffset(
"bag_impl", self.offset(), self.sys_params.types_cache.voidptr)
return CFBag.CFBagRef_SummaryProvider(
cfbag_vo, self.sys_params).length()
"bag_impl", self.offset(), self.sys_params.types_cache.voidptr
)
return CFBag.CFBagRef_SummaryProvider(cfbag_vo, self.sys_params).length()
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == '__NSCFSet':
if name_string == "__NSCFSet":
wrapper = NSCFSet_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
elif name_string == '__NSSetI':
statistics.metric_hit("code_notrun", valobj)
elif name_string == "__NSSetI":
wrapper = NSSetI_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
elif name_string == '__NSSetM':
statistics.metric_hit("code_notrun", valobj)
elif name_string == "__NSSetM":
wrapper = NSSetM_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
elif name_string == 'NSCountedSet':
statistics.metric_hit("code_notrun", valobj)
elif name_string == "NSCountedSet":
wrapper = NSCountedSet_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
statistics.metric_hit("code_notrun", valobj)
else:
wrapper = NSSetUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
"unknown_class", valobj.GetName() + " seen as " + name_string
)
return wrapper
@@ -241,14 +248,13 @@ def NSSet_SummaryProvider(valobj, dict):
except:
summary = None
if summary is None:
summary = '<variable is not NSSet>'
summary = "<variable is not NSSet>"
if isinstance(summary, str):
return summary
else:
summary = str(summary) + \
(' objects' if summary != 1 else ' object')
summary = str(summary) + (" objects" if summary != 1 else " object")
return summary
return 'Summary Unavailable'
return "Summary Unavailable"
def NSSet_SummaryProvider2(valobj, dict):
@@ -256,8 +262,8 @@ def NSSet_SummaryProvider2(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.count()
@@ -270,20 +276,19 @@ def NSSet_SummaryProvider2(valobj, dict):
# experimentation (if counts start looking weird, then most probably
# the mask needs to be changed)
if summary is None:
summary = '<variable is not CFSet>'
summary = "<variable is not CFSet>"
if isinstance(summary, str):
return summary
else:
if provider.sys_params.is_64_bit:
summary = summary & ~0x1fff000000000000
summary = '@"' + str(summary) + \
(' values"' if summary != 1 else ' value"')
summary = summary & ~0x1FFF000000000000
summary = '@"' + str(summary) + (' values"' if summary != 1 else ' value"')
return summary
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand("type summary add -F NSSet.NSSet_SummaryProvider NSSet")
debugger.HandleCommand(
"type summary add -F NSSet.NSSet_SummaryProvider NSSet")
debugger.HandleCommand(
"type summary add -F NSSet.NSSet_SummaryProvider2 CFSetRef CFMutableSetRef")
"type summary add -F NSSet.NSSet_SummaryProvider2 CFSetRef CFMutableSetRef"
)
View File
@@ -14,10 +14,10 @@ import CFString
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
statistics.add_metric("invalid_isa")
statistics.add_metric("invalid_pointer")
statistics.add_metric("unknown_class")
statistics.add_metric("code_notrun")
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but a summary for an NSURL, so they need not
@@ -25,7 +25,6 @@ statistics.add_metric('code_notrun')
class NSURLKnown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -33,12 +32,14 @@ class NSURLKnown_SummaryProvider:
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSString):
self.sys_params.types_cache.NSString = self.valobj.GetTarget(
).FindFirstType('NSString').GetPointerType()
if not(self.sys_params.types_cache.NSURL):
self.sys_params.types_cache.NSURL = self.valobj.GetTarget(
).FindFirstType('NSURL').GetPointerType()
if not (self.sys_params.types_cache.NSString):
self.sys_params.types_cache.NSString = (
self.valobj.GetTarget().FindFirstType("NSString").GetPointerType()
)
if not (self.sys_params.types_cache.NSURL):
self.sys_params.types_cache.NSURL = (
self.valobj.GetTarget().FindFirstType("NSURL").GetPointerType()
)
self.update()
def update(self):
@@ -63,14 +64,16 @@ class NSURLKnown_SummaryProvider:
def url_text(self):
logger = lldb.formatters.Logger.Logger()
text = self.valobj.CreateChildAtOffset(
"text", self.offset_text(), self.sys_params.types_cache.NSString)
"text", self.offset_text(), self.sys_params.types_cache.NSString
)
base = self.valobj.CreateChildAtOffset(
"base", self.offset_base(), self.sys_params.types_cache.NSURL)
"base", self.offset_base(), self.sys_params.types_cache.NSURL
)
my_string = CFString.CFString_SummaryProvider(text, None)
if len(my_string) > 0 and base.GetValueAsUnsigned(0) != 0:
# remove final " from myself
my_string = my_string[0:len(my_string) - 1]
my_string = my_string + ' -- '
my_string = my_string[0 : len(my_string) - 1]
my_string = my_string + " -- "
my_base_string = NSURL_SummaryProvider(base, None)
if len(my_base_string) > 2:
# remove @" marker from base URL string
@@ -80,7 +83,6 @@ class NSURLKnown_SummaryProvider:
class NSURLUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
@@ -99,33 +101,36 @@ class NSURLUnknown_SummaryProvider:
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
url_text_vo = self.valobj.CreateValueFromExpression(
"url", "(NSString*)[" + stream.GetData() + " description]")
"url", "(NSString*)[" + stream.GetData() + " description]"
)
if url_text_vo.IsValid():
return CFString.CFString_SummaryProvider(url_text_vo, None)
return '<variable is not NSURL>'
return "<variable is not NSURL>"
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
(
class_data,
wrapper,
) = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics
)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSURL':
if name_string == "NSURL":
wrapper = NSURLKnown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
statistics.metric_hit("code_notrun", valobj)
else:
wrapper = NSURLUnknown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
"unknown_class", valobj.GetName() + " seen as " + name_string
)
return wrapper
@@ -134,20 +139,21 @@ def NSURL_SummaryProvider(valobj, dict):
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
provider, lldb.runtime.objc.objc_runtime.SpecialSituation_Description
):
return provider.message()
try:
summary = provider.url_text()
except:
summary = None
logger >> "got summary " + str(summary)
if summary is None or summary == '':
summary = '<variable is not NSURL>'
if summary is None or summary == "":
summary = "<variable is not NSURL>"
return summary
return 'Summary Unavailable'
return "Summary Unavailable"
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F NSURL.NSURL_SummaryProvider NSURL CFURLRef")
"type summary add -F NSURL.NSURL_SummaryProvider NSURL CFURLRef"
)
View File
@@ -9,11 +9,18 @@ import lldb
def SEL_Summary(valobj, dict):
return valobj.Cast(valobj.GetType().GetBasicType(
lldb.eBasicTypeChar).GetPointerType()).GetSummary()
return valobj.Cast(
valobj.GetType().GetBasicType(lldb.eBasicTypeChar).GetPointerType()
).GetSummary()
def SELPointer_Summary(valobj, dict):
return valobj.CreateValueFromAddress(
'text', valobj.GetValueAsUnsigned(0), valobj.GetType().GetBasicType(
lldb.eBasicTypeChar)).AddressOf().GetSummary()
return (
valobj.CreateValueFromAddress(
"text",
valobj.GetValueAsUnsigned(0),
valobj.GetType().GetBasicType(lldb.eBasicTypeChar),
)
.AddressOf()
.GetSummary()
)
View File
@@ -8,11 +8,10 @@ SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
class AttributesDictionary:
def __init__(self, allow_reset=True):
# need to do it this way to prevent endless recursion
self.__dict__['_dictionary'] = {}
self.__dict__['_allow_reset'] = allow_reset
self.__dict__["_dictionary"] = {}
self.__dict__["_allow_reset"] = allow_reset
def __getattr__(self, name):
if not self._check_exists(name):
View File
@@ -9,12 +9,11 @@ import lldb.formatters.metrics
class Cache:
def __init__(self):
self.data = {}
self.statistics = lldb.formatters.metrics.Metrics()
self.statistics.add_metric('hit')
self.statistics.add_metric('miss')
self.statistics.add_metric("hit")
self.statistics.add_metric("miss")
def look_for_key(self, key):
if key in self.data:
@@ -22,15 +21,15 @@ class Cache:
return False
def add_item(self, key, value, ok_to_replace=True):
if not(ok_to_replace) and self.look_for_key(key):
if not (ok_to_replace) and self.look_for_key(key):
return False
self.data[key] = value
return True
def get_value(self, key, default=None):
if self.look_for_key(key):
self.statistics.metric_hit('hit', key)
self.statistics.metric_hit("hit", key)
return self.data[key]
else:
self.statistics.metric_hit('miss', key)
self.statistics.metric_hit("miss", key)
return default
View File
@@ -12,7 +12,6 @@ import inspect
class TimeMetrics:
@staticmethod
def generate(label=None):
return TimeMetrics(label)
@@ -28,13 +27,17 @@ class TimeMetrics:
def __exit__(self, a, b, c):
self.exit_time = time.clock()
print("It took " + str(self.exit_time - self.enter_time) +
" time units to run through " + self.function + self.label)
print(
"It took "
+ str(self.exit_time - self.enter_time)
+ " time units to run through "
+ self.function
+ self.label
)
return False
class Counter:
def __init__(self):
self.count = 0
self.list = []
@@ -53,7 +56,6 @@ class Counter:
class MetricsPrinter_Verbose:
def __init__(self, metrics):
self.metrics = metrics
@@ -65,20 +67,24 @@ class MetricsPrinter_Verbose:
class MetricsPrinter_Compact:
def __init__(self, metrics):
self.metrics = metrics
def __str__(self):
string = ""
for key, value in self.metrics.metrics.items():
string = string + "metric " + \
str(key) + " was hit " + str(value.count) + " times\n"
string = (
string
+ "metric "
+ str(key)
+ " was hit "
+ str(value.count)
+ " times\n"
)
return string
class Metrics:
def __init__(self):
self.metrics = {}
@@ -92,12 +98,13 @@ class Metrics:
return self.metrics[key]
def __getattr__(self, name):
if name == 'compact':
if name == "compact":
return MetricsPrinter_Compact(self)
if name == 'verbose':
if name == "verbose":
return MetricsPrinter_Verbose(self)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, name))
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, name)
)
def __str__(self):
return str(self.verbose)
View File
@@ -13,7 +13,6 @@ import lldb.formatters.Logger
class Utilities:
@staticmethod
def read_ascii(process, pointer, max_len=128):
logger = lldb.formatters.Logger.Logger()
@@ -36,7 +35,7 @@ class Utilities:
return allow_NULL
if allow_tagged and (pointer % 2) == 1:
return 1
return ((pointer % pointer_size) == 0)
return (pointer % pointer_size) == 0
# Objective-C runtime has a rule that pointers in a class_t will only have bits 0 thru 46 set
# so if any pointer has bits 47 thru 63 high we know that this is not a
@@ -46,7 +45,7 @@ class Utilities:
logger = lldb.formatters.Logger.Logger()
if pointer is None:
return 0
return ((pointer & 0xFFFF800000000000) == 0)
return (pointer & 0xFFFF800000000000) == 0
@staticmethod
def read_child_of(valobj, offset, type):
@@ -71,7 +70,8 @@ class Utilities:
# WARNING: this means that you cannot use this runtime implementation if you need to deal
# with class names that use anything but what is allowed here
ok_values = dict.fromkeys(
"$%_.-ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890")
"$%_.-ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890"
)
return all(c in ok_values for c in name)
@staticmethod
@@ -80,7 +80,7 @@ class Utilities:
# assume the only thing that has a Foundation.framework is a Mac
# assume anything < Lion does not even exist
try:
mod = target.module['Foundation']
mod = target.module["Foundation"]
except:
mod = None
if mod is None or mod.IsValid() == 0:
@@ -88,7 +88,7 @@ class Utilities:
ver = mod.GetVersion()
if ver is None or ver == []:
return None
return (ver[0] < 900)
return ver[0] < 900
# a utility method that factors out code common to almost all the formatters
# takes in an SBValue and a metrics object
@@ -99,46 +99,47 @@ class Utilities:
logger = lldb.formatters.Logger.Logger()
class_data = ObjCRuntime(valobj)
if class_data.is_valid() == 0:
statistics.metric_hit('invalid_pointer', valobj)
wrapper = InvalidPointer_Description(
valobj.GetValueAsUnsigned(0) == 0)
statistics.metric_hit("invalid_pointer", valobj)
wrapper = InvalidPointer_Description(valobj.GetValueAsUnsigned(0) == 0)
return class_data, wrapper
class_data = class_data.read_class_data()
if class_data.is_valid() == 0:
statistics.metric_hit('invalid_isa', valobj)
statistics.metric_hit("invalid_isa", valobj)
wrapper = InvalidISA_Description()
return class_data, wrapper
if class_data.is_kvo():
class_data = class_data.get_superclass()
if class_data.class_name() == '_NSZombie_OriginalClass':
if class_data.class_name() == "_NSZombie_OriginalClass":
wrapper = ThisIsZombie_Description()
return class_data, wrapper
return class_data, None
class RoT_Data:
def __init__(self, rot_pointer, params):
logger = lldb.formatters.Logger.Logger()
if (Utilities.is_valid_pointer(rot_pointer.GetValueAsUnsigned(),
params.pointer_size, allow_tagged=0)):
if Utilities.is_valid_pointer(
rot_pointer.GetValueAsUnsigned(), params.pointer_size, allow_tagged=0
):
self.sys_params = params
self.valobj = rot_pointer
#self.flags = Utilities.read_child_of(self.valobj,0,self.sys_params.uint32_t)
#self.instanceStart = Utilities.read_child_of(self.valobj,4,self.sys_params.uint32_t)
# self.flags = Utilities.read_child_of(self.valobj,0,self.sys_params.uint32_t)
# self.instanceStart = Utilities.read_child_of(self.valobj,4,self.sys_params.uint32_t)
self.instanceSize = None # lazy fetching
offset = 24 if self.sys_params.is_64_bit else 16
#self.ivarLayoutPtr = Utilities.read_child_of(self.valobj,offset,self.sys_params.addr_ptr_type)
# self.ivarLayoutPtr = Utilities.read_child_of(self.valobj,offset,self.sys_params.addr_ptr_type)
self.namePointer = Utilities.read_child_of(
self.valobj, offset, self.sys_params.types_cache.addr_ptr_type)
self.valobj, offset, self.sys_params.types_cache.addr_ptr_type
)
self.valid = 1 # self.check_valid()
else:
logger >> "Marking as invalid - rot is invalid"
self.valid = 0
if self.valid:
self.name = Utilities.read_ascii(
self.valobj.GetTarget().GetProcess(), self.namePointer)
if not(Utilities.is_valid_identifier(self.name)):
self.valobj.GetTarget().GetProcess(), self.namePointer
)
if not (Utilities.is_valid_identifier(self.name)):
logger >> "Marking as invalid - name is invalid"
self.valid = 0
@@ -147,14 +148,20 @@ class RoT_Data:
self.valid = 1
# misaligned pointers seem to be possible for this field
# if not(Utilities.is_valid_pointer(self.namePointer,self.sys_params.pointer_size,allow_tagged=0)):
# self.valid = 0
# pass
# self.valid = 0
# pass
def __str__(self):
logger = lldb.formatters.Logger.Logger()
return \
"instanceSize = " + hex(self.instance_size()) + "\n" + \
"namePointer = " + hex(self.namePointer) + " --> " + self.name
return (
"instanceSize = "
+ hex(self.instance_size())
+ "\n"
+ "namePointer = "
+ hex(self.namePointer)
+ " --> "
+ self.name
)
def is_valid(self):
return self.valid
@@ -165,7 +172,8 @@ class RoT_Data:
return None
if self.instanceSize is None:
self.instanceSize = Utilities.read_child_of(
self.valobj, 8, self.sys_params.types_cache.uint32_t)
self.valobj, 8, self.sys_params.types_cache.uint32_t
)
if align:
unalign = self.instance_size(0)
if self.sys_params.is_64_bit:
@@ -177,45 +185,50 @@ class RoT_Data:
class RwT_Data:
def __init__(self, rwt_pointer, params):
logger = lldb.formatters.Logger.Logger()
if (Utilities.is_valid_pointer(rwt_pointer.GetValueAsUnsigned(),
params.pointer_size, allow_tagged=0)):
if Utilities.is_valid_pointer(
rwt_pointer.GetValueAsUnsigned(), params.pointer_size, allow_tagged=0
):
self.sys_params = params
self.valobj = rwt_pointer
#self.flags = Utilities.read_child_of(self.valobj,0,self.sys_params.uint32_t)
#self.version = Utilities.read_child_of(self.valobj,4,self.sys_params.uint32_t)
# self.flags = Utilities.read_child_of(self.valobj,0,self.sys_params.uint32_t)
# self.version = Utilities.read_child_of(self.valobj,4,self.sys_params.uint32_t)
self.roPointer = Utilities.read_child_of(
self.valobj, 8, self.sys_params.types_cache.addr_ptr_type)
self.valobj, 8, self.sys_params.types_cache.addr_ptr_type
)
self.check_valid()
else:
logger >> "Marking as invalid - rwt is invald"
self.valid = 0
if self.valid:
self.rot = self.valobj.CreateValueFromData(
"rot", lldb.SBData.CreateDataFromUInt64Array(
self.sys_params.endianness, self.sys_params.pointer_size, [
self.roPointer]), self.sys_params.types_cache.addr_ptr_type)
# self.rot = self.valobj.CreateValueFromAddress("rot",self.roPointer,self.sys_params.types_cache.addr_ptr_type).AddressOf()
"rot",
lldb.SBData.CreateDataFromUInt64Array(
self.sys_params.endianness,
self.sys_params.pointer_size,
[self.roPointer],
),
self.sys_params.types_cache.addr_ptr_type,
)
# self.rot = self.valobj.CreateValueFromAddress("rot",self.roPointer,self.sys_params.types_cache.addr_ptr_type).AddressOf()
self.data = RoT_Data(self.rot, self.sys_params)
# perform sanity checks on the contents of this class_rw_t
def check_valid(self):
logger = lldb.formatters.Logger.Logger()
self.valid = 1
if not(
if not (
Utilities.is_valid_pointer(
self.roPointer,
self.sys_params.pointer_size,
allow_tagged=0)):
self.roPointer, self.sys_params.pointer_size, allow_tagged=0
)
):
logger >> "Marking as invalid - ropointer is invalid"
self.valid = 0
def __str__(self):
logger = lldb.formatters.Logger.Logger()
return \
"roPointer = " + hex(self.roPointer)
return "roPointer = " + hex(self.roPointer)
def is_valid(self):
logger = lldb.formatters.Logger.Logger()
@@ -225,11 +238,13 @@ class RwT_Data:
class Class_Data_V2:
def __init__(self, isa_pointer, params):
logger = lldb.formatters.Logger.Logger()
if (isa_pointer is not None) and (Utilities.is_valid_pointer(
isa_pointer.GetValueAsUnsigned(), params.pointer_size, allow_tagged=0)):
if (isa_pointer is not None) and (
Utilities.is_valid_pointer(
isa_pointer.GetValueAsUnsigned(), params.pointer_size, allow_tagged=0
)
):
self.sys_params = params
self.valobj = isa_pointer
self.check_valid()
@@ -238,10 +253,15 @@ class Class_Data_V2:
self.valid = 0
if self.valid:
self.rwt = self.valobj.CreateValueFromData(
"rwt", lldb.SBData.CreateDataFromUInt64Array(
self.sys_params.endianness, self.sys_params.pointer_size, [
self.dataPointer]), self.sys_params.types_cache.addr_ptr_type)
# self.rwt = self.valobj.CreateValueFromAddress("rwt",self.dataPointer,self.sys_params.types_cache.addr_ptr_type).AddressOf()
"rwt",
lldb.SBData.CreateDataFromUInt64Array(
self.sys_params.endianness,
self.sys_params.pointer_size,
[self.dataPointer],
),
self.sys_params.types_cache.addr_ptr_type,
)
# self.rwt = self.valobj.CreateValueFromAddress("rwt",self.dataPointer,self.sys_params.types_cache.addr_ptr_type).AddressOf()
self.data = RwT_Data(self.rwt, self.sys_params)
# perform sanity checks on the contents of this class_t
@@ -252,16 +272,17 @@ class Class_Data_V2:
self.valid = 1
self.isaPointer = Utilities.read_child_of(
self.valobj, 0, self.sys_params.types_cache.addr_ptr_type)
if not(
self.valobj, 0, self.sys_params.types_cache.addr_ptr_type
)
if not (
Utilities.is_valid_pointer(
self.isaPointer,
self.sys_params.pointer_size,
allow_tagged=0)):
self.isaPointer, self.sys_params.pointer_size, allow_tagged=0
)
):
logger >> "Marking as invalid - isaPointer is invalid"
self.valid = 0
return
if not(Utilities.is_allowed_pointer(self.isaPointer)):
if not (Utilities.is_allowed_pointer(self.isaPointer)):
logger >> "Marking as invalid - isaPointer is not allowed"
self.valid = 0
return
@@ -269,32 +290,34 @@ class Class_Data_V2:
self.cachePointer = Utilities.read_child_of(
self.valobj,
2 * self.sys_params.pointer_size,
self.sys_params.types_cache.addr_ptr_type)
if not(
self.sys_params.types_cache.addr_ptr_type,
)
if not (
Utilities.is_valid_pointer(
self.cachePointer,
self.sys_params.pointer_size,
allow_tagged=0)):
self.cachePointer, self.sys_params.pointer_size, allow_tagged=0
)
):
logger >> "Marking as invalid - cachePointer is invalid"
self.valid = 0
return
if not(Utilities.is_allowed_pointer(self.cachePointer)):
if not (Utilities.is_allowed_pointer(self.cachePointer)):
logger >> "Marking as invalid - cachePointer is not allowed"
self.valid = 0
return
self.dataPointer = Utilities.read_child_of(
self.valobj,
4 * self.sys_params.pointer_size,
self.sys_params.types_cache.addr_ptr_type)
if not(
self.sys_params.types_cache.addr_ptr_type,
)
if not (
Utilities.is_valid_pointer(
self.dataPointer,
self.sys_params.pointer_size,
allow_tagged=0)):
self.dataPointer, self.sys_params.pointer_size, allow_tagged=0
)
):
logger >> "Marking as invalid - dataPointer is invalid"
self.valid = 0
return
if not(Utilities.is_allowed_pointer(self.dataPointer)):
if not (Utilities.is_allowed_pointer(self.dataPointer)):
logger >> "Marking as invalid - dataPointer is not allowed"
self.valid = 0
return
@@ -302,17 +325,20 @@ class Class_Data_V2:
self.superclassIsaPointer = Utilities.read_child_of(
self.valobj,
1 * self.sys_params.pointer_size,
self.sys_params.types_cache.addr_ptr_type)
if not(
self.sys_params.types_cache.addr_ptr_type,
)
if not (
Utilities.is_valid_pointer(
self.superclassIsaPointer,
self.sys_params.pointer_size,
allow_tagged=0,
allow_NULL=1)):
allow_NULL=1,
)
):
logger >> "Marking as invalid - superclassIsa is invalid"
self.valid = 0
return
if not(Utilities.is_allowed_pointer(self.superclassIsaPointer)):
if not (Utilities.is_allowed_pointer(self.superclassIsaPointer)):
logger >> "Marking as invalid - superclassIsa is not allowed"
self.valid = 0
return
@@ -335,13 +361,16 @@ class Class_Data_V2:
def is_cftype(self):
logger = lldb.formatters.Logger.Logger()
if self.is_valid():
return self.class_name() == '__NSCFType' or self.class_name() == 'NSCFType'
return self.class_name() == "__NSCFType" or self.class_name() == "NSCFType"
def get_superclass(self):
logger = lldb.formatters.Logger.Logger()
if self.is_valid():
parent_isa_pointer = self.valobj.CreateChildAtOffset(
"parent_isa", self.sys_params.pointer_size, self.sys_params.addr_ptr_type)
"parent_isa",
self.sys_params.pointer_size,
self.sys_params.addr_ptr_type,
)
return Class_Data_V2(parent_isa_pointer, self.sys_params)
else:
return None
@@ -361,10 +390,19 @@ class Class_Data_V2:
def __str__(self):
logger = lldb.formatters.Logger.Logger()
return 'isaPointer = ' + hex(self.isaPointer) + "\n" + \
"superclassIsaPointer = " + hex(self.superclassIsaPointer) + "\n" + \
"cachePointer = " + hex(self.cachePointer) + "\n" + \
"data = " + hex(self.dataPointer)
return (
"isaPointer = "
+ hex(self.isaPointer)
+ "\n"
+ "superclassIsaPointer = "
+ hex(self.superclassIsaPointer)
+ "\n"
+ "cachePointer = "
+ hex(self.cachePointer)
+ "\n"
+ "data = "
+ hex(self.dataPointer)
)
def is_tagged(self):
return 0
@@ -375,16 +413,19 @@ class Class_Data_V2:
return None
return self.rwt.rot.instance_size(align)
# runtime v1 is much less intricate than v2 and stores relevant
# information directly in the class_t object
class Class_Data_V1:
def __init__(self, isa_pointer, params):
logger = lldb.formatters.Logger.Logger()
if (isa_pointer is not None) and (Utilities.is_valid_pointer(
isa_pointer.GetValueAsUnsigned(), params.pointer_size, allow_tagged=0)):
if (isa_pointer is not None) and (
Utilities.is_valid_pointer(
isa_pointer.GetValueAsUnsigned(), params.pointer_size, allow_tagged=0
)
):
self.valid = 1
self.sys_params = params
self.valobj = isa_pointer
@@ -394,8 +435,9 @@ class Class_Data_V1:
self.valid = 0
if self.valid:
self.name = Utilities.read_ascii(
self.valobj.GetTarget().GetProcess(), self.namePointer)
if not(Utilities.is_valid_identifier(self.name)):
self.valobj.GetTarget().GetProcess(), self.namePointer
)
if not (Utilities.is_valid_identifier(self.name)):
logger >> "Marking as invalid - name is not valid"
self.valid = 0
@@ -405,12 +447,13 @@ class Class_Data_V1:
self.valid = 1
self.isaPointer = Utilities.read_child_of(
self.valobj, 0, self.sys_params.types_cache.addr_ptr_type)
if not(
self.valobj, 0, self.sys_params.types_cache.addr_ptr_type
)
if not (
Utilities.is_valid_pointer(
self.isaPointer,
self.sys_params.pointer_size,
allow_tagged=0)):
self.isaPointer, self.sys_params.pointer_size, allow_tagged=0
)
):
logger >> "Marking as invalid - isaPointer is invalid"
self.valid = 0
return
@@ -418,13 +461,16 @@ class Class_Data_V1:
self.superclassIsaPointer = Utilities.read_child_of(
self.valobj,
1 * self.sys_params.pointer_size,
self.sys_params.types_cache.addr_ptr_type)
if not(
self.sys_params.types_cache.addr_ptr_type,
)
if not (
Utilities.is_valid_pointer(
self.superclassIsaPointer,
self.sys_params.pointer_size,
allow_tagged=0,
allow_NULL=1)):
allow_NULL=1,
)
):
logger >> "Marking as invalid - superclassIsa is invalid"
self.valid = 0
return
@@ -432,10 +478,11 @@ class Class_Data_V1:
self.namePointer = Utilities.read_child_of(
self.valobj,
2 * self.sys_params.pointer_size,
self.sys_params.types_cache.addr_ptr_type)
self.sys_params.types_cache.addr_ptr_type,
)
# if not(Utilities.is_valid_pointer(self.namePointer,self.sys_params.pointer_size,allow_tagged=0,allow_NULL=0)):
# self.valid = 0
# return
# self.valid = 0
# return
# in general, KVO is implemented by transparently subclassing
# however, there could be exceptions where a class does something else
@@ -455,13 +502,16 @@ class Class_Data_V1:
def is_cftype(self):
logger = lldb.formatters.Logger.Logger()
if self.is_valid():
return self.class_name() == '__NSCFType' or self.class_name() == 'NSCFType'
return self.class_name() == "__NSCFType" or self.class_name() == "NSCFType"
def get_superclass(self):
logger = lldb.formatters.Logger.Logger()
if self.is_valid():
parent_isa_pointer = self.valobj.CreateChildAtOffset(
"parent_isa", self.sys_params.pointer_size, self.sys_params.addr_ptr_type)
"parent_isa",
self.sys_params.pointer_size,
self.sys_params.addr_ptr_type,
)
return Class_Data_V1(parent_isa_pointer, self.sys_params)
else:
return None
@@ -478,10 +528,21 @@ class Class_Data_V1:
def __str__(self):
logger = lldb.formatters.Logger.Logger()
return 'isaPointer = ' + hex(self.isaPointer) + "\n" + \
"superclassIsaPointer = " + hex(self.superclassIsaPointer) + "\n" + \
"namePointer = " + hex(self.namePointer) + " --> " + self.name + \
"instanceSize = " + hex(self.instanceSize()) + "\n"
return (
"isaPointer = "
+ hex(self.isaPointer)
+ "\n"
+ "superclassIsaPointer = "
+ hex(self.superclassIsaPointer)
+ "\n"
+ "namePointer = "
+ hex(self.namePointer)
+ " --> "
+ self.name
+ "instanceSize = "
+ hex(self.instanceSize())
+ "\n"
)
def is_tagged(self):
return 0
@@ -494,7 +555,8 @@ class Class_Data_V1:
self.instanceSize = Utilities.read_child_of(
self.valobj,
5 * self.sys_params.pointer_size,
self.sys_params.types_cache.addr_ptr_type)
self.sys_params.types_cache.addr_ptr_type,
)
if align:
unalign = self.instance_size(0)
if self.sys_params.is_64_bit:
@@ -504,23 +566,27 @@ class Class_Data_V1:
else:
return self.instanceSize
# these are the only tagged pointers values for current versions
# of OSX - they might change in future OS releases, and no-one is
# advised to rely on these values, or any of the bitmasking formulas
# in TaggedClass_Data. doing otherwise is at your own risk
TaggedClass_Values_Lion = {1: 'NSNumber',
5: 'NSManagedObject',
6: 'NSDate',
7: 'NSDateTS'}
TaggedClass_Values_NMOS = {0: 'NSAtom',
3: 'NSNumber',
4: 'NSDateTS',
5: 'NSManagedObject',
6: 'NSDate'}
TaggedClass_Values_Lion = {
1: "NSNumber",
5: "NSManagedObject",
6: "NSDate",
7: "NSDateTS",
}
TaggedClass_Values_NMOS = {
0: "NSAtom",
3: "NSNumber",
4: "NSDateTS",
5: "NSManagedObject",
6: "NSDate",
}
class TaggedClass_Data:
def __init__(self, pointer, params):
logger = lldb.formatters.Logger.Logger()
global TaggedClass_Values_Lion, TaggedClass_Values_NMOS
@@ -586,7 +652,6 @@ class TaggedClass_Data:
class InvalidClass_Data:
def __init__(self):
pass
@@ -595,7 +660,6 @@ class InvalidClass_Data:
class Version:
def __init__(self, major, minor, release, build_string):
self._major = major
self._minor = minor
@@ -620,21 +684,23 @@ class Version:
build_string = property(get_build_string, None)
def __lt__(self, other):
if (self.major < other.major):
if self.major < other.major:
return 1
if (self.minor < other.minor):
if self.minor < other.minor:
return 1
if (self.release < other.release):
if self.release < other.release:
return 1
# build strings are not compared since they are heavily platform-dependent and might not always
# be available
return 0
def __eq__(self, other):
return (self.major == other.major) and \
(self.minor == other.minor) and \
(self.release == other.release) and \
(self.build_string == other.build_string)
return (
(self.major == other.major)
and (self.minor == other.minor)
and (self.release == other.release)
and (self.build_string == other.build_string)
)
# Python 2.6 doesn't have functools.total_ordering, so we have to implement
# other comparators
@@ -655,7 +721,6 @@ isa_caches = lldb.formatters.cache.Cache()
class SystemParameters:
def __init__(self, valobj):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture(valobj)
@@ -689,12 +754,15 @@ class SystemParameters:
self.types_cache = types_caches.get_value(self.pid)
else:
self.types_cache = lldb.formatters.attrib_fromdict.AttributesDictionary(
allow_reset=0)
self.types_cache.addr_type = valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
allow_reset=0
)
self.types_cache.addr_type = valobj.GetType().GetBasicType(
lldb.eBasicTypeUnsignedLong
)
self.types_cache.addr_ptr_type = self.types_cache.addr_type.GetPointerType()
self.types_cache.uint32_t = valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
self.types_cache.uint32_t = valobj.GetType().GetBasicType(
lldb.eBasicTypeUnsignedInt
)
types_caches.add_item(self.pid, self.types_cache)
if isa_caches.look_for_key(self.pid):
@@ -706,9 +774,9 @@ class SystemParameters:
def adjust_for_architecture(self, valobj):
process = valobj.GetTarget().GetProcess()
self.pointer_size = process.GetAddressByteSize()
self.is_64_bit = (self.pointer_size == 8)
self.is_64_bit = self.pointer_size == 8
self.endianness = process.GetByteOrder()
self.is_little = (self.endianness == lldb.eByteOrderLittle)
self.is_little = self.endianness == lldb.eByteOrderLittle
self.cfruntime_size = 16 if self.is_64_bit else 8
# a simple helper function that makes it more explicit that one is calculating
@@ -717,18 +785,12 @@ class SystemParameters:
# you can pass that in and it will be taken into account (since padding may be different between
# 32 and 64 bit versions, you can pass padding value for both, the right
# one will be used)
def calculate_offset(
self,
num_pointers=0,
bytes_count=0,
padding32=0,
padding64=0):
def calculate_offset(self, num_pointers=0, bytes_count=0, padding32=0, padding64=0):
value = bytes_count + num_pointers * self.pointer_size
return value + padding64 if self.is_64_bit else value + padding32
class ObjCRuntime:
# the ObjC runtime has no explicit "version" field that we can use
# instead, we discriminate v1 from v2 by looking for the presence
# of a well-known section only present in v1
@@ -743,7 +805,7 @@ class ObjCRuntime:
module_objc = None
for idx in range(num_modules):
module = target.GetModuleAtIndex(idx)
if module.GetFileSpec().GetFilename() == 'libobjc.A.dylib':
if module.GetFileSpec().GetFilename() == "libobjc.A.dylib":
module_objc = module
break
if module_objc is None or module_objc.IsValid() == 0:
@@ -753,7 +815,7 @@ class ObjCRuntime:
section_objc = None
for idx in range(num_sections):
section = module.GetSectionAtIndex(idx)
if section.GetName() == '__OBJC':
if section.GetName() == "__OBJC":
section_objc = section
break
if section_objc is not None and section_objc.IsValid():
@@ -780,20 +842,18 @@ class ObjCRuntime:
def adjust_for_architecture(self):
pass
# an ObjC pointer can either be tagged or must be aligned
# an ObjC pointer can either be tagged or must be aligned
def is_tagged(self):
logger = lldb.formatters.Logger.Logger()
if self.valobj is None:
return 0
return (
return Utilities.is_valid_pointer(
self.unsigned_value, self.sys_params.pointer_size, allow_tagged=1
) and not (
Utilities.is_valid_pointer(
self.unsigned_value,
self.sys_params.pointer_size,
allow_tagged=1) and not(
Utilities.is_valid_pointer(
self.unsigned_value,
self.sys_params.pointer_size,
allow_tagged=0)))
self.unsigned_value, self.sys_params.pointer_size, allow_tagged=0
)
)
def is_valid(self):
logger = lldb.formatters.Logger.Logger()
@@ -802,9 +862,8 @@ class ObjCRuntime:
if self.valobj.IsInScope() == 0:
return 0
return Utilities.is_valid_pointer(
self.unsigned_value,
self.sys_params.pointer_size,
allow_tagged=1)
self.unsigned_value, self.sys_params.pointer_size, allow_tagged=1
)
def is_nil(self):
return self.unsigned_value == 0
@@ -815,7 +874,8 @@ class ObjCRuntime:
logger >> "using cached isa"
return self.isa_value
self.isa_pointer = self.valobj.CreateChildAtOffset(
"cfisa", 0, self.sys_params.types_cache.addr_ptr_type)
"cfisa", 0, self.sys_params.types_cache.addr_ptr_type
)
if self.isa_pointer is None or self.isa_pointer.IsValid() == 0:
logger >> "invalid isa - bailing out"
return None
@@ -837,7 +897,8 @@ class ObjCRuntime:
# but unless performance requires it, this seems a cleaner way
# to tackle the task
tentative_tagged = TaggedClass_Data(
self.unsigned_value, self.sys_params)
self.unsigned_value, self.sys_params
)
if tentative_tagged.is_valid():
logger >> "truly tagged"
return tentative_tagged
@@ -849,8 +910,7 @@ class ObjCRuntime:
return InvalidClass_Data()
if self.is_valid() == 0 or self.read_isa() is None:
return InvalidClass_Data()
data = self.sys_params.isa_cache.get_value(
self.isa_value, default=None)
data = self.sys_params.isa_cache.get_value(self.isa_value, default=None)
if data is not None:
return data
if self.sys_params.runtime_version == 2:
@@ -860,22 +920,20 @@ class ObjCRuntime:
if data is None:
return InvalidClass_Data()
if data.is_valid():
self.sys_params.isa_cache.add_item(
self.isa_value, data, ok_to_replace=1)
self.sys_params.isa_cache.add_item(self.isa_value, data, ok_to_replace=1)
return data
# these classes below can be used by the data formatters to provide a
# consistent message that describes a given runtime-generated situation
class SpecialSituation_Description:
def message(self):
return ''
return ""
class InvalidPointer_Description(SpecialSituation_Description):
def __init__(self, nil):
self.is_nil = nil
@@ -883,19 +941,17 @@ class InvalidPointer_Description(SpecialSituation_Description):
if self.is_nil:
return '@"<nil>"'
else:
return '<invalid pointer>'
return "<invalid pointer>"
class InvalidISA_Description(SpecialSituation_Description):
def __init__(self):
pass
def message(self):
return '<not an Objective-C object>'
return "<not an Objective-C object>"
class ThisIsZombie_Description(SpecialSituation_Description):
def message(self):
return '<freed object>'
return "<freed object>"
View File
@@ -6,19 +6,25 @@ def pyobj_summary(value, unused):
return "<invalid>"
refcnt = value.GetChildMemberWithName("ob_refcnt")
expr = "(char*)PyString_AsString( (PyObject*)PyObject_Str( (PyObject*)0x%x) )" % (
value.GetValueAsUnsigned(0))
value.GetValueAsUnsigned(0)
)
expr_summary = value.target.EvaluateExpression(
expr, lldb.SBExpressionOptions()).GetSummary()
expr, lldb.SBExpressionOptions()
).GetSummary()
refcnt_value = "rc = %d" % (refcnt.GetValueAsUnsigned(0))
return "%s (%s)" % (expr_summary, refcnt_value)
def __lldb_init_module(debugger, unused):
debugger.HandleCommand(
"type summary add PyObject --python-function pysummary.pyobj_summary")
"type summary add PyObject --python-function pysummary.pyobj_summary"
)
debugger.HandleCommand(
"type summary add lldb_private::PythonObject -s ${var.m_py_obj%S}")
"type summary add lldb_private::PythonObject -s ${var.m_py_obj%S}"
)
debugger.HandleCommand(
"type summary add lldb_private::PythonDictionary -s ${var.m_py_obj%S}")
"type summary add lldb_private::PythonDictionary -s ${var.m_py_obj%S}"
)
debugger.HandleCommand(
"type summary add lldb_private::PythonString -s ${var.m_py_obj%S}")
"type summary add lldb_private::PythonString -s ${var.m_py_obj%S}"
)
View File
@@ -8,7 +8,6 @@ SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
class SharedPtr_SyntheticChildrenProvider:
def __init__(self, valobj, dict):
self.valobj = valobj
self.update()
@@ -28,20 +27,21 @@ class SharedPtr_SyntheticChildrenProvider:
def get_child_at_index(self, index):
if index == 0:
return self.valobj.GetChildMemberWithName('_M_ptr')
return self.valobj.GetChildMemberWithName("_M_ptr")
if index == 1:
return self.valobj.GetChildMemberWithName('_M_refcount').GetChildMemberWithName(
'_M_pi').GetChildMemberWithName('_M_use_count')
return (
self.valobj.GetChildMemberWithName("_M_refcount")
.GetChildMemberWithName("_M_pi")
.GetChildMemberWithName("_M_use_count")
)
return None
def SharedPtr_SummaryProvider(valobj, dict):
return 'use = ' + \
str(valobj.GetChildMemberWithName("count").GetValueAsUnsigned())
return "use = " + str(valobj.GetChildMemberWithName("count").GetValueAsUnsigned())
class ValueObjectSP_SyntheticChildrenProvider:
def __init__(self, valobj, dict):
self.valobj = valobj
self.update()
@@ -61,24 +61,30 @@ class ValueObjectSP_SyntheticChildrenProvider:
def get_child_at_index(self, index):
if index == 0:
return self.valobj.GetChildMemberWithName('ptr_')
return self.valobj.GetChildMemberWithName("ptr_")
if index == 1:
return self.valobj.GetChildMemberWithName(
'cntrl_').GetChildMemberWithName('shared_owners_')
return self.valobj.GetChildMemberWithName("cntrl_").GetChildMemberWithName(
"shared_owners_"
)
return None
def ValueObjectSP_SummaryProvider(valobj, dict):
return 'use = ' + \
str(1 + valobj.GetChildMemberWithName("count").GetValueAsUnsigned())
return "use = " + str(
1 + valobj.GetChildMemberWithName("count").GetValueAsUnsigned()
)
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
'type summary add -x ".*ValueObjectSP" --expand -F sp_cp.ValueObjectSP_SummaryProvider')
'type summary add -x ".*ValueObjectSP" --expand -F sp_cp.ValueObjectSP_SummaryProvider'
)
debugger.HandleCommand(
'type synthetic add -x ".*ValueObjectSP" -l sp_cp.ValueObjectSP_SyntheticChildrenProvider')
'type synthetic add -x ".*ValueObjectSP" -l sp_cp.ValueObjectSP_SyntheticChildrenProvider'
)
debugger.HandleCommand(
'type summary add -x ".*SP" --expand -F sp_cp.SharedPtr_SummaryProvider')
'type summary add -x ".*SP" --expand -F sp_cp.SharedPtr_SummaryProvider'
)
debugger.HandleCommand(
'type synthetic add -x ".*SP" -l sp_cp.SharedPtr_SyntheticChildrenProvider')
'type synthetic add -x ".*SP" -l sp_cp.SharedPtr_SyntheticChildrenProvider'
)
View File
@@ -2,7 +2,6 @@ import lldb
class PythonObjectSyntheticChildProvider(object):
def __init__(self, value, internal_dict):
self.value = value
self.values = self.make_children()
@@ -34,18 +33,16 @@ class PythonObjectSyntheticChildProvider(object):
data = None
type = None
if isinstance(value, int):
data = lldb.SBData.CreateDataFromUInt64Array(
self.bo, self.ps, [value])
data = lldb.SBData.CreateDataFromUInt64Array(self.bo, self.ps, [value])
type = self.value.target.GetBasicType(lldb.eBasicTypeLong)
elif isinstance(value, float):
data = lldb.SBData.CreateDataFromDoubleArray(
self.bo, self.ps, [value])
data = lldb.SBData.CreateDataFromDoubleArray(self.bo, self.ps, [value])
type = self.value.target.GetBasicType(lldb.eBasicTypeDouble)
elif isinstance(value, str):
data = lldb.SBData.CreateDataFromCString(self.bo, self.ps, value)
type = self.value.target.GetBasicType(
lldb.eBasicTypeChar).GetArrayType(
len(value))
type = self.value.target.GetBasicType(lldb.eBasicTypeChar).GetArrayType(
len(value)
)
if (data is not None) and (type is not None):
return self.value.CreateValueFromData(name, data, type)
return None
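The pattern above, packing a plain Python value into an SBData and wrapping it in a new SBValue, can be sketched on its own. In this minimal sketch, valobj stands for any SBValue obtained from the debugger and make_int_child is a hypothetical helper name:

import lldb


def make_int_child(valobj, name, number):
    # pack the Python integer into an SBData using the target's byte order
    # and pointer size, then materialize it as a named child value
    target = valobj.target
    data = lldb.SBData.CreateDataFromUInt64Array(
        target.GetByteOrder(), target.GetAddressByteSize(), [number]
    )
    int_type = target.GetBasicType(lldb.eBasicTypeLong)
    return valobj.CreateValueFromData(name, data, int_type)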

View File

@@ -36,7 +36,7 @@ def utf16_summary(value, unused):
error = lldb.SBError()
string_data = value.process.ReadMemory(pointer, length, error)
# utf8 is safe to emit as-is on OSX
return '"%s"' % (string_data.decode('utf-16').encode('utf-8'))
return '"%s"' % (string_data.decode("utf-16").encode("utf-8"))
def utf32_summary(value, unused):
@@ -50,4 +50,4 @@ def utf32_summary(value, unused):
error = lldb.SBError()
string_data = value.process.ReadMemory(pointer, length, error)
# utf8 is safe to emit as-is on OSX
return '"%s"' % (string_data.decode('utf-32').encode('utf-8'))
return '"%s"' % (string_data.decode("utf-32").encode("utf-8"))

View File

@@ -6,7 +6,6 @@
class MaskedData_SyntheticChildrenProvider:
def __init__(self, valobj, dict):
# remember the SBValue since you will not have another chance to get it
# :-)
@@ -58,45 +57,48 @@ class MaskedData_SyntheticChildrenProvider:
return self.valobj.GetChildMemberWithName("value")
if index == 1:
# fetch the value of the operator
op_chosen = self.valobj.GetChildMemberWithName(
"oper").GetValueAsUnsigned()
op_chosen = self.valobj.GetChildMemberWithName("oper").GetValueAsUnsigned()
# if it is a known value, return a descriptive string for it
# we are not doing this in the most efficient possible way, but the code is very readable
# and easy to maintain - if you change the values on the C++ side,
# the same changes must be made here
if op_chosen == 0:
return self.valobj.CreateValueFromExpression(
"operator", '(const char*)"none"')
"operator", '(const char*)"none"'
)
elif op_chosen == 1:
return self.valobj.CreateValueFromExpression(
"operator", '(const char*)"AND"')
"operator", '(const char*)"AND"'
)
elif op_chosen == 2:
return self.valobj.CreateValueFromExpression(
"operator", '(const char*)"OR"')
"operator", '(const char*)"OR"'
)
elif op_chosen == 3:
return self.valobj.CreateValueFromExpression(
"operator", '(const char*)"XOR"')
"operator", '(const char*)"XOR"'
)
elif op_chosen == 4:
return self.valobj.CreateValueFromExpression(
"operator", '(const char*)"NAND"')
"operator", '(const char*)"NAND"'
)
elif op_chosen == 5:
return self.valobj.CreateValueFromExpression(
"operator", '(const char*)"NOR"')
"operator", '(const char*)"NOR"'
)
else:
return self.valobj.CreateValueFromExpression(
"operator", '(const char*)"unknown"') # something else
"operator", '(const char*)"unknown"'
) # something else
if index == 2:
return self.valobj.GetChildMemberWithName("mask")
if index == 3:
# for this, we must fetch all the other elements
# in an efficient implementation, we would be caching this data for
# efficiency
value = self.valobj.GetChildMemberWithName(
"value").GetValueAsUnsigned()
operator = self.valobj.GetChildMemberWithName(
"oper").GetValueAsUnsigned()
mask = self.valobj.GetChildMemberWithName(
"mask").GetValueAsUnsigned()
value = self.valobj.GetChildMemberWithName("value").GetValueAsUnsigned()
operator = self.valobj.GetChildMemberWithName("oper").GetValueAsUnsigned()
mask = self.valobj.GetChildMemberWithName("mask").GetValueAsUnsigned()
# compute the masked value according to the operator
if operator == 1:
value = value & mask
@@ -110,12 +112,15 @@ class MaskedData_SyntheticChildrenProvider:
value = ~(value | mask)
else:
pass
value &= 0xFFFFFFFF # make sure Python does not extend our values to 64-bits
value &= (
0xFFFFFFFF # make sure Python does not extend our values to 64-bits
)
# return it - again, not the most efficient possible way. we should actually be pushing the computed value
# into an SBData, and using the SBData to create an SBValue - this
# has the advantage of readability
return self.valobj.CreateValueFromExpression(
"apply()", '(uint32_t)(' + str(value) + ')')
"apply()", "(uint32_t)(" + str(value) + ")"
)
def update(self):
# we do not do anything special in update - but this would be the right place to lookup
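The arithmetic performed for the synthetic apply() child above reduces to ordinary bitwise operations plus the 32-bit truncation; a small worked example with made-up numbers:

value = 0x0000FF00
mask = 0x00FFFF00

assert value & mask == 0x0000FF00                    # oper == 1: AND
assert value | mask == 0x00FFFF00                    # oper == 2: OR
assert value ^ mask == 0x00FF0000                    # oper == 3: XOR
assert ~(value & mask) & 0xFFFFFFFF == 0xFFFF00FF    # oper == 4: NAND
assert ~(value | mask) & 0xFFFFFFFF == 0xFF0000FF    # oper == 5: NOR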

View File

@@ -5,6 +5,7 @@ import lldb.formatters.Logger
# implementation for your platform before relying on these formatters to do the right
# thing for your setup
def ForwardListSummaryProvider(valobj, dict):
list_capping_size = valobj.GetTarget().GetMaximumNumberOfChildrenToDisplay()
text = "size=" + str(valobj.GetNumChildren())
@@ -13,6 +14,7 @@ def ForwardListSummaryProvider(valobj, dict):
else:
return text
def StdOptionalSummaryProvider(valobj, dict):
has_value = valobj.GetNumChildren() > 0
# We add wrapping spaces for consistency with the libcxx formatter
@@ -25,14 +27,16 @@ class StdOptionalSynthProvider:
def update(self):
try:
self.payload = self.valobj.GetChildMemberWithName('_M_payload')
self.value = self.payload.GetChildMemberWithName('_M_payload')
self.has_value = self.payload.GetChildMemberWithName('_M_engaged').GetValueAsUnsigned(0) != 0
self.payload = self.valobj.GetChildMemberWithName("_M_payload")
self.value = self.payload.GetChildMemberWithName("_M_payload")
self.has_value = (
self.payload.GetChildMemberWithName("_M_engaged").GetValueAsUnsigned(0)
!= 0
)
except:
self.has_value = False
return False
def num_children(self):
return 1 if self.has_value else 0
@@ -41,15 +45,18 @@ class StdOptionalSynthProvider:
def get_child_at_index(self, index):
# some versions of libstdcpp have an additional _M_value child with the actual value
possible_value = self.value.GetChildMemberWithName('_M_value')
possible_value = self.value.GetChildMemberWithName("_M_value")
if possible_value.IsValid():
return possible_value.Clone('Value')
return self.value.Clone('Value')
return possible_value.Clone("Value")
return self.value.Clone("Value")
"""
This formatter can be applied to all
unordered map-like structures (unordered_map, unordered_multimap, unordered_set, unordered_multiset)
"""
class StdUnorderedMapSynthProvider:
def __init__(self, valobj, dict):
self.valobj = valobj
@@ -76,9 +83,9 @@ class StdUnorderedMapSynthProvider:
# later
self.count = None
try:
self.head = self.valobj.GetChildMemberWithName('_M_h')
self.before_begin = self.head.GetChildMemberWithName('_M_before_begin')
self.next = self.before_begin.GetChildMemberWithName('_M_nxt')
self.head = self.valobj.GetChildMemberWithName("_M_h")
self.before_begin = self.head.GetChildMemberWithName("_M_before_begin")
self.next = self.before_begin.GetChildMemberWithName("_M_nxt")
self.data_type = self.extract_type()
self.skip_size = self.next.GetType().GetByteSize()
self.data_size = self.data_type.GetByteSize()
@@ -90,7 +97,7 @@ class StdUnorderedMapSynthProvider:
def get_child_index(self, name):
try:
return int(name.lstrip('[').rstrip(']'))
return int(name.lstrip("[").rstrip("]"))
except:
return -1
@@ -105,9 +112,11 @@ class StdUnorderedMapSynthProvider:
offset = index
current = self.next
while offset > 0:
current = current.GetChildMemberWithName('_M_nxt')
current = current.GetChildMemberWithName("_M_nxt")
offset = offset - 1
return current.CreateChildAtOffset( '[' + str(index) + ']', self.skip_size, self.data_type)
return current.CreateChildAtOffset(
"[" + str(index) + "]", self.skip_size, self.data_type
)
except:
logger >> "Cannot get child"
@@ -121,7 +130,9 @@ class StdUnorderedMapSynthProvider:
def num_children_impl(self):
logger = lldb.formatters.Logger.Logger()
try:
count = self.head.GetChildMemberWithName('_M_element_count').GetValueAsUnsigned(0)
count = self.head.GetChildMemberWithName(
"_M_element_count"
).GetValueAsUnsigned(0)
return count
except:
logger >> "Could not determine the size"
@@ -130,22 +141,25 @@ class StdUnorderedMapSynthProvider:
class AbstractListSynthProvider:
def __init__(self, valobj, dict, has_prev):
'''
"""
:param valobj: The value object of the list
:param dict: A dict with metadata provided by LLDB
:param has_prev: Whether the list supports a 'prev' pointer besides a 'next' one
'''
"""
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.count = None
self.has_prev = has_prev
self.list_capping_size = self.valobj.GetTarget().GetMaximumNumberOfChildrenToDisplay()
logger >> "Providing synthetic children for a list named " + \
str(valobj.GetName())
self.list_capping_size = (
self.valobj.GetTarget().GetMaximumNumberOfChildrenToDisplay()
)
logger >> "Providing synthetic children for a list named " + str(
valobj.GetName()
)
def next_node(self, node):
logger = lldb.formatters.Logger.Logger()
return node.GetChildMemberWithName('_M_next')
return node.GetChildMemberWithName("_M_next")
def is_valid(self, node):
logger = lldb.formatters.Logger.Logger()
@@ -159,8 +173,7 @@ class AbstractListSynthProvider:
def value(self, node):
logger = lldb.formatters.Logger.Logger()
value = node.GetValueAsUnsigned()
logger >> "synthetic value for {}: {}".format(
str(self.valobj.GetName()), value)
logger >> "synthetic value for {}: {}".format(str(self.valobj.GetName()), value)
return value
# Floyd's cycle-finding algorithm
@@ -178,8 +191,7 @@ class AbstractListSynthProvider:
slow_value = self.value(slow)
fast1 = self.next_node(fast2)
fast2 = self.next_node(fast1)
if self.value(fast1) == slow_value or self.value(
fast2) == slow_value:
if self.value(fast1) == slow_value or self.value(fast2) == slow_value:
return True
slow = self.next_node(slow)
return False
@@ -188,7 +200,7 @@ class AbstractListSynthProvider:
logger = lldb.formatters.Logger.Logger()
if self.count is None:
# libstdc++ 6.0.21 added dedicated count field.
count_child = self.node.GetChildMemberWithName('_M_data')
count_child = self.node.GetChildMemberWithName("_M_data")
if count_child and count_child.IsValid():
self.count = count_child.GetValueAsUnsigned(0)
if self.count is None:
@@ -215,9 +227,11 @@ class AbstractListSynthProvider:
return 1
size = 1
current = self.next
while current.GetChildMemberWithName(
'_M_next').GetValueAsUnsigned(0) != self.get_end_of_list_address():
current = current.GetChildMemberWithName('_M_next')
while (
current.GetChildMemberWithName("_M_next").GetValueAsUnsigned(0)
!= self.get_end_of_list_address()
):
current = current.GetChildMemberWithName("_M_next")
if not current.IsValid():
break
size = size + 1
@@ -232,7 +246,7 @@ class AbstractListSynthProvider:
def get_child_index(self, name):
logger = lldb.formatters.Logger.Logger()
try:
return int(name.lstrip('[').rstrip(']'))
return int(name.lstrip("[").rstrip("]"))
except:
return -1
@@ -247,14 +261,15 @@ class AbstractListSynthProvider:
offset = index
current = self.next
while offset > 0:
current = current.GetChildMemberWithName('_M_next')
current = current.GetChildMemberWithName("_M_next")
offset = offset - 1
# C++ lists store the data of a node after its pointers. In the case of a forward list, there's just one pointer (next), and
# in the case of a double-linked list, there's an additional pointer (prev).
return current.CreateChildAtOffset(
'[' + str(index) + ']',
(2 if self.has_prev else 1) * current.GetType().GetByteSize(),
self.data_type)
"[" + str(index) + "]",
(2 if self.has_prev else 1) * current.GetType().GetByteSize(),
self.data_type,
)
except:
return None
@@ -273,7 +288,7 @@ class AbstractListSynthProvider:
# later
self.count = None
try:
self.impl = self.valobj.GetChildMemberWithName('_M_impl')
self.impl = self.valobj.GetChildMemberWithName("_M_impl")
self.data_type = self.extract_type()
if (not self.data_type.IsValid()) or (not self.impl.IsValid()):
self.count = 0
@@ -285,34 +300,35 @@ class AbstractListSynthProvider:
self.count = 0
return False
'''
"""
Method is used to extract the list pointers into the variables (e.g. self.node, self.next, and optionally self.prev)
and is mandatory to be overridden in each AbstractListSynthProvider subclass.
This should return True or False depending on whether it found valid data.
'''
"""
def updateNodes(self):
raise NotImplementedError
def has_children(self):
return True
'''
"""
Method is used to identify if a node traversal has reached its end
and is mandatory to be overridden in each AbstractListSynthProvider subclass
'''
"""
def get_end_of_list_address(self):
raise NotImplementedError
class StdForwardListSynthProvider(AbstractListSynthProvider):
def __init__(self, valobj, dict):
has_prev = False
super().__init__(valobj, dict, has_prev)
def updateNodes(self):
self.node = self.impl.GetChildMemberWithName('_M_head')
self.next = self.node.GetChildMemberWithName('_M_next')
self.node = self.impl.GetChildMemberWithName("_M_head")
self.next = self.node.GetChildMemberWithName("_M_next")
if (not self.node.IsValid()) or (not self.next.IsValid()):
return False
return True
@@ -322,17 +338,21 @@ class StdForwardListSynthProvider(AbstractListSynthProvider):
class StdListSynthProvider(AbstractListSynthProvider):
def __init__(self, valobj, dict):
has_prev = True
super().__init__(valobj, dict, has_prev)
def updateNodes(self):
self.node_address = self.valobj.AddressOf().GetValueAsUnsigned(0)
self.node = self.impl.GetChildMemberWithName('_M_node')
self.prev = self.node.GetChildMemberWithName('_M_prev')
self.next = self.node.GetChildMemberWithName('_M_next')
if self.node_address == 0 or (not self.node.IsValid()) or (not self.next.IsValid()) or (not self.prev.IsValid()):
self.node = self.impl.GetChildMemberWithName("_M_node")
self.prev = self.node.GetChildMemberWithName("_M_prev")
self.next = self.node.GetChildMemberWithName("_M_next")
if (
self.node_address == 0
or (not self.node.IsValid())
or (not self.next.IsValid())
or (not self.prev.IsValid())
):
return False
return True
@@ -341,9 +361,7 @@ class StdListSynthProvider(AbstractListSynthProvider):
class StdVectorSynthProvider:
class StdVectorImplementation(object):
def __init__(self, valobj):
self.valobj = valobj
self.count = None
@@ -379,7 +397,7 @@ class StdVectorSynthProvider:
# this check might fail, unless the sizeof() we get is itself incremented to take the
# padding bytes into account - on current clang it looks like
# this is the case
num_children = (finish_val - start_val)
num_children = finish_val - start_val
if (num_children % self.data_size) != 0:
return 0
else:
@@ -398,7 +416,8 @@ class StdVectorSynthProvider:
try:
offset = index * self.data_size
return self.start.CreateChildAtOffset(
'[' + str(index) + ']', offset, self.data_type)
"[" + str(index) + "]", offset, self.data_type
)
except:
return None
@@ -407,16 +426,20 @@ class StdVectorSynthProvider:
# mind later
self.count = None
try:
impl = self.valobj.GetChildMemberWithName('_M_impl')
self.start = impl.GetChildMemberWithName('_M_start')
self.finish = impl.GetChildMemberWithName('_M_finish')
self.end = impl.GetChildMemberWithName('_M_end_of_storage')
impl = self.valobj.GetChildMemberWithName("_M_impl")
self.start = impl.GetChildMemberWithName("_M_start")
self.finish = impl.GetChildMemberWithName("_M_finish")
self.end = impl.GetChildMemberWithName("_M_end_of_storage")
self.data_type = self.start.GetType().GetPointeeType()
self.data_size = self.data_type.GetByteSize()
# if any of these objects is invalid, it means there is no
# point in trying to fetch anything
if self.start.IsValid() and self.finish.IsValid(
) and self.end.IsValid() and self.data_type.IsValid():
if (
self.start.IsValid()
and self.finish.IsValid()
and self.end.IsValid()
and self.data_type.IsValid()
):
self.count = None
else:
self.count = 0
@@ -425,7 +448,6 @@ class StdVectorSynthProvider:
return False
class StdVBoolImplementation(object):
def __init__(self, valobj, bool_type):
self.valobj = valobj
self.bool_type = bool_type
@@ -445,28 +467,31 @@ class StdVectorSynthProvider:
return None
element_type = self.start_p.GetType().GetPointeeType()
element_bits = 8 * element_type.GetByteSize()
element_offset = (index // element_bits) * \
element_type.GetByteSize()
element_offset = (index // element_bits) * element_type.GetByteSize()
bit_offset = index % element_bits
element = self.start_p.CreateChildAtOffset(
'[' + str(index) + ']', element_offset, element_type)
"[" + str(index) + "]", element_offset, element_type
)
bit = element.GetValueAsUnsigned(0) & (1 << bit_offset)
if bit != 0:
value_expr = "(bool)true"
else:
value_expr = "(bool)false"
return self.valobj.CreateValueFromExpression(
"[%d]" % index, value_expr)
return self.valobj.CreateValueFromExpression("[%d]" % index, value_expr)
def update(self):
try:
m_impl = self.valobj.GetChildMemberWithName('_M_impl')
self.m_start = m_impl.GetChildMemberWithName('_M_start')
self.m_finish = m_impl.GetChildMemberWithName('_M_finish')
self.start_p = self.m_start.GetChildMemberWithName('_M_p')
self.finish_p = self.m_finish.GetChildMemberWithName('_M_p')
self.offset = self.m_finish.GetChildMemberWithName('_M_offset')
if self.offset.IsValid() and self.start_p.IsValid() and self.finish_p.IsValid():
m_impl = self.valobj.GetChildMemberWithName("_M_impl")
self.m_start = m_impl.GetChildMemberWithName("_M_start")
self.m_finish = m_impl.GetChildMemberWithName("_M_finish")
self.start_p = self.m_start.GetChildMemberWithName("_M_p")
self.finish_p = self.m_finish.GetChildMemberWithName("_M_p")
self.offset = self.m_finish.GetChildMemberWithName("_M_offset")
if (
self.offset.IsValid()
and self.start_p.IsValid()
and self.finish_p.IsValid()
):
self.valid = True
else:
self.valid = False
@@ -478,19 +503,19 @@ class StdVectorSynthProvider:
logger = lldb.formatters.Logger.Logger()
first_template_arg_type = valobj.GetType().GetTemplateArgumentType(0)
if str(first_template_arg_type.GetName()) == "bool":
self.impl = self.StdVBoolImplementation(
valobj, first_template_arg_type)
self.impl = self.StdVBoolImplementation(valobj, first_template_arg_type)
else:
self.impl = self.StdVectorImplementation(valobj)
logger >> "Providing synthetic children for a vector named " + \
str(valobj.GetName())
logger >> "Providing synthetic children for a vector named " + str(
valobj.GetName()
)
def num_children(self):
return self.impl.num_children()
def get_child_index(self, name):
try:
return int(name.lstrip('[').rstrip(']'))
return int(name.lstrip("[").rstrip("]"))
except:
return -1
@@ -507,21 +532,27 @@ class StdVectorSynthProvider:
This formatter can be applied to all
map-like structures (map, multimap, set, multiset)
"""
class StdMapLikeSynthProvider:
class StdMapLikeSynthProvider:
def __init__(self, valobj, dict):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.count = None
self.kind = self.get_object_kind(valobj)
logger >> "Providing synthetic children for a " + self.kind + " named " + \
str(valobj.GetName())
(
logger
>> "Providing synthetic children for a "
+ self.kind
+ " named "
+ str(valobj.GetName())
)
def get_object_kind(self, valobj):
type_name = valobj.GetTypeName()
for kind in ["multiset", "multimap", "set", "map"]:
if kind in type_name:
return kind
if kind in type_name:
return kind
return type_name
# we need this function as a temporary workaround for rdar://problem/10801549
@@ -533,14 +564,26 @@ class StdMapLikeSynthProvider:
# to find the type name
def fixup_class_name(self, class_name):
logger = lldb.formatters.Logger.Logger()
if class_name == 'std::basic_string<char, std::char_traits<char>, std::allocator<char> >':
return 'std::basic_string<char>', True
if class_name == 'basic_string<char, std::char_traits<char>, std::allocator<char> >':
return 'std::basic_string<char>', True
if class_name == 'std::basic_string<char, std::char_traits<char>, std::allocator<char> >':
return 'std::basic_string<char>', True
if class_name == 'basic_string<char, std::char_traits<char>, std::allocator<char> >':
return 'std::basic_string<char>', True
if (
class_name
== "std::basic_string<char, std::char_traits<char>, std::allocator<char> >"
):
return "std::basic_string<char>", True
if (
class_name
== "basic_string<char, std::char_traits<char>, std::allocator<char> >"
):
return "std::basic_string<char>", True
if (
class_name
== "std::basic_string<char, std::char_traits<char>, std::allocator<char> >"
):
return "std::basic_string<char>", True
if (
class_name
== "basic_string<char, std::char_traits<char>, std::allocator<char> >"
):
return "std::basic_string<char>", True
return class_name, False
def update(self):
@@ -553,9 +596,9 @@ class StdMapLikeSynthProvider:
# if this gets set to True, then we will merrily return None for
# any child from that moment on
self.garbage = False
self.Mt = self.valobj.GetChildMemberWithName('_M_t')
self.Mimpl = self.Mt.GetChildMemberWithName('_M_impl')
self.Mheader = self.Mimpl.GetChildMemberWithName('_M_header')
self.Mt = self.valobj.GetChildMemberWithName("_M_t")
self.Mimpl = self.Mt.GetChildMemberWithName("_M_impl")
self.Mheader = self.Mimpl.GetChildMemberWithName("_M_header")
if not self.Mheader.IsValid():
self.count = 0
else:
@@ -572,11 +615,13 @@ class StdMapLikeSynthProvider:
# GCC does not emit DW_TAG_template_type_parameter for
# std::allocator<...>. For such a case, get the type of
# std::pair from a member of std::map.
rep_type = self.valobj.GetChildMemberWithName('_M_t').GetType()
self.data_type = rep_type.GetTypedefedType().GetTemplateArgumentType(1)
rep_type = self.valobj.GetChildMemberWithName("_M_t").GetType()
self.data_type = (
rep_type.GetTypedefedType().GetTemplateArgumentType(1)
)
# from libstdc++ implementation of _M_root for rbtree
self.Mroot = self.Mheader.GetChildMemberWithName('_M_parent')
self.Mroot = self.Mheader.GetChildMemberWithName("_M_parent")
self.data_size = self.data_type.GetByteSize()
self.skip_size = self.Mheader.GetType().GetByteSize()
except:
@@ -596,7 +641,8 @@ class StdMapLikeSynthProvider:
if root_ptr_val == 0:
return 0
count = self.Mimpl.GetChildMemberWithName(
'_M_node_count').GetValueAsUnsigned(0)
"_M_node_count"
).GetValueAsUnsigned(0)
logger >> "I have " + str(count) + " children available"
return count
except:
@@ -605,7 +651,7 @@ class StdMapLikeSynthProvider:
def get_child_index(self, name):
logger = lldb.formatters.Logger.Logger()
try:
return int(name.lstrip('[').rstrip(']'))
return int(name.lstrip("[").rstrip("]"))
except:
return -1
@@ -627,7 +673,8 @@ class StdMapLikeSynthProvider:
offset = offset - 1
# skip all the base stuff and get at the data
return current.CreateChildAtOffset(
'[' + str(index) + ']', self.skip_size, self.data_type)
"[" + str(index) + "]", self.skip_size, self.data_type
)
except:
return None
@@ -667,7 +714,7 @@ class StdMapLikeSynthProvider:
x = node
y = self.parent(x)
max_steps -= 1
while(self.node_ptr_value(x) == self.node_ptr_value(self.right(y))):
while self.node_ptr_value(x) == self.node_ptr_value(self.right(y)):
x = y
y = self.parent(y)
max_steps -= 1
@@ -682,8 +729,10 @@ class StdMapLikeSynthProvider:
def has_children(self):
return True
_list_uses_loop_detector = True
class StdDequeSynthProvider:
def __init__(self, valobj, d):
self.valobj = valobj
@@ -693,7 +742,6 @@ class StdDequeSynthProvider:
self.element_size = -1
self.find_block_size()
def find_block_size(self):
# in order to use the deque we must have the block size, or else
# it's impossible to know what memory addresses are valid
@@ -707,7 +755,7 @@ class StdDequeSynthProvider:
# #define _GLIBCXX_DEQUE_BUF_SIZE 512
#
# return (__size < _GLIBCXX_DEQUE_BUF_SIZE
# ? size_t(_GLIBCXX_DEQUE_BUF_SIZE / __size) : size_t(1));
# ? size_t(_GLIBCXX_DEQUE_BUF_SIZE / __size) : size_t(1));
if self.element_size < 512:
self.block_size = 512 // self.element_size
else:
@@ -723,7 +771,7 @@ class StdDequeSynthProvider:
def get_child_index(self, name):
try:
return int(name.lstrip('[').rstrip(']'))
return int(name.lstrip("[").rstrip("]"))
except:
return -1
@@ -733,13 +781,15 @@ class StdDequeSynthProvider:
if index >= self.num_children():
return None
try:
name = '[' + str(index) + ']'
name = "[" + str(index) + "]"
# We first look for the element in the first subarray,
# which might be incomplete.
if index < self.first_node_size:
# The following statement is valid because self.first_elem is the pointer
# to the first element
return self.first_elem.CreateChildAtOffset(name, index * self.element_size, self.element_type)
return self.first_elem.CreateChildAtOffset(
name, index * self.element_size, self.element_type
)
# Now the rest of the subarrays except for maybe the last one
# are going to be complete, so the final expression is simpler
@@ -747,10 +797,13 @@ class StdDequeSynthProvider:
# We first move to the beginning of the node/subarray where our element is
node = self.start_node.CreateChildAtOffset(
'',
"",
(1 + i) * self.valobj.GetProcess().GetAddressByteSize(),
self.element_type.GetPointerType())
return node.CreateChildAtOffset(name, j * self.element_size, self.element_type)
self.element_type.GetPointerType(),
)
return node.CreateChildAtOffset(
name, j * self.element_size, self.element_type
)
except:
return None
@@ -774,42 +827,66 @@ class StdDequeSynthProvider:
count = 0
impl = self.valobj.GetChildMemberWithName('_M_impl')
impl = self.valobj.GetChildMemberWithName("_M_impl")
# we calculate the size of the first node (i.e. first internal array)
self.start = impl.GetChildMemberWithName('_M_start')
self.start_node = self.start.GetChildMemberWithName('_M_node')
self.start = impl.GetChildMemberWithName("_M_start")
self.start_node = self.start.GetChildMemberWithName("_M_node")
first_node_address = self.start_node.GetValueAsUnsigned(0)
first_node_last_elem = self.start.GetChildMemberWithName('_M_last').GetValueAsUnsigned(0)
self.first_elem = self.start.GetChildMemberWithName('_M_cur')
first_node_last_elem = self.start.GetChildMemberWithName(
"_M_last"
).GetValueAsUnsigned(0)
self.first_elem = self.start.GetChildMemberWithName("_M_cur")
first_node_first_elem = self.first_elem.GetValueAsUnsigned(0)
finish = impl.GetChildMemberWithName("_M_finish")
last_node_address = finish.GetChildMemberWithName(
"_M_node"
).GetValueAsUnsigned(0)
last_node_first_elem = finish.GetChildMemberWithName(
"_M_first"
).GetValueAsUnsigned(0)
last_node_last_elem = finish.GetChildMemberWithName(
"_M_cur"
).GetValueAsUnsigned(0)
finish = impl.GetChildMemberWithName('_M_finish')
last_node_address = finish.GetChildMemberWithName('_M_node').GetValueAsUnsigned(0)
last_node_first_elem = finish.GetChildMemberWithName('_M_first').GetValueAsUnsigned(0)
last_node_last_elem = finish.GetChildMemberWithName('_M_cur').GetValueAsUnsigned(0)
if first_node_first_elem == 0 or first_node_last_elem == 0 or first_node_first_elem > first_node_last_elem:
if (
first_node_first_elem == 0
or first_node_last_elem == 0
or first_node_first_elem > first_node_last_elem
):
return False
if last_node_first_elem == 0 or last_node_last_elem == 0 or last_node_first_elem > last_node_last_elem:
if (
last_node_first_elem == 0
or last_node_last_elem == 0
or last_node_first_elem > last_node_last_elem
):
return False
if last_node_address == first_node_address:
self.first_node_size = (last_node_last_elem - first_node_first_elem) // self.element_size
self.first_node_size = (
last_node_last_elem - first_node_first_elem
) // self.element_size
count += self.first_node_size
else:
self.first_node_size = (first_node_last_elem - first_node_first_elem) // self.element_size
self.first_node_size = (
first_node_last_elem - first_node_first_elem
) // self.element_size
count += self.first_node_size
# we calculate the size of the last node
finish = impl.GetChildMemberWithName('_M_finish')
last_node_address = finish.GetChildMemberWithName('_M_node').GetValueAsUnsigned(0)
count += (last_node_last_elem - last_node_first_elem) // self.element_size
finish = impl.GetChildMemberWithName("_M_finish")
last_node_address = finish.GetChildMemberWithName(
"_M_node"
).GetValueAsUnsigned(0)
count += (
last_node_last_elem - last_node_first_elem
) // self.element_size
# we calculate the size of the intermediate nodes
num_intermediate_nodes = (last_node_address - first_node_address - 1) // self.valobj.GetProcess().GetAddressByteSize()
num_intermediate_nodes = (
last_node_address - first_node_address - 1
) // self.valobj.GetProcess().GetAddressByteSize()
count += self.block_size * num_intermediate_nodes
self.count = count
except:
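The loop detection used by the list provider above is a variant of Floyd's slow/fast-pointer cycle detection. The classic form of the same idea on a plain Python linked list (the Node class is made up for illustration):

class Node:
    def __init__(self, value):
        self.value = value
        self.next = None


def has_loop(head):
    slow = fast = head
    while fast is not None and fast.next is not None:
        slow = slow.next           # advances one node per step
        fast = fast.next.next      # advances two nodes per step
        if slow is fast:           # the pointers can only meet inside a cycle
            return True
    return False


a, b, c = Node(1), Node(2), Node(3)
a.next, b.next = b, c
assert not has_loop(a)
c.next = b                         # introduce a cycle
assert has_loop(a)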

View File

@@ -15,7 +15,7 @@ import lldb.formatters.Logger
def make_string(F, L):
strval = ''
strval = ""
G = F.GetData().uint8
for X in range(L):
V = G[X]
@@ -24,6 +24,7 @@ def make_string(F, L):
strval = strval + chr(V % 256)
return '"' + strval + '"'
# if we ever care about big-endian, these two functions might need to change
@@ -32,7 +33,8 @@ def is_short_string(value):
def extract_short_size(value):
return ((value >> 1) % 256)
return (value >> 1) % 256
# some of the members of libc++ std::string are anonymous or have internal names that convey
# no external significance - we access them by index since this saves a name lookup that would add
@@ -67,13 +69,12 @@ def stdstring_SummaryProvider(valobj, dict):
error = lldb.SBError()
strval = data.GetString(error, 0)
if error.Fail():
return '<error:' + error.GetCString() + '>'
return "<error:" + error.GetCString() + ">"
else:
return '"' + strval + '"'
class stdvector_SynthProvider:
def __init__(self, valobj, dict):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
@@ -97,7 +98,7 @@ class stdvector_SynthProvider:
if start_val >= finish_val:
return 0
num_children = (finish_val - start_val)
num_children = finish_val - start_val
if (num_children % self.data_size) != 0:
return 0
else:
@@ -109,7 +110,7 @@ class stdvector_SynthProvider:
def get_child_index(self, name):
logger = lldb.formatters.Logger.Logger()
try:
return int(name.lstrip('[').rstrip(']'))
return int(name.lstrip("[").rstrip("]"))
except:
return -1
@@ -123,20 +124,22 @@ class stdvector_SynthProvider:
try:
offset = index * self.data_size
return self.start.CreateChildAtOffset(
'[' + str(index) + ']', offset, self.data_type)
"[" + str(index) + "]", offset, self.data_type
)
except:
return None
def update(self):
logger = lldb.formatters.Logger.Logger()
try:
self.start = self.valobj.GetChildMemberWithName('__begin_')
self.finish = self.valobj.GetChildMemberWithName('__end_')
self.start = self.valobj.GetChildMemberWithName("__begin_")
self.finish = self.valobj.GetChildMemberWithName("__end_")
# the purpose of this field is unclear, but it is the only field whose type is clearly T* for a vector<T>
# if this ends up not being correct, we can use the APIs to get at
# template arguments
data_type_finder = self.valobj.GetChildMemberWithName(
'__end_cap_').GetChildMemberWithName('__first_')
"__end_cap_"
).GetChildMemberWithName("__first_")
self.data_type = data_type_finder.GetType().GetPointeeType()
self.data_size = self.data_type.GetByteSize()
except:
@@ -145,28 +148,28 @@ class stdvector_SynthProvider:
def has_children(self):
return True
# Just an example: the actual summary is produced by a summary string:
# size=${svar%#}
def stdvector_SummaryProvider(valobj, dict):
prov = stdvector_SynthProvider(valobj, None)
return 'size=' + str(prov.num_children())
return "size=" + str(prov.num_children())
class stdlist_entry:
def __init__(self, entry):
logger = lldb.formatters.Logger.Logger()
self.entry = entry
def _next_impl(self):
logger = lldb.formatters.Logger.Logger()
return stdlist_entry(self.entry.GetChildMemberWithName('__next_'))
return stdlist_entry(self.entry.GetChildMemberWithName("__next_"))
def _prev_impl(self):
logger = lldb.formatters.Logger.Logger()
return stdlist_entry(self.entry.GetChildMemberWithName('__prev_'))
return stdlist_entry(self.entry.GetChildMemberWithName("__prev_"))
def _value_impl(self):
logger = lldb.formatters.Logger.Logger()
@@ -187,7 +190,6 @@ class stdlist_entry:
class stdlist_iterator:
def increment_node(self, node):
logger = lldb.formatters.Logger.Logger()
if node.is_null:
@@ -206,7 +208,7 @@ class stdlist_iterator:
def next(self):
logger = lldb.formatters.Logger.Logger()
node = self.increment_node(self.node)
if node is not None and node.sbvalue.IsValid() and not(node.is_null):
if node is not None and node.sbvalue.IsValid() and not (node.is_null):
self.node = node
return self.value()
else:
@@ -227,7 +229,6 @@ class stdlist_iterator:
class stdlist_SynthProvider:
def __init__(self, valobj, dict):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
@@ -235,7 +236,7 @@ class stdlist_SynthProvider:
def next_node(self, node):
logger = lldb.formatters.Logger.Logger()
return node.GetChildMemberWithName('__next_')
return node.GetChildMemberWithName("__next_")
def value(self, node):
logger = lldb.formatters.Logger.Logger()
@@ -293,14 +294,14 @@ class stdlist_SynthProvider:
current = current.next
if size > _list_capping_size:
return _list_capping_size
return (size - 1)
return size - 1
except:
return 0
def get_child_index(self, name):
logger = lldb.formatters.Logger.Logger()
try:
return int(name.lstrip('[').rstrip(']'))
return int(name.lstrip("[").rstrip("]"))
except:
return -1
@@ -317,10 +318,11 @@ class stdlist_SynthProvider:
# we do not return __value_ because then all our children would be named __value_
# we need to make a copy of __value__ with the right name -
# unfortunate
obj = current.GetChildMemberWithName('__value_')
obj = current.GetChildMemberWithName("__value_")
obj_data = obj.GetData()
return self.valobj.CreateValueFromData(
'[' + str(index) + ']', obj_data, self.data_type)
"[" + str(index) + "]", obj_data, self.data_type
)
except:
return None
@@ -339,10 +341,10 @@ class stdlist_SynthProvider:
logger = lldb.formatters.Logger.Logger()
self.count = None
try:
impl = self.valobj.GetChildMemberWithName('__end_')
impl = self.valobj.GetChildMemberWithName("__end_")
self.node_address = self.valobj.AddressOf().GetValueAsUnsigned(0)
self.head = impl.GetChildMemberWithName('__next_')
self.tail = impl.GetChildMemberWithName('__prev_')
self.head = impl.GetChildMemberWithName("__next_")
self.tail = impl.GetChildMemberWithName("__prev_")
self.data_type = self.extract_type()
self.data_size = self.data_type.GetByteSize()
except:
@@ -356,28 +358,25 @@ class stdlist_SynthProvider:
# size=${svar%#}
def stdlist_SummaryProvider(valobj, dict):
prov = stdlist_SynthProvider(valobj, None)
return 'size=' + str(prov.num_children())
return "size=" + str(prov.num_children())
# a tree node - this class makes the syntax in the actual iterator nicer
# to read and maintain
class stdmap_iterator_node:
def _left_impl(self):
logger = lldb.formatters.Logger.Logger()
return stdmap_iterator_node(
self.node.GetChildMemberWithName("__left_"))
return stdmap_iterator_node(self.node.GetChildMemberWithName("__left_"))
def _right_impl(self):
logger = lldb.formatters.Logger.Logger()
return stdmap_iterator_node(
self.node.GetChildMemberWithName("__right_"))
return stdmap_iterator_node(self.node.GetChildMemberWithName("__right_"))
def _parent_impl(self):
logger = lldb.formatters.Logger.Logger()
return stdmap_iterator_node(
self.node.GetChildMemberWithName("__parent_"))
return stdmap_iterator_node(self.node.GetChildMemberWithName("__parent_"))
def _value_impl(self):
logger = lldb.formatters.Logger.Logger()
@@ -402,17 +401,17 @@ class stdmap_iterator_node:
is_null = property(_null_impl, None)
sbvalue = property(_sbvalue_impl, None)
# a Python implementation of the tree iterator used by libc++
class stdmap_iterator:
def tree_min(self, x):
logger = lldb.formatters.Logger.Logger()
steps = 0
if x.is_null:
return None
while (not x.left.is_null):
while not x.left.is_null:
x = x.left
steps += 1
if steps > self.max_count:
@@ -424,7 +423,7 @@ class stdmap_iterator:
logger = lldb.formatters.Logger.Logger()
if x.is_null:
return None
while (not x.right.is_null):
while not x.right.is_null:
x = x.right
return x
@@ -441,7 +440,7 @@ class stdmap_iterator:
if not node.right.is_null:
return self.tree_min(node.right)
steps = 0
while (not self.tree_is_left_child(node)):
while not self.tree_is_left_child(node):
steps += 1
if steps > self.max_count:
logger >> "Returning None - we overflowed"
@@ -462,7 +461,7 @@ class stdmap_iterator:
def next(self):
logger = lldb.formatters.Logger.Logger()
node = self.increment_node(self.node)
if node is not None and node.sbvalue.IsValid() and not(node.is_null):
if node is not None and node.sbvalue.IsValid() and not (node.is_null):
self.node = node
return self.value()
else:
@@ -484,7 +483,6 @@ class stdmap_iterator:
class stdmap_SynthProvider:
def __init__(self, valobj, dict):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
@@ -499,8 +497,8 @@ class stdmap_SynthProvider:
# if this gets set to True, then we will merrily return None for
# any child from that moment on
self.garbage = False
self.tree = self.valobj.GetChildMemberWithName('__tree_')
self.root_node = self.tree.GetChildMemberWithName('__begin_node_')
self.tree = self.valobj.GetChildMemberWithName("__tree_")
self.root_node = self.tree.GetChildMemberWithName("__begin_node_")
# this data is either lazily-calculated, or cannot be inferred at this moment
# we still need to mark it as None, meaning "please set me ASAP"
self.data_type = None
@@ -521,8 +519,12 @@ class stdmap_SynthProvider:
def num_children_impl(self):
logger = lldb.formatters.Logger.Logger()
try:
return self.valobj.GetChildMemberWithName('__tree_').GetChildMemberWithName(
'__pair3_').GetChildMemberWithName('__first_').GetValueAsUnsigned()
return (
self.valobj.GetChildMemberWithName("__tree_")
.GetChildMemberWithName("__pair3_")
.GetChildMemberWithName("__first_")
.GetValueAsUnsigned()
)
except:
return 0
@@ -535,10 +537,10 @@ class stdmap_SynthProvider:
if self.num_children() == 0:
return False
deref = self.root_node.Dereference()
if not(deref.IsValid()):
if not (deref.IsValid()):
return False
value = deref.GetChildMemberWithName('__value_')
if not(value.IsValid()):
value = deref.GetChildMemberWithName("__value_")
if not (value.IsValid()):
return False
self.data_type = value.GetType()
self.data_size = self.data_type.GetByteSize()
@@ -554,15 +556,15 @@ class stdmap_SynthProvider:
fields_count = node_type.GetNumberOfFields()
for i in range(fields_count):
field = node_type.GetFieldAtIndex(i)
if field.GetName() == '__value_':
if field.GetName() == "__value_":
self.skip_size = field.GetOffsetInBytes()
break
return (self.skip_size is not None)
return self.skip_size is not None
def get_child_index(self, name):
logger = lldb.formatters.Logger.Logger()
try:
return int(name.lstrip('[').rstrip(']'))
return int(name.lstrip("[").rstrip("]"))
except:
return -1
@@ -577,60 +579,70 @@ class stdmap_SynthProvider:
logger >> "Returning None since this tree is garbage"
return None
try:
iterator = stdmap_iterator(
self.root_node, max_count=self.num_children())
iterator = stdmap_iterator(self.root_node, max_count=self.num_children())
# the debug info for libc++ std::map is such that __begin_node_ has a very nice and useful type
# out of which we can grab the information we need - every other node has a less informative
# type which omits all value information and only contains housekeeping information for the RB tree
# hence, we need to know if we are at a node != 0, so that we can
# still get at the data
need_to_skip = (index > 0)
need_to_skip = index > 0
current = iterator.advance(index)
if current is None:
logger >> "Tree is garbage - returning None"
self.garbage = True
return None
if self.get_data_type():
if not(need_to_skip):
if not (need_to_skip):
current = current.Dereference()
obj = current.GetChildMemberWithName('__value_')
obj = current.GetChildMemberWithName("__value_")
obj_data = obj.GetData()
# make sure we have a valid offset for the next items
self.get_value_offset(current)
# we do not return __value_ because then we would end up with a child named
# __value_ instead of [0]
return self.valobj.CreateValueFromData(
'[' + str(index) + ']', obj_data, self.data_type)
"[" + str(index) + "]", obj_data, self.data_type
)
else:
# FIXME we need to have accessed item 0 before accessing
# any other item!
if self.skip_size is None:
logger >> "You asked for item > 0 before asking for item == 0, I will fetch 0 now then retry"
(
logger
>> "You asked for item > 0 before asking for item == 0, I will fetch 0 now then retry"
)
if self.get_child_at_index(0):
return self.get_child_at_index(index)
else:
logger >> "item == 0 could not be found. sorry, nothing can be done here."
(
logger
>> "item == 0 could not be found. sorry, nothing can be done here."
)
return None
return current.CreateChildAtOffset(
'[' + str(index) + ']', self.skip_size, self.data_type)
"[" + str(index) + "]", self.skip_size, self.data_type
)
else:
logger >> "Unable to infer data-type - returning None (should mark tree as garbage here?)"
(
logger
>> "Unable to infer data-type - returning None (should mark tree as garbage here?)"
)
return None
except Exception as err:
logger >> "Hit an exception: " + str(err)
return None
# Just an example: the actual summary is produced by a summary string:
# size=${svar%#}
def stdmap_SummaryProvider(valobj, dict):
prov = stdmap_SynthProvider(valobj, None)
return 'size=' + str(prov.num_children())
return "size=" + str(prov.num_children())
class stddeque_SynthProvider:
def __init__(self, valobj, d):
logger = lldb.formatters.Logger.Logger()
logger.write("init")
@@ -643,8 +655,8 @@ class stddeque_SynthProvider:
self.block_size = -1
self.element_size = -1
logger.write(
"block_size=%d, element_size=%d" %
(self.block_size, self.element_size))
"block_size=%d, element_size=%d" % (self.block_size, self.element_size)
)
def find_block_size(self):
# in order to use the deque we must have the block size, or else
@@ -678,7 +690,7 @@ class stddeque_SynthProvider:
def get_child_index(self, name):
logger = lldb.formatters.Logger.Logger()
try:
return int(name.lstrip('[').rstrip(']'))
return int(name.lstrip("[").rstrip("]"))
except:
return -1
@@ -693,8 +705,9 @@ class stddeque_SynthProvider:
i, j = divmod(self.start + index, self.block_size)
return self.first.CreateValueFromExpression(
'[' + str(index) + ']', '*(*(%s + %d) + %d)' %
(self.map_begin.get_expr_path(), i, j))
"[" + str(index) + "]",
"*(*(%s + %d) + %d)" % (self.map_begin.get_expr_path(), i, j),
)
except:
return None
@@ -722,30 +735,28 @@ class stddeque_SynthProvider:
# one, and the 'size' element gives the number of elements
# in the deque.
count = self._get_value_of_compressed_pair(
self.valobj.GetChildMemberWithName('__size_'))
self.valobj.GetChildMemberWithName("__size_")
)
# give up now if we can't access memory reliably
if self.block_size < 0:
logger.write("block_size < 0")
return
map_ = self.valobj.GetChildMemberWithName('__map_')
start = self.valobj.GetChildMemberWithName(
'__start_').GetValueAsUnsigned(0)
first = map_.GetChildMemberWithName('__first_')
map_ = self.valobj.GetChildMemberWithName("__map_")
start = self.valobj.GetChildMemberWithName("__start_").GetValueAsUnsigned(0)
first = map_.GetChildMemberWithName("__first_")
map_first = first.GetValueAsUnsigned(0)
self.map_begin = map_.GetChildMemberWithName(
'__begin_')
self.map_begin = map_.GetChildMemberWithName("__begin_")
map_begin = self.map_begin.GetValueAsUnsigned(0)
map_end = map_.GetChildMemberWithName(
'__end_').GetValueAsUnsigned(0)
map_end = map_.GetChildMemberWithName("__end_").GetValueAsUnsigned(0)
map_endcap = self._get_value_of_compressed_pair(
map_.GetChildMemberWithName( '__end_cap_'))
map_.GetChildMemberWithName("__end_cap_")
)
# check consistency
if not map_first <= map_begin <= map_end <= map_endcap:
logger.write("map pointers are not monotonic")
return
total_rows, junk = divmod(
map_endcap - map_first, self.pointer_size)
total_rows, junk = divmod(map_endcap - map_first, self.pointer_size)
if junk:
logger.write("endcap-first doesnt align correctly")
return
@@ -759,8 +770,8 @@ class stddeque_SynthProvider:
return
logger.write(
"update success: count=%r, start=%r, first=%r" %
(count, start, first))
"update success: count=%r, start=%r, first=%r" % (count, start, first)
)
# if consistent, save all we really need:
self.count = count
self.start = start
@@ -774,12 +785,11 @@ class stddeque_SynthProvider:
class stdsharedptr_SynthProvider:
def __init__(self, valobj, d):
logger = lldb.formatters.Logger.Logger()
logger.write("init")
self.valobj = valobj
#self.element_ptr_type = self.valobj.GetType().GetTemplateArgumentType(0).GetPointerType()
# self.element_ptr_type = self.valobj.GetType().GetTemplateArgumentType(0).GetPointerType()
self.ptr = None
self.cntrl = None
process = valobj.GetProcess()
@@ -809,32 +819,50 @@ class stdsharedptr_SynthProvider:
if self.cntrl is None:
count = 0
else:
count = 1 + \
self.cntrl.GetChildMemberWithName('__shared_owners_').GetValueAsSigned()
count = (
1
+ self.cntrl.GetChildMemberWithName(
"__shared_owners_"
).GetValueAsSigned()
)
return self.valobj.CreateValueFromData(
"count", lldb.SBData.CreateDataFromUInt64Array(
self.endianness, self.pointer_size, [count]), self.count_type)
"count",
lldb.SBData.CreateDataFromUInt64Array(
self.endianness, self.pointer_size, [count]
),
self.count_type,
)
if index == 2:
if self.cntrl is None:
count = 0
else:
count = 1 + \
self.cntrl.GetChildMemberWithName('__shared_weak_owners_').GetValueAsSigned()
count = (
1
+ self.cntrl.GetChildMemberWithName(
"__shared_weak_owners_"
).GetValueAsSigned()
)
return self.valobj.CreateValueFromData(
"weak_count", lldb.SBData.CreateDataFromUInt64Array(
self.endianness, self.pointer_size, [count]), self.count_type)
"weak_count",
lldb.SBData.CreateDataFromUInt64Array(
self.endianness, self.pointer_size, [count]
),
self.count_type,
)
return None
def update(self):
logger = lldb.formatters.Logger.Logger()
self.ptr = self.valobj.GetChildMemberWithName(
'__ptr_') # .Cast(self.element_ptr_type)
cntrl = self.valobj.GetChildMemberWithName('__cntrl_')
"__ptr_"
) # .Cast(self.element_ptr_type)
cntrl = self.valobj.GetChildMemberWithName("__cntrl_")
if cntrl.GetValueAsUnsigned(0):
self.cntrl = cntrl.Dereference()
else:
self.cntrl = None
# we can use two different categories for old and new formatters - type names are different enough that we should make no confusion
# talking with libc++ developer: "std::__1::class_name is set in stone
# until we decide to change the ABI. That shouldn't happen within a 5 year
@@ -843,29 +871,41 @@ class stdsharedptr_SynthProvider:
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
'type summary add -F libcxx.stdstring_SummaryProvider "std::__1::string" -w libcxx')
'type summary add -F libcxx.stdstring_SummaryProvider "std::__1::string" -w libcxx'
)
debugger.HandleCommand(
'type summary add -F libcxx.stdstring_SummaryProvider "std::__1::basic_string<char, class std::__1::char_traits<char>, class std::__1::allocator<char> >" -w libcxx')
'type summary add -F libcxx.stdstring_SummaryProvider "std::__1::basic_string<char, class std::__1::char_traits<char>, class std::__1::allocator<char> >" -w libcxx'
)
debugger.HandleCommand(
'type synthetic add -l libcxx.stdvector_SynthProvider -x "^(std::__1::)vector<.+>$" -w libcxx')
'type synthetic add -l libcxx.stdvector_SynthProvider -x "^(std::__1::)vector<.+>$" -w libcxx'
)
debugger.HandleCommand(
'type summary add -F libcxx.stdvector_SummaryProvider -e -x "^(std::__1::)vector<.+>$" -w libcxx')
'type summary add -F libcxx.stdvector_SummaryProvider -e -x "^(std::__1::)vector<.+>$" -w libcxx'
)
debugger.HandleCommand(
'type synthetic add -l libcxx.stdlist_SynthProvider -x "^(std::__1::)list<.+>$" -w libcxx')
'type synthetic add -l libcxx.stdlist_SynthProvider -x "^(std::__1::)list<.+>$" -w libcxx'
)
debugger.HandleCommand(
'type summary add -F libcxx.stdlist_SummaryProvider -e -x "^(std::__1::)list<.+>$" -w libcxx')
'type summary add -F libcxx.stdlist_SummaryProvider -e -x "^(std::__1::)list<.+>$" -w libcxx'
)
debugger.HandleCommand(
'type synthetic add -l libcxx.stdmap_SynthProvider -x "^(std::__1::)map<.+> >$" -w libcxx')
'type synthetic add -l libcxx.stdmap_SynthProvider -x "^(std::__1::)map<.+> >$" -w libcxx'
)
debugger.HandleCommand(
'type summary add -F libcxx.stdmap_SummaryProvider -e -x "^(std::__1::)map<.+> >$" -w libcxx')
'type summary add -F libcxx.stdmap_SummaryProvider -e -x "^(std::__1::)map<.+> >$" -w libcxx'
)
debugger.HandleCommand("type category enable libcxx")
debugger.HandleCommand(
'type synthetic add -l libcxx.stddeque_SynthProvider -x "^(std::__1::)deque<.+>$" -w libcxx')
'type synthetic add -l libcxx.stddeque_SynthProvider -x "^(std::__1::)deque<.+>$" -w libcxx'
)
debugger.HandleCommand(
'type synthetic add -l libcxx.stdsharedptr_SynthProvider -x "^(std::__1::)shared_ptr<.+>$" -w libcxx')
'type synthetic add -l libcxx.stdsharedptr_SynthProvider -x "^(std::__1::)shared_ptr<.+>$" -w libcxx'
)
# turns out the structs look the same, so weak_ptr can be handled the same!
debugger.HandleCommand(
'type synthetic add -l libcxx.stdsharedptr_SynthProvider -x "^(std::__1::)weak_ptr<.+>$" -w libcxx')
'type synthetic add -l libcxx.stdsharedptr_SynthProvider -x "^(std::__1::)weak_ptr<.+>$" -w libcxx'
)
_map_capping_size = 255
_list_capping_size = 255
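Both deque providers in this diff locate element index by splitting start + index into a map row and an offset inside that block. A worked example using libstdc++'s 512-byte block rule (quoted in the StdDequeSynthProvider comment earlier); the numbers are made up:

element_size = 4
# _GLIBCXX_DEQUE_BUF_SIZE rule from the libstdc++ provider above
block_size = 512 // element_size if element_size < 512 else 1
start = 5        # hypothetical offset of the first element inside its block
index = 300      # the synthetic child being requested

i, j = divmod(start + index, block_size)
assert block_size == 128
assert (i, j) == (2, 49)     # third block of the map, 50th slot inside it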

View File

@@ -7,57 +7,60 @@
import sys
def is_message_type(t, internal_dict):
for base in t.get_bases_array():
if base.GetName() == "Message":
return True
return False
for base in t.get_bases_array():
if base.GetName() == "Message":
return True
return False
def message_summary(value, internal_dict):
# Could have used a summary string as well. All the work is done by the child
# provider.
return "Message"
# Could have used a summary string as well. All the work is done by the child
# provider.
return "Message"
class MessageChildProvider:
def __init__(self, value, internal_dict):
self.value = value
self.synthetic_children = self._analyze_children(value)
def __init__(self, value, internal_dict):
self.value = value
self.synthetic_children = self._analyze_children(value)
def has_children(self):
return self.num_children() > 0
def has_children(self):
return self.num_children() > 0
def num_children(self):
return len(self.synthetic_children)
def num_children(self):
return len(self.synthetic_children)
def get_child_index(self, name):
for index, child in enumerate(self.synthetic_children):
if child.GetName() == name:
return index
return None
def get_child_index(self, name):
for index, child in enumerate(self.synthetic_children):
if child.GetName() == name:
return index
return None
def get_child_at_index(self, index):
return self.synthetic_children[index]
def get_child_at_index(self, index):
return self.synthetic_children[index]
def _rename_sbvalue(self, value):
# We want to display the field with its original name without a trailing
# underscore. So we create a new SBValue with the same type and address but
# a different name.
name = value.GetName()
assert name.endswith("_")
new_name = name[:-1]
return value.CreateValueFromAddress(new_name, value.GetLoadAddress(),
value.GetType())
def _analyze_children(self, value):
result = []
for i in range(value.GetNumChildren()):
child = value.GetChildAtIndex(i)
child_name = child.GetName()
if child_name.startswith("_"):
continue # Internal field, skip
# Normal field. Check presence bit.
presence_bit = value.GetChildMemberWithName("_has_" + child_name)
if presence_bit.GetValueAsUnsigned() != 0:
result.append(self._rename_sbvalue(child))
return result
def _rename_sbvalue(self, value):
# We want to display the field with its original name without a trailing
# underscore. So we create a new SBValue with the same type and address but
# a different name.
name = value.GetName()
assert name.endswith("_")
new_name = name[:-1]
return value.CreateValueFromAddress(
new_name, value.GetLoadAddress(), value.GetType()
)
def _analyze_children(self, value):
result = []
for i in range(value.GetNumChildren()):
child = value.GetChildAtIndex(i)
child_name = child.GetName()
if child_name.startswith("_"):
continue # Internal field, skip
# Normal field. Check presence bit.
presence_bit = value.GetChildMemberWithName("_has_" + child_name)
if presence_bit.GetValueAsUnsigned() != 0:
result.append(self._rename_sbvalue(child))
return result
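A hedged sketch of one way such a provider could be hooked up from a module's __lldb_init_module; the regular expression and the module name my_formatters are made-up examples and would have to match the generated message class names:

def __lldb_init_module(debugger, internal_dict):
    # "my_formatters" and the regular expression below are hypothetical
    debugger.HandleCommand(
        'type synthetic add -x "^mynamespace::.+Message$" '
        "-l my_formatters.MessageChildProvider"
    )
    debugger.HandleCommand(
        'type summary add -x "^mynamespace::.+Message$" '
        "-F my_formatters.message_summary"
    )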

View File

@@ -4,7 +4,6 @@ _map_capping_size = 255
class libcxx_hash_table_SynthProvider:
def __init__(self, valobj, dict):
self.valobj = valobj
self.num_elements = None
@@ -34,23 +33,38 @@ class libcxx_hash_table_SynthProvider:
#
# We will calculate other values about the map because they will be useful for the summary.
#
table = self.valobj.GetChildMemberWithName('__table_')
table = self.valobj.GetChildMemberWithName("__table_")
bl_ptr = table.GetChildMemberWithName(
'__bucket_list_').GetChildMemberWithName('__ptr_')
"__bucket_list_"
).GetChildMemberWithName("__ptr_")
self.bucket_array_ptr = bl_ptr.GetChildMemberWithName(
'__first_').GetValueAsUnsigned(0)
self.bucket_count = bl_ptr.GetChildMemberWithName('__second_').GetChildMemberWithName(
'__data_').GetChildMemberWithName('__first_').GetValueAsUnsigned(0)
"__first_"
).GetValueAsUnsigned(0)
self.bucket_count = (
bl_ptr.GetChildMemberWithName("__second_")
.GetChildMemberWithName("__data_")
.GetChildMemberWithName("__first_")
.GetValueAsUnsigned(0)
)
logger >> "Bucket count = %r" % self.bucket_count
self.begin_ptr = table.GetChildMemberWithName('__p1_').GetChildMemberWithName(
'__first_').GetChildMemberWithName('__next_')
self.begin_ptr = (
table.GetChildMemberWithName("__p1_")
.GetChildMemberWithName("__first_")
.GetChildMemberWithName("__next_")
)
self.num_elements = table.GetChildMemberWithName(
'__p2_').GetChildMemberWithName('__first_').GetValueAsUnsigned(0)
self.max_load_factor = table.GetChildMemberWithName(
'__p3_').GetChildMemberWithName('__first_').GetValueAsUnsigned(0)
self.num_elements = (
table.GetChildMemberWithName("__p2_")
.GetChildMemberWithName("__first_")
.GetValueAsUnsigned(0)
)
self.max_load_factor = (
table.GetChildMemberWithName("__p3_")
.GetChildMemberWithName("__first_")
.GetValueAsUnsigned(0)
)
logger >> "Num elements = %r" % self.num_elements
# save the pointers as we get them
@@ -78,7 +92,7 @@ class libcxx_hash_table_SynthProvider:
def get_child_index(self, name):
logger = lldb.formatters.Logger.Logger()
try:
return int(name.lstrip('[').rstrip(']'))
return int(name.lstrip("[").rstrip("]"))
except:
return -1
@@ -91,8 +105,7 @@ class libcxx_hash_table_SynthProvider:
return None
# extend
logger >> " : cache size starts with %d elements" % len(
self.elements_cache)
logger >> " : cache size starts with %d elements" % len(self.elements_cache)
while index >= len(self.elements_cache):
# if we hit the end before we get the index, give up:
if not self.next_element:
@@ -101,24 +114,23 @@ class libcxx_hash_table_SynthProvider:
node = self.next_element.Dereference()
value = node.GetChildMemberWithName('__value_')
hash_value = node.GetChildMemberWithName(
'__hash_').GetValueAsUnsigned()
value = node.GetChildMemberWithName("__value_")
hash_value = node.GetChildMemberWithName("__hash_").GetValueAsUnsigned()
self.elements_cache.append((value, hash_value))
self.next_element = node.GetChildMemberWithName('__next_')
self.next_element = node.GetChildMemberWithName("__next_")
if not self.next_element.GetValueAsUnsigned(0):
self.next_element = None
# hit the index! so we have the value
logger >> " : cache size ends with %d elements" % len(
self.elements_cache)
logger >> " : cache size ends with %d elements" % len(self.elements_cache)
value, hash_value = self.elements_cache[index]
return self.valobj.CreateValueFromData(
'[%d] <hash %d>' %
(index, hash_value), value.GetData(), value.GetType())
"[%d] <hash %d>" % (index, hash_value), value.GetData(), value.GetType()
)
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
'type synthetic add -l unordered_multi.libcxx_hash_table_SynthProvider -x "^(std::__1::)unordered_(multi)?(map|set)<.+> >$" -w libcxx')
'type synthetic add -l unordered_multi.libcxx_hash_table_SynthProvider -x "^(std::__1::)unordered_(multi)?(map|set)<.+> >$" -w libcxx'
)
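get_child_at_index above extends a cache while walking the node chain, so earlier elements are never re-traversed. The same pattern on a plain Python linked list (the classes here are made up for illustration):

class _Node:
    def __init__(self, value, next_node=None):
        self.value = value
        self.next = next_node


class CachedWalker:
    def __init__(self, head):
        self.next_element = head
        self.cache = []

    def child_at(self, index):
        while index >= len(self.cache):
            if self.next_element is None:
                return None           # ran off the end before reaching index
            self.cache.append(self.next_element.value)
            self.next_element = self.next_element.next
        return self.cache[index]


chain = _Node(10, _Node(20, _Node(30)))
walker = CachedWalker(chain)
assert walker.child_at(2) == 30
assert walker.child_at(0) == 10       # served from the cache, no re-walk
assert walker.child_at(5) is None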

View File

@@ -7,10 +7,11 @@ import sys
def find_lldb_root():
lldb_root = os.path.realpath(
os.path.dirname(inspect.getfile(inspect.currentframe())))
os.path.dirname(inspect.getfile(inspect.currentframe()))
)
while True:
parent = os.path.dirname(lldb_root)
if parent == lldb_root: # dirname('/') == '/'
if parent == lldb_root: # dirname('/') == '/'
raise Exception("use_lldb_suite_root.py not found")
lldb_root = parent
@@ -18,6 +19,7 @@ def find_lldb_root():
if os.path.isfile(test_path):
return lldb_root
# lldbsuite.lldb_root refers to the root of the git/svn source checkout
lldb_root = find_lldb_root()

View File

@@ -19,24 +19,22 @@ def _encoded_write(old_write, encoding):
s = s.decode(encoding, "replace")
# Filter unreadable characters, Python 3 is stricter than python 2 about them.
import re
s = re.sub(r'[^\x00-\x7f]',r' ',s)
s = re.sub(r"[^\x00-\x7f]", r" ", s)
return old_write(s)
return impl
'''
"""
Create a Text I/O file object that can be written to with either unicode strings
or byte strings.
'''
"""
def open(
file,
encoding,
mode='r',
buffering=-1,
errors=None,
newline=None,
closefd=True):
file, encoding, mode="r", buffering=-1, errors=None, newline=None, closefd=True
):
wrapped_file = io.open(
file,
mode=mode,
@@ -44,7 +42,8 @@ def open(
encoding=encoding,
errors=errors,
newline=newline,
closefd=closefd)
new_write = _encoded_write(getattr(wrapped_file, 'write'), encoding)
setattr(wrapped_file, 'write', new_write)
closefd=closefd,
)
new_write = _encoded_write(getattr(wrapped_file, "write"), encoding)
setattr(wrapped_file, "write", new_write)
return wrapped_file
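A hedged usage sketch for the wrapper above, assuming the module is importable as encoded_file; the file path is made up. Text and byte strings can both be written through the same handle, as the docstring describes:

import encoded_file   # hypothetical import name for this module

f = encoded_file.open("/tmp/example-log.txt", encoding="utf-8", mode="w")
f.write("plain unicode text\n")
f.write(b"byte string text\n")   # bytes are decoded with "replace" before writing
f.close()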

View File

@@ -1,15 +1,13 @@
import inspect
def requires_self(func):
func_argc = len(inspect.getfullargspec(func).args)
if func_argc == 0 or (
getattr(
func,
'im_self',
None) is not None) or (
hasattr(
func,
'__self__')):
if (
func_argc == 0
or (getattr(func, "im_self", None) is not None)
or (hasattr(func, "__self__"))
):
return False
else:
return True
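A minimal sketch of how the predicate above behaves; the import path is an assumption:

# Hypothetical usage of requires_self (import path assumed).
from lldbsuite.support.funcutils import requires_self

class Foo:
    def method(self):
        pass

def free_function(x):
    return x

print(requires_self(free_function))  # True: takes an argument and is not bound
print(requires_self(Foo().method))   # False: bound methods expose __self__
print(requires_self(lambda: None))   # False: takes no arguments at all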

View File

@@ -22,8 +22,7 @@ def is_compiler_clang_with_gmodules(compiler_path):
else:
# Check the compiler help for the -gmodules option.
clang_help = os.popen("%s --help" % compiler_path).read()
return GMODULES_HELP_REGEX.search(
clang_help, re.DOTALL) is not None
return GMODULES_HELP_REGEX.search(clang_help, re.DOTALL) is not None
GMODULES_SUPPORT_MAP[compiler_path] = _gmodules_supported_internal()
return GMODULES_SUPPORT_MAP[compiler_path]

View File

@@ -2,15 +2,16 @@ import binascii
import shlex
import subprocess
def get_command_output(command):
try:
return subprocess.check_output(
command,
shell=True,
universal_newlines=True).rstrip()
command, shell=True, universal_newlines=True
).rstrip()
except subprocess.CalledProcessError as e:
return e.output
def bitcast_to_string(b: bytes) -> str:
"""
Take a bytes object and return a string. The returned string contains the
@@ -19,6 +20,7 @@ def bitcast_to_string(b: bytes) -> str:
"""
return b.decode("latin1")
def bitcast_to_bytes(s: str) -> bytes:
"""
Take a string and return a bytes object. The returned object contains the
@@ -27,14 +29,17 @@ def bitcast_to_bytes(s: str) -> bytes:
"""
return s.encode("latin1")
def unhexlify(hexstr):
"""Hex-decode a string. The result is always a string."""
return bitcast_to_string(binascii.unhexlify(hexstr))
def hexlify(data):
"""Hex-encode string data. The result if always a string."""
return bitcast_to_string(binascii.hexlify(bitcast_to_bytes(data)))
# TODO: Replace this with `shlex.join` when minimum Python version is >= 3.8
def join_for_shell(split_command):
return " ".join([shlex.quote(part) for part in split_command])

View File

@@ -23,36 +23,42 @@ from optparse import OptionParser
benches = [
# Measure startup delays creating a target, setting a breakpoint, and run
# to breakpoint stop.
'./dotest.py -v +b %E %X -n -p TestStartupDelays.py',
"./dotest.py -v +b %E %X -n -p TestStartupDelays.py",
# Measure 'frame variable' response after stopping at a breakpoint.
'./dotest.py -v +b %E %X -n -p TestFrameVariableResponse.py',
"./dotest.py -v +b %E %X -n -p TestFrameVariableResponse.py",
# Measure stepping speed after stopping at a breakpoint.
'./dotest.py -v +b %E %X -n -p TestSteppingSpeed.py',
"./dotest.py -v +b %E %X -n -p TestSteppingSpeed.py",
# Measure expression cmd response with a simple custom executable program.
'./dotest.py +b -n -p TestExpressionCmd.py',
"./dotest.py +b -n -p TestExpressionCmd.py",
# Attach to a spawned process then run disassembly benchmarks.
'./dotest.py -v +b -n %E -p TestDoAttachThenDisassembly.py'
"./dotest.py -v +b -n %E -p TestDoAttachThenDisassembly.py",
]
def main():
"""Read the items from 'benches' and run the command line one by one."""
parser = OptionParser(usage="""\
parser = OptionParser(
usage="""\
%prog [options]
Run the standard benchmarks defined in the list named 'benches'.\
""")
parser.add_option('-e', '--executable',
type='string', action='store',
dest='exe',
help='The target program launched by lldb.')
parser.add_option('-x', '--breakpoint-spec',
type='string', action='store',
dest='break_spec',
help='The lldb breakpoint spec for the target program.')
"""
)
parser.add_option(
"-e",
"--executable",
type="string",
action="store",
dest="exe",
help="The target program launched by lldb.",
)
parser.add_option(
"-x",
"--breakpoint-spec",
type="string",
action="store",
dest="break_spec",
help="The lldb breakpoint spec for the target program.",
)
# Parses the options, if any.
opts, args = parser.parse_args()
@@ -60,14 +66,15 @@ Run the standard benchmarks defined in the list named 'benches'.\
print("Starting bench runner....")
for item in benches:
command = item.replace('%E',
'-e "%s"' % opts.exe if opts.exe else '')
command = command.replace('%X', '-x "%s"' %
opts.break_spec if opts.break_spec else '')
command = item.replace("%E", '-e "%s"' % opts.exe if opts.exe else "")
command = command.replace(
"%X", '-x "%s"' % opts.break_spec if opts.break_spec else ""
)
print("Running %s" % (command))
os.system(command)
print("Bench runner done.")
if __name__ == '__main__':
if __name__ == "__main__":
main()

View File

@@ -7,10 +7,12 @@ factory method below hands out builders based on the given platform.
def get_builder(platform):
"""Returns a Builder instance for the given platform."""
if platform == 'darwin':
from .darwin import BuilderDarwin
return BuilderDarwin()
"""Returns a Builder instance for the given platform."""
if platform == "darwin":
from .darwin import BuilderDarwin
from .builder import Builder
return Builder()
return BuilderDarwin()
from .builder import Builder
return Builder()

View File

@@ -47,19 +47,31 @@ class Builder:
# Construct the base make invocation.
lldb_test = os.environ["LLDB_TEST"]
if not (lldb_test and configuration.test_build_dir and test_subdir
and test_name and (not os.path.isabs(test_subdir))):
if not (
lldb_test
and configuration.test_build_dir
and test_subdir
and test_name
and (not os.path.isabs(test_subdir))
):
raise Exception("Could not derive test directories")
build_dir = os.path.join(configuration.test_build_dir, test_subdir,
test_name)
build_dir = os.path.join(configuration.test_build_dir, test_subdir, test_name)
src_dir = os.path.join(configuration.test_src_root, test_subdir)
# This is a bit of a hack to make inline testcases work.
makefile = os.path.join(src_dir, "Makefile")
if not os.path.isfile(makefile):
makefile = os.path.join(build_dir, "Makefile")
return [
make, "VPATH=" + src_dir, "-C", build_dir, "-I", src_dir, "-I",
os.path.join(lldb_test, "make"), "-f", makefile
make,
"VPATH=" + src_dir,
"-C",
build_dir,
"-I",
src_dir,
"-I",
os.path.join(lldb_test, "make"),
"-f",
makefile,
]
def getCmdLine(self, d):
@@ -76,7 +88,7 @@ class Builder:
append_vars = ["CFLAGS", "CFLAGS_EXTRAS", "LD_EXTRAS"]
if k in append_vars and k in os.environ:
v = os.environ[k] + " " + v
return '%s=%s' % (k, v)
return "%s=%s" % (k, v)
cmdline = [setOrAppendVariable(k, v) for k, v in list(d.items())]
@@ -98,7 +110,7 @@ class Builder:
if not cc and configuration.compiler:
cc = configuration.compiler
if cc:
return ["CC=\"%s\"" % cc]
return ['CC="%s"' % cc]
return []
def getSDKRootSpec(self):
@@ -116,17 +128,23 @@ class Builder:
module cache used for the make system.
"""
if configuration.clang_module_cache_dir:
return ["CLANG_MODULE_CACHE_DIR={}".format(
configuration.clang_module_cache_dir)]
return [
"CLANG_MODULE_CACHE_DIR={}".format(configuration.clang_module_cache_dir)
]
return []
def getLibCxxArgs(self):
if configuration.libcxx_include_dir and configuration.libcxx_library_dir:
libcpp_args = ["LIBCPP_INCLUDE_DIR={}".format(configuration.libcxx_include_dir),
"LIBCPP_LIBRARY_DIR={}".format(configuration.libcxx_library_dir)]
libcpp_args = [
"LIBCPP_INCLUDE_DIR={}".format(configuration.libcxx_include_dir),
"LIBCPP_LIBRARY_DIR={}".format(configuration.libcxx_library_dir),
]
if configuration.libcxx_include_target_dir:
libcpp_args.append("LIBCPP_INCLUDE_TARGET_DIR={}".format(
configuration.libcxx_include_target_dir))
libcpp_args.append(
"LIBCPP_INCLUDE_TARGET_DIR={}".format(
configuration.libcxx_include_target_dir
)
)
return libcpp_args
return []
@@ -141,19 +159,34 @@ class Builder:
return ["MAKE_DSYM=NO", "MAKE_GMODULES=YES"]
return None
def getBuildCommand(self, debug_info, architecture=None, compiler=None,
dictionary=None, testdir=None, testname=None, make_targets=None):
def getBuildCommand(
self,
debug_info,
architecture=None,
compiler=None,
dictionary=None,
testdir=None,
testname=None,
make_targets=None,
):
debug_info_args = self._getDebugInfoArgs(debug_info)
if debug_info_args is None:
return None
if make_targets is None:
make_targets = ["all"]
command_parts = [
self.getMake(testdir, testname), debug_info_args, make_targets,
self.getArchCFlags(architecture), self.getArchSpec(architecture),
self.getCCSpec(compiler), self.getExtraMakeArgs(),
self.getSDKRootSpec(), self.getModuleCacheSpec(),
self.getLibCxxArgs(), self.getCmdLine(dictionary)]
self.getMake(testdir, testname),
debug_info_args,
make_targets,
self.getArchCFlags(architecture),
self.getArchSpec(architecture),
self.getCCSpec(compiler),
self.getExtraMakeArgs(),
self.getSDKRootSpec(),
self.getModuleCacheSpec(),
self.getLibCxxArgs(),
self.getCmdLine(dictionary),
]
command = list(itertools.chain(*command_parts))
return command

View File

@@ -21,7 +21,7 @@ def get_os_env_from_platform(platform):
def get_os_from_sdk(sdk):
return sdk[:sdk.find('.')], ""
return sdk[: sdk.find(".")], ""
def get_os_and_env():
@@ -61,7 +61,7 @@ def get_triple_str(arch, vendor, os, version, env):
component = [arch, vendor, os + version]
if env:
components.append(env)
return '-'.join(component)
return "-".join(component)
class BuilderDarwin(Builder):
@@ -77,32 +77,31 @@ class BuilderDarwin(Builder):
args = dict()
if configuration.dsymutil:
args['DSYMUTIL'] = configuration.dsymutil
args["DSYMUTIL"] = configuration.dsymutil
if configuration.apple_sdk and 'internal' in configuration.apple_sdk:
if configuration.apple_sdk and "internal" in configuration.apple_sdk:
sdk_root = lldbutil.get_xcode_sdk_root(configuration.apple_sdk)
if sdk_root:
private_frameworks = os.path.join(sdk_root, 'System',
'Library',
'PrivateFrameworks')
args['FRAMEWORK_INCLUDES'] = '-F{}'.format(private_frameworks)
private_frameworks = os.path.join(
sdk_root, "System", "Library", "PrivateFrameworks"
)
args["FRAMEWORK_INCLUDES"] = "-F{}".format(private_frameworks)
operating_system, env = get_os_and_env()
if operating_system and operating_system != "macosx":
builder_dir = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.dirname(builder_dir)
if env == "simulator":
entitlements_file = 'entitlements-simulator.plist'
entitlements_file = "entitlements-simulator.plist"
else:
entitlements_file = 'entitlements.plist'
entitlements = os.path.join(test_dir, 'make', entitlements_file)
args['CODESIGN'] = 'codesign --entitlements {}'.format(
entitlements)
entitlements_file = "entitlements.plist"
entitlements = os.path.join(test_dir, "make", entitlements_file)
args["CODESIGN"] = "codesign --entitlements {}".format(entitlements)
else:
args['CODESIGN'] = 'codesign'
args["CODESIGN"] = "codesign"
# Return extra args as a formatted string.
return ['{}={}'.format(key, value) for key, value in args.items()]
return ["{}={}".format(key, value) for key, value in args.items()]
def getArchCFlags(self, arch):
"""Returns the ARCH_CFLAGS for the make system."""

View File

@@ -18,7 +18,6 @@ from lldbsuite.test import lldbutil
class ConcurrentEventsBase(TestBase):
# Concurrency is the primary test factor here, not debug info variants.
NO_DEBUG_INFO_TESTCASE = True
@@ -26,13 +25,16 @@ class ConcurrentEventsBase(TestBase):
# Call super's setUp().
super(ConcurrentEventsBase, self).setUp()
# Find the line number for our breakpoint.
self.filename = 'main.cpp'
self.filename = "main.cpp"
self.thread_breakpoint_line = line_number(
self.filename, '// Set breakpoint here')
self.filename, "// Set breakpoint here"
)
self.setup_breakpoint_line = line_number(
self.filename, '// Break here and adjust num')
self.filename, "// Break here and adjust num"
)
self.finish_breakpoint_line = line_number(
self.filename, '// Break here and verify one thread is active')
self.filename, "// Break here and verify one thread is active"
)
def describe_threads(self):
ret = []
@@ -45,66 +47,83 @@ class ConcurrentEventsBase(TestBase):
bpid = x.GetStopReasonDataAtIndex(0)
bp = self.inferior_target.FindBreakpointByID(bpid)
reason_str = "%s hit %d times" % (
lldbutil.get_description(bp), bp.GetHitCount())
lldbutil.get_description(bp),
bp.GetHitCount(),
)
elif reason == lldb.eStopReasonWatchpoint:
watchid = x.GetStopReasonDataAtIndex(0)
watch = self.inferior_target.FindWatchpointByID(watchid)
reason_str = "%s hit %d times" % (
lldbutil.get_description(watch), watch.GetHitCount())
lldbutil.get_description(watch),
watch.GetHitCount(),
)
elif reason == lldb.eStopReasonSignal:
signals = self.inferior_process.GetUnixSignals()
signal_name = signals.GetSignalAsCString(
x.GetStopReasonDataAtIndex(0))
signal_name = signals.GetSignalAsCString(x.GetStopReasonDataAtIndex(0))
reason_str = "signal %s" % signal_name
location = "\t".join([lldbutil.get_description(
x.GetFrameAtIndex(i)) for i in range(x.GetNumFrames())])
location = "\t".join(
[
lldbutil.get_description(x.GetFrameAtIndex(i))
for i in range(x.GetNumFrames())
]
)
ret.append(
"thread %d %s due to %s at\n\t%s" %
(id, status, reason_str, location))
"thread %d %s due to %s at\n\t%s" % (id, status, reason_str, location)
)
return ret
def add_breakpoint(self, line, descriptions):
""" Adds a breakpoint at self.filename:line and appends its description to descriptions, and
returns the LLDB SBBreakpoint object.
"""Adds a breakpoint at self.filename:line and appends its description to descriptions, and
returns the LLDB SBBreakpoint object.
"""
bpno = lldbutil.run_break_set_by_file_and_line(
self, self.filename, line, num_expected_locations=-1)
self, self.filename, line, num_expected_locations=-1
)
bp = self.inferior_target.FindBreakpointByID(bpno)
descriptions.append(": file = 'main.cpp', line = %d" % line)
return bp
def inferior_done(self):
""" Returns true if the inferior is done executing all the event threads (and is stopped at self.finish_breakpoint,
or has terminated execution.
"""Returns true if the inferior is done executing all the event threads (and is stopped at self.finish_breakpoint,
or has terminated execution.
"""
return self.finish_breakpoint.GetHitCount() > 0 or \
self.crash_count > 0 or \
self.inferior_process.GetState() == lldb.eStateExited
return (
self.finish_breakpoint.GetHitCount() > 0
or self.crash_count > 0
or self.inferior_process.GetState() == lldb.eStateExited
)
def count_signaled_threads(self):
count = 0
for thread in self.inferior_process:
if thread.GetStopReason() == lldb.eStopReasonSignal and thread.GetStopReasonDataAtIndex(
0) == self.inferior_process.GetUnixSignals().GetSignalNumberFromName('SIGUSR1'):
if (
thread.GetStopReason() == lldb.eStopReasonSignal
and thread.GetStopReasonDataAtIndex(0)
== self.inferior_process.GetUnixSignals().GetSignalNumberFromName(
"SIGUSR1"
)
):
count += 1
return count
def do_thread_actions(self,
num_breakpoint_threads=0,
num_signal_threads=0,
num_watchpoint_threads=0,
num_crash_threads=0,
num_delay_breakpoint_threads=0,
num_delay_signal_threads=0,
num_delay_watchpoint_threads=0,
num_delay_crash_threads=0):
""" Sets a breakpoint in the main thread where test parameters (numbers of threads) can be adjusted, runs the inferior
to that point, and modifies the locals that control the event thread counts. Also sets a breakpoint in
breakpoint_func (the function executed by each 'breakpoint' thread) and a watchpoint on a global modified in
watchpoint_func. The inferior is continued until exit or a crash takes place, and the number of events seen by LLDB
is verified to match the expected number of events.
def do_thread_actions(
self,
num_breakpoint_threads=0,
num_signal_threads=0,
num_watchpoint_threads=0,
num_crash_threads=0,
num_delay_breakpoint_threads=0,
num_delay_signal_threads=0,
num_delay_watchpoint_threads=0,
num_delay_crash_threads=0,
):
"""Sets a breakpoint in the main thread where test parameters (numbers of threads) can be adjusted, runs the inferior
to that point, and modifies the locals that control the event thread counts. Also sets a breakpoint in
breakpoint_func (the function executed by each 'breakpoint' thread) and a watchpoint on a global modified in
watchpoint_func. The inferior is continued until exit or a crash takes place, and the number of events seen by LLDB
is verified to match the expected number of events.
"""
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
@@ -116,27 +135,34 @@ class ConcurrentEventsBase(TestBase):
# Initialize all the breakpoints (main thread/aux thread)
self.setup_breakpoint = self.add_breakpoint(
self.setup_breakpoint_line, expected_bps)
self.setup_breakpoint_line, expected_bps
)
self.finish_breakpoint = self.add_breakpoint(
self.finish_breakpoint_line, expected_bps)
self.finish_breakpoint_line, expected_bps
)
# Set the thread breakpoint
if num_breakpoint_threads + num_delay_breakpoint_threads > 0:
self.thread_breakpoint = self.add_breakpoint(
self.thread_breakpoint_line, expected_bps)
self.thread_breakpoint_line, expected_bps
)
# Verify breakpoints
self.expect(
"breakpoint list -f",
"Breakpoint locations shown correctly",
substrs=expected_bps)
substrs=expected_bps,
)
# Run the program.
self.runCmd("run", RUN_SUCCEEDED)
# Check we are at line self.setup_breakpoint
self.expect("thread backtrace", STOPPED_DUE_TO_BREAKPOINT,
substrs=["stop reason = breakpoint 1."])
self.expect(
"thread backtrace",
STOPPED_DUE_TO_BREAKPOINT,
substrs=["stop reason = breakpoint 1."],
)
# Initialize the (single) watchpoint on the global variable (g_watchme)
if num_watchpoint_threads + num_delay_watchpoint_threads > 0:
@@ -144,9 +170,9 @@ class ConcurrentEventsBase(TestBase):
for w in self.inferior_target.watchpoint_iter():
self.thread_watchpoint = w
self.assertTrue(
"g_watchme" in str(
self.thread_watchpoint),
"Watchpoint location not shown correctly")
"g_watchme" in str(self.thread_watchpoint),
"Watchpoint location not shown correctly",
)
# Get the process
self.inferior_process = self.inferior_target.GetProcess()
@@ -156,7 +182,8 @@ class ConcurrentEventsBase(TestBase):
self.assertEqual(
self.inferior_process.GetNumThreads(),
1,
'Expected to stop before any additional threads are spawned.')
"Expected to stop before any additional threads are spawned.",
)
self.runCmd("expr num_breakpoint_threads=%d" % num_breakpoint_threads)
self.runCmd("expr num_crash_threads=%d" % num_crash_threads)
@@ -164,17 +191,13 @@ class ConcurrentEventsBase(TestBase):
self.runCmd("expr num_watchpoint_threads=%d" % num_watchpoint_threads)
self.runCmd(
"expr num_delay_breakpoint_threads=%d" %
num_delay_breakpoint_threads)
"expr num_delay_breakpoint_threads=%d" % num_delay_breakpoint_threads
)
self.runCmd("expr num_delay_crash_threads=%d" % num_delay_crash_threads)
self.runCmd("expr num_delay_signal_threads=%d" % num_delay_signal_threads)
self.runCmd(
"expr num_delay_crash_threads=%d" %
num_delay_crash_threads)
self.runCmd(
"expr num_delay_signal_threads=%d" %
num_delay_signal_threads)
self.runCmd(
"expr num_delay_watchpoint_threads=%d" %
num_delay_watchpoint_threads)
"expr num_delay_watchpoint_threads=%d" % num_delay_watchpoint_threads
)
# Continue the inferior so threads are spawned
self.runCmd("continue")
@@ -183,23 +206,28 @@ class ConcurrentEventsBase(TestBase):
# the inferior program ensures all threads are started and running
# before any thread triggers its 'event'.
num_threads = self.inferior_process.GetNumThreads()
expected_num_threads = num_breakpoint_threads + num_delay_breakpoint_threads \
+ num_signal_threads + num_delay_signal_threads \
+ num_watchpoint_threads + num_delay_watchpoint_threads \
+ num_crash_threads + num_delay_crash_threads + 1
expected_num_threads = (
num_breakpoint_threads
+ num_delay_breakpoint_threads
+ num_signal_threads
+ num_delay_signal_threads
+ num_watchpoint_threads
+ num_delay_watchpoint_threads
+ num_crash_threads
+ num_delay_crash_threads
+ 1
)
self.assertEqual(
num_threads,
expected_num_threads,
'Expected to see %d threads, but seeing %d. Details:\n%s' %
(expected_num_threads,
num_threads,
"\n\t".join(
self.describe_threads())))
"Expected to see %d threads, but seeing %d. Details:\n%s"
% (expected_num_threads, num_threads, "\n\t".join(self.describe_threads())),
)
self.signal_count = self.count_signaled_threads()
self.crash_count = len(
lldbutil.get_crashed_threads(
self, self.inferior_process))
lldbutil.get_crashed_threads(self, self.inferior_process)
)
# Run to completion (or crash)
while not self.inferior_done():
@@ -208,16 +236,16 @@ class ConcurrentEventsBase(TestBase):
self.runCmd("continue")
self.signal_count += self.count_signaled_threads()
self.crash_count += len(
lldbutil.get_crashed_threads(
self, self.inferior_process))
lldbutil.get_crashed_threads(self, self.inferior_process)
)
if num_crash_threads > 0 or num_delay_crash_threads > 0:
# Expecting a crash
self.assertTrue(
self.crash_count > 0,
"Expecting at least one thread to crash. Details: %s" %
"\t\n".join(
self.describe_threads()))
"Expecting at least one thread to crash. Details: %s"
% "\t\n".join(self.describe_threads()),
)
# Ensure the zombie process is reaped
self.runCmd("process kill")
@@ -228,53 +256,61 @@ class ConcurrentEventsBase(TestBase):
self.assertEqual(
1,
self.finish_breakpoint.GetHitCount(),
"Expected main thread (finish) breakpoint to be hit once")
"Expected main thread (finish) breakpoint to be hit once",
)
num_threads = self.inferior_process.GetNumThreads()
self.assertEqual(
1,
num_threads,
"Expecting 1 thread but seeing %d. Details:%s" %
(num_threads,
"\n\t".join(
self.describe_threads())))
"Expecting 1 thread but seeing %d. Details:%s"
% (num_threads, "\n\t".join(self.describe_threads())),
)
self.runCmd("continue")
# The inferior process should have exited without crashing
self.assertEqual(
0,
self.crash_count,
"Unexpected thread(s) in crashed state")
0, self.crash_count, "Unexpected thread(s) in crashed state"
)
self.assertEqual(
self.inferior_process.GetState(),
lldb.eStateExited,
PROCESS_EXITED)
self.inferior_process.GetState(), lldb.eStateExited, PROCESS_EXITED
)
# Verify the number of actions took place matches expected numbers
expected_breakpoint_threads = num_delay_breakpoint_threads + num_breakpoint_threads
breakpoint_hit_count = self.thread_breakpoint.GetHitCount(
) if expected_breakpoint_threads > 0 else 0
expected_breakpoint_threads = (
num_delay_breakpoint_threads + num_breakpoint_threads
)
breakpoint_hit_count = (
self.thread_breakpoint.GetHitCount()
if expected_breakpoint_threads > 0
else 0
)
self.assertEqual(
expected_breakpoint_threads,
breakpoint_hit_count,
"Expected %d breakpoint hits, but got %d" %
(expected_breakpoint_threads,
breakpoint_hit_count))
"Expected %d breakpoint hits, but got %d"
% (expected_breakpoint_threads, breakpoint_hit_count),
)
expected_signal_threads = num_delay_signal_threads + num_signal_threads
self.assertEqual(
expected_signal_threads,
self.signal_count,
"Expected %d stops due to signal delivery, but got %d" %
(expected_signal_threads,
self.signal_count))
"Expected %d stops due to signal delivery, but got %d"
% (expected_signal_threads, self.signal_count),
)
expected_watchpoint_threads = num_delay_watchpoint_threads + num_watchpoint_threads
watchpoint_hit_count = self.thread_watchpoint.GetHitCount(
) if expected_watchpoint_threads > 0 else 0
expected_watchpoint_threads = (
num_delay_watchpoint_threads + num_watchpoint_threads
)
watchpoint_hit_count = (
self.thread_watchpoint.GetHitCount()
if expected_watchpoint_threads > 0
else 0
)
self.assertEqual(
expected_watchpoint_threads,
watchpoint_hit_count,
"Expected %d watchpoint hits, got %d" %
(expected_watchpoint_threads,
watchpoint_hit_count))
"Expected %d watchpoint hits, got %d"
% (expected_watchpoint_threads, watchpoint_hit_count),
)

View File

@@ -62,7 +62,7 @@ yaml2obj = None
# The arch might dictate some specific CFLAGS to be passed to the toolchain to build
# the inferior programs. The global variable cflags_extras provides a hook to do
# just that.
cflags_extras = ''
cflags_extras = ""
# The filters (testclass.testmethod) used to admit tests into our test suite.
filters = []
@@ -78,7 +78,7 @@ xfail_tests = None
# Set this flag if there is any session info dumped during the test run.
sdir_has_content = False
# svn_info stores the output from 'svn info lldb.base.dir'.
svn_info = ''
svn_info = ""
# Default verbosity is 0.
verbose = 0
@@ -93,7 +93,7 @@ testdirs = [lldbsuite.lldb_test_root]
test_src_root = lldbsuite.lldb_test_root
# Separator string.
separator = '-' * 70
separator = "-" * 70
failed = False
@@ -133,8 +133,10 @@ enabled_plugins = []
def shouldSkipBecauseOfCategories(test_categories):
if use_categories:
if len(test_categories) == 0 or len(
categories_list & set(test_categories)) == 0:
if (
len(test_categories) == 0
or len(categories_list & set(test_categories)) == 0
):
return True
for category in skip_categories:
@@ -151,6 +153,7 @@ def get_filecheck_path():
if filecheck and os.path.lexists(filecheck):
return filecheck
def get_yaml2obj_path():
"""
Get the path to the yaml2obj tool.

File diff suppressed because it is too large

View File

@@ -52,7 +52,7 @@ def is_exe(fpath):
"""Returns true if fpath is an executable."""
if fpath == None:
return False
if sys.platform == 'win32':
if sys.platform == "win32":
if not fpath.endswith(".exe"):
fpath += ".exe"
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
@@ -75,7 +75,8 @@ def which(program):
def usage(parser):
parser.print_help()
if configuration.verbose > 0:
print("""
print(
"""
Examples:
This is an example of using the -f option to pinpoint to a specific test class
@@ -166,21 +167,22 @@ to create reference logs for debugging.
$ ./dotest.py --log-success
""")
"""
)
sys.exit(0)
def parseExclusion(exclusion_file):
"""Parse an exclusion file, of the following format, where
'skip files', 'skip methods', 'xfail files', and 'xfail methods'
are the possible list heading values:
'skip files', 'skip methods', 'xfail files', and 'xfail methods'
are the possible list heading values:
skip files
<file name>
<file name>
skip files
<file name>
<file name>
xfail methods
<method name>
xfail methods
<method name>
"""
excl_type = None
@@ -193,11 +195,11 @@ def parseExclusion(exclusion_file):
if not line:
excl_type = None
elif excl_type == 'skip':
elif excl_type == "skip":
if not configuration.skip_tests:
configuration.skip_tests = []
configuration.skip_tests.append(line)
elif excl_type == 'xfail':
elif excl_type == "xfail":
if not configuration.xfail_tests:
configuration.xfail_tests = []
configuration.xfail_tests.append(line)
@@ -231,14 +233,14 @@ def parseOptionsAndInitTestdirs():
if args.set_env_vars:
for env_var in args.set_env_vars:
parts = env_var.split('=', 1)
parts = env_var.split("=", 1)
if len(parts) == 1:
os.environ[parts[0]] = ""
else:
os.environ[parts[0]] = parts[1]
if args.set_inferior_env_vars:
lldbtest_config.inferior_env = ' '.join(args.set_inferior_env_vars)
lldbtest_config.inferior_env = " ".join(args.set_inferior_env_vars)
if args.h:
do_help = True
@@ -249,19 +251,19 @@ def parseOptionsAndInitTestdirs():
configuration.compiler = which(args.compiler)
if not is_exe(configuration.compiler):
logging.error(
'%s is not a valid compiler executable; aborting...',
args.compiler)
"%s is not a valid compiler executable; aborting...", args.compiler
)
sys.exit(-1)
else:
            # Use a compiler appropriate for the Apple SDK if one was
# specified
if platform_system == 'Darwin' and args.apple_sdk:
if platform_system == "Darwin" and args.apple_sdk:
configuration.compiler = seven.get_command_output(
'xcrun -sdk "%s" -find clang 2> /dev/null' %
(args.apple_sdk))
'xcrun -sdk "%s" -find clang 2> /dev/null' % (args.apple_sdk)
)
else:
# 'clang' on ubuntu 14.04 is 3.4 so we try clang-3.5 first
candidateCompilers = ['clang-3.5', 'clang', 'gcc']
candidateCompilers = ["clang-3.5", "clang", "gcc"]
for candidate in candidateCompilers:
if which(candidate):
configuration.compiler = candidate
@@ -269,22 +271,27 @@ def parseOptionsAndInitTestdirs():
if args.dsymutil:
configuration.dsymutil = args.dsymutil
elif platform_system == 'Darwin':
elif platform_system == "Darwin":
configuration.dsymutil = seven.get_command_output(
'xcrun -find -toolchain default dsymutil')
"xcrun -find -toolchain default dsymutil"
)
if args.llvm_tools_dir:
configuration.filecheck = shutil.which("FileCheck", path=args.llvm_tools_dir)
configuration.yaml2obj = shutil.which("yaml2obj", path=args.llvm_tools_dir)
if not configuration.get_filecheck_path():
logging.warning('No valid FileCheck executable; some tests may fail...')
logging.warning('(Double-check the --llvm-tools-dir argument to dotest.py)')
logging.warning("No valid FileCheck executable; some tests may fail...")
logging.warning("(Double-check the --llvm-tools-dir argument to dotest.py)")
if args.libcxx_include_dir or args.libcxx_library_dir:
if args.lldb_platform_name:
logging.warning('Custom libc++ is not supported for remote runs: ignoring --libcxx arguments')
logging.warning(
"Custom libc++ is not supported for remote runs: ignoring --libcxx arguments"
)
elif not (args.libcxx_include_dir and args.libcxx_library_dir):
logging.error('Custom libc++ requires both --libcxx-include-dir and --libcxx-library-dir')
logging.error(
"Custom libc++ requires both --libcxx-include-dir and --libcxx-library-dir"
)
sys.exit(-1)
configuration.libcxx_include_dir = args.libcxx_include_dir
configuration.libcxx_include_target_dir = args.libcxx_include_target_dir
@@ -300,14 +307,12 @@ def parseOptionsAndInitTestdirs():
lldbtest_config.out_of_tree_debugserver = args.out_of_tree_debugserver
# Set SDKROOT if we are using an Apple SDK
if platform_system == 'Darwin' and args.apple_sdk:
if platform_system == "Darwin" and args.apple_sdk:
configuration.sdkroot = seven.get_command_output(
'xcrun --sdk "%s" --show-sdk-path 2> /dev/null' %
(args.apple_sdk))
'xcrun --sdk "%s" --show-sdk-path 2> /dev/null' % (args.apple_sdk)
)
if not configuration.sdkroot:
logging.error(
'No SDK found with the name %s; aborting...',
args.apple_sdk)
logging.error("No SDK found with the name %s; aborting...", args.apple_sdk)
sys.exit(-1)
if args.arch:
@@ -317,47 +322,51 @@ def parseOptionsAndInitTestdirs():
if args.categories_list:
configuration.categories_list = set(
test_categories.validate(
args.categories_list, False))
test_categories.validate(args.categories_list, False)
)
configuration.use_categories = True
else:
configuration.categories_list = []
if args.skip_categories:
configuration.skip_categories += test_categories.validate(
args.skip_categories, False)
args.skip_categories, False
)
if args.xfail_categories:
configuration.xfail_categories += test_categories.validate(
args.xfail_categories, False)
args.xfail_categories, False
)
if args.E:
os.environ['CFLAGS_EXTRAS'] = args.E
os.environ["CFLAGS_EXTRAS"] = args.E
if args.dwarf_version:
configuration.dwarf_version = args.dwarf_version
# We cannot modify CFLAGS_EXTRAS because they're used in test cases
# that explicitly require no debug info.
os.environ['CFLAGS'] = '-gdwarf-{}'.format(configuration.dwarf_version)
os.environ["CFLAGS"] = "-gdwarf-{}".format(configuration.dwarf_version)
if args.settings:
for setting in args.settings:
if not len(setting) == 1 or not setting[0].count('='):
logging.error('"%s" is not a setting in the form "key=value"',
setting[0])
if not len(setting) == 1 or not setting[0].count("="):
logging.error(
'"%s" is not a setting in the form "key=value"', setting[0]
)
sys.exit(-1)
setting_list = setting[0].split('=', 1)
setting_list = setting[0].split("=", 1)
configuration.settings.append((setting_list[0], setting_list[1]))
if args.d:
sys.stdout.write(
"Suspending the process %d to wait for debugger to attach...\n" %
os.getpid())
"Suspending the process %d to wait for debugger to attach...\n"
% os.getpid()
)
sys.stdout.flush()
os.kill(os.getpid(), signal.SIGSTOP)
if args.f:
if any([x.startswith('-') for x in args.f]):
if any([x.startswith("-") for x in args.f]):
usage(parser)
configuration.filters.extend(args.f)
@@ -371,8 +380,8 @@ def parseOptionsAndInitTestdirs():
lldbtest_config.lldbExec = which(args.executable)
if not is_exe(lldbtest_config.lldbExec):
logging.error(
'%s is not a valid executable to test; aborting...',
args.executable)
"%s is not a valid executable to test; aborting...", args.executable
)
sys.exit(-1)
if args.excluded:
@@ -380,12 +389,12 @@ def parseOptionsAndInitTestdirs():
parseExclusion(excl_file)
if args.p:
if args.p.startswith('-'):
if args.p.startswith("-"):
usage(parser)
configuration.regexp = args.p
if args.t:
os.environ['LLDB_COMMAND_TRACE'] = 'YES'
os.environ["LLDB_COMMAND_TRACE"] = "YES"
if args.v:
configuration.verbose = 2
@@ -394,10 +403,9 @@ def parseOptionsAndInitTestdirs():
if args.sharp:
configuration.count = args.sharp
if sys.platform.startswith('win32'):
os.environ['LLDB_DISABLE_CRASH_DIALOG'] = str(
args.disable_crash_dialog)
os.environ['LLDB_LAUNCH_INFERIORS_WITHOUT_CONSOLE'] = str(True)
if sys.platform.startswith("win32"):
os.environ["LLDB_DISABLE_CRASH_DIALOG"] = str(args.disable_crash_dialog)
os.environ["LLDB_LAUNCH_INFERIORS_WITHOUT_CONSOLE"] = str(True)
if do_help:
usage(parser)
@@ -408,7 +416,7 @@ def parseOptionsAndInitTestdirs():
configuration.lldb_platform_url = args.lldb_platform_url
if args.lldb_platform_working_dir:
configuration.lldb_platform_working_dir = args.lldb_platform_working_dir
if platform_system == 'Darwin' and args.apple_sdk:
if platform_system == "Darwin" and args.apple_sdk:
configuration.apple_sdk = args.apple_sdk
if args.test_build_dir:
configuration.test_build_dir = args.test_build_dir
@@ -416,12 +424,14 @@ def parseOptionsAndInitTestdirs():
configuration.lldb_module_cache_dir = args.lldb_module_cache_dir
else:
configuration.lldb_module_cache_dir = os.path.join(
configuration.test_build_dir, 'module-cache-lldb')
configuration.test_build_dir, "module-cache-lldb"
)
if args.clang_module_cache_dir:
configuration.clang_module_cache_dir = args.clang_module_cache_dir
else:
configuration.clang_module_cache_dir = os.path.join(
configuration.test_build_dir, 'module-cache-clang')
configuration.test_build_dir, "module-cache-clang"
)
if args.lldb_libs_dir:
configuration.lldb_libs_dir = args.lldb_libs_dir
@@ -431,10 +441,13 @@ def parseOptionsAndInitTestdirs():
# Gather all the dirs passed on the command line.
if len(args.args) > 0:
configuration.testdirs = [os.path.realpath(os.path.abspath(x)) for x in args.args]
configuration.testdirs = [
os.path.realpath(os.path.abspath(x)) for x in args.args
]
lldbtest_config.codesign_identity = args.codesign_identity
def registerFaulthandler():
try:
import faulthandler
@@ -444,9 +457,10 @@ def registerFaulthandler():
faulthandler.enable()
# faulthandler.register is not available on Windows.
if getattr(faulthandler, 'register', None):
if getattr(faulthandler, "register", None):
faulthandler.register(signal.SIGTERM, chain=True)
def setupSysPath():
"""
Add LLDB.framework/Resources/Python to the search paths for modules.
@@ -458,7 +472,7 @@ def setupSysPath():
scriptPath = os.environ["DOTEST_SCRIPT_DIR"]
else:
scriptPath = os.path.dirname(os.path.abspath(__file__))
if not scriptPath.endswith('test'):
if not scriptPath.endswith("test"):
print("This script expects to reside in lldb's test directory.")
sys.exit(-1)
@@ -473,10 +487,10 @@ def setupSysPath():
# the LLDB source code.
os.environ["LLDB_SRC"] = lldbsuite.lldb_root
pluginPath = os.path.join(scriptPath, 'plugins')
toolsLLDBVSCode = os.path.join(scriptPath, 'tools', 'lldb-vscode')
toolsLLDBServerPath = os.path.join(scriptPath, 'tools', 'lldb-server')
intelpt = os.path.join(scriptPath, 'tools', 'intelpt')
pluginPath = os.path.join(scriptPath, "plugins")
toolsLLDBVSCode = os.path.join(scriptPath, "tools", "lldb-vscode")
toolsLLDBServerPath = os.path.join(scriptPath, "tools", "lldb-server")
intelpt = os.path.join(scriptPath, "tools", "intelpt")
# Insert script dir, plugin dir and lldb-server dir to the sys.path.
sys.path.insert(0, pluginPath)
@@ -509,19 +523,21 @@ def setupSysPath():
if not lldbtest_config.lldbExec:
# Last, check the path
lldbtest_config.lldbExec = which('lldb')
lldbtest_config.lldbExec = which("lldb")
if lldbtest_config.lldbExec and not is_exe(lldbtest_config.lldbExec):
print(
"'{}' is not a path to a valid executable".format(
lldbtest_config.lldbExec))
"'{}' is not a path to a valid executable".format(lldbtest_config.lldbExec)
)
lldbtest_config.lldbExec = None
if not lldbtest_config.lldbExec:
print("The 'lldb' executable cannot be located. Some of the tests may not be run as a result.")
print(
"The 'lldb' executable cannot be located. Some of the tests may not be run as a result."
)
sys.exit(-1)
os.system('%s -v' % lldbtest_config.lldbExec)
os.system("%s -v" % lldbtest_config.lldbExec)
lldbDir = os.path.dirname(lldbtest_config.lldbExec)
@@ -531,36 +547,47 @@ def setupSysPath():
else:
if not configuration.shouldSkipBecauseOfCategories(["lldb-vscode"]):
print(
"The 'lldb-vscode' executable cannot be located. The lldb-vscode tests can not be run as a result.")
"The 'lldb-vscode' executable cannot be located. The lldb-vscode tests can not be run as a result."
)
configuration.skip_categories.append("lldb-vscode")
lldbPythonDir = None # The directory that contains 'lldb/__init__.py'
# If our lldb supports the -P option, use it to find the python path:
lldb_dash_p_result = subprocess.check_output([lldbtest_config.lldbExec, "-P"], universal_newlines=True)
lldb_dash_p_result = subprocess.check_output(
[lldbtest_config.lldbExec, "-P"], universal_newlines=True
)
if lldb_dash_p_result:
for line in lldb_dash_p_result.splitlines():
if os.path.isdir(line) and os.path.exists(os.path.join(line, 'lldb', '__init__.py')):
if os.path.isdir(line) and os.path.exists(
os.path.join(line, "lldb", "__init__.py")
):
lldbPythonDir = line
break
if not lldbPythonDir:
print(
"Unable to load lldb extension module. Possible reasons for this include:")
"Unable to load lldb extension module. Possible reasons for this include:"
)
print(" 1) LLDB was built with LLDB_ENABLE_PYTHON=0")
print(
" 2) PYTHONPATH and PYTHONHOME are not set correctly. PYTHONHOME should refer to")
" 2) PYTHONPATH and PYTHONHOME are not set correctly. PYTHONHOME should refer to"
)
print(
" the version of Python that LLDB built and linked against, and PYTHONPATH")
" the version of Python that LLDB built and linked against, and PYTHONPATH"
)
print(
" should contain the Lib directory for the same python distro, as well as the")
print(" location of LLDB\'s site-packages folder.")
" should contain the Lib directory for the same python distro, as well as the"
)
print(" location of LLDB's site-packages folder.")
print(
" 3) A different version of Python than that which was built against is exported in")
print(" the system\'s PATH environment variable, causing conflicts.")
" 3) A different version of Python than that which was built against is exported in"
)
print(" the system's PATH environment variable, causing conflicts.")
print(
" 4) The executable '%s' could not be found. Please check " %
lldbtest_config.lldbExec)
" 4) The executable '%s' could not be found. Please check "
% lldbtest_config.lldbExec
)
print(" that it exists and is executable.")
if lldbPythonDir:
@@ -569,18 +596,18 @@ def setupSysPath():
# If the path we've constructed looks like that, then we'll strip out
# the Versions/A part.
(before, frameWithVersion, after) = lldbPythonDir.rpartition(
"LLDB.framework/Versions/A")
"LLDB.framework/Versions/A"
)
if frameWithVersion != "":
lldbPythonDir = before + "LLDB.framework" + after
lldbPythonDir = os.path.abspath(lldbPythonDir)
if "freebsd" in sys.platform or "linux" in sys.platform:
os.environ['LLDB_LIB_DIR'] = os.path.join(lldbPythonDir, '..', '..')
os.environ["LLDB_LIB_DIR"] = os.path.join(lldbPythonDir, "..", "..")
# If tests need to find LLDB_FRAMEWORK, now they can do it
os.environ["LLDB_FRAMEWORK"] = os.path.dirname(
os.path.dirname(lldbPythonDir))
os.environ["LLDB_FRAMEWORK"] = os.path.dirname(os.path.dirname(lldbPythonDir))
# This is to locate the lldb.py module. Insert it right after
# sys.path[0].
@@ -622,15 +649,15 @@ def visit_file(dir, name):
def iter_filters():
for filterspec in configuration.filters:
parts = filterspec.split('.')
parts = filterspec.split(".")
if check(module, parts):
yield filterspec
elif parts[0] == base and len(parts) > 1 and check(module, parts[1:]):
yield '.'.join(parts[1:])
yield ".".join(parts[1:])
else:
for key,value in module.__dict__.items():
for key, value in module.__dict__.items():
if check(value, parts):
yield key + '.' + filterspec
yield key + "." + filterspec
filtered = False
for filterspec in iter_filters():
@@ -648,22 +675,22 @@ def visit_file(dir, name):
# Also the fail-over case when the filterspec branch
# (base, filterspec) combo doesn't make sense.
configuration.suite.addTests(
unittest2.defaultTestLoader.loadTestsFromName(base))
unittest2.defaultTestLoader.loadTestsFromName(base)
)
def visit(prefix, dir, names):
"""Visitor function for os.path.walk(path, visit, arg)."""
dir_components = set(dir.split(os.sep))
excluded_components = set(['.svn', '.git'])
excluded_components = set([".svn", ".git"])
if dir_components.intersection(excluded_components):
return
# Gather all the Python test file names that follow the Test*.py pattern.
python_test_files = [
name
for name in names
if name.endswith('.py') and name.startswith(prefix)]
name for name in names if name.endswith(".py") and name.startswith(prefix)
]
# Visit all the python test files.
for name in python_test_files:
@@ -689,17 +716,15 @@ def visit(prefix, dir, names):
def checkDsymForUUIDIsNotOn():
cmd = ["defaults", "read", "com.apple.DebugSymbols"]
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
cmd_output = process.stdout.read()
output_str = cmd_output.decode("utf-8")
if "DBGFileMappedPaths = " in output_str:
print("%s =>" % ' '.join(cmd))
print("%s =>" % " ".join(cmd))
print(output_str)
print(
"Disable automatic lookup and caching of dSYMs before running the test suite!")
"Disable automatic lookup and caching of dSYMs before running the test suite!"
)
print("Exiting...")
sys.exit(0)
@@ -708,6 +733,7 @@ def exitTestSuite(exitCode=None):
# lldb.py does SBDebugger.Initialize().
# Call SBDebugger.Terminate() on exit.
import lldb
lldb.SBDebugger.Terminate()
if exitCode:
sys.exit(exitCode)
@@ -715,11 +741,11 @@ def exitTestSuite(exitCode=None):
def getVersionForSDK(sdk):
sdk = str.lower(sdk)
full_path = seven.get_command_output('xcrun -sdk %s --show-sdk-path' % sdk)
full_path = seven.get_command_output("xcrun -sdk %s --show-sdk-path" % sdk)
basename = os.path.basename(full_path)
basename = os.path.splitext(basename)[0]
basename = str.lower(basename)
ver = basename.replace(sdk, '')
ver = basename.replace(sdk, "")
return ver
@@ -734,14 +760,16 @@ def checkCompiler():
raise Exception(c + " is not a valid compiler")
pipe = subprocess.Popen(
['xcrun', '-find', c], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
["xcrun", "-find", c], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
cmd_output = pipe.stdout.read()
if not cmd_output or "not found" in cmd_output:
raise Exception(c + " is not a valid compiler")
configuration.compiler = cmd_output.split('\n')[0]
configuration.compiler = cmd_output.split("\n")[0]
print("'xcrun -find %s' returning %s" % (c, configuration.compiler))
def canRunLibcxxTests():
from lldbsuite.test import lldbplatformutil
@@ -753,24 +781,35 @@ def canRunLibcxxTests():
if platform == "linux":
with tempfile.NamedTemporaryFile() as f:
cmd = [configuration.compiler, "-xc++", "-stdlib=libc++", "-o", f.name, "-"]
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
p = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
_, stderr = p.communicate("#include <cassert>\nint main() {}")
if not p.returncode:
return True, "Compiling with -stdlib=libc++ works"
return False, "Compiling with -stdlib=libc++ fails with the error: %s" % stderr
return (
False,
"Compiling with -stdlib=libc++ fails with the error: %s" % stderr,
)
return False, "Don't know how to build with libc++ on %s" % platform
def checkLibcxxSupport():
result, reason = canRunLibcxxTests()
if result:
return # libc++ supported
return # libc++ supported
if "libc++" in configuration.categories_list:
return # libc++ category explicitly requested, let it run.
return # libc++ category explicitly requested, let it run.
if configuration.verbose:
print("libc++ tests will not be run because: " + reason)
configuration.skip_categories.append("libc++")
def canRunLibstdcxxTests():
from lldbsuite.test import lldbplatformutil
@@ -781,16 +820,18 @@ def canRunLibstdcxxTests():
return True, "libstdcxx always present"
return False, "Don't know how to build with libstdcxx on %s" % platform
def checkLibstdcxxSupport():
result, reason = canRunLibstdcxxTests()
if result:
return # libstdcxx supported
return # libstdcxx supported
if "libstdcxx" in configuration.categories_list:
return # libstdcxx category explicitly requested, let it run.
return # libstdcxx category explicitly requested, let it run.
if configuration.verbose:
print("libstdcxx tests will not be run because: " + reason)
configuration.skip_categories.append("libstdcxx")
def canRunWatchpointTests():
from lldbsuite.test import lldbplatformutil
@@ -799,8 +840,13 @@ def canRunWatchpointTests():
if os.geteuid() == 0:
return True, "root can always write dbregs"
try:
output = subprocess.check_output(["/sbin/sysctl", "-n",
"security.models.extensions.user_set_dbregs"]).decode().strip()
output = (
subprocess.check_output(
["/sbin/sysctl", "-n", "security.models.extensions.user_set_dbregs"]
)
.decode()
.strip()
)
if output == "1":
return True, "security.models.extensions.user_set_dbregs enabled"
except subprocess.CalledProcessError:
@@ -808,20 +854,23 @@ def canRunWatchpointTests():
return False, "security.models.extensions.user_set_dbregs disabled"
elif platform == "freebsd" and configuration.arch == "aarch64":
import lldb
if lldb.SBPlatform.GetHostPlatform().GetOSMajorVersion() < 13:
return False, "Watchpoint support on arm64 requires FreeBSD 13.0"
return True, "watchpoint support available"
def checkWatchpointSupport():
result, reason = canRunWatchpointTests()
if result:
return # watchpoints supported
return # watchpoints supported
if "watchpoint" in configuration.categories_list:
return # watchpoint category explicitly requested, let it run.
return # watchpoint category explicitly requested, let it run.
if configuration.verbose:
print("watchpoint tests will not be run because: " + reason)
configuration.skip_categories.append("watchpoint")
def checkObjcSupport():
from lldbsuite.test import lldbplatformutil
@@ -830,6 +879,7 @@ def checkObjcSupport():
print("objc tests will be skipped because of unsupported platform")
configuration.skip_categories.append("objc")
def checkDebugInfoSupport():
from lldbsuite.test import lldbplatformutil
@@ -837,11 +887,12 @@ def checkDebugInfoSupport():
compiler = configuration.compiler
for cat in test_categories.debug_info_categories:
if cat in configuration.categories_list:
continue # Category explicitly requested, let it run.
continue # Category explicitly requested, let it run.
if test_categories.is_supported_on_platform(cat, platform, compiler):
continue
configuration.skip_categories.append(cat)
def checkDebugServerSupport():
from lldbsuite.test import lldbplatformutil
import lldb
@@ -853,13 +904,13 @@ def checkDebugServerSupport():
# <rdar://problem/34539270>
configuration.skip_categories.append("debugserver")
if configuration.verbose:
print(skip_msg%"debugserver");
print(skip_msg % "debugserver")
else:
configuration.skip_categories.append("debugserver")
if lldb.remote_platform and lldbplatformutil.getPlatform() == "windows":
configuration.skip_categories.append("llgs")
if configuration.verbose:
print(skip_msg%"lldb-server");
print(skip_msg % "lldb-server")
def checkForkVForkSupport():
@@ -888,6 +939,7 @@ def run_suite():
setupSysPath()
import lldb
lldb.SBDebugger.Initialize()
lldb.SBDebugger.PrintStackTraceOnError()
@@ -899,46 +951,58 @@ def run_suite():
from lldbsuite.test import lldbutil
if configuration.lldb_platform_name:
print("Setting up remote platform '%s'" %
(configuration.lldb_platform_name))
lldb.remote_platform = lldb.SBPlatform(
configuration.lldb_platform_name)
print("Setting up remote platform '%s'" % (configuration.lldb_platform_name))
lldb.remote_platform = lldb.SBPlatform(configuration.lldb_platform_name)
lldb.selected_platform = lldb.remote_platform
if not lldb.remote_platform.IsValid():
print(
"error: unable to create the LLDB platform named '%s'." %
(configuration.lldb_platform_name))
"error: unable to create the LLDB platform named '%s'."
% (configuration.lldb_platform_name)
)
exitTestSuite(1)
if configuration.lldb_platform_url:
# We must connect to a remote platform if a LLDB platform URL was
# specified
print(
"Connecting to remote platform '%s' at '%s'..." %
(configuration.lldb_platform_name, configuration.lldb_platform_url))
"Connecting to remote platform '%s' at '%s'..."
% (configuration.lldb_platform_name, configuration.lldb_platform_url)
)
platform_connect_options = lldb.SBPlatformConnectOptions(
configuration.lldb_platform_url)
configuration.lldb_platform_url
)
err = lldb.remote_platform.ConnectRemote(platform_connect_options)
if err.Success():
print("Connected.")
else:
print("error: failed to connect to remote platform using URL '%s': %s" % (
configuration.lldb_platform_url, err))
print(
"error: failed to connect to remote platform using URL '%s': %s"
% (configuration.lldb_platform_url, err)
)
exitTestSuite(1)
else:
configuration.lldb_platform_url = None
if configuration.lldb_platform_working_dir:
print("Setting remote platform working directory to '%s'..." %
(configuration.lldb_platform_working_dir))
print(
"Setting remote platform working directory to '%s'..."
% (configuration.lldb_platform_working_dir)
)
error = lldb.remote_platform.MakeDirectory(
configuration.lldb_platform_working_dir, 448) # 448 = 0o700
configuration.lldb_platform_working_dir, 448
) # 448 = 0o700
if error.Fail():
raise Exception("making remote directory '%s': %s" % (
configuration.lldb_platform_working_dir, error))
raise Exception(
"making remote directory '%s': %s"
% (configuration.lldb_platform_working_dir, error)
)
if not lldb.remote_platform.SetWorkingDirectory(
configuration.lldb_platform_working_dir):
raise Exception("failed to set working directory '%s'" % configuration.lldb_platform_working_dir)
configuration.lldb_platform_working_dir
):
raise Exception(
"failed to set working directory '%s'"
% configuration.lldb_platform_working_dir
)
lldb.selected_platform = lldb.remote_platform
else:
lldb.remote_platform = None
@@ -958,11 +1022,15 @@ def run_suite():
checkForkVForkSupport()
skipped_categories_list = ", ".join(configuration.skip_categories)
print("Skipping the following test categories: {}".format(configuration.skip_categories))
print(
"Skipping the following test categories: {}".format(
configuration.skip_categories
)
)
for testdir in configuration.testdirs:
for (dirpath, dirnames, filenames) in os.walk(testdir):
visit('Test', dirpath, filenames)
for dirpath, dirnames, filenames in os.walk(testdir):
visit("Test", dirpath, filenames)
#
# Now that we have loaded all the test cases, run the whole test suite.
@@ -980,8 +1048,7 @@ def run_suite():
print("compiler=%s" % configuration.compiler)
# Iterating over all possible architecture and compiler combinations.
configString = "arch=%s compiler=%s" % (configuration.arch,
configuration.compiler)
configString = "arch=%s compiler=%s" % (configuration.arch, configuration.compiler)
# Output the configuration.
if configuration.verbose:
@@ -991,9 +1058,12 @@ def run_suite():
if configuration.verbose:
sys.stderr.write(configuration.separator + "\n")
sys.stderr.write(
"Collected %d test%s\n\n" %
(configuration.suite.countTestCases(),
configuration.suite.countTestCases() != 1 and "s" or ""))
"Collected %d test%s\n\n"
% (
configuration.suite.countTestCases(),
configuration.suite.countTestCases() != 1 and "s" or "",
)
)
if configuration.suite.countTestCases() == 0:
logging.error("did not discover any matching tests")
@@ -1004,41 +1074,42 @@ def run_suite():
result = unittest2.TextTestRunner(
stream=sys.stderr,
verbosity=configuration.verbose,
resultclass=test_result.LLDBTestResult).run(
configuration.suite)
resultclass=test_result.LLDBTestResult,
).run(configuration.suite)
else:
# We are invoking the same test suite more than once. In this case,
# mark __ignore_singleton__ flag as True so the signleton pattern is
# not enforced.
test_result.LLDBTestResult.__ignore_singleton__ = True
for i in range(configuration.count):
result = unittest2.TextTestRunner(
stream=sys.stderr,
verbosity=configuration.verbose,
resultclass=test_result.LLDBTestResult).run(
configuration.suite)
resultclass=test_result.LLDBTestResult,
).run(configuration.suite)
configuration.failed = not result.wasSuccessful()
if configuration.sdir_has_content and configuration.verbose:
sys.stderr.write(
"Session logs for test failures/errors/unexpected successes"
" can be found in the test build directory\n")
" can be found in the test build directory\n"
)
if configuration.use_categories and len(
configuration.failures_per_category) > 0:
if configuration.use_categories and len(configuration.failures_per_category) > 0:
sys.stderr.write("Failures per category:\n")
for category in configuration.failures_per_category:
sys.stderr.write(
"%s - %d\n" %
(category, configuration.failures_per_category[category]))
"%s - %d\n" % (category, configuration.failures_per_category[category])
)
# Exiting.
exitTestSuite(configuration.failed)
if __name__ == "__main__":
print(
__file__ +
" is for use as a module only. It should not be run as a standalone script.")
__file__
+ " is for use as a module only. It should not be run as a standalone script."
)
sys.exit(-1)

View File

@@ -12,227 +12,323 @@ from . import configuration
def create_parser():
parser = argparse.ArgumentParser(
description='description',
prefix_chars='+-',
add_help=False)
description="description", prefix_chars="+-", add_help=False
)
group = None
# Helper function for boolean options (group will point to the current
# group when executing X)
X = lambda optstr, helpstr, **kwargs: group.add_argument(
optstr, help=helpstr, action='store_true', **kwargs)
optstr, help=helpstr, action="store_true", **kwargs
)
group = parser.add_argument_group('Help')
group = parser.add_argument_group("Help")
group.add_argument(
'-h',
'--help',
dest='h',
action='store_true',
help="Print this help message and exit. Add '-v' for more detailed help.")
"-h",
"--help",
dest="h",
action="store_true",
help="Print this help message and exit. Add '-v' for more detailed help.",
)
# C and Python toolchain options
group = parser.add_argument_group('Toolchain options')
group = parser.add_argument_group("Toolchain options")
group.add_argument(
'-A',
'--arch',
metavar='arch',
dest='arch',
help=textwrap.dedent('''Specify the architecture(s) to test. This option can be specified more than once'''))
group.add_argument('-C', '--compiler', metavar='compiler', dest='compiler', help=textwrap.dedent(
'''Specify the compiler(s) used to build the inferior executables. The compiler path can be an executable basename or a full path to a compiler executable. This option can be specified multiple times.'''))
if sys.platform == 'darwin':
group.add_argument('--apple-sdk', metavar='apple_sdk', dest='apple_sdk', default="", help=textwrap.dedent(
'''Specify the name of the Apple SDK (macosx, macosx.internal, iphoneos, iphoneos.internal, or path to SDK) and use the appropriate tools from that SDK's toolchain.'''))
group.add_argument('--libcxx-include-dir', help=textwrap.dedent(
'Specify the path to a custom libc++ include directory. Must be used in conjunction with --libcxx-library-dir.'))
group.add_argument('--libcxx-include-target-dir', help=textwrap.dedent(
'Specify the path to a custom libc++ include target directory to use in addition to --libcxx-include-dir. Optional.'))
group.add_argument('--libcxx-library-dir', help=textwrap.dedent(
'Specify the path to a custom libc++ library directory. Must be used in conjunction with --libcxx-include-dir.'))
"-A",
"--arch",
metavar="arch",
dest="arch",
help=textwrap.dedent(
"""Specify the architecture(s) to test. This option can be specified more than once"""
),
)
group.add_argument(
"-C",
"--compiler",
metavar="compiler",
dest="compiler",
help=textwrap.dedent(
"""Specify the compiler(s) used to build the inferior executables. The compiler path can be an executable basename or a full path to a compiler executable. This option can be specified multiple times."""
),
)
if sys.platform == "darwin":
group.add_argument(
"--apple-sdk",
metavar="apple_sdk",
dest="apple_sdk",
default="",
help=textwrap.dedent(
"""Specify the name of the Apple SDK (macosx, macosx.internal, iphoneos, iphoneos.internal, or path to SDK) and use the appropriate tools from that SDK's toolchain."""
),
)
group.add_argument(
"--libcxx-include-dir",
help=textwrap.dedent(
"Specify the path to a custom libc++ include directory. Must be used in conjunction with --libcxx-library-dir."
),
)
group.add_argument(
"--libcxx-include-target-dir",
help=textwrap.dedent(
"Specify the path to a custom libc++ include target directory to use in addition to --libcxx-include-dir. Optional."
),
)
group.add_argument(
"--libcxx-library-dir",
help=textwrap.dedent(
"Specify the path to a custom libc++ library directory. Must be used in conjunction with --libcxx-include-dir."
),
)
# FIXME? This won't work for different extra flags according to each arch.
group.add_argument(
'-E',
metavar='extra-flags',
help=textwrap.dedent('''Specify the extra flags to be passed to the toolchain when building the inferior programs to be debugged
suggestions: do not lump the "-A arch1 -A arch2" together such that the -E option applies to only one of the architectures'''))
"-E",
metavar="extra-flags",
help=textwrap.dedent(
"""Specify the extra flags to be passed to the toolchain when building the inferior programs to be debugged
suggestions: do not lump the "-A arch1 -A arch2" together such that the -E option applies to only one of the architectures"""
),
)
group.add_argument('--dsymutil', metavar='dsymutil', dest='dsymutil', help=textwrap.dedent('Specify which dsymutil to use.'))
group.add_argument('--llvm-tools-dir', metavar='dir', dest='llvm_tools_dir',
help=textwrap.dedent('The location of llvm tools used for testing (yaml2obj, FileCheck, etc.).'))
group.add_argument(
"--dsymutil",
metavar="dsymutil",
dest="dsymutil",
help=textwrap.dedent("Specify which dsymutil to use."),
)
group.add_argument(
"--llvm-tools-dir",
metavar="dir",
dest="llvm_tools_dir",
help=textwrap.dedent(
"The location of llvm tools used for testing (yaml2obj, FileCheck, etc.)."
),
)
# Test filtering options
group = parser.add_argument_group('Test filtering options')
group = parser.add_argument_group("Test filtering options")
group.add_argument(
'-f',
metavar='filterspec',
action='append',
help=('Specify a filter, which looks like "TestModule.TestClass.test_name". '+
'You may also use shortened filters, such as '+
'"TestModule.TestClass", "TestClass.test_name", or just "test_name".'))
"-f",
metavar="filterspec",
action="append",
help=(
'Specify a filter, which looks like "TestModule.TestClass.test_name". '
+ "You may also use shortened filters, such as "
+ '"TestModule.TestClass", "TestClass.test_name", or just "test_name".'
),
)
group.add_argument(
'-p',
metavar='pattern',
help='Specify a regexp filename pattern for inclusion in the test suite')
group.add_argument('--excluded', metavar='exclusion-file', action='append', help=textwrap.dedent(
'''Specify a file for tests to exclude. File should contain lists of regular expressions for test files or methods,
with each list under a matching header (xfail files, xfail methods, skip files, skip methods)'''))
"-p",
metavar="pattern",
help="Specify a regexp filename pattern for inclusion in the test suite",
)
group.add_argument(
'-G',
'--category',
metavar='category',
action='append',
dest='categories_list',
help=textwrap.dedent('''Specify categories of test cases of interest. Can be specified more than once.'''))
"--excluded",
metavar="exclusion-file",
action="append",
help=textwrap.dedent(
"""Specify a file for tests to exclude. File should contain lists of regular expressions for test files or methods,
with each list under a matching header (xfail files, xfail methods, skip files, skip methods)"""
),
)
group.add_argument(
'--skip-category',
metavar='category',
action='append',
dest='skip_categories',
help=textwrap.dedent('''Specify categories of test cases to skip. Takes precedence over -G. Can be specified more than once.'''))
"-G",
"--category",
metavar="category",
action="append",
dest="categories_list",
help=textwrap.dedent(
"""Specify categories of test cases of interest. Can be specified more than once."""
),
)
group.add_argument(
'--xfail-category',
metavar='category',
action='append',
dest='xfail_categories',
help=textwrap.dedent('''Specify categories of test cases that are expected to fail. Can be specified more than once.'''))
"--skip-category",
metavar="category",
action="append",
dest="skip_categories",
help=textwrap.dedent(
"""Specify categories of test cases to skip. Takes precedence over -G. Can be specified more than once."""
),
)
group.add_argument(
"--xfail-category",
metavar="category",
action="append",
dest="xfail_categories",
help=textwrap.dedent(
"""Specify categories of test cases that are expected to fail. Can be specified more than once."""
),
)
# Configuration options
group = parser.add_argument_group('Configuration options')
group = parser.add_argument_group("Configuration options")
group.add_argument(
'--framework',
metavar='framework-path',
help='The path to LLDB.framework')
"--framework", metavar="framework-path", help="The path to LLDB.framework"
)
group.add_argument(
'--executable',
metavar='executable-path',
help='The path to the lldb executable')
"--executable",
metavar="executable-path",
help="The path to the lldb executable",
)
group.add_argument(
'--out-of-tree-debugserver',
dest='out_of_tree_debugserver',
action='store_true',
help='A flag to indicate an out-of-tree debug server is being used')
"--out-of-tree-debugserver",
dest="out_of_tree_debugserver",
action="store_true",
help="A flag to indicate an out-of-tree debug server is being used",
)
group.add_argument(
'--dwarf-version',
metavar='dwarf_version',
dest='dwarf_version',
"--dwarf-version",
metavar="dwarf_version",
dest="dwarf_version",
type=int,
help='Override the DWARF version.')
help="Override the DWARF version.",
)
group.add_argument(
'--setting',
metavar='SETTING=VALUE',
dest='settings',
"--setting",
metavar="SETTING=VALUE",
dest="settings",
type=str,
nargs=1,
action='append',
help='Run "setting set SETTING VALUE" before executing any test.')
action="append",
help='Run "setting set SETTING VALUE" before executing any test.',
)
group.add_argument(
'-y',
"-y",
type=int,
metavar='count',
help="Specify the iteration count used to collect our benchmarks. An example is the number of times to do 'thread step-over' to measure stepping speed.")
metavar="count",
help="Specify the iteration count used to collect our benchmarks. An example is the number of times to do 'thread step-over' to measure stepping speed.",
)
group.add_argument(
'-#',
"-#",
type=int,
metavar='sharp',
dest='sharp',
help='Repeat the test suite for a specified number of times')
group.add_argument('--channel', metavar='channel', dest='channels', action='append', help=textwrap.dedent(
"Specify the log channels (and optional categories) e.g. 'lldb all' or 'gdb-remote packets' if no categories are specified, 'default' is used"))
metavar="sharp",
dest="sharp",
help="Repeat the test suite for a specified number of times",
)
group.add_argument(
'--log-success',
dest='log_success',
action='store_true',
help="Leave logs/traces even for successful test runs (useful for creating reference log files during debugging.)")
"--channel",
metavar="channel",
dest="channels",
action="append",
help=textwrap.dedent(
"Specify the log channels (and optional categories) e.g. 'lldb all' or 'gdb-remote packets' if no categories are specified, 'default' is used"
),
)
group.add_argument(
'--codesign-identity',
metavar='Codesigning identity',
default='lldb_codesign',
help='The codesigning identity to use')
"--log-success",
dest="log_success",
action="store_true",
help="Leave logs/traces even for successful test runs (useful for creating reference log files during debugging.)",
)
group.add_argument(
'--build-dir',
dest='test_build_dir',
metavar='Test build directory',
default='lldb-test-build.noindex',
help='The root build directory for the tests. It will be removed before running.')
"--codesign-identity",
metavar="Codesigning identity",
default="lldb_codesign",
help="The codesigning identity to use",
)
group.add_argument(
'--lldb-module-cache-dir',
dest='lldb_module_cache_dir',
metavar='The clang module cache directory used by LLDB',
help='The clang module cache directory used by LLDB. Defaults to <test build directory>/module-cache-lldb.')
"--build-dir",
dest="test_build_dir",
metavar="Test build directory",
default="lldb-test-build.noindex",
help="The root build directory for the tests. It will be removed before running.",
)
group.add_argument(
'--clang-module-cache-dir',
dest='clang_module_cache_dir',
metavar='The clang module cache directory used by Clang',
help='The clang module cache directory used in the Make files by Clang while building tests. Defaults to <test build directory>/module-cache-clang.')
"--lldb-module-cache-dir",
dest="lldb_module_cache_dir",
metavar="The clang module cache directory used by LLDB",
help="The clang module cache directory used by LLDB. Defaults to <test build directory>/module-cache-lldb.",
)
group.add_argument(
'--lldb-libs-dir',
dest='lldb_libs_dir',
metavar='path',
help='The path to LLDB library directory (containing liblldb)')
"--clang-module-cache-dir",
dest="clang_module_cache_dir",
metavar="The clang module cache directory used by Clang",
help="The clang module cache directory used in the Make files by Clang while building tests. Defaults to <test build directory>/module-cache-clang.",
)
group.add_argument(
'--enable-plugin',
dest='enabled_plugins',
action='append',
"--lldb-libs-dir",
dest="lldb_libs_dir",
metavar="path",
help="The path to LLDB library directory (containing liblldb)",
)
group.add_argument(
"--enable-plugin",
dest="enabled_plugins",
action="append",
type=str,
metavar='A plugin whose tests will be enabled',
help='A plugin whose tests will be enabled. The only currently supported plugin is intel-pt.')
metavar="A plugin whose tests will be enabled",
help="A plugin whose tests will be enabled. The only currently supported plugin is intel-pt.",
)
# Configuration options
group = parser.add_argument_group('Remote platform options')
group = parser.add_argument_group("Remote platform options")
group.add_argument(
'--platform-name',
dest='lldb_platform_name',
metavar='platform-name',
help='The name of a remote platform to use')
"--platform-name",
dest="lldb_platform_name",
metavar="platform-name",
help="The name of a remote platform to use",
)
group.add_argument(
'--platform-url',
dest='lldb_platform_url',
metavar='platform-url',
help='A LLDB platform URL to use when connecting to a remote platform to run the test suite')
"--platform-url",
dest="lldb_platform_url",
metavar="platform-url",
help="A LLDB platform URL to use when connecting to a remote platform to run the test suite",
)
group.add_argument(
'--platform-working-dir',
dest='lldb_platform_working_dir',
metavar='platform-working-dir',
help='The directory to use on the remote platform.')
"--platform-working-dir",
dest="lldb_platform_working_dir",
metavar="platform-working-dir",
help="The directory to use on the remote platform.",
)
# Test-suite behaviour
group = parser.add_argument_group('Runtime behaviour options')
X('-d', 'Suspend the process after launch to wait indefinitely for a debugger to attach')
X('-t', 'Turn on tracing of lldb command and other detailed test executions')
group = parser.add_argument_group("Runtime behaviour options")
X(
"-d",
"Suspend the process after launch to wait indefinitely for a debugger to attach",
)
X("-t", "Turn on tracing of lldb command and other detailed test executions")
group.add_argument(
'-u',
dest='unset_env_varnames',
metavar='variable',
action='append',
help='Specify an environment variable to unset before running the test cases. e.g., -u DYLD_INSERT_LIBRARIES -u MallocScribble')
"-u",
dest="unset_env_varnames",
metavar="variable",
action="append",
help="Specify an environment variable to unset before running the test cases. e.g., -u DYLD_INSERT_LIBRARIES -u MallocScribble",
)
group.add_argument(
'--env',
dest='set_env_vars',
metavar='variable',
action='append',
help='Specify an environment variable to set to the given value before running the test cases e.g.: --env CXXFLAGS=-O3 --env DYLD_INSERT_LIBRARIES')
"--env",
dest="set_env_vars",
metavar="variable",
action="append",
help="Specify an environment variable to set to the given value before running the test cases e.g.: --env CXXFLAGS=-O3 --env DYLD_INSERT_LIBRARIES",
)
group.add_argument(
'--inferior-env',
dest='set_inferior_env_vars',
metavar='variable',
action='append',
help='Specify an environment variable to set to the given value for the inferior.')
X('-v', 'Do verbose mode of unittest framework (print out each test case invocation)')
"--inferior-env",
dest="set_inferior_env_vars",
metavar="variable",
action="append",
help="Specify an environment variable to set to the given value for the inferior.",
)
X(
"-v",
"Do verbose mode of unittest framework (print out each test case invocation)",
)
group.add_argument(
'--enable-crash-dialog',
dest='disable_crash_dialog',
action='store_false',
help='(Windows only) When LLDB crashes, display the Windows crash dialog.')
"--enable-crash-dialog",
dest="disable_crash_dialog",
action="store_false",
help="(Windows only) When LLDB crashes, display the Windows crash dialog.",
)
group.set_defaults(disable_crash_dialog=True)
# Remove the reference to our helper function
del X
group = parser.add_argument_group('Test directories')
group = parser.add_argument_group("Test directories")
group.add_argument(
'args',
metavar='test-dir',
nargs='*',
help='Specify a list of directory names to search for test modules named after Test*.py (test discovery). If empty, search from the current working directory instead.')
"args",
metavar="test-dir",
nargs="*",
help="Specify a list of directory names to search for test modules named after Test*.py (test discovery). If empty, search from the current working directory instead.",
)
return parser
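Note: in the reformatted parser above, the X helper is just a thin wrapper that registers a boolean flag on whichever argument group is current. A minimal, hypothetical sketch of that pattern (the names here are illustrative, not part of the real dotest options):

import argparse

parser = argparse.ArgumentParser(description="demo", prefix_chars="+-", add_help=False)
group = parser.add_argument_group("Runtime behaviour options")
# Mirrors the X helper above: register a store_true flag on the current group.
flag = lambda optstr, helpstr, **kwargs: group.add_argument(
    optstr, help=helpstr, action="store_true", **kwargs
)
flag("-t", "Turn on tracing of lldb command and other detailed test executions")
print(parser.parse_args(["-t"]).t)  # True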

View File

@@ -6,6 +6,7 @@ import socket
import traceback
from lldbsuite.support import seven
def checksum(message):
"""
Calculate the GDB server protocol checksum of the message.
@@ -40,8 +41,8 @@ def escape_binary(message):
out = ""
for c in message:
d = ord(c)
if d in (0x23, 0x24, 0x7d):
out += chr(0x7d)
if d in (0x23, 0x24, 0x7D):
out += chr(0x7D)
out += chr(d ^ 0x20)
else:
out += c
@@ -68,7 +69,7 @@ def hex_decode_bytes(hex_bytes):
hex_len = len(hex_bytes)
i = 0
while i < hex_len - 1:
out += chr(int(hex_bytes[i:i + 2], 16))
out += chr(int(hex_bytes[i : i + 2], 16))
i += 2
return out
@@ -87,7 +88,9 @@ class MockGDBServerResponder:
registerCount = 40
packetLog = None
class RESPONSE_DISCONNECT: pass
class RESPONSE_DISCONNECT:
pass
def __init__(self):
self.packetLog = []
@@ -113,19 +116,19 @@ class MockGDBServerResponder:
if packet[0] == "G":
# Gxxxxxxxxxxx
# Gxxxxxxxxxxx;thread:1234;
return self.writeRegisters(packet[1:].split(';')[0])
return self.writeRegisters(packet[1:].split(";")[0])
if packet[0] == "p":
regnum = packet[1:].split(';')[0]
regnum = packet[1:].split(";")[0]
return self.readRegister(int(regnum, 16))
if packet[0] == "P":
register, value = packet[1:].split("=")
return self.writeRegister(int(register, 16), value)
if packet[0] == "m":
addr, length = [int(x, 16) for x in packet[1:].split(',')]
addr, length = [int(x, 16) for x in packet[1:].split(",")]
return self.readMemory(addr, length)
if packet[0] == "M":
location, encoded_data = packet[1:].split(":")
addr, length = [int(x, 16) for x in location.split(',')]
addr, length = [int(x, 16) for x in location.split(",")]
return self.writeMemory(addr, encoded_data)
if packet[0:7] == "qSymbol":
return self.qSymbol(packet[8:])
@@ -152,25 +155,25 @@ class MockGDBServerResponder:
return self.selectThread(packet[1], int(tid, 16))
if packet[0:6] == "qXfer:":
obj, read, annex, location = packet[6:].split(":")
offset, length = [int(x, 16) for x in location.split(',')]
offset, length = [int(x, 16) for x in location.split(",")]
data, has_more = self.qXferRead(obj, annex, offset, length)
if data is not None:
return self._qXferResponse(data, has_more)
return ""
if packet.startswith("vAttach;"):
pid = packet.partition(';')[2]
pid = packet.partition(";")[2]
return self.vAttach(int(pid, 16))
if packet[0] == "Z":
return self.setBreakpoint(packet)
if packet.startswith("qThreadStopInfo"):
threadnum = int (packet[15:], 16)
threadnum = int(packet[15:], 16)
return self.threadStopInfo(threadnum)
if packet == "QThreadSuffixSupported":
return self.QThreadSuffixSupported()
if packet == "QListThreadsInStopReply":
return self.QListThreadsInStopReply()
if packet.startswith("qMemoryRegionInfo:"):
return self.qMemoryRegionInfo(int(packet.split(':')[1], 16))
return self.qMemoryRegionInfo(int(packet.split(":")[1], 16))
if packet == "qQueryGDBServer":
return self.qQueryGDBServer()
if packet == "qHostInfo":
@@ -178,7 +181,7 @@ class MockGDBServerResponder:
if packet == "qGetWorkingDir":
return self.qGetWorkingDir()
if packet == "qOffsets":
return self.qOffsets();
return self.qOffsets()
if packet == "qProcessInfo":
return self.qProcessInfo()
if packet == "qsProcessInfo":
@@ -200,7 +203,7 @@ class MockGDBServerResponder:
if packet.startswith("QEnvironmentHexEncoded:"):
return self.QEnvironmentHexEncoded(packet)
if packet.startswith("qRegisterInfo"):
regnum = int(packet[len("qRegisterInfo"):], 16)
regnum = int(packet[len("qRegisterInfo") :], 16)
return self.qRegisterInfo(regnum)
if packet == "k":
return self.k()
@@ -346,6 +349,7 @@ class MockGDBServerResponder:
Override the responder class to implement behavior suitable for the test at
hand.
"""
class UnexpectedPacketException(Exception):
pass
@@ -416,7 +420,8 @@ class ServerSocket(ServerChannel):
class TCPServerSocket(ServerSocket):
def __init__(self):
family, type, proto, _, addr = socket.getaddrinfo(
"localhost", 0, proto=socket.IPPROTO_TCP)[0]
"localhost", 0, proto=socket.IPPROTO_TCP
)[0]
super().__init__(family, type, proto, addr)
def get_connect_address(self):
@@ -441,10 +446,11 @@ class PtyServerSocket(ServerChannel):
def __init__(self):
import pty
import tty
primary, secondary = pty.openpty()
tty.setraw(primary)
self._primary = io.FileIO(primary, 'r+b')
self._secondary = io.FileIO(secondary, 'r+b')
self._primary = io.FileIO(primary, "r+b")
self._secondary = io.FileIO(secondary, "r+b")
def get_connect_address(self):
libc = ctypes.CDLL(None)
@@ -465,7 +471,7 @@ class PtyServerSocket(ServerChannel):
except OSError as e:
# closing the pty results in EIO on Linux, convert it to EOF
if e.errno == errno.EIO:
return b''
return b""
raise
def sendall(self, data):
@@ -528,7 +534,9 @@ class MockGDBServer:
except self.TerminateConnectionException:
pass
except Exception as e:
print("An exception happened when receiving the response from the gdb server. Closing the client...")
print(
"An exception happened when receiving the response from the gdb server. Closing the client..."
)
traceback.print_exc()
finally:
self._socket.close_connection()
@@ -567,22 +575,23 @@ class MockGDBServer:
# If we're looking at the start of the received data, that means
# we're looking for the start of a new packet, denoted by a $.
# It's also possible we'll see an ACK here, denoted by a +
if data[0] == '+':
if data[0] == "+":
self._receivedData = data[1:]
return self.PACKET_ACK
if ord(data[0]) == 3:
self._receivedData = data[1:]
return self.PACKET_INTERRUPT
if data[0] == '$':
if data[0] == "$":
i += 1
else:
raise self.InvalidPacketException(
"Unexpected leading byte: %s" % data[0])
"Unexpected leading byte: %s" % data[0]
)
# If we're looking beyond the start of the received data, then we're
# looking for the end of the packet content, denoted by a #.
# Note that we pick up searching from where we left off last time
while i < data_len and data[i] != '#':
while i < data_len and data[i] != "#":
i += 1
# If there isn't enough data left for a checksum, just remember where
@@ -596,14 +605,14 @@ class MockGDBServer:
packet = data[1:i]
i += 1
try:
check = int(data[i:i + 2], 16)
check = int(data[i : i + 2], 16)
except ValueError:
raise self.InvalidPacketException("Checksum is not valid hex")
i += 2
if check != checksum(packet):
raise self.InvalidPacketException(
"Checksum %02x does not match content %02x" %
(check, checksum(packet)))
"Checksum %02x does not match content %02x" % (check, checksum(packet))
)
# remove parsed bytes from _receivedData and reset offset so parsing
# can start on the next packet the next time around
self._receivedData = data[i:]
@@ -623,7 +632,7 @@ class MockGDBServer:
# We'll handle the ack stuff here since it's not something any of the
# tests will be concerned about, and it'll get turned off quickly anyway.
if self._shouldSendAck:
self._socket.sendall(seven.bitcast_to_bytes('+'))
self._socket.sendall(seven.bitcast_to_bytes("+"))
if packet == "QStartNoAckMode":
self._shouldSendAck = False
response = "OK"
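Note: the mock server above speaks the gdb-remote wire format; the 0x7d to 0x7D change in the escape_binary hunk is a case-only change to the hex literal, the value is identical. As a quick reference, a packet is framed as "$" + payload + "#" + a two-digit checksum (sum of the payload bytes modulo 256), and the reserved bytes '#', '$' and '}' are escaped as 0x7D followed by the byte XOR 0x20. A small, self-contained sketch of those rules (not taken from the test suite):

def checksum(message):
    # GDB remote protocol checksum: sum of the payload bytes, modulo 256.
    return sum(ord(c) for c in message) % 256

def frame(payload):
    # A packet on the wire is "$" + payload + "#" + two hex digits of checksum.
    return "$%s#%02x" % (payload, checksum(payload))

def escape_binary(message):
    # Reserved bytes '#' (0x23), '$' (0x24) and '}' (0x7D) become 0x7D, byte ^ 0x20.
    out = ""
    for c in message:
        d = ord(c)
        if d in (0x23, 0x24, 0x7D):
            out += chr(0x7D) + chr(d ^ 0x20)
        else:
            out += c
    return out

print(frame("QStartNoAckMode"))  # $QStartNoAckMode#b0
print(escape_binary("}"))        # }]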

View File

@@ -113,16 +113,14 @@ def add_lldb_module_directory():
try:
lldb_module_path = None
if platform.system() == 'Darwin':
if platform.system() == "Darwin":
# Use xcrun to find the selected lldb.
lldb_module_path = subprocess.check_output(["xcrun", "lldb", "-P"])
elif platform.system() == 'Windows':
lldb_module_path = subprocess.check_output(
["lldb.exe", "-P"], shell=True)
elif platform.system() == "Windows":
lldb_module_path = subprocess.check_output(["lldb.exe", "-P"], shell=True)
else:
# Use the shell to run lldb from the path.
lldb_module_path = subprocess.check_output(
["lldb", "-P"], shell=True)
lldb_module_path = subprocess.check_output(["lldb", "-P"], shell=True)
# Trim the result.
if lldb_module_path is not None:
@@ -145,24 +143,27 @@ def add_lldb_test_package_paths(check_dir):
"""
def child_dirs(parent_dir):
return [os.path.join(parent_dir, child)
for child in os.listdir(parent_dir)
if os.path.isdir(os.path.join(parent_dir, child))]
return [
os.path.join(parent_dir, child)
for child in os.listdir(parent_dir)
if os.path.isdir(os.path.join(parent_dir, child))
]
check_dir = os.path.realpath(check_dir)
while check_dir and len(check_dir) > 0:
# If the current directory contains a packages/Python
# directory, add that directory to the path.
packages_python_child_dir = os.path.join(
check_dir, "packages", "Python")
packages_python_child_dir = os.path.join(check_dir, "packages", "Python")
if os.path.exists(packages_python_child_dir):
sys.path.insert(0, packages_python_child_dir)
sys.path.insert(0, os.path.join(
packages_python_child_dir, "test_runner", "lib"))
sys.path.insert(
0, os.path.join(packages_python_child_dir, "test_runner", "lib")
)
# Handle third_party module/package directory.
third_party_module_dir = os.path.join(
check_dir, "third_party", "Python", "module")
check_dir, "third_party", "Python", "module"
)
for child_dir in child_dirs(third_party_module_dir):
# Yes, we embed the module in the module parent dir
sys.path.insert(0, child_dir)
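Note: the hunk above locates LLDB's Python bindings by asking the lldb driver itself. A hedged, minimal sketch of that idea, assuming an lldb binary is on PATH (on macOS the suite goes through xcrun instead):

import subprocess
import sys

# "lldb -P" prints the directory that contains the lldb Python module.
lldb_module_path = subprocess.check_output(["lldb", "-P"]).decode("utf-8").strip()
sys.path.insert(0, lldb_module_path)
import lldb  # resolvable now that the bindings directory is on sys.path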

View File

@@ -65,8 +65,7 @@ class Stopwatch(object):
if self.__start__ is None:
self.__start__ = time.time()
else:
raise Exception(
"start() already called, did you forget to stop() first?")
raise Exception("start() already called, did you forget to stop() first?")
# Return self to facilitate the context manager __enter__ protocol.
return self
@@ -96,8 +95,13 @@ class Stopwatch(object):
# return numpy.std(self.__nums__)
def __str__(self):
return "Avg: %f (Laps: %d, Total Elapsed Time: %f, min=%f, max=%f)" % (self.avg(
), self.__laps__, self.__total_elapsed__, min(self.__nums__), max(self.__nums__))
return "Avg: %f (Laps: %d, Total Elapsed Time: %f, min=%f, max=%f)" % (
self.avg(),
self.__laps__,
self.__total_elapsed__,
min(self.__nums__),
max(self.__nums__),
)
class BenchBase(TestBase):
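Note: the Stopwatch hunk above depends on start() returning self so the object doubles as a context manager. A compressed, hypothetical sketch of that protocol (not the real class, which also tracks averages):

import time

class MiniStopwatch:
    # Hypothetical miniature of the pattern above, not the real Stopwatch class.
    def __init__(self):
        self.__start__ = None
        self.__laps__ = 0
        self.__nums__ = []

    def start(self):
        if self.__start__ is None:
            self.__start__ = time.time()
        else:
            raise Exception("start() already called, did you forget to stop() first?")
        # Returning self is what makes "with sw.start():" work.
        return self

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        self.__nums__.append(time.time() - self.__start__)
        self.__start__ = None
        self.__laps__ += 1
        return False

with MiniStopwatch().start() as sw:
    time.sleep(0.01)
print("Laps: %d, min=%f, max=%f" % (sw.__laps__, min(sw.__nums__), max(sw.__nums__)))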

View File

@@ -3,187 +3,185 @@
import lldb
# DWARF Expression operators.
DW_OP_addr = 0x03
DW_OP_deref = 0x06
DW_OP_const1u = 0x08
DW_OP_const1s = 0x09
DW_OP_const2u = 0x0A
DW_OP_const2s = 0x0B
DW_OP_const4u = 0x0C
DW_OP_const4s = 0x0D
DW_OP_const8u = 0x0E
DW_OP_const8s = 0x0F
DW_OP_constu = 0x10
DW_OP_consts = 0x11
DW_OP_dup = 0x12
DW_OP_drop = 0x13
DW_OP_over = 0x14
DW_OP_pick = 0x15
DW_OP_swap = 0x16
DW_OP_rot = 0x17
DW_OP_xderef = 0x18
DW_OP_abs = 0x19
DW_OP_and = 0x1A
DW_OP_div = 0x1B
DW_OP_minus = 0x1C
DW_OP_mod = 0x1D
DW_OP_mul = 0x1E
DW_OP_neg = 0x1F
DW_OP_not = 0x20
DW_OP_or = 0x21
DW_OP_plus = 0x22
DW_OP_plus_uconst = 0x23
DW_OP_shl = 0x24
DW_OP_shr = 0x25
DW_OP_shra = 0x26
DW_OP_xor = 0x27
DW_OP_skip = 0x2F
DW_OP_bra = 0x28
DW_OP_eq = 0x29
DW_OP_ge = 0x2A
DW_OP_gt = 0x2B
DW_OP_le = 0x2C
DW_OP_lt = 0x2D
DW_OP_ne = 0x2E
DW_OP_lit0 = 0x30
DW_OP_lit1 = 0x31
DW_OP_lit2 = 0x32
DW_OP_lit3 = 0x33
DW_OP_lit4 = 0x34
DW_OP_lit5 = 0x35
DW_OP_lit6 = 0x36
DW_OP_lit7 = 0x37
DW_OP_lit8 = 0x38
DW_OP_lit9 = 0x39
DW_OP_lit10 = 0x3A
DW_OP_lit11 = 0x3B
DW_OP_lit12 = 0x3C
DW_OP_lit13 = 0x3D
DW_OP_lit14 = 0x3E
DW_OP_lit15 = 0x3F
DW_OP_lit16 = 0x40
DW_OP_lit17 = 0x41
DW_OP_lit18 = 0x42
DW_OP_lit19 = 0x43
DW_OP_lit20 = 0x44
DW_OP_lit21 = 0x45
DW_OP_lit22 = 0x46
DW_OP_lit23 = 0x47
DW_OP_lit24 = 0x48
DW_OP_lit25 = 0x49
DW_OP_lit26 = 0x4A
DW_OP_lit27 = 0x4B
DW_OP_lit28 = 0x4C
DW_OP_lit29 = 0x4D
DW_OP_lit30 = 0x4E
DW_OP_lit31 = 0x4F
DW_OP_reg0 = 0x50
DW_OP_reg1 = 0x51
DW_OP_reg2 = 0x52
DW_OP_reg3 = 0x53
DW_OP_reg4 = 0x54
DW_OP_reg5 = 0x55
DW_OP_reg6 = 0x56
DW_OP_reg7 = 0x57
DW_OP_reg8 = 0x58
DW_OP_reg9 = 0x59
DW_OP_reg10 = 0x5A
DW_OP_reg11 = 0x5B
DW_OP_reg12 = 0x5C
DW_OP_reg13 = 0x5D
DW_OP_reg14 = 0x5E
DW_OP_reg15 = 0x5F
DW_OP_reg16 = 0x60
DW_OP_reg17 = 0x61
DW_OP_reg18 = 0x62
DW_OP_reg19 = 0x63
DW_OP_reg20 = 0x64
DW_OP_reg21 = 0x65
DW_OP_reg22 = 0x66
DW_OP_reg23 = 0x67
DW_OP_reg24 = 0x68
DW_OP_reg25 = 0x69
DW_OP_reg26 = 0x6A
DW_OP_reg27 = 0x6B
DW_OP_reg28 = 0x6C
DW_OP_reg29 = 0x6D
DW_OP_reg30 = 0x6E
DW_OP_reg31 = 0x6F
DW_OP_breg0 = 0x70
DW_OP_breg1 = 0x71
DW_OP_breg2 = 0x72
DW_OP_breg3 = 0x73
DW_OP_breg4 = 0x74
DW_OP_breg5 = 0x75
DW_OP_breg6 = 0x76
DW_OP_breg7 = 0x77
DW_OP_breg8 = 0x78
DW_OP_breg9 = 0x79
DW_OP_breg10 = 0x7A
DW_OP_breg11 = 0x7B
DW_OP_breg12 = 0x7C
DW_OP_breg13 = 0x7D
DW_OP_breg14 = 0x7E
DW_OP_breg15 = 0x7F
DW_OP_breg16 = 0x80
DW_OP_breg17 = 0x81
DW_OP_breg18 = 0x82
DW_OP_breg19 = 0x83
DW_OP_breg20 = 0x84
DW_OP_breg21 = 0x85
DW_OP_breg22 = 0x86
DW_OP_breg23 = 0x87
DW_OP_breg24 = 0x88
DW_OP_breg25 = 0x89
DW_OP_breg26 = 0x8A
DW_OP_breg27 = 0x8B
DW_OP_breg28 = 0x8C
DW_OP_breg29 = 0x8D
DW_OP_breg30 = 0x8E
DW_OP_breg31 = 0x8F
DW_OP_regx = 0x90
DW_OP_fbreg = 0x91
DW_OP_bregx = 0x92
DW_OP_piece = 0x93
DW_OP_deref_size = 0x94
DW_OP_xderef_size = 0x95
DW_OP_nop = 0x96
DW_OP_push_object_address = 0x97
DW_OP_call2 = 0x98
DW_OP_call4 = 0x99
DW_OP_call_ref = 0x9A
DW_OP_form_tls_address = 0x9B
DW_OP_call_frame_cfa = 0x9C
DW_OP_bit_piece = 0x9D
DW_OP_implicit_value = 0x9E
DW_OP_stack_value = 0x9F
DW_OP_lo_user = 0xE0
DW_OP_GNU_push_tls_address = 0xE0
DW_OP_hi_user = 0xFF
DW_OP_addr = 0x03
DW_OP_deref = 0x06
DW_OP_const1u = 0x08
DW_OP_const1s = 0x09
DW_OP_const2u = 0x0A
DW_OP_const2s = 0x0B
DW_OP_const4u = 0x0C
DW_OP_const4s = 0x0D
DW_OP_const8u = 0x0E
DW_OP_const8s = 0x0F
DW_OP_constu = 0x10
DW_OP_consts = 0x11
DW_OP_dup = 0x12
DW_OP_drop = 0x13
DW_OP_over = 0x14
DW_OP_pick = 0x15
DW_OP_swap = 0x16
DW_OP_rot = 0x17
DW_OP_xderef = 0x18
DW_OP_abs = 0x19
DW_OP_and = 0x1A
DW_OP_div = 0x1B
DW_OP_minus = 0x1C
DW_OP_mod = 0x1D
DW_OP_mul = 0x1E
DW_OP_neg = 0x1F
DW_OP_not = 0x20
DW_OP_or = 0x21
DW_OP_plus = 0x22
DW_OP_plus_uconst = 0x23
DW_OP_shl = 0x24
DW_OP_shr = 0x25
DW_OP_shra = 0x26
DW_OP_xor = 0x27
DW_OP_skip = 0x2F
DW_OP_bra = 0x28
DW_OP_eq = 0x29
DW_OP_ge = 0x2A
DW_OP_gt = 0x2B
DW_OP_le = 0x2C
DW_OP_lt = 0x2D
DW_OP_ne = 0x2E
DW_OP_lit0 = 0x30
DW_OP_lit1 = 0x31
DW_OP_lit2 = 0x32
DW_OP_lit3 = 0x33
DW_OP_lit4 = 0x34
DW_OP_lit5 = 0x35
DW_OP_lit6 = 0x36
DW_OP_lit7 = 0x37
DW_OP_lit8 = 0x38
DW_OP_lit9 = 0x39
DW_OP_lit10 = 0x3A
DW_OP_lit11 = 0x3B
DW_OP_lit12 = 0x3C
DW_OP_lit13 = 0x3D
DW_OP_lit14 = 0x3E
DW_OP_lit15 = 0x3F
DW_OP_lit16 = 0x40
DW_OP_lit17 = 0x41
DW_OP_lit18 = 0x42
DW_OP_lit19 = 0x43
DW_OP_lit20 = 0x44
DW_OP_lit21 = 0x45
DW_OP_lit22 = 0x46
DW_OP_lit23 = 0x47
DW_OP_lit24 = 0x48
DW_OP_lit25 = 0x49
DW_OP_lit26 = 0x4A
DW_OP_lit27 = 0x4B
DW_OP_lit28 = 0x4C
DW_OP_lit29 = 0x4D
DW_OP_lit30 = 0x4E
DW_OP_lit31 = 0x4F
DW_OP_reg0 = 0x50
DW_OP_reg1 = 0x51
DW_OP_reg2 = 0x52
DW_OP_reg3 = 0x53
DW_OP_reg4 = 0x54
DW_OP_reg5 = 0x55
DW_OP_reg6 = 0x56
DW_OP_reg7 = 0x57
DW_OP_reg8 = 0x58
DW_OP_reg9 = 0x59
DW_OP_reg10 = 0x5A
DW_OP_reg11 = 0x5B
DW_OP_reg12 = 0x5C
DW_OP_reg13 = 0x5D
DW_OP_reg14 = 0x5E
DW_OP_reg15 = 0x5F
DW_OP_reg16 = 0x60
DW_OP_reg17 = 0x61
DW_OP_reg18 = 0x62
DW_OP_reg19 = 0x63
DW_OP_reg20 = 0x64
DW_OP_reg21 = 0x65
DW_OP_reg22 = 0x66
DW_OP_reg23 = 0x67
DW_OP_reg24 = 0x68
DW_OP_reg25 = 0x69
DW_OP_reg26 = 0x6A
DW_OP_reg27 = 0x6B
DW_OP_reg28 = 0x6C
DW_OP_reg29 = 0x6D
DW_OP_reg30 = 0x6E
DW_OP_reg31 = 0x6F
DW_OP_breg0 = 0x70
DW_OP_breg1 = 0x71
DW_OP_breg2 = 0x72
DW_OP_breg3 = 0x73
DW_OP_breg4 = 0x74
DW_OP_breg5 = 0x75
DW_OP_breg6 = 0x76
DW_OP_breg7 = 0x77
DW_OP_breg8 = 0x78
DW_OP_breg9 = 0x79
DW_OP_breg10 = 0x7A
DW_OP_breg11 = 0x7B
DW_OP_breg12 = 0x7C
DW_OP_breg13 = 0x7D
DW_OP_breg14 = 0x7E
DW_OP_breg15 = 0x7F
DW_OP_breg16 = 0x80
DW_OP_breg17 = 0x81
DW_OP_breg18 = 0x82
DW_OP_breg19 = 0x83
DW_OP_breg20 = 0x84
DW_OP_breg21 = 0x85
DW_OP_breg22 = 0x86
DW_OP_breg23 = 0x87
DW_OP_breg24 = 0x88
DW_OP_breg25 = 0x89
DW_OP_breg26 = 0x8A
DW_OP_breg27 = 0x8B
DW_OP_breg28 = 0x8C
DW_OP_breg29 = 0x8D
DW_OP_breg30 = 0x8E
DW_OP_breg31 = 0x8F
DW_OP_regx = 0x90
DW_OP_fbreg = 0x91
DW_OP_bregx = 0x92
DW_OP_piece = 0x93
DW_OP_deref_size = 0x94
DW_OP_xderef_size = 0x95
DW_OP_nop = 0x96
DW_OP_push_object_address = 0x97
DW_OP_call2 = 0x98
DW_OP_call4 = 0x99
DW_OP_call_ref = 0x9A
DW_OP_form_tls_address = 0x9B
DW_OP_call_frame_cfa = 0x9C
DW_OP_bit_piece = 0x9D
DW_OP_implicit_value = 0x9E
DW_OP_stack_value = 0x9F
DW_OP_lo_user = 0xE0
DW_OP_GNU_push_tls_address = 0xE0
DW_OP_hi_user = 0xFF
class DwarfOpcodeParser(object):
def updateRegInfoBitsize(self, reg_info, byte_order):
""" Update the regInfo bit size. """
"""Update the regInfo bit size."""
# Evaluate Dwarf Expression
expr_result = self.evaluateDwarfExpression(reg_info["dynamic_size_dwarf_expr_bytes"],
byte_order)
expr_result = self.evaluateDwarfExpression(
reg_info["dynamic_size_dwarf_expr_bytes"], byte_order
)
if expr_result == 0:
reg_info["bitsize"] = 32
elif expr_result == 1:
reg_info["bitsize"] = 64
def evaluateDwarfExpression(self, dwarf_opcode, byte_order):
"""Evaluate Dwarf Expression. """
"""Evaluate Dwarf Expression."""
dwarf_opcode = [dwarf_opcode[i:i+2] for i in range(0,len(dwarf_opcode),2)]
dwarf_opcode = [dwarf_opcode[i : i + 2] for i in range(0, len(dwarf_opcode), 2)]
dwarf_data = []
for index in range(len(dwarf_opcode)):
if index < len(dwarf_opcode):
val = int(dwarf_opcode[index], 16)
else:
@@ -197,9 +195,16 @@ class DwarfOpcodeParser(object):
self.reset_test_sequence()
# Read register value
self.test_sequence.add_log_lines(
["read packet: $p{0:x}#00".format(reg_no),
{"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#",
"capture": {1: "p_response"}}],True)
[
"read packet: $p{0:x}#00".format(reg_no),
{
"direction": "send",
"regex": r"^\$([0-9a-fA-F]+)#",
"capture": {1: "p_response"},
},
],
True,
)
Context = self.expect_gdbremote_sequence()
self.assertIsNotNone(Context)
@@ -207,13 +212,19 @@ class DwarfOpcodeParser(object):
self.assertIsNotNone(p_response)
if byte_order == lldb.eByteOrderLittle:
# In case of little endian
# first decode the HEX ASCII bytes and then reverse it
# to get actual value of SR register
p_response = "".join(reversed([p_response[i:i+2] for i in range(0,
len(p_response),2)]))
# In case of little endian
# first decode the HEX ASCII bytes and then reverse it
# to get actual value of SR register
p_response = "".join(
reversed(
[
p_response[i : i + 2]
for i in range(0, len(p_response), 2)
]
)
)
# Push register value
dwarf_data.append(int(p_response,16))
dwarf_data.append(int(p_response, 16))
elif val == DW_OP_lit1:
# Push literal 1
@@ -252,4 +263,3 @@ class DwarfOpcodeParser(object):
self.assertTrue(len(dwarf_data) == 1)
expr_result = dwarf_data.pop()
return expr_result
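Note: the DwarfOpcodeParser above splits a hex-encoded DWARF expression into byte pairs and evaluates it with a small stack machine; the literal opcodes DW_OP_lit0 through DW_OP_lit31 simply push the constants 0 through 31. A toy sketch of that inner loop, handling only the literal opcodes:

DW_OP_lit0 = 0x30
DW_OP_lit31 = 0x4F

def evaluate_literals(dwarf_opcode):
    # Split the hex string into byte pairs, then run a tiny stack machine.
    pairs = [dwarf_opcode[i : i + 2] for i in range(0, len(dwarf_opcode), 2)]
    stack = []
    for pair in pairs:
        val = int(pair, 16)
        if DW_OP_lit0 <= val <= DW_OP_lit31:
            stack.append(val - DW_OP_lit0)  # DW_OP_lit<n> pushes n
    return stack.pop()

print(evaluate_literals("31"))  # DW_OP_lit1 -> 1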

View File

@@ -4,6 +4,7 @@ import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.gdbclientutils import *
class GDBRemoteTestBase(TestBase):
"""
Base class for GDB client tests.
@@ -52,8 +53,9 @@ class GDBRemoteTestBase(TestBase):
"""
listener = self.dbg.GetListener()
error = lldb.SBError()
process = target.ConnectRemote(listener,
self.server.get_connect_url(), "gdb-remote", error)
process = target.ConnectRemote(
listener, self.server.get_connect_url(), "gdb-remote", error
)
self.assertTrue(error.Success(), error.description)
self.assertTrue(process, PROCESS_IS_VALID)
return process
@@ -79,8 +81,10 @@ class GDBRemoteTestBase(TestBase):
i += 1
j += 1
if i < len(packets):
self.fail(u"Did not receive: %s\nLast 10 packets:\n\t%s" %
(packets[i], u'\n\t'.join(log)))
self.fail(
"Did not receive: %s\nLast 10 packets:\n\t%s"
% (packets[i], "\n\t".join(log))
)
class GDBPlatformClientTestBase(GDBRemoteTestBase):

View File

@@ -15,25 +15,25 @@ from . import configuration
from . import lldbutil
from .decorators import *
def source_type(filename):
_, extension = os.path.splitext(filename)
return {
'.c': 'C_SOURCES',
'.cpp': 'CXX_SOURCES',
'.cxx': 'CXX_SOURCES',
'.cc': 'CXX_SOURCES',
'.m': 'OBJC_SOURCES',
'.mm': 'OBJCXX_SOURCES'
".c": "C_SOURCES",
".cpp": "CXX_SOURCES",
".cxx": "CXX_SOURCES",
".cc": "CXX_SOURCES",
".m": "OBJC_SOURCES",
".mm": "OBJCXX_SOURCES",
}.get(extension, None)
class CommandParser:
def __init__(self):
self.breakpoints = []
def parse_one_command(self, line):
parts = line.split('//%')
parts = line.split("//%")
command = None
new_breakpoint = True
@@ -46,7 +46,7 @@ class CommandParser:
def parse_source_files(self, source_files):
for source_file in source_files:
file_handle = io.open(source_file, encoding='utf-8')
file_handle = io.open(source_file, encoding="utf-8")
lines = file_handle.readlines()
line_number = 0
# non-NULL means we're looking through whitespace to find
@@ -62,30 +62,31 @@ class CommandParser:
if command is not None:
if current_breakpoint is None:
current_breakpoint = {}
current_breakpoint['file_name'] = source_file
current_breakpoint['line_number'] = line_number
current_breakpoint['command'] = command
current_breakpoint["file_name"] = source_file
current_breakpoint["line_number"] = line_number
current_breakpoint["command"] = command
self.breakpoints.append(current_breakpoint)
else:
current_breakpoint['command'] = current_breakpoint[
'command'] + "\n" + command
current_breakpoint["command"] = (
current_breakpoint["command"] + "\n" + command
)
for bkpt in self.breakpoints:
bkpt['command'] = textwrap.dedent(bkpt['command'])
bkpt["command"] = textwrap.dedent(bkpt["command"])
def set_breakpoints(self, target):
for breakpoint in self.breakpoints:
breakpoint['breakpoint'] = target.BreakpointCreateByLocation(
breakpoint['file_name'], breakpoint['line_number'])
breakpoint["breakpoint"] = target.BreakpointCreateByLocation(
breakpoint["file_name"], breakpoint["line_number"]
)
def handle_breakpoint(self, test, breakpoint_id):
for breakpoint in self.breakpoints:
if breakpoint['breakpoint'].GetID() == breakpoint_id:
test.execute_user_command(breakpoint['command'])
if breakpoint["breakpoint"].GetID() == breakpoint_id:
test.execute_user_command(breakpoint["command"])
return
class InlineTest(TestBase):
def getBuildDirBasename(self):
return self.__class__.__name__ + "." + self.testMethodName
@@ -103,17 +104,17 @@ class InlineTest(TestBase):
else:
categories[t] = [f]
with open(makefilePath, 'w+') as makefile:
with open(makefilePath, "w+") as makefile:
for t in list(categories.keys()):
line = t + " := " + " ".join(categories[t])
makefile.write(line + "\n")
if ('OBJCXX_SOURCES' in list(categories.keys())) or \
('OBJC_SOURCES' in list(categories.keys())):
makefile.write(
"LDFLAGS = $(CFLAGS) -lobjc -framework Foundation\n")
if ("OBJCXX_SOURCES" in list(categories.keys())) or (
"OBJC_SOURCES" in list(categories.keys())
):
makefile.write("LDFLAGS = $(CFLAGS) -lobjc -framework Foundation\n")
if ('CXX_SOURCES' in list(categories.keys())):
if "CXX_SOURCES" in list(categories.keys()):
makefile.write("CXXFLAGS += -std=c++11\n")
makefile.write("include Makefile.rules\n")
@@ -135,8 +136,7 @@ class InlineTest(TestBase):
def do_test(self):
exe = self.getBuildArtifact("a.out")
source_files = [f for f in os.listdir(self.getSourceDir())
if source_type(f)]
source_files = [f for f in os.listdir(self.getSourceDir()) if source_type(f)]
target = self.dbg.CreateTarget(exe)
parser = CommandParser()
@@ -150,18 +150,19 @@ class InlineTest(TestBase):
while lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint):
hit_breakpoints += 1
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
thread = lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint)
for bp_id in self._get_breakpoint_ids(thread):
parser.handle_breakpoint(self, bp_id)
process.Continue()
self.assertTrue(hit_breakpoints > 0,
"inline test did not hit a single breakpoint")
self.assertTrue(
hit_breakpoints > 0, "inline test did not hit a single breakpoint"
)
# Either the process exited or the stepping plan is complete.
self.assertTrue(process.GetState() in [lldb.eStateStopped,
lldb.eStateExited],
PROCESS_EXITED)
self.assertTrue(
process.GetState() in [lldb.eStateStopped, lldb.eStateExited],
PROCESS_EXITED,
)
def check_expression(self, expression, expected_result, use_summary=True):
value = self.frame().EvaluateExpression(expression)
@@ -173,8 +174,7 @@ class InlineTest(TestBase):
answer = value.GetSummary()
else:
answer = value.GetValue()
report_str = "%s expected: %s got: %s" % (
expression, expected_result, answer)
report_str = "%s expected: %s got: %s" % (expression, expected_result, answer)
self.assertTrue(answer == expected_result, report_str)
@@ -183,13 +183,12 @@ def ApplyDecoratorsToFunction(func, decorators):
if isinstance(decorators, list):
for decorator in decorators:
tmp = decorator(tmp)
elif hasattr(decorators, '__call__'):
elif hasattr(decorators, "__call__"):
tmp = decorators(tmp)
return tmp
def MakeInlineTest(__file, __globals, decorators=None, name=None,
build_dict=None):
def MakeInlineTest(__file, __globals, decorators=None, name=None, build_dict=None):
# Adjust the filename if it ends in .pyc. We want filenames to
# reflect the source python file, not the compiled variant.
if __file is not None and __file.endswith(".pyc"):
@@ -203,8 +202,9 @@ def MakeInlineTest(__file, __globals, decorators=None, name=None,
test_func = ApplyDecoratorsToFunction(InlineTest._test, decorators)
# Build the test case
test_class = type(name, (InlineTest,), dict(test=test_func,
name=name, _build_dict=build_dict))
test_class = type(
name, (InlineTest,), dict(test=test_func, name=name, _build_dict=build_dict)
)
# Add the test case to the globals, and hide InlineTest
__globals.update({name: test_class})
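Note: MakeInlineTest above manufactures each test case class at runtime with the three-argument form of type(). A tiny, self-contained illustration of that mechanism, using throwaway names:

def _test(self):
    return "ran " + self.name

# type(name, bases, namespace) creates a class object on the fly,
# just as MakeInlineTest does for each inline test file.
InlineDemo = type("InlineDemo", (object,), dict(test=_test, name="InlineDemo"))
print(InlineDemo().test())  # ran InlineDemo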

View File

@@ -10,34 +10,40 @@ from .lldbtest import *
from . import lldbutil
from lldbsuite.test.decorators import *
@skipIfRemote
@skipIfWindows # llvm.org/pr22274: need a pexpect replacement for windows
class PExpectTest(TestBase):
NO_DEBUG_INFO_TESTCASE = True
PROMPT = "(lldb) "
def expect_prompt(self):
self.child.expect_exact(self.PROMPT)
def launch(self, executable=None, extra_args=None, timeout=60,
dimensions=None, run_under=None, post_spawn=None,
use_colors=False):
logfile = getattr(sys.stdout, 'buffer',
sys.stdout) if self.TraceOn() else None
def launch(
self,
executable=None,
extra_args=None,
timeout=60,
dimensions=None,
run_under=None,
post_spawn=None,
use_colors=False,
):
logfile = getattr(sys.stdout, "buffer", sys.stdout) if self.TraceOn() else None
args = []
if run_under is not None:
args += run_under
args += [lldbtest_config.lldbExec, '--no-lldbinit']
args += [lldbtest_config.lldbExec, "--no-lldbinit"]
if not use_colors:
args.append('--no-use-colors')
args.append("--no-use-colors")
for cmd in self.setUpCommands():
if "use-color false" in cmd and use_colors:
continue
args += ['-O', cmd]
args += ["-O", cmd]
if executable is not None:
args += ['--file', executable]
args += ["--file", executable]
if extra_args is not None:
args.extend(extra_args)
@@ -46,11 +52,17 @@ class PExpectTest(TestBase):
env["HOME"] = self.getBuildDir()
import pexpect
self.child = pexpect.spawn(
args[0], args=args[1:], logfile=logfile,
timeout=timeout, dimensions=dimensions, env=env)
self.child.ptyproc.delayafterclose = timeout/10
self.child.ptyproc.delayafterterminate = timeout/10
args[0],
args=args[1:],
logfile=logfile,
timeout=timeout,
dimensions=dimensions,
env=env,
)
self.child.ptyproc.delayafterclose = timeout / 10
self.child.ptyproc.delayafterterminate = timeout / 10
if post_spawn is not None:
post_spawn()
@@ -66,11 +78,10 @@ class PExpectTest(TestBase):
self.expect_prompt()
def expect(self, cmd, substrs=None):
self.assertNotIn('\n', cmd)
self.assertNotIn("\n", cmd)
# If 'substrs' is a string then this code would just check that every
# character of the string is in the output.
assert not isinstance(substrs, str), \
"substrs must be a collection of strings"
assert not isinstance(substrs, str), "substrs must be a collection of strings"
self.child.sendline(cmd)
if substrs is not None:
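Note: launch() above drives an interactive lldb through pexpect and synchronizes on the "(lldb) " prompt. A minimal, hypothetical equivalent, assuming pexpect is installed and an lldb binary is on PATH:

import pexpect

PROMPT = "(lldb) "
child = pexpect.spawn("lldb", args=["--no-lldbinit", "--no-use-colors"], timeout=60)
child.expect_exact(PROMPT)   # wait for the first prompt
child.sendline("version")
child.expect_exact(PROMPT)   # the command has finished
child.sendline("quit")
child.expect(pexpect.EOF)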

View File

@@ -8,9 +8,23 @@ import itertools
# LLDB modules
import lldb
windows, linux, macosx, darwin, ios, tvos, watchos, bridgeos, darwin_all, \
darwin_embedded, darwin_simulator, freebsd, netbsd, bsd_all, android \
= range(15)
(
windows,
linux,
macosx,
darwin,
ios,
tvos,
watchos,
bridgeos,
darwin_all,
darwin_embedded,
darwin_simulator,
freebsd,
netbsd,
bsd_all,
android,
) = range(15)
__darwin_embedded = ["ios", "tvos", "watchos", "bridgeos"]
__darwin_simulators = ["iphonesimulator", "watchsimulator", "appletvsimulator"]
@@ -30,12 +44,11 @@ __name_lookup = {
freebsd: ["freebsd"],
netbsd: ["netbsd"],
bsd_all: ["freebsd", "netbsd"],
android: ["android"]
android: ["android"],
}
def translate(values):
if isinstance(values, int):
# This is a value from the platform enumeration, translate it.
return __name_lookup[values]
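Note: the hunk above binds the platform names to consecutive integers by unpacking range() and then maps each value back to a list of platform strings. A reduced sketch of the same idiom, with only three made-up entries:

(windows, linux, macosx) = range(3)

_name_lookup = {
    windows: ["windows"],
    linux: ["linux"],
    macosx: ["macosx"],
}

def translate(values):
    # An int is a value from the enumeration above; translate it to its name list.
    if isinstance(values, int):
        return _name_lookup[values]
    return values

print(translate(linux))  # ['linux']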

Some files were not shown because too many files have changed in this diff.