arm_debug: Support multiple APs per DAP and remove DAP from armv7* structs

Separate out the values from adiv5_dap that are associated with a specific AP into a new struct, so we can properly support multiple APs. Remove the DAP struct from the armv7* structs, because we can have multiple CPUs per DAP, and we shouldn't have multiple DAP structs. Tidy up a few places where ap_current is used incorrectly.

Change-Id: I0c6ef4b49cc86b140366347aaf9b76c07cbab0a8
Signed-off-by: Patrick Stewart <patstew@gmail.com>
Reviewed-on: http://openocd.zylin.com/2984
Tested-by: jenkins
Reviewed-by: Matthias Welwarsky <matthias@welwarsky.de>
Reviewed-by: Andreas Fritiofson <andreas.fritiofson@gmail.com>
Author: Patrick Stewart, 2015-09-28 13:51:58 +01:00 (committed by Andreas Fritiofson)
parent 67f24e6734
commit bf4cf76631
10 changed files with 128 additions and 97 deletions
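
In outline, the change moves the per-AP state out of struct adiv5_dap: the flat MEM-AP fields and the apcsw[256] array are replaced by an array of per-AP structs, so cached CSW/TAR values, access delays and access-size quirks are tracked per AP rather than per DAP. A condensed sketch of the new layout, using only field names that appear in the hunks below (all other members omitted):

    #include <stdbool.h>
    #include <stdint.h>

    /* Per-AP state that previously lived directly in struct adiv5_dap */
    struct adiv5_ap {
        uint32_t csw_default;        /* replaces dap->apcsw[apsel] */
        uint32_t csw_value;          /* cached AP_REG_CSW; -1 = no cached value */
        uint32_t tar_value;          /* cached AP_REG_TAR; -1 = no cached value */
        uint32_t memaccess_tck;      /* extra TCK cycles after starting a MEM-AP access */
        uint32_t tar_autoincr_block; /* size of the TAR auto-increment block */
        bool packed_transfers;       /* MEM-AP supports packed transfers */
        bool unaligned_access_bad;   /* MEM-AP cannot handle unaligned accesses */
    };

    struct adiv5_dap {
        /* ... DP-level state is unchanged ... */
        struct adiv5_ap ap[256];     /* one entry per possible APSEL value */
        uint32_t apsel;              /* AP selected by the "dap apsel" command */
        /* ... */
    };

Code that used to read a flat field such as dap->memaccess_tck now indexes the currently selected AP, e.g. dap->ap[dap_ap_get_select(dap)].memaccess_tck, as the driver hunks below show.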


@ -459,7 +459,7 @@ static void bitbang_swd_read_reg(struct adiv5_dap *dap, uint8_t cmd, uint32_t *v
if (value)
*value = data;
if (cmd & SWD_CMD_APnDP)
bitbang_exchange(true, NULL, 0, dap->memaccess_tck);
bitbang_exchange(true, NULL, 0, dap->ap[dap_ap_get_select(dap)].memaccess_tck);
return;
case SWD_ACK_WAIT:
LOG_DEBUG("SWD_ACK_WAIT");
@ -511,7 +511,7 @@ static void bitbang_swd_write_reg(struct adiv5_dap *dap, uint8_t cmd, uint32_t v
switch (ack) {
case SWD_ACK_OK:
if (cmd & SWD_CMD_APnDP)
bitbang_exchange(true, NULL, 0, dap->memaccess_tck);
bitbang_exchange(true, NULL, 0, dap->ap[dap_ap_get_select(dap)].memaccess_tck);
return;
case SWD_ACK_WAIT:
LOG_DEBUG("SWD_ACK_WAIT");


@ -1057,7 +1057,7 @@ static void ftdi_swd_queue_cmd(struct adiv5_dap *dap, uint8_t cmd, uint32_t *dst
/* Insert idle cycles after AP accesses to avoid WAIT */
if (cmd & SWD_CMD_APnDP)
mpsse_clock_data_out(mpsse_ctx, NULL, 0, dap->memaccess_tck, SWD_MODE);
mpsse_clock_data_out(mpsse_ctx, NULL, 0, dap->ap[dap_ap_get_select(dap)].memaccess_tck, SWD_MODE);
}


@ -1853,9 +1853,8 @@ static void jlink_swd_queue_cmd(struct adiv5_dap *dap, uint8_t cmd,
uint32_t *dst, uint32_t data)
{
uint8_t data_parity_trn[DIV_ROUND_UP(32 + 1, 8)];
if (tap_length + 46 + 8 + dap->memaccess_tck >= swd_buffer_size * 8 ||
pending_scan_results_length == MAX_PENDING_SCAN_RESULTS) {
if (tap_length + 46 + 8 + dap->ap[dap_ap_get_select(dap)].memaccess_tck >= sizeof(tdi_buffer) * 8 ||
pending_scan_results_length == MAX_PENDING_SCAN_RESULTS) {
/* Not enough room in the queue. Run the queue. */
queued_retval = jlink_swd_run_queue(dap);
}
@ -1890,7 +1889,7 @@ static void jlink_swd_queue_cmd(struct adiv5_dap *dap, uint8_t cmd,
/* Insert idle cycles after AP accesses to avoid WAIT. */
if (cmd & SWD_CMD_APnDP)
jlink_queue_data_out(NULL, dap->memaccess_tck);
jlink_queue_data_out(NULL, dap->ap[dap_ap_get_select(dap)].memaccess_tck);
}
static const struct swd_driver jlink_swd = {


@ -117,8 +117,8 @@ static int adi_jtag_dp_scan(struct adiv5_dap *dap,
if ((instr == JTAG_DP_APACC)
&& ((reg_addr == MEM_AP_REG_DRW)
|| ((reg_addr & 0xF0) == MEM_AP_REG_BD0))
&& (dap->memaccess_tck != 0))
jtag_add_runtest(dap->memaccess_tck,
&& (dap->ap[dap_ap_get_select(dap)].memaccess_tck != 0))
jtag_add_runtest(dap->ap[dap_ap_get_select(dap)].memaccess_tck,
TAP_IDLE);
return ERROR_OK;
@ -280,14 +280,14 @@ static int jtagdp_transaction_endcheck(struct adiv5_dap *dap)
* MEM-AP access; but not if autoincrementing.
* *Real* CSW and TAR values are always shown.
*/
if (dap->ap_tar_value != (uint32_t) -1)
if (dap->ap[dap_ap_get_select(dap)].tar_value != (uint32_t) -1)
LOG_DEBUG("MEM-AP Cached values: "
"ap_bank 0x%" PRIx32
", ap_csw 0x%" PRIx32
", ap_tar 0x%" PRIx32,
dap->ap_bank_value,
dap->ap_csw_value,
dap->ap_tar_value);
dap->ap[dap_ap_get_select(dap)].csw_value,
dap->ap[dap_ap_get_select(dap)].tar_value);
if (ctrlstat & SSTICKYORUN)
LOG_ERROR("JTAG-DP OVERRUN - check clock, "


@ -113,34 +113,33 @@ void dap_ap_select(struct adiv5_dap *dap, uint8_t ap)
* Values MUST BE UPDATED BEFORE AP ACCESS.
*/
dap->ap_bank_value = -1;
dap->ap_csw_value = -1;
dap->ap_tar_value = -1;
}
}
static int dap_setup_accessport_csw(struct adiv5_dap *dap, uint32_t csw)
{
csw = csw | CSW_DBGSWENABLE | CSW_MASTER_DEBUG | CSW_HPROT |
dap->apcsw[dap->ap_current >> 24];
dap->ap[dap_ap_get_select(dap)].csw_default;
if (csw != dap->ap_csw_value) {
if (csw != dap->ap[dap_ap_get_select(dap)].csw_value) {
/* LOG_DEBUG("DAP: Set CSW %x",csw); */
int retval = dap_queue_ap_write(dap, MEM_AP_REG_CSW, csw);
if (retval != ERROR_OK)
return retval;
dap->ap_csw_value = csw;
dap->ap[dap_ap_get_select(dap)].csw_value = csw;
}
return ERROR_OK;
}
static int dap_setup_accessport_tar(struct adiv5_dap *dap, uint32_t tar)
{
if (tar != dap->ap_tar_value || dap->ap_csw_value & CSW_ADDRINC_MASK) {
if (tar != dap->ap[dap_ap_get_select(dap)].tar_value ||
(dap->ap[dap_ap_get_select(dap)].csw_value & CSW_ADDRINC_MASK)) {
/* LOG_DEBUG("DAP: Set TAR %x",tar); */
int retval = dap_queue_ap_write(dap, MEM_AP_REG_TAR, tar);
if (retval != ERROR_OK)
return retval;
dap->ap_tar_value = tar;
dap->ap[dap_ap_get_select(dap)].tar_value = tar;
}
return ERROR_OK;
}
@ -292,6 +291,7 @@ static int mem_ap_write_atomic_u32(struct adiv5_dap *dap, uint32_t address,
static int mem_ap_write(struct adiv5_dap *dap, const uint8_t *buffer, uint32_t size, uint32_t count,
uint32_t address, bool addrinc)
{
struct adiv5_ap *ap = &dap->ap[dap_ap_get_select(dap)];
size_t nbytes = size * count;
const uint32_t csw_addrincr = addrinc ? CSW_ADDRINC_SINGLE : CSW_ADDRINC_OFF;
uint32_t csw_size;
@ -324,7 +324,7 @@ static int mem_ap_write(struct adiv5_dap *dap, const uint8_t *buffer, uint32_t s
return ERROR_TARGET_UNALIGNED_ACCESS;
}
if (dap->unaligned_access_bad && (address % size != 0))
if (ap->unaligned_access_bad && (address % size != 0))
return ERROR_TARGET_UNALIGNED_ACCESS;
retval = dap_setup_accessport_tar(dap, address ^ addr_xor);
@ -335,8 +335,8 @@ static int mem_ap_write(struct adiv5_dap *dap, const uint8_t *buffer, uint32_t s
uint32_t this_size = size;
/* Select packed transfer if possible */
if (addrinc && dap->packed_transfers && nbytes >= 4
&& max_tar_block_size(dap->tar_autoincr_block, address) >= 4) {
if (addrinc && ap->packed_transfers && nbytes >= 4
&& max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
this_size = 4;
retval = dap_setup_accessport_csw(dap, csw_size | CSW_ADDRINC_PACKED);
} else {
@ -384,7 +384,7 @@ static int mem_ap_write(struct adiv5_dap *dap, const uint8_t *buffer, uint32_t s
break;
/* Rewrite TAR if it wrapped or we're xoring addresses */
if (addrinc && (addr_xor || (address % dap->tar_autoincr_block < size && nbytes > 0))) {
if (addrinc && (addr_xor || (address % ap->tar_autoincr_block < size && nbytes > 0))) {
retval = dap_setup_accessport_tar(dap, address ^ addr_xor);
if (retval != ERROR_OK)
break;
@ -422,6 +422,7 @@ static int mem_ap_write(struct adiv5_dap *dap, const uint8_t *buffer, uint32_t s
static int mem_ap_read(struct adiv5_dap *dap, uint8_t *buffer, uint32_t size, uint32_t count,
uint32_t adr, bool addrinc)
{
struct adiv5_ap *ap = &dap->ap[dap_ap_get_select(dap)];
size_t nbytes = size * count;
const uint32_t csw_addrincr = addrinc ? CSW_ADDRINC_SINGLE : CSW_ADDRINC_OFF;
uint32_t csw_size;
@ -444,7 +445,7 @@ static int mem_ap_read(struct adiv5_dap *dap, uint8_t *buffer, uint32_t size, ui
else
return ERROR_TARGET_UNALIGNED_ACCESS;
if (dap->unaligned_access_bad && (adr % size != 0))
if (ap->unaligned_access_bad && (adr % size != 0))
return ERROR_TARGET_UNALIGNED_ACCESS;
/* Allocate buffer to hold the sequence of DRW reads that will be made. This is a significant
@ -470,8 +471,8 @@ static int mem_ap_read(struct adiv5_dap *dap, uint8_t *buffer, uint32_t size, ui
uint32_t this_size = size;
/* Select packed transfer if possible */
if (addrinc && dap->packed_transfers && nbytes >= 4
&& max_tar_block_size(dap->tar_autoincr_block, address) >= 4) {
if (addrinc && ap->packed_transfers && nbytes >= 4
&& max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
this_size = 4;
retval = dap_setup_accessport_csw(dap, csw_size | CSW_ADDRINC_PACKED);
} else {
@ -488,7 +489,7 @@ static int mem_ap_read(struct adiv5_dap *dap, uint8_t *buffer, uint32_t size, ui
address += this_size;
/* Rewrite TAR if it wrapped */
if (addrinc && address % dap->tar_autoincr_block < size && nbytes > 0) {
if (addrinc && address % ap->tar_autoincr_block < size && nbytes > 0) {
retval = dap_setup_accessport_tar(dap, address);
if (retval != ERROR_OK)
break;
@ -522,8 +523,8 @@ static int mem_ap_read(struct adiv5_dap *dap, uint8_t *buffer, uint32_t size, ui
while (nbytes > 0) {
uint32_t this_size = size;
if (addrinc && dap->packed_transfers && nbytes >= 4
&& max_tar_block_size(dap->tar_autoincr_block, address) >= 4) {
if (addrinc && ap->packed_transfers && nbytes >= 4
&& max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
this_size = 4;
}
@ -628,6 +629,23 @@ extern const struct dap_ops jtag_dp_ops;
/*--------------------------------------------------------------------------*/
/**
* Create a new DAP
*/
struct adiv5_dap *dap_init(void)
{
struct adiv5_dap *dap = calloc(1, sizeof(struct adiv5_dap));
int i;
/* Set up with safe defaults */
for (i = 0; i <= 255; i++) {
/* memaccess_tck max is 255 */
dap->ap[i].memaccess_tck = 255;
/* Number of bits for tar autoincrement, impl. dep. at least 10 */
dap->ap[i].tar_autoincr_block = (1<<10);
}
return dap;
}
/**
* Initialize a DAP. This sets up the power domains, prepares the DP
* for further use, and arranges to use AP #0 for all AP operations
@ -645,6 +663,7 @@ int ahbap_debugport_init(struct adiv5_dap *dap, uint8_t apsel)
/* check that we support packed transfers */
uint32_t csw, cfg;
int retval;
struct adiv5_ap *ap = &dap->ap[apsel];
LOG_DEBUG(" ");
@ -737,17 +756,17 @@ int ahbap_debugport_init(struct adiv5_dap *dap, uint8_t apsel)
return retval;
if (csw & CSW_ADDRINC_PACKED)
dap->packed_transfers = true;
ap->packed_transfers = true;
else
dap->packed_transfers = false;
ap->packed_transfers = false;
/* Packed transfers on TI BE-32 processors do not work correctly in
* many cases. */
if (dap->ti_be_32_quirks)
dap->packed_transfers = false;
ap->packed_transfers = false;
LOG_DEBUG("MEM_AP Packed Transfers: %s",
dap->packed_transfers ? "enabled" : "disabled");
ap->packed_transfers ? "enabled" : "disabled");
/* The ARM ADI spec leaves implementation-defined whether unaligned
* memory accesses work, only work partially, or cause a sticky error.
@ -755,7 +774,7 @@ int ahbap_debugport_init(struct adiv5_dap *dap, uint8_t apsel)
* and unaligned writes seem to cause a sticky error.
* TODO: it would be nice to have a way to detect whether unaligned
* operations are supported on other processors. */
dap->unaligned_access_bad = dap->ti_be_32_quirks;
ap->unaligned_access_bad = dap->ti_be_32_quirks;
LOG_DEBUG("MEM_AP CFG: large data %d, long address %d, big-endian %d",
!!(cfg & 0x04), !!(cfg & 0x02), !!(cfg & 0x01));
@ -1521,7 +1540,7 @@ COMMAND_HANDLER(dap_memaccess_command)
switch (CMD_ARGC) {
case 0:
memaccess_tck = dap->memaccess_tck;
memaccess_tck = dap->ap[dap->apsel].memaccess_tck;
break;
case 1:
COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], memaccess_tck);
@ -1529,10 +1548,10 @@ COMMAND_HANDLER(dap_memaccess_command)
default:
return ERROR_COMMAND_SYNTAX_ERROR;
}
dap->memaccess_tck = memaccess_tck;
dap->ap[dap->apsel].memaccess_tck = memaccess_tck;
command_print(CMD_CTX, "memory bus access delay set to %" PRIi32 " tck",
dap->memaccess_tck);
dap->ap[dap->apsel].memaccess_tck);
return ERROR_OK;
}
@ -1582,7 +1601,7 @@ COMMAND_HANDLER(dap_apcsw_command)
struct arm *arm = target_to_arm(target);
struct adiv5_dap *dap = arm->dap;
uint32_t apcsw = dap->apcsw[dap->apsel], sprot = 0;
uint32_t apcsw = dap->ap[dap->apsel].csw_default, sprot = 0;
switch (CMD_ARGC) {
case 0:
@ -1602,7 +1621,7 @@ COMMAND_HANDLER(dap_apcsw_command)
default:
return ERROR_COMMAND_SYNTAX_ERROR;
}
dap->apcsw[dap->apsel] = apcsw;
dap->ap[dap->apsel].csw_default = apcsw;
return 0;
}


@ -122,11 +122,52 @@
#define CSW_SPROT (1UL << 30)
#define CSW_DBGSWENABLE (1UL << 31)
/**
* This represents an ARM Debug Interface (v5) Access Port (AP).
* Most common is a MEM-AP, for memory access.
*/
struct adiv5_ap {
/**
* Default value for (MEM-AP) AP_REG_CSW register.
*/
uint32_t csw_default;
/**
* Cache for (MEM-AP) AP_REG_CSW register value. This is written to
* configure an access mode, such as autoincrementing AP_REG_TAR during
* word access. "-1" indicates no cached value.
*/
uint32_t csw_value;
/**
* Cache for (MEM-AP) AP_REG_TAR register value This is written to
* configure the address being read or written
* "-1" indicates no cached value.
*/
uint32_t tar_value;
/**
* Configures how many extra tck clocks are added after starting a
* MEM-AP access before we try to read its status (and/or result).
*/
uint32_t memaccess_tck;
/* Size of TAR autoincrement block, ARM ADI Specification requires at least 10 bits */
uint32_t tar_autoincr_block;
/* true if packed transfers are supported by the MEM-AP */
bool packed_transfers;
/* true if unaligned memory access is not supported by the MEM-AP */
bool unaligned_access_bad;
};
/**
* This represents an ARM Debug Interface (v5) Debug Access Port (DAP).
* A DAP has two types of component: one Debug Port (DP), which is a
* transport agent; and at least one Access Port (AP), controlling
* resource access. Most common is a MEM-AP, for memory access.
* resource access.
*
* There are two basic DP transports: JTAG, and ARM's low pin-count SWD.
* Accordingly, this interface is responsible for hiding the transport
@ -145,7 +186,9 @@ struct adiv5_dap {
/* Control config */
uint32_t dp_ctrl_stat;
uint32_t apcsw[256];
struct adiv5_ap ap[256];
/* The current manually selected AP by the "dap apsel" command */
uint32_t apsel;
/**
@ -171,20 +214,6 @@ struct adiv5_dap {
*/
uint32_t dp_bank_value;
/**
* Cache for (MEM-AP) AP_REG_CSW register value. This is written to
* configure an access mode, such as autoincrementing AP_REG_TAR during
* word access. "-1" indicates no cached value.
*/
uint32_t ap_csw_value;
/**
* Cache for (MEM-AP) AP_REG_TAR register value This is written to
* configure the address being read or written
* "-1" indicates no cached value.
*/
uint32_t ap_tar_value;
/* information about current pending SWjDP-AHBAP transaction */
uint8_t ack;
@ -194,21 +223,6 @@ struct adiv5_dap {
*/
uint32_t *last_read;
/**
* Configures how many extra tck clocks are added after starting a
* MEM-AP access before we try to read its status (and/or result).
*/
uint32_t memaccess_tck;
/* Size of TAR autoincrement block, ARM ADI Specification requires at least 10 bits */
uint32_t tar_autoincr_block;
/* true if packed transfers are supported by the MEM-AP */
bool packed_transfers;
/* true if unaligned memory access is not supported by the MEM-AP */
bool unaligned_access_bad;
/* The TI TMS470 and TMS570 series processors use a BE-32 memory ordering
* despite lack of support in the ARMv7 architecture. Memory access through
* the AHB-AP has strange byte ordering these processors, and we need to
@ -447,6 +461,9 @@ int mem_ap_sel_read_buf_noincr(struct adiv5_dap *swjdp, uint8_t ap,
int mem_ap_sel_write_buf_noincr(struct adiv5_dap *swjdp, uint8_t ap,
const uint8_t *buffer, uint32_t size, uint32_t count, uint32_t address);
/* Create DAP struct */
struct adiv5_dap *dap_init(void);
/* Initialisation of the debug system, power domains and registers */
int ahbap_debugport_init(struct adiv5_dap *swjdp, uint8_t apsel);


@ -104,8 +104,6 @@ struct armv7a_common {
int common_magic;
struct reg_cache *core_cache;
struct adiv5_dap dap;
/* Core Debug Unit */
struct arm_dpm dpm;
uint32_t debug_base;


@ -146,7 +146,6 @@ struct armv7m_common {
int common_magic;
int exception_number;
struct adiv5_dap dap;
/* AP this processor is connected to in the DAP */
uint8_t debug_ap;


@ -3119,30 +3119,24 @@ static int cortex_a_init_arch_info(struct target *target,
struct cortex_a_common *cortex_a, struct jtag_tap *tap)
{
struct armv7a_common *armv7a = &cortex_a->armv7a_common;
struct adiv5_dap *dap = &armv7a->dap;
armv7a->arm.dap = dap;
/* Setup struct cortex_a_common */
cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
/* tap has no dap initialized */
if (!tap->dap) {
armv7a->arm.dap = dap;
/* Setup struct cortex_a_common */
tap->dap = dap_init();
/* prepare JTAG information for the new target */
cortex_a->jtag_info.tap = tap;
cortex_a->jtag_info.scann_size = 4;
/* Leave (only) generic DAP stuff for debugport_init() */
dap->jtag_info = &cortex_a->jtag_info;
tap->dap->jtag_info = &cortex_a->jtag_info;
}
/* Number of bits for tar autoincrement, impl. dep. at least 10 */
dap->tar_autoincr_block = (1 << 10);
dap->memaccess_tck = 80;
tap->dap = dap;
} else
armv7a->arm.dap = tap->dap;
tap->dap->ap[dap_ap_get_select(tap->dap)].memaccess_tck = 80;
armv7a->arm.dap = tap->dap;
cortex_a->fast_reg_read = 0;
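
For reference, a condensed sketch of the initialization pattern the cortex_a hunk above arrives at (identifiers as shown there; other setup and error handling omitted):

    /* The first target probed on a TAP allocates the DAP; later targets on
     * the same TAP reuse it, so one DAP can serve several CPUs. */
    if (!tap->dap) {
        tap->dap = dap_init();
        /* Leave (only) generic DAP stuff for debugport_init() */
        tap->dap->jtag_info = &cortex_a->jtag_info;
    }

    /* per-AP access delay instead of the old dap->memaccess_tck */
    tap->dap->ap[dap_ap_get_select(tap->dap)].memaccess_tck = 80;
    armv7a->arm.dap = tap->dap;

The cortex_m hunk below follows the same pattern with its own jtag_info and per-AP defaults.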


@ -1903,6 +1903,9 @@ int cortex_m_examine(struct target *target)
struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
struct armv7m_common *armv7m = target_to_armv7m(target);
/* Leave (only) generic DAP stuff for debugport_init(); */
swjdp->ap[armv7m->debug_ap].memaccess_tck = 8;
/* stlink shares the examine handler but does not support
* all its calls */
if (!armv7m->stlink) {
@ -1957,7 +1960,7 @@ int cortex_m_examine(struct target *target)
if (i == 4 || i == 3) {
/* Cortex-M3/M4 has 4096 bytes autoincrement range */
armv7m->dap.tar_autoincr_block = (1 << 12);
swjdp->ap[armv7m->debug_ap].tar_autoincr_block = (1 << 12);
}
/* Configure trace modules */
@ -2106,24 +2109,26 @@ static int cortex_m_init_arch_info(struct target *target,
armv7m_init_arch_info(target, armv7m);
/* prepare JTAG information for the new target */
cortex_m->jtag_info.tap = tap;
cortex_m->jtag_info.scann_size = 4;
/* tap has no dap initialized */
if (!tap->dap) {
tap->dap = dap_init();
/* prepare JTAG information for the new target */
cortex_m->jtag_info.tap = tap;
cortex_m->jtag_info.scann_size = 4;
/* Leave (only) generic DAP stuff for debugport_init() */
tap->dap->jtag_info = &cortex_m->jtag_info;
}
/* default reset mode is to use srst if fitted
* if not it will use CORTEX_M3_RESET_VECTRESET */
cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
armv7m->arm.dap = &armv7m->dap;
armv7m->arm.dap = tap->dap;
/* Leave (only) generic DAP stuff for debugport_init(); */
armv7m->dap.jtag_info = &cortex_m->jtag_info;
armv7m->dap.memaccess_tck = 8;
/* Cortex-M3/M4 has 4096 bytes autoincrement range
* but set a safe default to 1024 to support Cortex-M0
* this will be changed in cortex_m3_examine if a M3/M4 is detected */
armv7m->dap.tar_autoincr_block = (1 << 10);
tap->dap->ap[dap_ap_get_select(tap->dap)].memaccess_tck = 8;
/* register arch-specific functions */
armv7m->examine_debug_reason = cortex_m_examine_debug_reason;