diff --git a/develop/ace__initiator_8h_source.html b/develop/ace__initiator_8h_source.html
index 8235e4cd..63533edb 100644
--- a/develop/ace__initiator_8h_source.html
+++ b/develop/ace__initiator_8h_source.html
@@ -24,7 +24,7 @@
scc
- 2022.4.0
+ 2024.03
SystemC components library
@@ -88,698 +88,742 @@
21 #include <axi/fsm/base.h>
22 #include <axi/fsm/protocol_fsm.h>
23 #include <axi/signal_if.h>
-
- 25 #include <tlm/scc/tlm_mm.h>
- 26 #include <tlm_utils/peq_with_cb_and_phase.h>
-
-
-
-
- 33 using namespace axi::fsm;
+ 24 #include <cci_cfg/cci_param_typed.h>
+ 25 #include <scc/fifo_w_cb.h>
+
+ 27 #include <tlm/scc/tlm_mm.h>
+ 28 #include <tlm_utils/peq_with_cb_and_phase.h>
+
+
+
- 35 template <typename CFG>
-
- 37 public aw_ace<CFG, typename CFG::master_types>,
- 38 public wdata_ace<CFG, typename CFG::master_types>,
- 39 public b_ace<CFG, typename CFG::master_types>,
- 40 public ar_ace<CFG, typename CFG::master_types>,
- 41 public rresp_ace<CFG, typename CFG::master_types>,
-
- 43 public ac_ace<CFG, typename CFG::master_types>,
- 44 public cr_ace<CFG, typename CFG::master_types>,
- 45 public cd_ace<CFG, typename CFG::master_types>,
-
-
-
-
-
- 51 enum { CACHELINE_SZ = 64 };
+ 35 using namespace axi::fsm;
+
+ 37 template <typename CFG>
+
+ 39 public aw_ace<CFG, typename CFG::master_types>,
+ 40 public wdata_ace<CFG, typename CFG::master_types>,
+ 41 public b_ace<CFG, typename CFG::master_types>,
+ 42 public ar_ace<CFG, typename CFG::master_types>,
+ 43 public rresp_ace<CFG, typename CFG::master_types>,
+
+ 45 public ac_ace<CFG, typename CFG::master_types>,
+ 46 public cr_ace<CFG, typename CFG::master_types>,
+ 47 public cd_ace<CFG, typename CFG::master_types>,
+
+
+
+
- 53 using payload_type = axi::axi_protocol_types::tlm_payload_type;
- 54 using phase_type = axi::axi_protocol_types::tlm_phase_type;
-
- 56 sc_core::sc_in<bool> clk_i{"clk_i"};
+ 53 enum { CACHELINE_SZ = 64 };
+
+ 55 using payload_type = axi::axi_protocol_types::tlm_payload_type;
+ 56 using phase_type = axi::axi_protocol_types::tlm_phase_type;
-
+ 58 sc_core::sc_in<bool> clk_i{"clk_i"};
-
- 61 : sc_core::sc_module(nm)
-
- 63 , base(CFG::BUSWIDTH, true) {
- 64 instance_name = name();
-
-
- 67 sensitive << clk_i.pos();
-
-
-
-
-
-
-
-
-
-
-
-
-
- 81 void b_transport(payload_type& trans, sc_core::sc_time& t) override {
- 82 trans.set_dmi_allowed(false);
- 83 trans.set_response_status(tlm::TLM_OK_RESPONSE);
-
-
- 86 tlm::tlm_sync_enum nb_transport_fw(payload_type& trans, phase_type& phase, sc_core::sc_time& t) override {
-
- 88 sc_core::sc_time delay;
- 89 fw_peq.notify(trans, phase, delay);
- 90 return tlm::TLM_ACCEPTED;
-
-
- 93 bool get_direct_mem_ptr(payload_type& trans, tlm::tlm_dmi& dmi_data) override {
- 94 trans.set_dmi_allowed(false);
-
+
+
+ 62 cci::cci_param<bool> pipelined_wrreq{"pipelined_wrreq", false};
+
+ 64 ace_initiator(sc_core::sc_module_name const& nm, bool pipelined_wrreq = false)
+ 65 : sc_core::sc_module(nm)
+
+ 67 , base(CFG::BUSWIDTH, true)
+ 68 , pipelined_wrreq("pipelined_wrreq", pipelined_wrreq) {
+ 69 instance_name = name();
+
+
+ 72 sensitive << clk_i.pos();
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 86 void b_transport(payload_type& trans, sc_core::sc_time& t) override {
+ 87 trans.set_dmi_allowed(false);
+ 88 trans.set_response_status(tlm::TLM_OK_RESPONSE);
+
+
+ 91 tlm::tlm_sync_enum nb_transport_fw(payload_type& trans, phase_type& phase, sc_core::sc_time& t) override {
+
+ 93 sc_core::sc_time delay;
+ 94 fw_peq.notify(trans, phase, delay);
+ 95 return tlm::TLM_ACCEPTED;
- 98 unsigned int transport_dbg(payload_type& trans) override { return 0; }
-
- 100 void end_of_elaboration() override { clk_if = dynamic_cast<sc_core::sc_clock*>(clk_i.get_interface()); }
-
-
-
-
-
- 106 void clk_delay() { clk_delayed.notify(axi::CLK_DELAY); }
-
-
-
-
-
-
-
-
-
-
-
-
- 131 static typename CFG::data_t get_cache_data_for_beat(fsm::fsm_handle* fsm_hndl);
- 132 unsigned int SNOOP = 3;
-
-
- 135 std::array<unsigned, 3> outstanding_cnt{0, 0, 0};
- 136 std::array<fsm_handle*, 3> active_req{nullptr, nullptr, nullptr};
- 137 std::array<fsm_handle*, 3> active_resp{nullptr, nullptr, nullptr};
- 138 std::array<fsm_handle*, 4> active_resp_beat{nullptr, nullptr, nullptr};
- 139 sc_core::sc_clock* clk_if{nullptr};
- 140 sc_core::sc_event clk_delayed, clk_self, r_end_resp_evt, w_end_resp_evt, aw_evt, ar_evt, ac_end_req_evt;
- 141 void nb_fw(payload_type& trans, const phase_type& phase) {
- 142 auto t = sc_core::SC_ZERO_TIME;
-
-
- 145 tlm_utils::peq_with_cb_and_phase<ace_initiator> fw_peq{this, &ace_initiator::nb_fw};
- 146 std::unordered_map<unsigned, std::deque<fsm_handle*>> rd_resp_by_id, wr_resp_by_id;
- 147 sc_core::sc_buffer<uint8_t> wdata_vl;
- 148 sc_core::sc_event rack_vl;
- 149 sc_core::sc_event wack_vl;
- 150 void write_ar(tlm::tlm_generic_payload& trans);
- 151 void write_aw(tlm::tlm_generic_payload& trans);
- 152 void write_wdata(tlm::tlm_generic_payload& trans, unsigned beat, bool last = false);
-
-
-
-
-
-
- 159 sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
- 160 this->ar_addr.write(addr);
-
- 162 this->ar_prot.write(ext->get_prot());
-
- 164 this->ar_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(ext->get_id()));
- 165 this->ar_len->write(sc_dt::sc_uint<8>(ext->get_length()));
- 166 this->ar_size->write(sc_dt::sc_uint<3>(ext->get_size()));
- 167 this->ar_burst->write(sc_dt::sc_uint<2>(
axi::to_int(ext->get_burst())));
- 168 if(ext->is_exclusive())
- 169 this->ar_lock->write(
true);
- 170 this->ar_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
- 171 this->ar_prot.write(ext->get_prot());
- 172 this->ar_qos->write(ext->get_qos());
- 173 this->ar_region->write(ext->get_region());
- 174 this->ar_domain->write(sc_dt::sc_uint<2>((uint8_t)ext->get_domain()));
- 175 this->ar_snoop->write(sc_dt::sc_uint<4>((uint8_t)ext->get_snoop()));
- 176 this->ar_bar->write(sc_dt::sc_uint<2>((uint8_t)ext->get_barrier()));
- 177 this->ar_user->write(ext->get_user(axi::common::id_type::CTRL));
+ 98 bool get_direct_mem_ptr(payload_type& trans, tlm::tlm_dmi& dmi_data)
override {
+ 99 trans.set_dmi_allowed(
false);
+
+
+
+ 103 unsigned int transport_dbg(payload_type& trans)
override {
return 0; }
+
+ 105 void end_of_elaboration()
override { clk_if =
dynamic_cast<sc_core::sc_clock*
>(clk_i.get_interface()); }
+
+
+
+
+
+ 111 void clk_delay() { clk_delayed.notify(axi::CLK_DELAY); }
+
+
+
+
+
+
+
+
+
+
+
+
+ 136 static typename CFG::data_t get_cache_data_for_beat(fsm::fsm_handle* fsm_hndl);
+ 137 unsigned int SNOOP = 3;
+
+
+ 140 std::array<unsigned, 3> outstanding_cnt{0, 0, 0};
+ 141 std::array<fsm_handle*, 3> active_req{nullptr, nullptr, nullptr};
+ 142 std::array<fsm_handle*, 3> active_resp{nullptr, nullptr, nullptr};
+ 143 std::array<fsm_handle*, 4> active_resp_beat{nullptr, nullptr, nullptr};
+ 144 sc_core::sc_clock* clk_if{nullptr};
+ 145 sc_core::sc_event clk_delayed, clk_self, r_end_resp_evt, w_end_resp_evt, ac_end_req_evt;
+ 146 void nb_fw(payload_type& trans, const phase_type& phase) {
+ 147 auto t = sc_core::SC_ZERO_TIME;
+
+
+ 150 tlm_utils::peq_with_cb_and_phase<ace_initiator> fw_peq{this, &ace_initiator::nb_fw};
+ 151 std::unordered_map<unsigned, std::deque<fsm_handle*>> rd_resp_by_id, wr_resp_by_id;
+
+ 153 tlm::tlm_generic_payload* gp = nullptr;
+
+ 155 bool needs_end_req = false;
+
+ 157 fifo_entry(tlm::tlm_generic_payload* gp, bool last, bool needs_end_req, size_t beat_num)
+
+
+ 160 , needs_end_req(needs_end_req)
+ 161 , beat_num(beat_num) {
+
+
+
+ 165 fifo_entry(tlm::tlm_generic_payload* gp, bool needs_end_req)
+
+ 167 , needs_end_req(needs_end_req) {
+
+
+
+ 171 fifo_entry(fifo_entry const& o)
+
+
+ 174 , needs_end_req(o.needs_end_req)
+ 175 , beat_num(o.beat_num) {
+ 176 if(gp && gp->has_mm())
+
-
-
-
- 182 sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
- 183 this->aw_addr.write(addr);
-
- 185 this->aw_prot.write(ext->get_prot());
- 186 if(ext->is_exclusive())
- 187 this->aw_lock->write(
true);
- 188 if(this->aw_id.get_interface())
- 189 this->aw_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(ext->get_id()));
- 190 this->aw_len->write(sc_dt::sc_uint<8>(ext->get_length()));
- 191 this->aw_size->write(sc_dt::sc_uint<3>(ext->get_size()));
- 192 this->aw_burst->write(sc_dt::sc_uint<2>(
axi::to_int(ext->get_burst())));
- 193 this->aw_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
- 194 this->aw_qos->write(sc_dt::sc_uint<4>(ext->get_qos()));
- 195 this->aw_region->write(sc_dt::sc_uint<4>(ext->get_region()));
- 196 this->aw_user->write(ext->get_user(axi::common::id_type::CTRL));
- 197 this->aw_domain->write(sc_dt::sc_uint<2>((uint8_t)ext->get_domain()));
- 198 this->aw_snoop->write(sc_dt::sc_uint<CFG::AWSNOOPWIDTH>((uint8_t)ext->get_snoop()));
- 199 this->aw_bar->write(sc_dt::sc_uint<2>((uint8_t)ext->get_barrier()));
- 200 this->aw_unique->write(ext->get_unique());
-
-
+ 179 fifo_entry& operator=(const fifo_entry& o) {
+
+
+ 182 needs_end_req = o.needs_end_req;
+ 183 beat_num = o.beat_num;
+
+
+
+ 187 if(gp && gp->has_mm())
+
+
+
+
+
+
+ 194 sc_core::sc_event rack_vl;
+ 195 sc_core::sc_event wack_vl;
+ 196 void write_ar(tlm::tlm_generic_payload& trans);
+ 197 void write_aw(tlm::tlm_generic_payload& trans);
+ 198 void write_wdata(tlm::tlm_generic_payload& trans, unsigned beat);
+
+
+
+
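(The fifo_entry helper added above queues AW and W beats in scc::fifo_w_cb while keeping the TLM payload alive through its memory manager. Several of its lines are blanked in this rendering, so the following is only a minimal sketch of the presumed pattern: the last and beat_num members and the acquire()/release() calls are inferred from the has_mm() guards and from how val.last and val.beat_num are used later in the listing, they are not shown verbatim here.)

    struct fifo_entry {
        tlm::tlm_generic_payload* gp = nullptr;
        bool last = false;          // inferred: marks the final beat of the burst
        bool needs_end_req = false;
        size_t beat_num = 0;        // inferred: beat index passed to write_wdata()
        fifo_entry(tlm::tlm_generic_payload* gp, bool last, bool needs_end_req, size_t beat_num)
        : gp(gp), last(last), needs_end_req(needs_end_req), beat_num(beat_num) {
            if(gp && gp->has_mm())
                gp->acquire(); // presumed: keep the payload alive while it sits in the fifo
        }
        fifo_entry(fifo_entry const& o)
        : gp(o.gp), last(o.last), needs_end_req(o.needs_end_req), beat_num(o.beat_num) {
            if(gp && gp->has_mm())
                gp->acquire(); // presumed
        }
        ~fifo_entry() {
            if(gp && gp->has_mm())
                gp->release(); // presumed: drop the reference once the beat has been sent
        }
    };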
-
-
- 206 typename CFG::data_t data{0};
- 207 sc_dt::sc_uint<CFG::BUSWIDTH / 8> strb{0};
-
-
- 210 auto byte_offset = beat * size;
- 211 auto offset = (trans.get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
- 212 auto beptr = trans.get_byte_enable_length() ? trans.get_byte_enable_ptr() + byte_offset :
nullptr;
- 213 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
-
- 215 auto dptr = trans.get_data_ptr();
-
- 217 for(
size_t i = offset; i < size; ++i, ++dptr) {
- 218 auto bit_offs = i * 8;
- 219 data(bit_offs + 7, bit_offs) = *dptr;
-
- 221 strb[i] = *beptr == 0xff;
-
-
-
-
-
- 227 auto beat_start_idx = byte_offset - offset;
- 228 auto data_len = trans.get_data_length();
- 229 auto dptr = trans.get_data_ptr() + beat_start_idx;
-
- 231 for(
size_t i = 0; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
- 232 auto bit_offs = i * 8;
- 233 data(bit_offs + 7, bit_offs) = *dptr;
-
- 235 strb[i] = *beptr == 0xff;
-
-
-
-
-
-
- 242 auto dptr = trans.get_data_ptr() + byte_offset;
-
- 244 for(
size_t i = 0; i < size; ++i, ++dptr) {
- 245 auto bit_offs = (offset + i) * 8;
- 246 data(bit_offs + 7, bit_offs) = *dptr;
-
- 248 strb[offset + i] = *beptr == 0xff;
-
-
- 251 strb[offset + i] =
true;
-
-
- 254 this->w_data.write(data);
- 255 this->w_strb.write(strb);
-
- 257 this->w_id->write(ext->get_id());
- 258 if(this->w_user.get_interface())
- 259 this->w_user->write(ext->get_user(axi::common::id_type::DATA));
-
-
-
-
-
-
-
- 267 auto byte_offset = beat_count * size;
- 268 auto offset = (fsm_hndl->
trans->get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
- 269 typename CFG::data_t data{0};
- 270 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
- 271 if(beat_count == 0) {
- 272 auto dptr = fsm_hndl->
trans->get_data_ptr();
- 273 for(
size_t i = offset; i < size; ++i, ++dptr) {
- 274 auto bit_offs = i * 8;
- 275 data(bit_offs + 7, bit_offs) = *dptr;
-
-
- 278 auto beat_start_idx = byte_offset - offset;
- 279 auto data_len = fsm_hndl->
trans->get_data_length();
- 280 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_start_idx;
- 281 for(
size_t i = offset; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
- 282 auto bit_offs = i * 8;
- 283 data(bit_offs + 7, bit_offs) = *dptr;
-
-
-
- 287 auto dptr = fsm_hndl->
trans->get_data_ptr() + byte_offset;
- 288 for(
size_t i = 0; i < size; ++i, ++dptr) {
- 289 auto bit_offs = (offset + i) * 8;
- 290 data(bit_offs + 7, bit_offs) = *dptr;
-
-
-
-
-
-
- 297 fsm_hndl->
fsm->cb[RequestPhaseBeg] = [
this, fsm_hndl]() ->
void {
-
- 299 SCCTRACE(SCMOD) <<
" for snoop in RequestPhaseBeg ";
-
-
- 302 outstanding_cnt[fsm_hndl->
trans->get_command()]++;
-
- 304 auto offset = fsm_hndl->
trans->get_address() % (CFG::BUSWIDTH / 8);
- 305 if(offset + fsm_hndl->
trans->get_data_length() > CFG::BUSWIDTH / 8) {
- 306 SCCFATAL(SCMOD) <<
" transaction " << *fsm_hndl->
trans <<
" is not AXI4Lite compliant";
-
-
-
-
- 311 fsm_hndl->
fsm->cb[BegPartReqE] = [
this, fsm_hndl]() ->
void {
- 312 sc_assert(fsm_hndl->
trans->is_write());
-
- 314 write_aw(*fsm_hndl->
trans);
- 315 aw_evt.notify(sc_core::SC_ZERO_TIME);
-
-
- 318 active_req[tlm::TLM_WRITE_COMMAND] = fsm_hndl;
-
-
- 321 fsm_hndl->
fsm->cb[EndPartReqE] = [
this, fsm_hndl]() ->
void {
- 322 active_req[tlm::TLM_WRITE_COMMAND] =
nullptr;
- 323 tlm::tlm_phase phase = axi::END_PARTIAL_REQ;
- 324 sc_core::sc_time t = (clk_if ? clk_if->period() - axi::CLK_DELAY - 1_ps : sc_core::SC_ZERO_TIME);
- 325 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
-
-
- 328 fsm_hndl->
fsm->cb[BegReqE] = [
this, fsm_hndl]() ->
void {
-
- 330 SCCTRACE(SCMOD) <<
" BegReq of setup_cb";
- 331 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
- 332 tlm::tlm_phase phase = tlm::BEGIN_REQ;
- 333 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
-
- 335 switch(fsm_hndl->
trans->get_command()) {
- 336 case tlm::TLM_READ_COMMAND:
- 337 active_req[tlm::TLM_READ_COMMAND] = fsm_hndl;
- 338 write_ar(*fsm_hndl->
trans);
- 339 ar_evt.notify(sc_core::SC_ZERO_TIME);
-
- 341 case tlm::TLM_WRITE_COMMAND:
- 342 SCCTRACE(SCMOD) <<
"in BegReqE for trans " << *fsm_hndl->
trans;
- 343 active_req[tlm::TLM_WRITE_COMMAND] = fsm_hndl;
-
- 345 write_aw(*fsm_hndl->
trans);
- 346 aw_evt.notify(sc_core::SC_ZERO_TIME);
-
-
- 349 auto ext = fsm_hndl->
trans->get_extension<ace_extension>();
- 350 if(!axi::is_dataless(ext)) {
-
-
+
+ 205 sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
+ 206 this->ar_addr.write(addr);
+
+ 208 this->ar_prot.write(ext->get_prot());
+
+ 210 this->ar_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(ext->get_id()));
+ 211 this->ar_len->write(sc_dt::sc_uint<8>(ext->get_length()));
+ 212 this->ar_size->write(sc_dt::sc_uint<3>(ext->get_size()));
+ 213 this->ar_burst->write(sc_dt::sc_uint<2>(
axi::to_int(ext->get_burst())));
+ 214 if(ext->is_exclusive())
+ 215 this->ar_lock->write(
true);
+ 216 this->ar_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
+ 217 this->ar_prot.write(ext->get_prot());
+ 218 this->ar_qos->write(ext->get_qos());
+ 219 this->ar_region->write(ext->get_region());
+ 220 this->ar_domain->write(sc_dt::sc_uint<2>((uint8_t)ext->get_domain()));
+ 221 this->ar_snoop->write(sc_dt::sc_uint<4>((uint8_t)ext->get_snoop()));
+ 222 this->ar_bar->write(sc_dt::sc_uint<2>((uint8_t)ext->get_barrier()));
+ 223 this->ar_user->write(ext->get_user(axi::common::id_type::CTRL));
+
+
+
+
+ 228 sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
+ 229 this->aw_addr.write(addr);
+
+ 231 this->aw_prot.write(ext->get_prot());
+ 232 if(ext->is_exclusive())
+ 233 this->aw_lock->write(
true);
+ 234 if(this->aw_id.get_interface())
+ 235 this->aw_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(ext->get_id()));
+ 236 this->aw_len->write(sc_dt::sc_uint<8>(ext->get_length()));
+ 237 this->aw_size->write(sc_dt::sc_uint<3>(ext->get_size()));
+ 238 this->aw_burst->write(sc_dt::sc_uint<2>(
axi::to_int(ext->get_burst())));
+ 239 this->aw_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
+ 240 this->aw_qos->write(sc_dt::sc_uint<4>(ext->get_qos()));
+ 241 this->aw_region->write(sc_dt::sc_uint<4>(ext->get_region()));
+ 242 this->aw_user->write(ext->get_user(axi::common::id_type::CTRL));
+ 243 this->aw_domain->write(sc_dt::sc_uint<2>((uint8_t)ext->get_domain()));
+ 244 this->aw_snoop->write(sc_dt::sc_uint<CFG::AWSNOOPWIDTH>((uint8_t)ext->get_snoop()));
+ 245 this->aw_bar->write(sc_dt::sc_uint<2>((uint8_t)ext->get_barrier()));
+ 246 this->aw_unique->write(ext->get_unique());
+
+
+
+
+
+ 252 typename CFG::data_t data{0};
+ 253 sc_dt::sc_uint<CFG::BUSWIDTH / 8> strb{0};
+
+
+ 256 auto byte_offset = beat * size;
+ 257 auto offset = (trans.get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
+ 258 auto beptr = trans.get_byte_enable_length() ? trans.get_byte_enable_ptr() + byte_offset :
nullptr;
+ 259 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
+
+ 261 auto dptr = trans.get_data_ptr();
+
+ 263 for(
size_t i = offset; i < size; ++i, ++dptr) {
+ 264 auto bit_offs = i * 8;
+ 265 data(bit_offs + 7, bit_offs) = *dptr;
+
+ 267 strb[i] = *beptr == 0xff;
+
+
+
+
+
+ 273 auto beat_start_idx = byte_offset - offset;
+ 274 auto data_len = trans.get_data_length();
+ 275 auto dptr = trans.get_data_ptr() + beat_start_idx;
+
+ 277 for(
size_t i = 0; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
+ 278 auto bit_offs = i * 8;
+ 279 data(bit_offs + 7, bit_offs) = *dptr;
+
+ 281 strb[i] = *beptr == 0xff;
+
+
+
+
+
+
+ 288 auto dptr = trans.get_data_ptr() + byte_offset;
+
+ 290 for(
size_t i = 0; i < size; ++i, ++dptr) {
+ 291 auto bit_offs = (offset + i) * 8;
+ 292 data(bit_offs + 7, bit_offs) = *dptr;
+
+ 294 strb[offset + i] = *beptr == 0xff;
+
+
+ 297 strb[offset + i] =
true;
+
+
+ 300 this->w_data.write(data);
+ 301 this->w_strb.write(strb);
+
+ 303 this->w_id->write(ext->get_id());
+ 304 if(this->w_user.get_interface())
+ 305 this->w_user->write(ext->get_user(axi::common::id_type::DATA));
+
+
+
+
+
+
+
+ 313 auto byte_offset = beat_count * size;
+ 314 auto offset = (fsm_hndl->
trans->get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
+ 315 typename CFG::data_t data{0};
+ 316 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
+ 317 if(beat_count == 0) {
+ 318 auto dptr = fsm_hndl->
trans->get_data_ptr();
+ 319 for(
size_t i = offset; i < size; ++i, ++dptr) {
+ 320 auto bit_offs = i * 8;
+ 321 data(bit_offs + 7, bit_offs) = *dptr;
+
+
+ 324 auto beat_start_idx = byte_offset - offset;
+ 325 auto data_len = fsm_hndl->
trans->get_data_length();
+ 326 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_start_idx;
+ 327 for(
size_t i = offset; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
+ 328 auto bit_offs = i * 8;
+ 329 data(bit_offs + 7, bit_offs) = *dptr;
+
+
+
+ 333 auto dptr = fsm_hndl->
trans->get_data_ptr() + byte_offset;
+ 334 for(
size_t i = 0; i < size; ++i, ++dptr) {
+ 335 auto bit_offs = (offset + i) * 8;
+ 336 data(bit_offs + 7, bit_offs) = *dptr;
+
+
+
+
+
+
+ 343 fsm_hndl->
fsm->cb[RequestPhaseBeg] = [
this, fsm_hndl]() ->
void {
+
+ 345 SCCTRACE(SCMOD) <<
" for snoop in RequestPhaseBeg ";
+
+
+ 348 outstanding_cnt[fsm_hndl->
trans->get_command()]++;
+
+ 350 auto offset = fsm_hndl->
trans->get_address() % (CFG::BUSWIDTH / 8);
+ 351 if(offset + fsm_hndl->
trans->get_data_length() > CFG::BUSWIDTH / 8) {
+ 352 SCCFATAL(SCMOD) <<
" transaction " << *fsm_hndl->
trans <<
" is not AXI4Lite compliant";
- 357 fsm_hndl->
fsm->cb[EndReqE] = [
this, fsm_hndl]() ->
void {
-
- 359 active_req[SNOOP] =
nullptr;
- 360 ac_end_req_evt.notify();
-
- 362 switch(fsm_hndl->
trans->get_command()) {
- 363 case tlm::TLM_READ_COMMAND:
- 364 rd_resp_by_id[axi::get_axi_id(*fsm_hndl->
trans)].push_back(fsm_hndl);
- 365 active_req[tlm::TLM_READ_COMMAND] =
nullptr;
-
- 367 case tlm::TLM_WRITE_COMMAND:
- 368 SCCTRACE(SCMOD) <<
"in EndReq for trans " << *fsm_hndl->
trans;
- 369 wr_resp_by_id[axi::get_axi_id(*fsm_hndl->
trans)].push_back(fsm_hndl);
- 370 active_req[tlm::TLM_WRITE_COMMAND] =
nullptr;
-
-
- 373 tlm::tlm_phase phase = tlm::END_REQ;
- 374 sc_core::sc_time t = (sc_core::SC_ZERO_TIME);
- 375 SCCTRACE(SCMOD) <<
" in EndReq before set_resp";
+ 357 fsm_hndl->fsm->cb[BegPartReqE] = [this, fsm_hndl]() -> void {
+ 358 sc_assert(fsm_hndl->trans->is_write());
+
+ 360 aw_fifo.push_back({fsm_hndl->trans.get(), false});
+
+ 362 wdata_fifo.push_back({fsm_hndl->trans.get(), false, wdata_fifo.num_avail()>0, fsm_hndl->beat_count});
+ 363 if(pipelined_wrreq && !wdata_fifo.num_avail())
+ 364 schedule(EndPartReqE, fsm_hndl->trans, sc_core::SC_ZERO_TIME);
+
+ 366 fsm_hndl->fsm->cb[EndPartReqE] = [this, fsm_hndl]() -> void {
+ 367 tlm::tlm_phase phase = axi::END_PARTIAL_REQ;
+ 368 sc_core::sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : sc_core::SC_ZERO_TIME);
+ 369 auto ret = tsckt->nb_transport_bw(*fsm_hndl->trans, phase, t);
+
+
+ 372 fsm_hndl->fsm->cb[BegReqE] = [this, fsm_hndl]() -> void {
+
+ 374 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
+ 375 tlm::tlm_phase phase = tlm::BEGIN_REQ;
376 auto ret = tsckt->nb_transport_bw(*fsm_hndl->trans, phase, t);
- 377 fsm_hndl->
trans->set_response_status(tlm::TLM_OK_RESPONSE);
-
-
- 380 fsm_hndl->
fsm->cb[BegPartRespE] = [
this, fsm_hndl]() ->
void {
-
- 382 active_resp_beat[SNOOP] = fsm_hndl;
- 383 cd_vl.notify({1, fsm_hndl});
-
-
-
- 387 assert(fsm_hndl->
trans->is_read());
- 388 tlm::tlm_phase phase = axi::BEGIN_PARTIAL_RESP;
- 389 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
- 390 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
-
-
- 393 fsm_hndl->
fsm->cb[EndPartRespE] = [
this, fsm_hndl]() ->
void {
- 394 SCCTRACE(SCMOD) <<
"in EndPartRespE of setup_cb ";
-
- 396 tlm::tlm_phase phase = axi::END_PARTIAL_RESP;
- 397 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
- 398 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
-
- 400 active_resp_beat[SNOOP] =
nullptr;
-
-
-
- 404 r_end_resp_evt.notify();
-
-
- 407 fsm_hndl->
fsm->cb[BegRespE] = [
this, fsm_hndl]() ->
void {
- 408 SCCTRACE(SCMOD) <<
"in setup_cb, processing event BegRespE for trans " << *fsm_hndl->
trans;
-
- 410 active_resp_beat[SNOOP] = fsm_hndl;
- 411 cd_vl.notify({3, fsm_hndl});
- 412 cr_resp_vl.notify({3, fsm_hndl});
-
-
-
- 416 tlm::tlm_phase phase = tlm::BEGIN_RESP;
- 417 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
- 418 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
-
-
- 421 fsm_hndl->
fsm->cb[EndRespE] = [
this, fsm_hndl]() ->
void {
- 422 SCCTRACE(SCMOD) <<
"in EndResp of setup_cb for trans" << *fsm_hndl->
trans;
-
+
+ 378 switch(fsm_hndl->
trans->get_command()) {
+ 379 case tlm::TLM_READ_COMMAND:
+ 380 ar_fifo.push_back({fsm_hndl->
trans.
get(),
false});
+
+ 382 case tlm::TLM_WRITE_COMMAND:
+
+ 384 aw_fifo.push_back({fsm_hndl->
trans.
get(),
false});
+
+
+ 387 if(!axi::is_dataless(fsm_hndl->
trans->get_extension<ace_extension>())) {
+ 388 wdata_fifo.push_back({fsm_hndl->
trans.
get(),
true, wdata_fifo.num_avail()>0, fsm_hndl->
beat_count});
+ 389 if(pipelined_wrreq && !wdata_fifo.num_avail())
+ 390 schedule(EndReqE, fsm_hndl->
trans, sc_core::SC_ZERO_TIME);
+
+
+
+
+ 395 fsm_hndl->
fsm->cb[EndReqE] = [
this, fsm_hndl]() ->
void {
+
+ 397 active_req[SNOOP] =
nullptr;
+ 398 ac_end_req_evt.notify();
+
+ 400 auto id = axi::get_axi_id(*fsm_hndl->
trans);
+ 401 switch(fsm_hndl->
trans->get_command()) {
+ 402 case tlm::TLM_READ_COMMAND:
+ 403 rd_resp_by_id[id].push_back(fsm_hndl);
+
+ 405 case tlm::TLM_WRITE_COMMAND:
+ 406 wr_resp_by_id[id].push_back(fsm_hndl);
+
+
+ 409 tlm::tlm_phase phase = tlm::END_REQ;
+ 410 sc_core::sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : sc_core::SC_ZERO_TIME);
+ 411 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+ 412 fsm_hndl->
trans->set_response_status(tlm::TLM_OK_RESPONSE);
+
+
+ 415 fsm_hndl->
fsm->cb[BegPartRespE] = [
this, fsm_hndl]() ->
void {
+
+ 417 active_resp_beat[SNOOP] = fsm_hndl;
+ 418 cd_vl.notify({1, fsm_hndl});
+
+
+
+ 422 assert(fsm_hndl->
trans->is_read());
+ 423 tlm::tlm_phase phase = axi::BEGIN_PARTIAL_RESP;
424 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
- 425 tlm::tlm_phase phase = tlm::END_RESP;
- 426 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
- 427 active_resp_beat[SNOOP] =
nullptr;
-
- 429 fsm_hndl->
finish.notify();
-
- 431 if(fsm_hndl->
trans->is_read()) {
- 432 rd_resp_by_id[axi::get_axi_id(*fsm_hndl->
trans)].pop_front();
- 433 r_end_resp_evt.notify();
- 434 }
else if(fsm_hndl->
trans->is_write()) {
- 435 wr_resp_by_id[axi::get_axi_id(*fsm_hndl->
trans)].pop_front();
- 436 w_end_resp_evt.notify();
-
-
-
- 440 fsm_hndl->
fsm->cb[Ack] = [
this, fsm_hndl]() ->
void {
- 441 SCCTRACE(SCMOD) <<
"in ACK of setup_cb for " << *fsm_hndl->
trans;
- 442 if(fsm_hndl->
trans->is_read()) {
- 443 rack_vl.notify(sc_core::SC_ZERO_TIME);
-
- 445 if(fsm_hndl->
trans->is_write()) {
- 446 wack_vl.notify(sc_core::SC_ZERO_TIME);
-
-
-
-
-
- 452 this->r_ack.write(
false);
- 453 wait(sc_core::SC_ZERO_TIME);
-
-
- 456 this->r_ack.write(
true);
- 457 wait(clk_i.posedge_event());
- 458 this->r_ack.write(
false);
-
-
-
-
- 463 this->w_ack.write(
false);
- 464 wait(sc_core::SC_ZERO_TIME);
-
-
- 467 this->w_ack.write(
true);
- 468 wait(clk_i.posedge_event());
- 469 this->w_ack.write(
false);
-
-
-
-
- 474 this->ar_valid.write(
false);
- 475 wait(sc_core::SC_ZERO_TIME);
-
-
- 478 this->ar_valid.write(
true);
-
- 480 wait(this->ar_ready.posedge_event() | clk_delayed);
- 481 if(this->ar_ready.read())
- 482 react(axi::fsm::protocol_time_point_e::EndReqE, active_req[tlm::TLM_READ_COMMAND]);
- 483 }
while(!this->ar_ready.read());
- 484 wait(clk_i.posedge_event());
- 485 this->ar_valid.write(
false);
-
-
-
-
- 490 this->r_ready.write(
false);
- 491 wait(sc_core::SC_ZERO_TIME);
-
- 493 wait(this->r_valid.posedge_event() | clk_delayed);
- 494 if(this->r_valid.event() || (!active_resp[tlm::TLM_READ_COMMAND] && this->r_valid.read())) {
- 495 wait(sc_core::SC_ZERO_TIME);
- 496 auto id = CFG::IS_LITE ? 0U : this->r_id->read().to_uint();
- 497 auto data = this->r_data.read();
- 498 auto resp = this->r_resp.read();
- 499 SCCTRACE(SCMOD) <<
" r_t() get r_resp = " << resp;
- 500 auto& q = rd_resp_by_id[id];
-
- 502 auto* fsm_hndl = q.front();
-
-
- 505 auto byte_offset = beat_count * size;
- 506 auto offset = (fsm_hndl->
trans->get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
- 507 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
- 508 if(beat_count == 0) {
- 509 auto dptr = fsm_hndl->
trans->get_data_ptr();
-
- 511 for(
size_t i = offset; i < size; ++i, ++dptr) {
- 512 auto bit_offs = i * 8;
- 513 *dptr = data(bit_offs + 7, bit_offs).to_uint();
-
-
- 516 auto beat_start_idx = beat_count * size - offset;
- 517 auto data_len = fsm_hndl->
trans->get_data_length();
- 518 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_start_idx;
-
- 520 for(
size_t i = offset; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
- 521 auto bit_offs = i * 8;
- 522 *dptr = data(bit_offs + 7, bit_offs).to_uint();
-
-
-
- 526 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_count * size;
-
- 528 for(
size_t i = 0; i < size; ++i, ++dptr) {
- 529 auto bit_offs = (offset + i) * 8;
- 530 *dptr = data(bit_offs + 7, bit_offs).to_uint();
-
-
-
- 534 fsm_hndl->
trans->get_extension(e);
-
-
-
-
- 539 if(axi::is_dataless(e)) {
- 540 SCCTRACE(SCMOD) <<
" r_t() for Make/Clean/Barrier Trans" << *fsm_hndl->
trans;
- 541 react(axi::fsm::protocol_time_point_e::BegRespE, fsm_hndl);
-
- 543 auto tp = CFG::IS_LITE || this->r_last->read() ? axi::fsm::protocol_time_point_e::BegRespE
- 544 : axi::fsm::protocol_time_point_e::BegPartRespE;
-
-
- 547 wait(r_end_resp_evt);
- 548 this->r_ready->write(
true);
- 549 wait(clk_i.posedge_event());
- 550 this->r_ready.write(
false);
-
-
-
-
-
- 556 this->aw_valid.write(
false);
- 557 wait(sc_core::SC_ZERO_TIME);
-
-
- 560 this->aw_valid.write(
true);
- 561 SCCTRACE(SCMOD) <<
" aw_t() write aw_valid ";
-
- 563 wait(this->aw_ready.posedge_event() | clk_delayed);
- 564 }
while(!this->aw_ready.read());
- 565 auto* fsm_hndl = active_req[tlm::TLM_WRITE_COMMAND];
-
- 567 react(axi::fsm::protocol_time_point_e::EndReqE, fsm_hndl);
- 568 wait(clk_i.posedge_event());
- 569 this->aw_valid.write(
false);
-
-
-
-
- 574 this->w_valid.write(
false);
- 575 wait(sc_core::SC_ZERO_TIME);
-
-
- 578 this->w_last->write(
false);
- 579 wait(wdata_vl.default_event());
- 580 auto val = wdata_vl.read();
- 581 SCCTRACE(SCMOD) <<
"wdata_t() with wdata_vl = " << (uint16_t)val;
- 582 this->w_valid.write(val & 0x1);
-
- 584 this->w_last->write(val & 0x2);
-
- 586 wait(this->w_ready.posedge_event() | clk_delayed);
-
- 588 if(this->w_ready.read()) {
-
- 590 CFG::IS_LITE || (val & 0x2) ? axi::fsm::protocol_time_point_e::EndReqE : axi::fsm::protocol_time_point_e::EndPartReqE;
- 591 react(evt, active_req[tlm::TLM_WRITE_COMMAND]);
-
- 593 }
while(!this->w_ready.read());
- 594 wait(clk_i.posedge_event());
- 595 this->w_valid.write(
false);
-
-
-
-
- 600 this->b_ready.write(
false);
- 601 wait(sc_core::SC_ZERO_TIME);
-
- 603 wait(this->b_valid.posedge_event() | clk_delayed);
- 604 if(this->b_valid.event() || (!active_resp[tlm::TLM_WRITE_COMMAND] && this->b_valid.read())) {
- 605 auto id = !CFG::IS_LITE ? this->b_id->read().to_uint() : 0U;
- 606 auto resp = this->b_resp.read();
- 607 auto& q = wr_resp_by_id[id];
-
- 609 auto* fsm_hndl = q.front();
-
- 611 fsm_hndl->
trans->get_extension(e);
- 612 e->
set_resp(axi::into<axi::resp_e>(resp));
- 613 react(axi::fsm::protocol_time_point_e::BegRespE, fsm_hndl);
-
- 615 wait(w_end_resp_evt);
- 616 this->b_ready.write(
true);
- 617 wait(clk_i.posedge_event());
- 618 this->b_ready.write(
false);
+ 425 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+
+
+ 428 fsm_hndl->
fsm->cb[EndPartRespE] = [
this, fsm_hndl]() ->
void {
+
+ 430 tlm::tlm_phase phase = axi::END_PARTIAL_RESP;
+ 431 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
+ 432 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+
+ 434 active_resp_beat[SNOOP] =
nullptr;
+
+
+
+ 438 r_end_resp_evt.notify();
+
+
+ 441 fsm_hndl->
fsm->cb[BegRespE] = [
this, fsm_hndl]() ->
void {
+
+ 443 active_resp_beat[SNOOP] = fsm_hndl;
+ 444 cd_vl.notify({3, fsm_hndl});
+ 445 cr_resp_vl.notify({3, fsm_hndl});
+
+
+
+ 449 tlm::tlm_phase phase = tlm::BEGIN_RESP;
+ 450 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
+ 451 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+
+
+ 454 fsm_hndl->
fsm->cb[EndRespE] = [
this, fsm_hndl]() ->
void {
+
+ 456 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
+ 457 tlm::tlm_phase phase = tlm::END_RESP;
+ 458 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+ 459 active_resp_beat[SNOOP] =
nullptr;
+
+ 461 fsm_hndl->
finish.notify();
+
+ 463 if(fsm_hndl->
trans->is_read()) {
+ 464 rd_resp_by_id[axi::get_axi_id(*fsm_hndl->
trans)].pop_front();
+ 465 r_end_resp_evt.notify();
+ 466 }
else if(fsm_hndl->
trans->is_write()) {
+ 467 wr_resp_by_id[axi::get_axi_id(*fsm_hndl->
trans)].pop_front();
+ 468 w_end_resp_evt.notify();
+
+
+
+ 472 fsm_hndl->
fsm->cb[Ack] = [
this, fsm_hndl]() ->
void {
+ 473 SCCTRACE(SCMOD) <<
"in ACK of setup_cb for " << *fsm_hndl->
trans;
+ 474 if(fsm_hndl->
trans->is_read()) {
+ 475 rack_vl.notify(sc_core::SC_ZERO_TIME);
+
+ 477 if(fsm_hndl->
trans->is_write()) {
+ 478 wack_vl.notify(sc_core::SC_ZERO_TIME);
+
+
+
+
+
+ 484 this->r_ack.write(
false);
+ 485 wait(sc_core::SC_ZERO_TIME);
+
+
+ 488 this->r_ack.write(
true);
+ 489 wait(clk_i.posedge_event());
+ 490 this->r_ack.write(
false);
+
+
+
+
+ 495 this->w_ack.write(
false);
+ 496 wait(sc_core::SC_ZERO_TIME);
+
+
+ 499 this->w_ack.write(
true);
+ 500 wait(clk_i.posedge_event());
+ 501 this->w_ack.write(
false);
+
+
+
+
+ 506 this->ar_valid.write(
false);
+ 507 wait(sc_core::SC_ZERO_TIME);
+
+ 509 auto val = ar_fifo.read();
+
+ 511 this->ar_valid.write(
true);
+
+ 513 wait(this->ar_ready.posedge_event() | clk_delayed);
+ 514 if(this->ar_ready.read())
+ 515 react(axi::fsm::protocol_time_point_e::EndReqE, val.gp);
+ 516 }
while(!this->ar_ready.read());
+ 517 wait(clk_i.posedge_event());
+ 518 this->ar_valid.write(
false);
+
+
+
+
+ 523 this->r_ready.write(
false);
+ 524 wait(sc_core::SC_ZERO_TIME);
+
+ 526 if(!this->r_valid.read())
+ 527 wait(this->r_valid.posedge_event());
+
+
+ 530 if(this->r_valid.event() || (!active_resp[tlm::TLM_READ_COMMAND] && this->r_valid.read())) {
+ 531 wait(sc_core::SC_ZERO_TIME);
+ 532 auto id = CFG::IS_LITE ? 0U : this->r_id->read().to_uint();
+ 533 auto data = this->r_data.read();
+ 534 auto resp = this->r_resp.read();
+ 535 auto& q = rd_resp_by_id[id];
+ 536 sc_assert(q.size() &&
"No transaction found for received id");
+ 537 auto* fsm_hndl = q.front();
+
+
+ 540 auto byte_offset = beat_count * size;
+ 541 auto offset = (fsm_hndl->
trans->get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
+ 542 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
+ 543 if(beat_count == 0) {
+ 544 auto dptr = fsm_hndl->
trans->get_data_ptr();
+
+ 546 for(
size_t i = offset; i < size; ++i, ++dptr) {
+ 547 auto bit_offs = i * 8;
+ 548 *dptr = data(bit_offs + 7, bit_offs).to_uint();
+
+
+ 551 auto beat_start_idx = beat_count * size - offset;
+ 552 auto data_len = fsm_hndl->
trans->get_data_length();
+ 553 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_start_idx;
+
+ 555 for(
size_t i = offset; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
+ 556 auto bit_offs = i * 8;
+ 557 *dptr = data(bit_offs + 7, bit_offs).to_uint();
+
+
+
+ 561 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_count * size;
+
+ 563 for(
size_t i = 0; i < size; ++i, ++dptr) {
+ 564 auto bit_offs = (offset + i) * 8;
+ 565 *dptr = data(bit_offs + 7, bit_offs).to_uint();
+
+
+
+ 569 fsm_hndl->
trans->get_extension(e);
+
+
+
+
+ 574 if(axi::is_dataless(e)) {
+ 575 SCCTRACE(SCMOD) <<
" r_t() for Make/Clean/Barrier Trans" << *fsm_hndl->
trans;
+ 576 react(axi::fsm::protocol_time_point_e::BegRespE, fsm_hndl);
+
+ 578 auto tp = CFG::IS_LITE || this->r_last->read() ? axi::fsm::protocol_time_point_e::BegRespE
+ 579 : axi::fsm::protocol_time_point_e::BegPartRespE;
+
+
+ 582 wait(r_end_resp_evt);
+ 583 this->r_ready.write(
true);
+ 584 wait(clk_i.posedge_event());
+ 585 this->r_ready.write(
false);
+
+
+
+
+
+ 591 this->aw_valid.write(
false);
+ 592 wait(sc_core::SC_ZERO_TIME);
+
+ 594 auto val = aw_fifo.read();
+
+ 596 this->aw_valid.write(
true);
+
+ 598 wait(this->aw_ready.posedge_event() | clk_delayed);
+ 599 }
while(!this->aw_ready.read());
+ 600 if(axi::is_dataless(val.gp->template get_extension<axi::ace_extension>()))
+ 601 schedule(axi::fsm::protocol_time_point_e::EndReqE, val.gp, sc_core::SC_ZERO_TIME);
+ 602 wait(clk_i.posedge_event());
+ 603 this->aw_valid.write(
false);
+
+
+
+
+ 608 this->w_valid.write(
false);
+ 609 wait(sc_core::SC_ZERO_TIME);
+
+
+ 612 this->w_last->write(
false);
+ 613 if(pipelined_wrreq) {
+ 614 while(!wdata_fifo.num_avail()) {
+ 615 wait(clk_i.posedge_event());
+
+
+ 618 wait(wdata_fifo.data_written_event());
-
-
-
- 623 this->ac_ready.write(
false);
- 624 wait(sc_core::SC_ZERO_TIME);
-
-
-
- 628 auto arlen = ((CACHELINE_SZ - 1) / CFG::BUSWIDTH / 8);
-
-
- 631 auto data_len = (1 << arsize) * (arlen + 1);
-
- 633 wait(this->ac_valid.posedge_event() | clk_delayed);
- 634 if(this->ac_valid.read()) {
- 635 SCCTRACE(SCMOD) <<
"ACVALID detected, for address 0x" << std::hex << this->ac_addr.read();
- 636 SCCTRACE(SCMOD) <<
"in ac_t(), create snoop trans with data_len= " << data_len;
-
- 638 gp->set_address(this->ac_addr.read());
- 639 gp->set_command(tlm::TLM_READ_COMMAND);
- 640 gp->set_streaming_width(data_len);
-
- 642 gp->get_extension(ext);
-
- 644 if(data_len == (CFG::BUSWIDTH / 8))
-
-
-
- 648 ext->
set_snoop(axi::into<axi::snoop_e>(this->ac_snoop->read()));
- 649 ext->
set_prot(this->ac_prot->read());
-
-
-
-
- 654 active_req[SNOOP] = find_or_create(gp,
true);
- 655 active_req[SNOOP]->is_snoop =
true;
- 656 react(axi::fsm::protocol_time_point_e::RequestPhaseBeg, active_req[SNOOP]);
-
- 658 wait(ac_end_req_evt);
- 659 this->ac_ready.write(
true);
- 660 wait(clk_i.posedge_event());
- 661 this->ac_ready.write(
false);
-
-
-
-
- 666 this->cd_valid.write(
false);
- 667 wait(sc_core::SC_ZERO_TIME);
-
-
-
-
- 672 std::tie(val, fsm_hndl) = cd_vl.get();
- 673 SCCTRACE(SCMOD) << __FUNCTION__ <<
" val = " << (uint16_t)val <<
" beat_count = " << fsm_hndl->
beat_count;
- 674 SCCTRACE(SCMOD) << __FUNCTION__ <<
" got snoop beat of trans " << *fsm_hndl->
trans;
-
-
- 677 this->cd_data.write(get_cache_data_for_beat(fsm_hndl));
- 678 this->cd_valid.write(val & 0x1);
- 679 SCCTRACE(SCMOD) << __FUNCTION__ <<
"() write cd_valid high ";
- 680 this->cd_last->write(val & 0x2);
-
- 682 wait(this->cd_ready.posedge_event() | clk_delayed);
- 683 if(this->cd_ready.read()) {
-
- 685 CFG::IS_LITE || (val & 0x2) ? axi::fsm::protocol_time_point_e::EndRespE : axi::fsm::protocol_time_point_e::EndPartRespE;
-
-
-
- 689 SCCTRACE(SCMOD) << __FUNCTION__ <<
"() receives cd_ready high, schedule evt " << evt2str(evt);
- 690 react(evt, active_resp_beat[SNOOP]);
-
-
- 693 }
while(!this->cd_ready.read());
- 694 SCCTRACE(SCMOD) << __FUNCTION__ <<
" finished snoop beat of trans [" << fsm_hndl->
trans <<
"]";
- 695 wait(clk_i.posedge_event());
- 696 this->cd_valid.write(
false);
-
- 698 this->cd_last->write(
false);
-
-
-
- 702 this->cr_valid.write(
false);
- 703 wait(sc_core::SC_ZERO_TIME);
-
-
-
-
- 708 std::tie(val, fsm_hndl) = cr_resp_vl.get();
- 709 SCCTRACE(SCMOD) << __FUNCTION__ <<
" (), generate snoop response in cr channel, val = " << (uint16_t)val
- 710 <<
" total beat_num = " << fsm_hndl->
beat_count;
-
-
-
- 714 this->cr_valid.write(
true);
-
- 716 wait(this->cr_ready.posedge_event() | clk_delayed);
- 717 if(this->cr_ready.read()) {
- 718 auto evt = axi::fsm::protocol_time_point_e::EndRespE;
- 719 SCCTRACE(SCMOD) << __FUNCTION__ <<
"(), schedule EndRespE ";
- 720 react(evt, active_resp_beat[SNOOP]);
-
- 722 }
while(!this->cr_ready.read());
- 723 SCCTRACE(SCMOD) <<
"finished snoop response ";
- 724 wait(clk_i.posedge_event());
- 725 this->cr_valid.write(
false);
-
-
+ 620 auto val = wdata_fifo.front();
+ 621 wdata_fifo.pop_front();
+ 622 write_wdata(*val.gp, val.beat_num);
+ 623 if(pipelined_wrreq && val.needs_end_req) {
+ 624 auto evt = CFG::IS_LITE || (val.last) ? axi::fsm::protocol_time_point_e::EndReqE : axi::fsm::protocol_time_point_e::EndPartReqE;
+ 625 schedule(evt, val.gp, sc_core::SC_ZERO_TIME);
+
+ 627 this->w_valid.write(true);
+
+ 629 this->w_last->write(val.last);
+
+ 631 wait(this->w_ready.posedge_event() | clk_delayed);
+ 632 if(!pipelined_wrreq && this->w_ready.read()) {
+ 633 auto evt = val.last ? axi::fsm::protocol_time_point_e::EndReqE : axi::fsm::protocol_time_point_e::EndPartReqE;
+ 634 schedule(evt, val.gp, sc_core::SC_ZERO_TIME);
+
+ 636 } while(!this->w_ready.read());
+ 637 wait(clk_i.posedge_event());
+ 638 this->w_valid.write(false);
+
+
+
+
+ 643 this->b_ready.write(
false);
+ 644 wait(sc_core::SC_ZERO_TIME);
+
+ 646 wait(this->b_valid.posedge_event() | clk_delayed);
+ 647 if(this->b_valid.event() || (!active_resp[tlm::TLM_WRITE_COMMAND] && this->b_valid.read())) {
+ 648 auto id = !CFG::IS_LITE ? this->b_id->read().to_uint() : 0U;
+ 649 auto resp = this->b_resp.read();
+ 650 auto& q = wr_resp_by_id[id];
+
+ 652 auto* fsm_hndl = q.front();
+
+ 654 fsm_hndl->
trans->get_extension(e);
+ 655 e->
set_resp(axi::into<axi::resp_e>(resp));
+ 656 react(axi::fsm::protocol_time_point_e::BegRespE, fsm_hndl);
+ 657 wait(w_end_resp_evt);
+ 658 this->b_ready.write(
true);
+ 659 wait(clk_i.posedge_event());
+ 660 this->b_ready.write(
false);
+
+
+
+
+ 665 this->ac_ready.write(
false);
+ 666 wait(sc_core::SC_ZERO_TIME);
+
+
+
+ 670 auto arlen = ((CACHELINE_SZ - 1) / CFG::BUSWIDTH / 8);
+
+
+ 673 auto data_len = (1 << arsize) * (arlen + 1);
+
+ 675 wait(this->ac_valid.posedge_event() | clk_delayed);
+ 676 if(this->ac_valid.read()) {
+ 677 SCCTRACE(SCMOD) <<
"ACVALID detected, for address 0x" << std::hex << this->ac_addr.read();
+ 678 SCCTRACE(SCMOD) <<
"in ac_t(), create snoop trans with data_len= " << data_len;
+
+ 680 gp->set_address(this->ac_addr.read());
+ 681 gp->set_command(tlm::TLM_READ_COMMAND);
+ 682 gp->set_streaming_width(data_len);
+
+ 684 gp->get_extension(ext);
+
+ 686 if(data_len == (CFG::BUSWIDTH / 8))
+
+
+
+ 690 ext->
set_snoop(axi::into<axi::snoop_e>(this->ac_snoop->read()));
+ 691 ext->
set_prot(this->ac_prot->read());
+
+
+
+
+ 696 active_req[SNOOP] = find_or_create(gp,
true);
+ 697 active_req[SNOOP]->is_snoop =
true;
+ 698 react(axi::fsm::protocol_time_point_e::RequestPhaseBeg, active_req[SNOOP]);
+
+ 700 wait(ac_end_req_evt);
+ 701 this->ac_ready.write(
true);
+ 702 wait(clk_i.posedge_event());
+ 703 this->ac_ready.write(
false);
+
+
+
+
+ 708 this->cd_valid.write(
false);
+ 709 wait(sc_core::SC_ZERO_TIME);
+
+
+
+
+ 714 std::tie(val, fsm_hndl) = cd_vl.get();
+ 715 SCCTRACE(SCMOD) << __FUNCTION__ <<
" val = " << (uint16_t)val <<
" beat_count = " << fsm_hndl->
beat_count;
+ 716 SCCTRACE(SCMOD) << __FUNCTION__ <<
" got snoop beat of trans " << *fsm_hndl->
trans;
+
+
+ 719 this->cd_data.write(get_cache_data_for_beat(fsm_hndl));
+ 720 this->cd_valid.write(val & 0x1);
+ 721 SCCTRACE(SCMOD) << __FUNCTION__ <<
"() write cd_valid high ";
+ 722 this->cd_last->write(val & 0x2);
+
+ 724 wait(this->cd_ready.posedge_event() | clk_delayed);
+ 725 if(this->cd_ready.read()) {
+
+ 727 CFG::IS_LITE || (val & 0x2) ? axi::fsm::protocol_time_point_e::EndRespE : axi::fsm::protocol_time_point_e::EndPartRespE;
-
+
+
+ 731 SCCTRACE(SCMOD) << __FUNCTION__ <<
"() receives cd_ready high, schedule evt " << evt2str(evt);
+ 732 react(evt, active_resp_beat[SNOOP]);
+
+
+ 735 }
while(!this->cd_ready.read());
+ 736 SCCTRACE(SCMOD) << __FUNCTION__ <<
" finished snoop beat of trans [" << fsm_hndl->
trans <<
"]";
+ 737 wait(clk_i.posedge_event());
+ 738 this->cd_valid.write(
false);
+
+ 740 this->cd_last->write(
false);
+
+
+
+ 744 this->cr_valid.write(
false);
+ 745 wait(sc_core::SC_ZERO_TIME);
+
+
+
+
+ 750 std::tie(val, fsm_hndl) = cr_resp_vl.get();
+ 751 SCCTRACE(SCMOD) << __FUNCTION__ <<
" (), generate snoop response in cr channel, val = " << (uint16_t)val
+ 752 <<
" total beat_num = " << fsm_hndl->
beat_count;
+
+
+
+ 756 this->cr_valid.write(
true);
+
+ 758 wait(this->cr_ready.posedge_event() | clk_delayed);
+ 759 if(this->cr_ready.read()) {
+ 760 auto evt = axi::fsm::protocol_time_point_e::EndRespE;
+ 761 SCCTRACE(SCMOD) << __FUNCTION__ <<
"(), schedule EndRespE ";
+ 762 react(evt, active_resp_beat[SNOOP]);
+
+ 764 }
while(!this->cr_ready.read());
+ 765 SCCTRACE(SCMOD) <<
"finished snoop response ";
+ 766 wait(clk_i.posedge_event());
+ 767 this->cr_valid.write(
false);
+
+
+
+
+
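(Behavioral note on the change above: with pipelined_wrreq == false the write beat is acknowledged — END_REQ or END_PARTIAL_REQ — only after w_ready has been sampled in the wdata thread, whereas with pipelined_wrreq == true the acknowledge is decoupled from the pin-level handshake: it is scheduled when the beat is queued, or when it is taken from the fifo if other beats were still pending, so the TLM side can issue the next beat back-to-back. A minimal usage sketch, assuming a signal configuration type CFG as used throughout this header; the instance name is arbitrary:)

    // enable the pipelined write-request mode at construction time
    ace_initiator<CFG> intor{"intor", /*pipelined_wrreq=*/true};
    // alternatively, the cci_param "pipelined_wrreq" added by this change can be
    // preset through the CCI configuration broker before elaboration completes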
diff --git a/develop/ace__lite__initiator_8h_source.html b/develop/ace__lite__initiator_8h_source.html
index 0dfe41b4..14ed0ffb 100644
--- a/develop/ace__lite__initiator_8h_source.html
+++ b/develop/ace__lite__initiator_8h_source.html
@@ -24,7 +24,7 @@
scc
- 2022.4.0
+ 2024.03
SystemC components library
@@ -88,498 +88,546 @@
21 #include <axi/fsm/base.h>
22 #include <axi/fsm/protocol_fsm.h>
23 #include <axi/signal_if.h>
-
- 25 #include <tlm/scc/tlm_mm.h>
- 26 #include <tlm_utils/peq_with_cb_and_phase.h>
-
-
-
-
- 33 template <typename CFG>
-
- 35 public aw_ace_lite<CFG, typename CFG::master_types>,
-
- 37 public b_ace_lite<CFG, typename CFG::master_types>,
- 38 public ar_ace_lite<CFG, typename CFG::master_types>,
-
-
-
-
-
- 44 using payload_type = axi::axi_protocol_types::tlm_payload_type;
- 45 using phase_type = axi::axi_protocol_types::tlm_phase_type;
-
- 47 sc_core::sc_in<bool> clk_i{"clk_i"};
-
-
+ 24 #include <cci_cfg/cci_param_typed.h>
+ 25 #include <scc/fifo_w_cb.h>
+
+ 27 #include <tlm/scc/tlm_mm.h>
+ 28 #include <tlm_utils/peq_with_cb_and_phase.h>
+
+
+
+
+ 35 using namespace axi::fsm;
+
+ 37 template <typename CFG>
+
+ 39 public aw_ace_lite<CFG, typename CFG::master_types>,
+
+ 41 public b_ace_lite<CFG, typename CFG::master_types>,
+ 42 public ar_ace_lite<CFG, typename CFG::master_types>,
+
+
+
+
+
+ 48 using payload_type = axi::axi_protocol_types::tlm_payload_type;
+ 49 using phase_type = axi::axi_protocol_types::tlm_phase_type;
-
- 52 : sc_core::sc_module(nm)
-
- 54 , base(CFG::BUSWIDTH, false) {
- 55 instance_name = name();
-
-
- 58 sensitive << clk_i.pos();
-
-
-
-
-
-
-
-
- 67 void b_transport(payload_type& trans, sc_core::sc_time& t)
override {
- 68 trans.set_dmi_allowed(
false);
- 69 trans.set_response_status(tlm::TLM_OK_RESPONSE);
-
-
- 72 tlm::tlm_sync_enum nb_transport_fw(payload_type& trans, phase_type& phase, sc_core::sc_time& t)
override {
-
- 74 sc_core::sc_time delay;
- 75 fw_peq.notify(trans, phase, delay);
- 76 return tlm::TLM_ACCEPTED;
+ 51 sc_core::sc_in<bool> clk_i{"clk_i"};
+
+
+
+ 55 cci::cci_param<bool> pipelined_wrreq{"pipelined_wrreq", false};
+
+
+ 58 : sc_core::sc_module(nm)
+
+ 60 , base(CFG::BUSWIDTH, false)
+ 61 , pipelined_wrreq("pipelined_wrreq", pipelined_wrreq) {
+ 62 instance_name = name();
+
+
+ 65 sensitive << clk_i.pos();
+
+
+
+
+
+
+
+
+ 74 void b_transport(payload_type& trans, sc_core::sc_time& t) override {
+ 75 trans.set_dmi_allowed(false);
+ 76 trans.set_response_status(tlm::TLM_OK_RESPONSE);
- 79 bool get_direct_mem_ptr(payload_type& trans, tlm::tlm_dmi& dmi_data)
override {
- 80 trans.set_dmi_allowed(
false);
-
-
-
- 84 unsigned int transport_dbg(payload_type& trans)
override {
return 0; }
+ 79 tlm::tlm_sync_enum nb_transport_fw(payload_type& trans, phase_type& phase, sc_core::sc_time& t)
override {
+
+ 81 sc_core::sc_time delay;
+ 82 fw_peq.notify(trans, phase, delay);
+ 83 return tlm::TLM_ACCEPTED;
+
- 86 void end_of_elaboration()
override { clk_if =
dynamic_cast<sc_core::sc_clock*
>(clk_i.get_interface()); }
-
-
-
-
-
- 92 void clk_delay() { clk_delayed.notify(axi::CLK_DELAY); }
-
-
-
-
-
-
- 106 static typename CFG::data_t get_cache_data_for_beat(
fsm::fsm_handle* fsm_hndl);
- 107 std::array<unsigned, 3> outstanding_cnt{0, 0, 0};
- 108 std::array<fsm_handle*, 3> active_req{
nullptr,
nullptr,
nullptr};
- 109 std::array<fsm_handle*, 3> active_resp{
nullptr,
nullptr,
nullptr};
- 110 sc_core::sc_clock* clk_if{
nullptr};
- 111 sc_core::sc_event clk_delayed, clk_self, r_end_resp_evt, w_end_resp_evt, aw_evt, ar_evt;
- 112 void nb_fw(payload_type& trans,
const phase_type& phase) {
- 113 auto t = sc_core::SC_ZERO_TIME;
-
-
- 116 tlm_utils::peq_with_cb_and_phase<ace_lite_initiator> fw_peq{
this, &ace_lite_initiator::nb_fw};
- 117 std::unordered_map<unsigned, std::deque<fsm_handle*>> rd_resp_by_id, wr_resp_by_id;
- 118 sc_core::sc_buffer<uint8_t> wdata_vl;
- 119 void write_ar(tlm::tlm_generic_payload& trans);
- 120 void write_aw(tlm::tlm_generic_payload& trans);
- 121 void write_wdata(tlm::tlm_generic_payload& trans,
unsigned beat,
bool last =
false);
-
-
-
-
-
-
- 128 sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
- 129 this->ar_addr.write(addr);
-
- 131 this->ar_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(ext->get_id()));
- 132 this->ar_len->write(sc_dt::sc_uint<8>(ext->get_length()));
- 133 this->ar_size->write(sc_dt::sc_uint<3>(ext->get_size()));
- 134 this->ar_burst->write(sc_dt::sc_uint<2>(
axi::to_int(ext->get_burst())));
- 135 if(ext->is_exclusive())
- 136 this->ar_lock->write(
true);
- 137 this->ar_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
- 138 this->ar_prot.write(ext->get_prot());
- 139 this->ar_qos->write(ext->get_qos());
- 140 this->ar_region->write(ext->get_region());
- 141 this->ar_domain->write(sc_dt::sc_uint<2>((uint8_t)ext->get_domain()));
- 142 this->ar_snoop->write(sc_dt::sc_uint<4>((uint8_t)ext->get_snoop()));
- 143 this->ar_bar->write(sc_dt::sc_uint<2>((uint8_t)ext->get_barrier()));
- 144 this->ar_user->write(ext->get_user(axi::common::id_type::CTRL));
-
-
-
-
- 149 sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
- 150 this->aw_addr.write(addr);
-
- 152 this->aw_prot.write(ext->get_prot());
-
- 154 if(this->aw_id.get_interface())
- 155 this->aw_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(ext->get_id()));
- 156 if(ext->is_exclusive())
- 157 this->aw_lock->write(
true);
- 158 this->aw_len->write(sc_dt::sc_uint<8>(ext->get_length()));
- 159 this->aw_size->write(sc_dt::sc_uint<3>(ext->get_size()));
- 160 this->aw_burst->write(sc_dt::sc_uint<2>(
axi::to_int(ext->get_burst())));
- 161 this->aw_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
- 162 this->aw_qos->write(sc_dt::sc_uint<4>(ext->get_qos()));
- 163 this->aw_region->write(sc_dt::sc_uint<4>(ext->get_region()));
- 164 this->aw_user->write(ext->get_user(axi::common::id_type::CTRL));
- 165 this->aw_domain->write(sc_dt::sc_uint<2>((uint8_t)ext->get_domain()));
- 166 this->aw_snoop->write(sc_dt::sc_uint<CFG::AWSNOOPWIDTH>((uint8_t)ext->get_snoop()));
- 167 this->aw_bar->write(sc_dt::sc_uint<2>((uint8_t)ext->get_barrier()));
-
-
- 170 if(ext->is_stash_nid_en()) {
- 171 this->aw_stashniden->write(
true);
- 172 this->aw_stashnid->write(sc_dt::sc_uint<11>(ext->get_stash_nid()));
-
- 174 if(ext->is_stash_lpid_en()) {
- 175 this->aw_stashlpiden->write(
true);
- 176 this->aw_stashlpid->write(sc_dt::sc_uint<5>(ext->get_stash_lpid()));
-
-
-
-
-
- 182 template <
typename CFG>
-
- 184 typename CFG::data_t data{0};
- 185 sc_dt::sc_uint<CFG::BUSWIDTH / 8> strb{0};
-
-
- 188 auto byte_offset = beat * size;
- 189 auto offset = (trans.get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
- 190 auto beptr = trans.get_byte_enable_length() ? trans.get_byte_enable_ptr() + byte_offset :
nullptr;
- 191 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
-
- 193 auto dptr = trans.get_data_ptr();
-
- 195 for(
size_t i = offset; i < size; ++i, ++dptr) {
- 196 auto bit_offs = i * 8;
- 197 data(bit_offs + 7, bit_offs) = *dptr;
-
- 199 strb[i] = *beptr == 0xff;
-
-
-
-
-
- 205 auto beat_start_idx = byte_offset - offset;
- 206 auto data_len = trans.get_data_length();
- 207 auto dptr = trans.get_data_ptr() + beat_start_idx;
-
- 209 for(
size_t i = 0; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
- 210 auto bit_offs = i * 8;
- 211 data(bit_offs + 7, bit_offs) = *dptr;
-
- 213 strb[i] = *beptr == 0xff;
-
-
-
-
-
-
- 220 auto dptr = trans.get_data_ptr() + byte_offset;
-
- 222 for(
size_t i = 0; i < size; ++i, ++dptr) {
- 223 auto bit_offs = (offset + i) * 8;
- 224 data(bit_offs + 7, bit_offs) = *dptr;
-
- 226 strb[offset + i] = *beptr == 0xff;
-
-
- 229 strb[offset + i] =
true;
-
-
- 232 this->w_data.write(data);
- 233 this->w_strb.write(strb);
-
- 235 this->w_id->write(ext->get_id());
- 236 if(this->w_user.get_interface())
- 237 this->w_user->write(ext->get_user(axi::common::id_type::DATA));
-
-
-
-
- 242 auto beat_count = fsm_hndl->beat_count;
-
-
- 245 auto byte_offset = beat_count * size;
- 246 auto offset = (fsm_hndl->trans->get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
- 247 typename CFG::data_t data{0};
- 248 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
- 249 if(beat_count == 0) {
- 250 auto dptr = fsm_hndl->trans->get_data_ptr();
- 251 for(
size_t i = offset; i < size; ++i, ++dptr) {
- 252 auto bit_offs = i * 8;
- 253 data(bit_offs + 7, bit_offs) = *dptr;
-
-
- 256 auto beat_start_idx = byte_offset - offset;
- 257 auto data_len = fsm_hndl->trans->get_data_length();
- 258 auto dptr = fsm_hndl->trans->get_data_ptr() + beat_start_idx;
- 259 for(
size_t i = offset; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
- 260 auto bit_offs = i * 8;
- 261 data(bit_offs + 7, bit_offs) = *dptr;
-
-
-
- 265 auto dptr = fsm_hndl->trans->get_data_ptr() + byte_offset;
- 266 for(
size_t i = 0; i < size; ++i, ++dptr) {
- 267 auto bit_offs = (offset + i) * 8;
- 268 data(bit_offs + 7, bit_offs) = *dptr;
-
-
-
-
-
-
- 275 fsm_hndl->fsm->cb[RequestPhaseBeg] = [
this, fsm_hndl]() ->
void {
- 276 if(fsm_hndl->is_snoop) {
- 277 SCCTRACE(SCMOD) <<
" for snoop in RequestPhaseBeg ";
-
- 279 fsm_hndl->beat_count = 0;
- 280 outstanding_cnt[fsm_hndl->trans->get_command()]++;
-
- 282 auto offset = fsm_hndl->trans->get_address() % (CFG::BUSWIDTH / 8);
- 283 if(offset + fsm_hndl->trans->get_data_length() > CFG::BUSWIDTH / 8) {
- 284 SCCFATAL(SCMOD) <<
" transaction " << *fsm_hndl->trans <<
" is not AXI4Lite compliant";
-
-
-
-
- 289 fsm_hndl->fsm->cb[BegPartReqE] = [
this, fsm_hndl]() ->
void {
- 290 sc_assert(fsm_hndl->trans->is_write());
- 291 if(fsm_hndl->beat_count == 0) {
- 292 write_aw(*fsm_hndl->trans);
- 293 aw_evt.notify(sc_core::SC_ZERO_TIME);
-
- 295 write_wdata(*fsm_hndl->trans, fsm_hndl->beat_count);
- 296 active_req[tlm::TLM_WRITE_COMMAND] = fsm_hndl;
-
-
- 299 fsm_hndl->fsm->cb[EndPartReqE] = [
this, fsm_hndl]() ->
void {
- 300 active_req[tlm::TLM_WRITE_COMMAND] =
nullptr;
- 301 tlm::tlm_phase phase = axi::END_PARTIAL_REQ;
- 302 sc_core::sc_time t = (clk_if ? clk_if->period() - axi::CLK_DELAY - 1_ps : sc_core::SC_ZERO_TIME);
- 303 auto ret = tsckt->nb_transport_bw(*fsm_hndl->trans, phase, t);
- 304 fsm_hndl->beat_count++;
-
- 306 fsm_hndl->fsm->cb[BegReqE] = [
this, fsm_hndl]() ->
void {
- 307 SCCTRACEALL(SCMOD) <<
"In BegReqE of setup_cb";
- 308 switch(fsm_hndl->trans->get_command()) {
- 309 case tlm::TLM_READ_COMMAND:
- 310 active_req[tlm::TLM_READ_COMMAND] = fsm_hndl;
- 311 write_ar(*fsm_hndl->trans);
- 312 ar_evt.notify(sc_core::SC_ZERO_TIME);
-
- 314 case tlm::TLM_WRITE_COMMAND:
- 315 active_req[tlm::TLM_WRITE_COMMAND] = fsm_hndl;
- 316 if(fsm_hndl->beat_count == 0) {
- 317 write_aw(*fsm_hndl->trans);
- 318 aw_evt.notify(sc_core::SC_ZERO_TIME);
-
-
- 321 auto ext = fsm_hndl->trans->get_extension<ace_extension>();
- 322 if(!axi::is_dataless(ext)) {
- 323 write_wdata(*fsm_hndl->trans, fsm_hndl->beat_count,
true);
-
-
-
-
- 328 fsm_hndl->fsm->cb[EndReqE] = [
this, fsm_hndl]() ->
void {
- 329 SCCTRACEALL(SCMOD) <<
"In EndReqE of setup_cb";
- 330 switch(fsm_hndl->trans->get_command()) {
- 331 case tlm::TLM_READ_COMMAND:
- 332 rd_resp_by_id[axi::get_axi_id(*fsm_hndl->trans)].push_back(fsm_hndl);
- 333 active_req[tlm::TLM_READ_COMMAND] =
nullptr;
-
- 335 case tlm::TLM_WRITE_COMMAND:
- 336 wr_resp_by_id[axi::get_axi_id(*fsm_hndl->trans)].push_back(fsm_hndl);
- 337 active_req[tlm::TLM_WRITE_COMMAND] =
nullptr;
- 338 fsm_hndl->beat_count++;
+ 86 bool get_direct_mem_ptr(payload_type& trans, tlm::tlm_dmi& dmi_data)
override {
+ 87 trans.set_dmi_allowed(
false);
+
+
+
+ 91 unsigned int transport_dbg(payload_type& trans)
override {
return 0; }
+
+ 93 void end_of_elaboration()
override { clk_if =
dynamic_cast<sc_core::sc_clock*
>(clk_i.get_interface()); }
+
+
+
+
+
+ 99 void clk_delay() { clk_delayed.notify(axi::CLK_DELAY); }
+
+
+
+
+
+
+ 113 static typename CFG::data_t get_cache_data_for_beat(
fsm::fsm_handle* fsm_hndl);
+ 114 std::array<unsigned, 3> outstanding_cnt{0, 0, 0};
+ 115 std::array<fsm_handle*, 3> active_resp{
nullptr,
nullptr,
nullptr};
+ 116 sc_core::sc_clock* clk_if{
nullptr};
+ 117 sc_core::sc_event clk_delayed, clk_self, r_end_resp_evt, w_end_resp_evt;
+ 118 void nb_fw(payload_type& trans,
const phase_type& phase) {
+ 119 auto t = sc_core::SC_ZERO_TIME;
+
+
+ 122 tlm_utils::peq_with_cb_and_phase<ace_lite_initiator> fw_peq{
this, &ace_lite_initiator::nb_fw};
+ 123 std::unordered_map<unsigned, std::deque<fsm_handle*>> rd_resp_by_id, wr_resp_by_id;
+
+ 125 tlm::tlm_generic_payload* gp =
nullptr;
+
+ 127 bool needs_end_req =
false;
+
+ 129 fifo_entry(tlm::tlm_generic_payload* gp,
bool last,
bool needs_end_req,
size_t beat_num)
+
+
+ 132 , needs_end_req(needs_end_req)
+ 133 , beat_num(beat_num) {
+
+
+
+ 137 fifo_entry(tlm::tlm_generic_payload* gp,
bool needs_end_req)
+
+ 139 , needs_end_req(needs_end_req) {
+
+
+
+ 143 fifo_entry(fifo_entry
const& o)
+
+
+ 146 , needs_end_req(o.needs_end_req)
+ 147 , beat_num(o.beat_num) {
+ 148 if(gp && gp->has_mm())
+
+
+ 151 fifo_entry& operator=(
const fifo_entry& o) {
+
+
+ 154 needs_end_req = o.needs_end_req;
+ 155 beat_num = o.beat_num;
+
+
+
+ 159 if(gp && gp->has_mm())
+
+
+
+
+
+
+ 166 void write_ar(tlm::tlm_generic_payload& trans);
+ 167 void write_aw(tlm::tlm_generic_payload& trans);
+ 168 void write_wdata(tlm::tlm_generic_payload& trans,
unsigned beat);
+
+
+
+
+
+
+ 175 sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
+ 176 this->ar_addr.write(addr);
+
+ 178 this->ar_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(ext->get_id()));
+ 179 this->ar_len->write(sc_dt::sc_uint<8>(ext->get_length()));
+ 180 this->ar_size->write(sc_dt::sc_uint<3>(ext->get_size()));
+ 181 this->ar_burst->write(sc_dt::sc_uint<2>(
axi::to_int(ext->get_burst())));
+ 182 if(ext->is_exclusive())
+ 183 this->ar_lock->write(
true);
+ 184 this->ar_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
+ 185 this->ar_prot.write(ext->get_prot());
+ 186 this->ar_qos->write(ext->get_qos());
+ 187 this->ar_region->write(ext->get_region());
+ 188 this->ar_domain->write(sc_dt::sc_uint<2>((uint8_t)ext->get_domain()));
+ 189 this->ar_snoop->write(sc_dt::sc_uint<4>((uint8_t)ext->get_snoop()));
+ 190 this->ar_bar->write(sc_dt::sc_uint<2>((uint8_t)ext->get_barrier()));
+ 191 this->ar_user->write(ext->get_user(axi::common::id_type::CTRL));
+
+
+
+
+ 196 sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
+ 197 this->aw_addr.write(addr);
+
+ 199 this->aw_prot.write(ext->get_prot());
+
+ 201 if(this->aw_id.get_interface())
+ 202 this->aw_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(ext->get_id()));
+ 203 if(ext->is_exclusive())
+ 204 this->aw_lock->write(
true);
+ 205 this->aw_len->write(sc_dt::sc_uint<8>(ext->get_length()));
+ 206 this->aw_size->write(sc_dt::sc_uint<3>(ext->get_size()));
+ 207 this->aw_burst->write(sc_dt::sc_uint<2>(
axi::to_int(ext->get_burst())));
+ 208 this->aw_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
+ 209 this->aw_qos->write(sc_dt::sc_uint<4>(ext->get_qos()));
+ 210 this->aw_region->write(sc_dt::sc_uint<4>(ext->get_region()));
+ 211 this->aw_user->write(ext->get_user(axi::common::id_type::CTRL));
+ 212 this->aw_domain->write(sc_dt::sc_uint<2>((uint8_t)ext->get_domain()));
+ 213 this->aw_snoop->write(sc_dt::sc_uint<CFG::AWSNOOPWIDTH>((uint8_t)ext->get_snoop()));
+ 214 this->aw_bar->write(sc_dt::sc_uint<2>((uint8_t)ext->get_barrier()));
+
+
+ 217 if(ext->is_stash_nid_en()) {
+ 218 this->aw_stashniden->write(
true);
+ 219 this->aw_stashnid->write(sc_dt::sc_uint<11>(ext->get_stash_nid()));
+
+ 221 if(ext->is_stash_lpid_en()) {
+ 222 this->aw_stashlpiden->write(
true);
+ 223 this->aw_stashlpid->write(sc_dt::sc_uint<5>(ext->get_stash_lpid()));
+
+
+
+
+
+
+ 230 typename CFG::data_t data{0};
+ 231 sc_dt::sc_uint<CFG::BUSWIDTH / 8> strb{0};
+
+
+ 234 auto byte_offset = beat * size;
+ 235 auto offset = (trans.get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
+ 236 auto beptr = trans.get_byte_enable_length() ? trans.get_byte_enable_ptr() + byte_offset :
nullptr;
+ 237 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
+
+ 239 auto dptr = trans.get_data_ptr();
+
+ 241 for(
size_t i = offset; i < size; ++i, ++dptr) {
+ 242 auto bit_offs = i * 8;
+ 243 data(bit_offs + 7, bit_offs) = *dptr;
+
+ 245 strb[i] = *beptr == 0xff;
+
+
+
+
+
+ 251 auto beat_start_idx = byte_offset - offset;
+ 252 auto data_len = trans.get_data_length();
+ 253 auto dptr = trans.get_data_ptr() + beat_start_idx;
+
+ 255 for(
size_t i = 0; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
+ 256 auto bit_offs = i * 8;
+ 257 data(bit_offs + 7, bit_offs) = *dptr;
+
+ 259 strb[i] = *beptr == 0xff;
+
+
+
+
+
+
+ 266 auto dptr = trans.get_data_ptr() + byte_offset;
+
+ 268 for(
size_t i = 0; i < size; ++i, ++dptr) {
+ 269 auto bit_offs = (offset + i) * 8;
+ 270 data(bit_offs + 7, bit_offs) = *dptr;
+
+ 272 strb[offset + i] = *beptr == 0xff;
+
+
+ 275 strb[offset + i] =
true;
+
+
+ 278 this->w_data.write(data);
+ 279 this->w_strb.write(strb);
+
+ 281 this->w_id->write(ext->get_id());
+ 282 if(this->w_user.get_interface())
+ 283 this->w_user->write(ext->get_user(axi::common::id_type::DATA));
+
+
+
+
+
+
+
+ 291 auto byte_offset = beat_count * size;
+ 292 auto offset = (fsm_hndl->
trans->get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
+ 293 typename CFG::data_t data{0};
+ 294 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
+ 295 if(beat_count == 0) {
+ 296 auto dptr = fsm_hndl->
trans->get_data_ptr();
+ 297 for(
size_t i = offset; i < size; ++i, ++dptr) {
+ 298 auto bit_offs = i * 8;
+ 299 data(bit_offs + 7, bit_offs) = *dptr;
+
+
+ 302 auto beat_start_idx = byte_offset - offset;
+ 303 auto data_len = fsm_hndl->
trans->get_data_length();
+ 304 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_start_idx;
+ 305 for(
size_t i = offset; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
+ 306 auto bit_offs = i * 8;
+ 307 data(bit_offs + 7, bit_offs) = *dptr;
+
+
+
+ 311 auto dptr = fsm_hndl->
trans->get_data_ptr() + byte_offset;
+ 312 for(
size_t i = 0; i < size; ++i, ++dptr) {
+ 313 auto bit_offs = (offset + i) * 8;
+ 314 data(bit_offs + 7, bit_offs) = *dptr;
+
+
+
+
+
+
+ 321 fsm_hndl->
fsm->cb[RequestPhaseBeg] = [
this, fsm_hndl]() ->
void {
+
+ 323 SCCTRACE(SCMOD) <<
" for snoop in RequestPhaseBeg ";
+
+
+ 326 outstanding_cnt[fsm_hndl->
trans->get_command()]++;
+
+ 328 auto offset = fsm_hndl->
trans->get_address() % (CFG::BUSWIDTH / 8);
+ 329 if(offset + fsm_hndl->
trans->get_data_length() > CFG::BUSWIDTH / 8) {
+ 330 SCCFATAL(SCMOD) <<
" transaction " << *fsm_hndl->
trans <<
" is not AXI4Lite compliant";
+
+
+
+
+ 335 fsm_hndl->
fsm->cb[BegPartReqE] = [
this, fsm_hndl]() ->
void {
+ 336 sc_assert(fsm_hndl->
trans->is_write());
+
+ 338 aw_fifo.push_back({fsm_hndl->
trans.
get(),
false});
- 340 tlm::tlm_phase phase = tlm::END_REQ;
- 341 sc_core::sc_time t = (clk_if ? clk_if->period() - axi::CLK_DELAY - 1_ps : sc_core::SC_ZERO_TIME);
- 342 SCCTRACE(SCMOD) <<
" in EndReq before set_resp";
- 343 auto ret = tsckt->nb_transport_bw(*fsm_hndl->trans, phase, t);
- 344 fsm_hndl->trans->set_response_status(tlm::TLM_OK_RESPONSE);
-
-
- 347 fsm_hndl->fsm->cb[BegPartRespE] = [
this, fsm_hndl]() ->
void {
-
- 349 assert(fsm_hndl->trans->is_read());
- 350 tlm::tlm_phase phase = axi::BEGIN_PARTIAL_RESP;
- 351 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
- 352 auto ret = tsckt->nb_transport_bw(*fsm_hndl->trans, phase, t);
-
- 354 fsm_hndl->fsm->cb[EndPartRespE] = [
this, fsm_hndl]() ->
void {
- 355 SCCTRACE(SCMOD) <<
"in EndPartRespE of setup_cb ";
- 356 fsm_hndl->beat_count++;
- 357 r_end_resp_evt.notify();
-
- 359 fsm_hndl->fsm->cb[BegRespE] = [
this, fsm_hndl]() ->
void {
-
- 361 tlm::tlm_phase phase = tlm::BEGIN_RESP;
- 362 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
- 363 auto ret = tsckt->nb_transport_bw(*fsm_hndl->trans, phase, t);
-
- 365 fsm_hndl->fsm->cb[EndRespE] = [
this, fsm_hndl]() ->
void {
- 366 SCCTRACE(SCMOD) <<
"in EndResp of setup_cb ";
- 367 if(fsm_hndl->trans->is_read()) {
- 368 rd_resp_by_id[axi::get_axi_id(*fsm_hndl->trans)].pop_front();
- 369 r_end_resp_evt.notify();
-
- 371 if(fsm_hndl->trans->is_write()) {
- 372 wr_resp_by_id[axi::get_axi_id(*fsm_hndl->trans)].pop_front();
- 373 w_end_resp_evt.notify();
-
-
-
-
- 378 this->ar_valid.write(
false);
- 379 wait(sc_core::SC_ZERO_TIME);
-
-
- 382 this->ar_valid.write(
true);
-
- 384 wait(this->ar_ready.posedge_event() | clk_delayed);
- 385 if(this->ar_ready.read())
- 386 react(axi::fsm::protocol_time_point_e::EndReqE, active_req[tlm::TLM_READ_COMMAND]);
- 387 }
while(!this->ar_ready.read());
- 388 wait(clk_i.posedge_event());
- 389 this->ar_valid.write(
false);
-
-
-
-
- 394 this->r_ready.write(
false);
- 395 wait(sc_core::SC_ZERO_TIME);
-
- 397 wait(this->r_valid.posedge_event() | clk_delayed);
- 398 if(this->r_valid.event() || (!active_resp[tlm::TLM_READ_COMMAND] && this->r_valid.read())) {
- 399 wait(sc_core::SC_ZERO_TIME);
- 400 auto id = CFG::IS_LITE ? 0U : this->r_id->read().to_uint();
- 401 auto data = this->r_data.read();
- 402 auto resp = this->r_resp.read();
- 403 auto& q = rd_resp_by_id[id];
-
- 405 auto* fsm_hndl = q.front();
- 406 auto beat_count = fsm_hndl->beat_count;
-
- 408 auto byte_offset = beat_count * size;
- 409 auto offset = (fsm_hndl->trans->get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
- 410 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
- 411 if(beat_count == 0) {
- 412 auto dptr = fsm_hndl->trans->get_data_ptr();
-
- 414 for(
size_t i = offset; i < size; ++i, ++dptr) {
- 415 auto bit_offs = i * 8;
- 416 *dptr = data(bit_offs + 7, bit_offs).to_uint();
-
-
- 419 auto beat_start_idx = beat_count * size - offset;
- 420 auto data_len = fsm_hndl->trans->get_data_length();
- 421 auto dptr = fsm_hndl->trans->get_data_ptr() + beat_start_idx;
-
- 423 for(
size_t i = offset; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
- 424 auto bit_offs = i * 8;
- 425 *dptr = data(bit_offs + 7, bit_offs).to_uint();
-
-
-
- 429 auto dptr = fsm_hndl->trans->get_data_ptr() + beat_count * size;
-
- 431 for(
size_t i = 0; i < size; ++i, ++dptr) {
- 432 auto bit_offs = (offset + i) * 8;
- 433 *dptr = data(bit_offs + 7, bit_offs).to_uint();
-
-
-
- 437 fsm_hndl->trans->get_extension(e);
-
-
-
- 441 if(axi::is_dataless(e)) {
- 442 SCCTRACE(SCMOD) <<
" r_t() for Make/Clean/Barrier Trans" << *fsm_hndl->trans;
- 443 react(axi::fsm::protocol_time_point_e::BegRespE, fsm_hndl);
-
- 445 auto tp = CFG::IS_LITE || this->r_last->read() ? axi::fsm::protocol_time_point_e::BegRespE
- 446 : axi::fsm::protocol_time_point_e::BegPartRespE;
-
-
-
-
- 451 wait(r_end_resp_evt);
- 452 this->r_ready->write(
true);
- 453 wait(clk_i.posedge_event());
- 454 this->r_ready.write(
false);
-
-
-
-
-
- 460 this->aw_valid.write(
false);
- 461 wait(sc_core::SC_ZERO_TIME);
-
-
- 464 this->aw_valid.write(
true);
-
- 466 wait(this->aw_ready.posedge_event() | clk_delayed);
- 467 }
while(!this->aw_ready.read());
- 468 auto* fsm_hndl = active_req[tlm::TLM_WRITE_COMMAND];
-
- 470 react(axi::fsm::protocol_time_point_e::EndReqE, fsm_hndl);
- 471 wait(clk_i.posedge_event());
- 472 this->aw_valid.write(
false);
-
-
-
-
- 477 this->w_valid.write(
false);
- 478 wait(sc_core::SC_ZERO_TIME);
-
-
- 481 this->w_last->write(
false);
- 482 wait(wdata_vl.default_event());
- 483 auto val = wdata_vl.read();
- 484 this->w_valid.write(val & 0x1);
-
- 486 this->w_last->write(val & 0x2);
-
- 488 wait(this->w_ready.posedge_event() | clk_delayed);
- 489 if(this->w_ready.read()) {
-
- 491 CFG::IS_LITE || (val & 0x2) ? axi::fsm::protocol_time_point_e::EndReqE : axi::fsm::protocol_time_point_e::EndPartReqE;
- 492 react(evt, active_req[tlm::TLM_WRITE_COMMAND]);
-
- 494 }
while(!this->w_ready.read());
- 495 wait(clk_i.posedge_event());
- 496 this->w_valid.write(
false);
-
-
-
-
- 501 this->b_ready.write(
false);
- 502 wait(sc_core::SC_ZERO_TIME);
-
- 504 wait(this->b_valid.posedge_event() | clk_delayed);
- 505 if(this->b_valid.event() || (!active_resp[tlm::TLM_WRITE_COMMAND] && this->b_valid.read())) {
- 506 SCCTRACEALL(SCMOD) <<
" b_t() received b_valid ";
- 507 auto id = !CFG::IS_LITE ? this->b_id->read().to_uint() : 0U;
- 508 auto resp = this->b_resp.read();
- 509 auto& q = wr_resp_by_id[id];
-
- 511 auto* fsm_hndl = q.front();
-
- 513 fsm_hndl->trans->get_extension(e);
- 514 e->
set_resp(axi::into<axi::resp_e>(resp));
- 515 react(axi::fsm::protocol_time_point_e::BegRespE, fsm_hndl);
-
- 517 wait(w_end_resp_evt);
- 518 this->b_ready.write(
true);
- 519 wait(clk_i.posedge_event());
- 520 this->b_ready.write(
false);
-
-
-
-
+ 340 wdata_fifo.push_back({fsm_hndl->
trans.
get(),
false, wdata_fifo.num_avail()>0, fsm_hndl->
beat_count});
+ 341 if(pipelined_wrreq && !wdata_fifo.num_avail())
+ 342 schedule(EndPartReqE, fsm_hndl->
trans, sc_core::SC_ZERO_TIME);
+
+ 344 fsm_hndl->
fsm->cb[EndPartReqE] = [
this, fsm_hndl]() ->
void {
+ 345 tlm::tlm_phase phase = axi::END_PARTIAL_REQ;
+ 346 sc_core::sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : sc_core::SC_ZERO_TIME);
+ 347 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+
+
+ 350 fsm_hndl->
fsm->cb[BegReqE] = [
this, fsm_hndl]() ->
void {
+ 351 switch(fsm_hndl->
trans->get_command()) {
+ 352 case tlm::TLM_READ_COMMAND:
+ 353 ar_fifo.push_back({fsm_hndl->
trans.
get(),
false});
+
+ 355 case tlm::TLM_WRITE_COMMAND:
+
+ 357 aw_fifo.push_back({fsm_hndl->
trans.
get(),
false});
+
+
+ 360 if(!axi::is_dataless(fsm_hndl->
trans->get_extension<ace_extension>())) {
+ 361 wdata_fifo.push_back({fsm_hndl->
trans.
get(),
true, wdata_fifo.num_avail()>0, fsm_hndl->
beat_count});
+ 362 if(pipelined_wrreq && !wdata_fifo.num_avail())
+ 363 schedule(EndReqE, fsm_hndl->
trans, sc_core::SC_ZERO_TIME);
+
+
+
+ 367 fsm_hndl->
fsm->cb[EndReqE] = [
this, fsm_hndl]() ->
void {
+ 368 auto id = axi::get_axi_id(*fsm_hndl->
trans);
+ 369 switch(fsm_hndl->
trans->get_command()) {
+ 370 case tlm::TLM_READ_COMMAND:
+ 371 rd_resp_by_id[id].push_back(fsm_hndl);
+
+ 373 case tlm::TLM_WRITE_COMMAND:
+ 374 wr_resp_by_id[id].push_back(fsm_hndl);
+
+
+ 377 tlm::tlm_phase phase = tlm::END_REQ;
+ 378 sc_core::sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : sc_core::SC_ZERO_TIME);
+ 379 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+ 380 fsm_hndl->
trans->set_response_status(tlm::TLM_OK_RESPONSE);
+
+
+ 383 fsm_hndl->
fsm->cb[BegPartRespE] = [
this, fsm_hndl]() ->
void {
+
+ 385 assert(fsm_hndl->
trans->is_read());
+ 386 tlm::tlm_phase phase = axi::BEGIN_PARTIAL_RESP;
+ 387 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
+ 388 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+
+ 390 fsm_hndl->
fsm->cb[EndPartRespE] = [
this, fsm_hndl]() ->
void {
+
+ 392 r_end_resp_evt.notify();
+
+ 394 fsm_hndl->
fsm->cb[BegRespE] = [
this, fsm_hndl]() ->
void {
+
+ 396 tlm::tlm_phase phase = tlm::BEGIN_RESP;
+ 397 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
+ 398 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+
+ 400 fsm_hndl->
fsm->cb[EndRespE] = [
this, fsm_hndl]() ->
void {
+ 401 if(fsm_hndl->
trans->is_read()) {
+ 402 rd_resp_by_id[axi::get_axi_id(*fsm_hndl->
trans)].pop_front();
+ 403 r_end_resp_evt.notify();
+
+ 405 if(fsm_hndl->
trans->is_write()) {
+ 406 wr_resp_by_id[axi::get_axi_id(*fsm_hndl->
trans)].pop_front();
+ 407 w_end_resp_evt.notify();
+
+
+
+
+
+ 413 this->ar_valid.write(
false);
+ 414 wait(sc_core::SC_ZERO_TIME);
+
+ 416 auto val = ar_fifo.read();
+
+ 418 this->ar_valid.write(
true);
+
+ 420 wait(this->ar_ready.posedge_event() | clk_delayed);
+ 421 if(this->ar_ready.read())
+ 422 react(axi::fsm::protocol_time_point_e::EndReqE, val.gp);
+ 423 }
while(!this->ar_ready.read());
+ 424 wait(clk_i.posedge_event());
+ 425 this->ar_valid.write(
false);
+
+
+
+
+ 430 this->r_ready.write(
false);
+ 431 wait(sc_core::SC_ZERO_TIME);
+
+ 433 if(!this->r_valid.read())
+ 434 wait(this->r_valid.posedge_event());
+
+
+ 437 if(this->r_valid.event() || (!active_resp[tlm::TLM_READ_COMMAND] && this->r_valid.read())) {
+ 438 wait(sc_core::SC_ZERO_TIME);
+ 439 auto id = CFG::IS_LITE ? 0U : this->r_id->read().to_uint();
+ 440 auto data = this->r_data.read();
+ 441 auto resp = this->r_resp.read();
+ 442 auto& q = rd_resp_by_id[id];
+ 443 sc_assert(q.size() &&
"No transaction found for received id");
+ 444 auto* fsm_hndl = q.front();
+
+
+ 447 auto byte_offset = beat_count * size;
+ 448 auto offset = (fsm_hndl->
trans->get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
+ 449 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
+ 450 if(beat_count == 0) {
+ 451 auto dptr = fsm_hndl->
trans->get_data_ptr();
+
+ 453 for(
size_t i = offset; i < size; ++i, ++dptr) {
+ 454 auto bit_offs = i * 8;
+ 455 *dptr = data(bit_offs + 7, bit_offs).to_uint();
+
+
+ 458 auto beat_start_idx = beat_count * size - offset;
+ 459 auto data_len = fsm_hndl->
trans->get_data_length();
+ 460 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_start_idx;
+
+ 462 for(
size_t i = offset; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
+ 463 auto bit_offs = i * 8;
+ 464 *dptr = data(bit_offs + 7, bit_offs).to_uint();
+
+
+
+ 468 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_count * size;
+
+ 470 for(
size_t i = 0; i < size; ++i, ++dptr) {
+ 471 auto bit_offs = (offset + i) * 8;
+ 472 *dptr = data(bit_offs + 7, bit_offs).to_uint();
+
+
+
+ 476 fsm_hndl->
trans->get_extension(e);
+
+
+
+ 480 if(axi::is_dataless(e)) {
+ 481 SCCTRACE(SCMOD) <<
" r_t() for Make/Clean/Barrier Trans" << *fsm_hndl->
trans;
+ 482 react(axi::fsm::protocol_time_point_e::BegRespE, fsm_hndl);
+
+ 484 auto tp = CFG::IS_LITE || this->r_last->read() ? axi::fsm::protocol_time_point_e::BegRespE
+ 485 : axi::fsm::protocol_time_point_e::BegPartRespE;
+
+
+ 488 wait(r_end_resp_evt);
+ 489 this->r_ready.write(
true);
+ 490 wait(clk_i.posedge_event());
+ 491 this->r_ready.write(
false);
+
+
+
+
+
+ 497 this->aw_valid.write(
false);
+ 498 wait(sc_core::SC_ZERO_TIME);
+
+ 500 auto val = aw_fifo.read();
+
+ 502 this->aw_valid.write(
true);
+
+ 504 wait(this->aw_ready.posedge_event() | clk_delayed);
+ 505 }
while(!this->aw_ready.read());
+ 506 if(axi::is_dataless(val.gp->template get_extension<axi::ace_extension>()))
+ 507 schedule(axi::fsm::protocol_time_point_e::EndReqE, val.gp, sc_core::SC_ZERO_TIME);
+ 508 wait(clk_i.posedge_event());
+ 509 this->aw_valid.write(
false);
+
+
+
+
+ 514 this->w_valid.write(
false);
+ 515 wait(sc_core::SC_ZERO_TIME);
+
+
+ 518 this->w_last->write(
false);
+ 519 if(pipelined_wrreq) {
+ 520 while(!wdata_fifo.num_avail()) {
+ 521 wait(clk_i.posedge_event());
+
+
+ 524 wait(wdata_fifo.data_written_event());
+
+ 526 auto val = wdata_fifo.front();
+ 527 wdata_fifo.pop_front();
+ 528 write_wdata(*val.gp, val.beat_num);
+ 529 if(pipelined_wrreq && val.needs_end_req) {
+ 530 auto evt = CFG::IS_LITE || (val.last) ? axi::fsm::protocol_time_point_e::EndReqE : axi::fsm::protocol_time_point_e::EndPartReqE;
+ 531 schedule(evt, val.gp, sc_core::SC_ZERO_TIME);
+
+ 533 this->w_valid.write(
true);
+
+ 535 this->w_last->write(val.last);
+
+ 537 wait(this->w_ready.posedge_event() | clk_delayed);
+ 538 if(!pipelined_wrreq && this->w_ready.read()) {
+ 539 auto evt = val.last ? axi::fsm::protocol_time_point_e::EndReqE : axi::fsm::protocol_time_point_e::EndPartReqE;
+ 540 schedule(evt, val.gp, sc_core::SC_ZERO_TIME);
+
+ 542 }
while(!this->w_ready.read());
+ 543 wait(clk_i.posedge_event());
+ 544 this->w_valid.write(
false);
+
+
+
+
+ 549 this->b_ready.write(
false);
+ 550 wait(sc_core::SC_ZERO_TIME);
+
+ 552 wait(this->b_valid.posedge_event() | clk_delayed);
+ 553 if(this->b_valid.event() || (!active_resp[tlm::TLM_WRITE_COMMAND] && this->b_valid.read())) {
+ 554 auto id = !CFG::IS_LITE ? this->b_id->read().to_uint() : 0U;
+ 555 auto resp = this->b_resp.read();
+ 556 auto& q = wr_resp_by_id[id];
+
+ 558 auto* fsm_hndl = q.front();
+
+ 560 fsm_hndl->
trans->get_extension(e);
+ 561 e->
set_resp(axi::into<axi::resp_e>(resp));
+ 562 react(axi::fsm::protocol_time_point_e::BegRespE, fsm_hndl);
+ 563 wait(w_end_resp_evt);
+ 564 this->b_ready.write(
true);
+ 565 wait(clk_i.posedge_event());
+ 566 this->b_ready.write(
false);
+
+
+
+
+
+tlm::tlm_generic_payload * get() const noexcept
Return the stored pointer.
TLM2.0 components modeling AHB.
constexpr ULT to_int(E t)
unsigned get_burst_size(const request &r)
@@ -593,9 +641,12 @@
base class of all AXITLM based adapters and interfaces.
tlm::tlm_sync_enum nb_fw(payload_type &trans, phase_type const &phase, sc_core::sc_time &t)
triggers the FSM based on TLM phases in the forward path. Should be called from nb_transport_fw of th...
-base(size_t transfer_width, bool coherent=false, axi::fsm::protocol_time_point_e wr_start=axi::fsm::RequestPhaseBeg)
the constructor
-
+tlm::scc::tlm_gp_shared_ptr trans
pointer to the associated AXITLM payload
+size_t beat_count
beat count of this transaction
+AxiProtocolFsm *const fsm
pointer to the FSM
+bool is_snoop
indicator if this is a snoop access
+
uint8_t get_size() const
get the AxSIZE value of the transaction
void set_resp(resp_e)
set the response status as POD
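The member documentation above lists the fields of the FSM handle that the callbacks in this listing operate on: the shared payload pointer (trans), the beat counter (beat_count), the FSM pointer (fsm) and the snoop indicator (is_snoop). As a rough illustration only, with simplified stand-in types instead of the real axi::fsm::fsm_handle and TLM payload, a per-beat callback typically just advances beat_count and checks it against the burst length:

// Illustrative only: a trimmed stand-in for the documented handle members
// (names taken from the listing above; types and payload are simplified).
#include <cstddef>
#include <cstdint>
#include <iostream>

struct mock_payload {                     // stand-in for the AXITLM payload
    std::uint64_t address{0};
    unsigned      length{0};              // number of beats minus one, as in AxLEN
};

struct mock_fsm_handle {                  // mirrors the documented members
    mock_payload* trans{nullptr};         // pointer to the associated payload
    std::size_t   beat_count{0};          // beat count of this transaction
    bool          is_snoop{false};        // indicator if this is a snoop access
};

// A per-beat callback, in the spirit of the fsm->cb[...] lambdas in the listing:
// it advances the beat counter and reports whether the burst is complete.
bool on_partial_response(mock_fsm_handle& h) {
    ++h.beat_count;
    bool last = h.trans && h.beat_count > h.trans->length;
    std::cout << "beat " << h.beat_count << (last ? " (last)" : "") << "\n";
    return last;
}

int main() {
    mock_payload p{0x1000, 3};            // a 4-beat burst
    mock_fsm_handle h{&p};
    while(!on_partial_response(h)) {}
}

In the real header the handle additionally carries the protocol FSM pointer, and the callbacks are installed per protocol time point via fsm_hndl->fsm->cb[...], as shown in the listing above.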
diff --git a/develop/ace__lite__target_8h_source.html b/develop/ace__lite__target_8h_source.html
index 6af9d413..550bce6b 100644
--- a/develop/ace__lite__target_8h_source.html
+++ b/develop/ace__lite__target_8h_source.html
@@ -24,7 +24,7 @@
diff --git a/develop/annotated_dup.js b/develop/annotated_dup.js
index 5b65d355..6997c892 100644
--- a/develop/annotated_dup.js
+++ b/develop/annotated_dup.js
@@ -275,13 +275,6 @@ var annotated_dup =
[ "bitfield", "classscc_1_1bitfield.html", "classscc_1_1bitfield" ],
[ "tlm_target_bfs_register_base", "classscc_1_1tlm__target__bfs__register__base.html", "classscc_1_1tlm__target__bfs__register__base" ],
[ "cci_broker", "classscc_1_1cci__broker.html", "classscc_1_1cci__broker" ],
- [ "_min_max_restriction", "structscc_1_1__min__max__restriction.html", "structscc_1_1__min__max__restriction" ],
- [ "_min_max_excl_restriction", "structscc_1_1__min__max__excl__restriction.html", "structscc_1_1__min__max__excl__restriction" ],
- [ "_min_restriction", "structscc_1_1__min__restriction.html", "structscc_1_1__min__restriction" ],
- [ "_min_excl_restriction", "structscc_1_1__min__excl__restriction.html", "structscc_1_1__min__excl__restriction" ],
- [ "_max_restriction", "structscc_1_1__max__restriction.html", "structscc_1_1__max__restriction" ],
- [ "_max_excl_restriction", "structscc_1_1__max__excl__restriction.html", "structscc_1_1__max__excl__restriction" ],
- [ "_discrete_restriction", "structscc_1_1__discrete__restriction.html", "structscc_1_1__discrete__restriction" ],
[ "cci_param_restricted", "structscc_1_1cci__param__restricted.html", "structscc_1_1cci__param__restricted" ],
[ "configurable_tracer", "classscc_1_1configurable__tracer.html", "classscc_1_1configurable__tracer" ],
[ "configurer", "classscc_1_1configurer.html", "classscc_1_1configurer" ],
diff --git a/develop/apb__initiator_8cpp_source.html b/develop/apb__initiator_8cpp_source.html
index 2cbfdb1f..8d21736a 100644
--- a/develop/apb__initiator_8cpp_source.html
+++ b/develop/apb__initiator_8cpp_source.html
@@ -24,7 +24,7 @@
scc
- 2022.4.0
+ 2024.03
SystemC components library
|
@@ -88,425 +88,482 @@
21 #include <axi/fsm/base.h>
22 #include <axi/fsm/protocol_fsm.h>
23 #include <axi/signal_if.h>
-
- 25 #include <tlm_utils/peq_with_cb_and_phase.h>
-
-
-
-
- 32 using namespace axi::fsm;
+ 24 #include <cci_cfg/cci_param_typed.h>
+ 25 #include <scc/fifo_w_cb.h>
+
+ 27 #include <tlm_utils/peq_with_cb_and_phase.h>
+
+
+
- 34 template <
typename CFG>
-
- 36 public aw_axi<CFG, typename CFG::master_types>,
- 37 public wdata_axi<CFG, typename CFG::master_types>,
- 38 public b_axi<CFG, typename CFG::master_types>,
- 39 public ar_axi<CFG, typename CFG::master_types>,
- 40 public rresp_axi<CFG, typename CFG::master_types>,
-
-
-
-
- 45 using payload_type = axi::axi_protocol_types::tlm_payload_type;
- 46 using phase_type = axi::axi_protocol_types::tlm_phase_type;
-
- 48 sc_core::sc_in<bool> clk_i{
"clk_i"};
+ 34 using namespace axi::fsm;
+
+ 36 template <
typename CFG>
+
+ 38 public aw_axi<CFG, typename CFG::master_types>,
+ 39 public wdata_axi<CFG, typename CFG::master_types>,
+ 40 public b_axi<CFG, typename CFG::master_types>,
+ 41 public ar_axi<CFG, typename CFG::master_types>,
+ 42 public rresp_axi<CFG, typename CFG::master_types>,
+
+
+
+
+ 47 using payload_type = axi::axi_protocol_types::tlm_payload_type;
+ 48 using phase_type = axi::axi_protocol_types::tlm_phase_type;
-
+ 50 sc_core::sc_in<bool> clk_i{
"clk_i"};
-
- 53 : sc_core::sc_module(nm)
- 54 ,
base(CFG::BUSWIDTH) {
- 55 instance_name = name();
-
-
- 58 sensitive << clk_i.pos();
-
-
-
-
-
-
-
-
- 67 void b_transport(payload_type& trans, sc_core::sc_time& t)
override {
- 68 trans.set_dmi_allowed(
false);
- 69 trans.set_response_status(tlm::TLM_OK_RESPONSE);
-
-
- 72 tlm::tlm_sync_enum nb_transport_fw(payload_type& trans, phase_type& phase, sc_core::sc_time& t)
override {
-
- 74 sc_core::sc_time delay;
- 75 fw_peq.notify(trans, phase, delay);
- 76 return tlm::TLM_ACCEPTED;
-
-
- 79 bool get_direct_mem_ptr(payload_type& trans, tlm::tlm_dmi& dmi_data)
override {
- 80 trans.set_dmi_allowed(
false);
-
+
+
+ 54 cci::cci_param<bool> pipelined_wrreq{
"pipelined_wrreq",
false};
+
+ 56 axi4_initiator(sc_core::sc_module_name
const& nm,
bool pipelined_wrreq =
false)
+ 57 : sc_core::sc_module(nm)
+
+ 59 , pipelined_wrreq(
"pipelined_wrreq", pipelined_wrreq) {
+ 60 instance_name = name();
+
+
+ 63 sensitive << clk_i.pos();
+
+
+
+
+
+
+
+
+ 72 void b_transport(payload_type& trans, sc_core::sc_time& t)
override {
+ 73 trans.set_dmi_allowed(
false);
+ 74 trans.set_response_status(tlm::TLM_OK_RESPONSE);
+
+
+ 77 tlm::tlm_sync_enum nb_transport_fw(payload_type& trans, phase_type& phase, sc_core::sc_time& t)
override {
+
+ 79 sc_core::sc_time delay;
+ 80 fw_peq.notify(trans, phase, delay);
+ 81 return tlm::TLM_ACCEPTED;
- 84 unsigned int transport_dbg(payload_type& trans)
override {
return 0; }
-
- 86 void end_of_elaboration()
override { clk_if =
dynamic_cast<sc_core::sc_clock*
>(clk_i.get_interface()); }
-
-
-
-
-
- 92 void clk_delay() { clk_delayed.notify(axi::CLK_DELAY); }
-
-
-
-
-
-
- 99 std::array<unsigned, 3> outstanding_cnt{0, 0, 0};
- 100 std::array<fsm_handle*, 3> active_req{
nullptr,
nullptr,
nullptr};
- 101 std::array<fsm_handle*, 3> active_resp{
nullptr,
nullptr,
nullptr};
- 102 sc_core::sc_clock* clk_if{
nullptr};
- 103 sc_core::sc_event clk_delayed, clk_self, r_end_resp_evt, aw_evt, ar_evt;
- 104 void nb_fw(payload_type& trans,
const phase_type& phase) {
- 105 auto delay = sc_core::SC_ZERO_TIME;
-
-
- 108 tlm_utils::peq_with_cb_and_phase<axi4_initiator> fw_peq{
this, &axi4_initiator::nb_fw};
- 109 std::unordered_map<unsigned, std::deque<fsm_handle*>> rd_resp_by_id, wr_resp_by_id;
- 110 sc_core::sc_buffer<uint8_t> wdata_vl;
- 111 void write_ar(tlm::tlm_generic_payload& trans);
- 112 void write_aw(tlm::tlm_generic_payload& trans);
- 113 void write_wdata(tlm::tlm_generic_payload& trans,
unsigned beat,
bool last =
false);
-
-
-
-
-
-
- 120 sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
- 121 this->ar_addr.write(addr);
-
- 123 this->ar_prot.write(ext->get_prot());
-
- 125 auto id = ext->get_id();
- 126 if(
id >= (1 << CFG::IDWIDTH))
- 127 SCCERR(SCMOD) <<
"ARID value larger that signal arid with width=" << CFG::IDWIDTH <<
" can carry";
- 128 this->ar_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(
id));
- 129 this->ar_len->write(sc_dt::sc_uint<8>(ext->get_length()));
- 130 this->ar_size->write(sc_dt::sc_uint<3>(ext->get_size()));
- 131 this->ar_burst->write(sc_dt::sc_uint<2>(
axi::to_int(ext->get_burst())));
- 132 if(ext->is_exclusive())
- 133 this->ar_lock->write(
true);
- 134 this->ar_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
- 135 this->ar_qos->write(ext->get_qos());
- 136 if(this->ar_user.get_interface())
- 137 this->ar_user->write(ext->get_user(axi::common::id_type::CTRL));
-
-
-
-
-
- 143 sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
- 144 this->aw_addr.write(addr);
-
- 146 this->aw_prot.write(ext->get_prot());
-
- 148 auto id = ext->get_id();
- 149 if(
id >= (1 << CFG::IDWIDTH))
- 150 SCCERR(SCMOD) <<
"AWID value larger than signal awid with width=" << CFG::IDWIDTH <<
" can carry";
- 151 this->aw_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(
id));
- 152 this->aw_len->write(sc_dt::sc_uint<8>(ext->get_length()));
- 153 this->aw_size->write(sc_dt::sc_uint<3>(ext->get_size()));
- 154 this->aw_burst->write(sc_dt::sc_uint<2>(
axi::to_int(ext->get_burst())));
- 155 this->aw_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
- 156 this->aw_qos->write(ext->get_qos());
- 157 if(ext->is_exclusive())
- 158 this->aw_lock->write(
true);
- 159 if(this->aw_user.get_interface())
- 160 this->aw_user->write(ext->get_user(axi::common::id_type::CTRL));
-
-
-
-
-
-
- 167 typename CFG::data_t data{0};
- 168 sc_dt::sc_uint<CFG::BUSWIDTH / 8> strb{0};
-
-
- 171 auto byte_offset = beat * size;
- 172 auto offset = (trans.get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
- 173 auto beptr = trans.get_byte_enable_length() ? trans.get_byte_enable_ptr() + byte_offset :
nullptr;
- 174 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
-
- 176 auto dptr = trans.get_data_ptr();
- 177 for(
size_t i = offset; i < size; ++i, ++dptr) {
- 178 auto bit_offs = i * 8;
- 179 data(bit_offs + 7, bit_offs) = *dptr;
-
- 181 strb[i] = *beptr == 0xff;
-
-
-
-
-
- 187 auto beat_start_idx = byte_offset - offset;
- 188 auto data_len = trans.get_data_length();
- 189 auto dptr = trans.get_data_ptr() + beat_start_idx;
- 190 for(
size_t i = 0; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
- 191 auto bit_offs = i * 8;
- 192 data(bit_offs + 7, bit_offs) = *dptr;
-
- 194 strb[i] = *beptr == 0xff;
-
-
-
-
-
-
- 201 auto dptr = trans.get_data_ptr() + byte_offset;
- 202 for(
size_t i = 0; i < size; ++i, ++dptr) {
- 203 auto bit_offs = (offset + i) * 8;
- 204 data(bit_offs + 7, bit_offs) = *dptr;
-
- 206 strb[offset + i] = *beptr == 0xff;
-
-
- 209 strb[offset + i] =
true;
-
-
- 212 this->w_data.write(data);
- 213 this->w_strb.write(strb);
-
- 215 this->w_id->write(ext->get_id());
- 216 if(this->w_user.get_interface())
- 217 this->w_user->write(ext->get_user(axi::common::id_type::DATA));
-
-
-
-
- 222 fsm_hndl->
fsm->cb[RequestPhaseBeg] = [
this, fsm_hndl]() ->
void {
-
- 224 outstanding_cnt[fsm_hndl->
trans->get_command()]++;
-
- 226 auto offset = fsm_hndl->
trans->get_address() % (CFG::BUSWIDTH / 8);
- 227 if(offset + fsm_hndl->
trans->get_data_length() > CFG::BUSWIDTH / 8) {
- 228 SCCFATAL(SCMOD) <<
" transaction " << *fsm_hndl->
trans <<
" is not AXI4Lite compliant";
-
-
-
- 232 fsm_hndl->
fsm->cb[BegPartReqE] = [
this, fsm_hndl]() ->
void {
- 233 sc_assert(fsm_hndl->
trans->is_write());
-
- 235 write_aw(*fsm_hndl->
trans);
- 236 aw_evt.notify(sc_core::SC_ZERO_TIME);
-
-
- 239 active_req[tlm::TLM_WRITE_COMMAND] = fsm_hndl;
-
-
- 242 fsm_hndl->
fsm->cb[EndPartReqE] = [
this, fsm_hndl]() ->
void {
- 243 active_req[tlm::TLM_WRITE_COMMAND] =
nullptr;
- 244 tlm::tlm_phase phase = axi::END_PARTIAL_REQ;
- 245 sc_core::sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : sc_core::SC_ZERO_TIME);
- 246 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
-
-
- 249 fsm_hndl->
fsm->cb[BegReqE] = [
this, fsm_hndl]() ->
void {
- 250 switch(fsm_hndl->
trans->get_command()) {
- 251 case tlm::TLM_READ_COMMAND:
- 252 active_req[tlm::TLM_READ_COMMAND] = fsm_hndl;
- 253 write_ar(*fsm_hndl->
trans);
- 254 ar_evt.notify(sc_core::SC_ZERO_TIME);
-
- 256 case tlm::TLM_WRITE_COMMAND:
- 257 active_req[tlm::TLM_WRITE_COMMAND] = fsm_hndl;
-
- 259 write_aw(*fsm_hndl->
trans);
- 260 aw_evt.notify(sc_core::SC_ZERO_TIME);
-
-
-
-
-
- 266 fsm_hndl->
fsm->cb[EndReqE] = [
this, fsm_hndl]() ->
void {
- 267 auto id = axi::get_axi_id(*fsm_hndl->
trans);
- 268 switch(fsm_hndl->
trans->get_command()) {
- 269 case tlm::TLM_READ_COMMAND:
- 270 rd_resp_by_id[id].push_back(fsm_hndl);
- 271 active_req[tlm::TLM_READ_COMMAND] =
nullptr;
-
- 273 case tlm::TLM_WRITE_COMMAND:
- 274 wr_resp_by_id[id].push_back(fsm_hndl);
- 275 active_req[tlm::TLM_WRITE_COMMAND] =
nullptr;
-
-
- 278 tlm::tlm_phase phase = tlm::END_REQ;
- 279 sc_core::sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : sc_core::SC_ZERO_TIME);
- 280 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
- 281 fsm_hndl->
trans->set_response_status(tlm::TLM_OK_RESPONSE);
-
- 283 fsm_hndl->
fsm->cb[BegPartRespE] = [
this, fsm_hndl]() ->
void {
-
- 285 assert(fsm_hndl->
trans->is_read());
- 286 tlm::tlm_phase phase = axi::BEGIN_PARTIAL_RESP;
- 287 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
- 288 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
-
- 290 fsm_hndl->
fsm->cb[EndPartRespE] = [
this, fsm_hndl]() ->
void {
-
- 292 r_end_resp_evt.notify();
-
- 294 fsm_hndl->
fsm->cb[BegRespE] = [
this, fsm_hndl]() ->
void {
-
- 296 tlm::tlm_phase phase = tlm::BEGIN_RESP;
- 297 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
- 298 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
-
- 300 fsm_hndl->
fsm->cb[EndRespE] = [
this, fsm_hndl]() ->
void {
- 301 r_end_resp_evt.notify();
- 302 if(fsm_hndl->
trans->is_read())
- 303 rd_resp_by_id[axi::get_axi_id(*fsm_hndl->
trans)].pop_front();
- 304 if(fsm_hndl->
trans->is_write())
- 305 wr_resp_by_id[axi::get_axi_id(*fsm_hndl->
trans)].pop_front();
-
-
-
-
- 310 this->ar_valid.write(
false);
- 311 wait(sc_core::SC_ZERO_TIME);
-
-
- 314 this->ar_valid.write(
true);
-
- 316 wait(this->ar_ready.posedge_event() | clk_delayed);
- 317 if(this->ar_ready.read())
- 318 react(axi::fsm::protocol_time_point_e::EndReqE, active_req[tlm::TLM_READ_COMMAND]);
- 319 }
while(!this->ar_ready.read());
- 320 wait(clk_i.posedge_event());
- 321 this->ar_valid.write(
false);
-
-
-
-
- 326 this->r_ready.write(
false);
- 327 wait(sc_core::SC_ZERO_TIME);
-
- 329 if(!this->r_valid.read())
- 330 wait(this->r_valid.posedge_event());
-
-
- 333 if(this->r_valid.event() || (!active_resp[tlm::TLM_READ_COMMAND] && this->r_valid.read())) {
- 334 wait(sc_core::SC_ZERO_TIME);
- 335 auto id = CFG::IS_LITE ? 0U : this->r_id->read().to_uint();
- 336 auto data = this->r_data.read();
- 337 auto resp = this->r_resp.read();
- 338 auto& q = rd_resp_by_id[id];
- 339 sc_assert(q.size() &&
"No transaction found for received id");
- 340 auto* fsm_hndl = q.front();
-
-
- 343 auto byte_offset = beat_count * size;
- 344 auto offset = (fsm_hndl->
trans->get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
- 345 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
- 346 if(beat_count == 0) {
- 347 auto dptr = fsm_hndl->
trans->get_data_ptr();
- 348 for(
size_t i = offset; i < size; ++i, ++dptr) {
- 349 auto bit_offs = i * 8;
- 350 *dptr = data(bit_offs + 7, bit_offs).to_uint();
-
-
- 353 auto beat_start_idx = beat_count * size - offset;
- 354 auto data_len = fsm_hndl->
trans->get_data_length();
- 355 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_start_idx;
- 356 for(
size_t i = offset; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
- 357 auto bit_offs = i * 8;
- 358 *dptr = data(bit_offs + 7, bit_offs).to_uint();
-
-
-
- 362 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_count * size;
- 363 for(
size_t i = 0; i < size; ++i, ++dptr) {
- 364 auto bit_offs = (offset + i) * 8;
- 365 *dptr = data(bit_offs + 7, bit_offs).to_uint();
-
-
-
- 369 fsm_hndl->
trans->get_extension(e);
- 370 e->
set_resp(axi::into<axi::resp_e>(resp));
-
- 372 auto tp = CFG::IS_LITE || this->r_last->read() ? axi::fsm::protocol_time_point_e::BegRespE
- 373 : axi::fsm::protocol_time_point_e::BegPartRespE;
-
- 375 wait(r_end_resp_evt);
- 376 this->r_ready.write(
true);
- 377 wait(clk_i.posedge_event());
- 378 this->r_ready.write(
false);
-
-
-
-
-
- 384 this->aw_valid.write(
false);
- 385 wait(sc_core::SC_ZERO_TIME);
-
-
- 388 this->aw_valid.write(
true);
-
- 390 wait(this->aw_ready.posedge_event() | clk_delayed);
- 391 }
while(!this->aw_ready.read());
- 392 wait(clk_i.posedge_event());
- 393 this->aw_valid.write(
false);
-
-
-
-
- 398 this->w_valid.write(
false);
- 399 wait(sc_core::SC_ZERO_TIME);
-
-
- 402 this->w_last->write(
false);
- 403 wait(wdata_vl.default_event());
- 404 auto val = wdata_vl.read();
- 405 this->w_valid.write(val & 0x1);
-
- 407 this->w_last->write(val & 0x2);
-
- 409 wait(this->w_ready.posedge_event() | clk_delayed);
- 410 if(this->w_ready.read()) {
-
- 412 CFG::IS_LITE || (val & 0x2) ? axi::fsm::protocol_time_point_e::EndReqE : axi::fsm::protocol_time_point_e::EndPartReqE;
- 413 react(evt, active_req[tlm::TLM_WRITE_COMMAND]);
-
- 415 }
while(!this->w_ready.read());
- 416 wait(clk_i.posedge_event());
- 417 this->w_valid.write(
false);
-
-
-
-
- 422 this->b_ready.write(
false);
- 423 wait(sc_core::SC_ZERO_TIME);
-
- 425 wait(this->b_valid.posedge_event() | clk_delayed);
- 426 if(this->b_valid.event() || (!active_resp[tlm::TLM_WRITE_COMMAND] && this->b_valid.read())) {
- 427 auto id = !CFG::IS_LITE ? this->b_id->read().to_uint() : 0U;
- 428 auto resp = this->b_resp.read();
- 429 auto& q = wr_resp_by_id[id];
-
- 431 auto* fsm_hndl = q.front();
-
- 433 fsm_hndl->
trans->get_extension(e);
- 434 e->
set_resp(axi::into<axi::resp_e>(resp));
- 435 react(axi::fsm::protocol_time_point_e::BegRespE, fsm_hndl);
- 436 wait(r_end_resp_evt);
- 437 this->b_ready.write(
true);
- 438 wait(clk_i.posedge_event());
- 439 this->b_ready.write(
false);
-
-
-
-
-
+ 84 bool get_direct_mem_ptr(payload_type& trans, tlm::tlm_dmi& dmi_data)
override {
+ 85 trans.set_dmi_allowed(
false);
+
+
+
+ 89 unsigned int transport_dbg(payload_type& trans)
override {
return 0; }
+
+ 91 void end_of_elaboration()
override { clk_if =
dynamic_cast<sc_core::sc_clock*
>(clk_i.get_interface()); }
+
+
+
+
+
+ 97 void clk_delay() { clk_delayed.notify(axi::CLK_DELAY); }
+
+
+
+
+
+
+ 104 std::array<unsigned, 3> outstanding_cnt{0, 0, 0};
+ 105 std::array<fsm_handle*, 3> active_resp{
nullptr,
nullptr,
nullptr};
+ 106 sc_core::sc_clock* clk_if{
nullptr};
+ 107 sc_core::sc_event clk_delayed, clk_self, r_end_resp_evt, w_end_resp_evt;
+ 108 void nb_fw(payload_type& trans,
const phase_type& phase) {
+ 109 auto t = sc_core::SC_ZERO_TIME;
+
+
+ 112 tlm_utils::peq_with_cb_and_phase<axi4_initiator> fw_peq{
this, &axi4_initiator::nb_fw};
+ 113 std::unordered_map<unsigned, std::deque<fsm_handle*>> rd_resp_by_id, wr_resp_by_id;
+
+ 115 tlm::tlm_generic_payload* gp =
nullptr;
+
+ 117 bool needs_end_req =
false;
+
+ 119 fifo_entry(tlm::tlm_generic_payload* gp,
bool last,
bool needs_end_req,
size_t beat_num)
+
+
+ 122 , needs_end_req(needs_end_req)
+ 123 , beat_num(beat_num) {
+
+
+
+ 127 fifo_entry(tlm::tlm_generic_payload* gp,
bool needs_end_req)
+
+ 129 , needs_end_req(needs_end_req) {
+
+
+
+ 133 fifo_entry(fifo_entry
const& o)
+
+
+ 136 , needs_end_req(o.needs_end_req)
+ 137 , beat_num(o.beat_num) {
+ 138 if(gp && gp->has_mm())
+
+
+ 141 fifo_entry& operator=(
const fifo_entry& o) {
+
+
+ 144 needs_end_req = o.needs_end_req;
+ 145 beat_num = o.beat_num;
+
+
+
+ 149 if(gp && gp->has_mm())
+
+
+
+
+
+
+ 156 void write_ar(tlm::tlm_generic_payload& trans);
+ 157 void write_aw(tlm::tlm_generic_payload& trans);
+ 158 void write_wdata(tlm::tlm_generic_payload& trans,
unsigned beat);
+
+
+
+
+
+
+ 165 sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
+ 166 this->ar_addr.write(addr);
+
+ 168 this->ar_prot.write(ext->get_prot());
+
+ 170 auto id = ext->get_id();
+ 171 if(
id >= (1 << CFG::IDWIDTH))
+ 172 SCCERR(SCMOD) <<
"ARID value larger that signal arid with width=" << CFG::IDWIDTH <<
" can carry";
+ 173 this->ar_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(
id));
+ 174 this->ar_len->write(sc_dt::sc_uint<8>(ext->get_length()));
+ 175 this->ar_size->write(sc_dt::sc_uint<3>(ext->get_size()));
+ 176 this->ar_burst->write(sc_dt::sc_uint<2>(
axi::to_int(ext->get_burst())));
+ 177 if(ext->is_exclusive())
+ 178 this->ar_lock->write(
true);
+ 179 this->ar_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
+ 180 this->ar_qos->write(ext->get_qos());
+ 181 if(this->ar_user.get_interface())
+ 182 this->ar_user->write(ext->get_user(axi::common::id_type::CTRL));
+
+
+
+
+
+ 188 sc_dt::sc_uint<CFG::ADDRWIDTH> addr = trans.get_address();
+ 189 this->aw_addr.write(addr);
+
+ 191 this->aw_prot.write(ext->get_prot());
+
+ 193 auto id = ext->get_id();
+ 194 if(
id >= (1 << CFG::IDWIDTH))
+ 195 SCCERR(SCMOD) <<
"AWID value larger than signal awid with width=" << CFG::IDWIDTH <<
" can carry";
+ 196 this->aw_id->write(sc_dt::sc_uint<CFG::IDWIDTH>(
id));
+ 197 this->aw_len->write(sc_dt::sc_uint<8>(ext->get_length()));
+ 198 this->aw_size->write(sc_dt::sc_uint<3>(ext->get_size()));
+ 199 this->aw_burst->write(sc_dt::sc_uint<2>(
axi::to_int(ext->get_burst())));
+ 200 this->aw_cache->write(sc_dt::sc_uint<4>(ext->get_cache()));
+ 201 this->aw_qos->write(ext->get_qos());
+ 202 if(ext->is_exclusive())
+ 203 this->aw_lock->write(
true);
+ 204 if(this->aw_user.get_interface())
+ 205 this->aw_user->write(ext->get_user(axi::common::id_type::CTRL));
+
+
+
+
+
+
+ 212 typename CFG::data_t data{0};
+ 213 sc_dt::sc_uint<CFG::BUSWIDTH / 8> strb{0};
+
+
+ 216 auto byte_offset = beat * size;
+ 217 auto offset = (trans.get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
+ 218 auto beptr = trans.get_byte_enable_length() ? trans.get_byte_enable_ptr() + byte_offset :
nullptr;
+ 219 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
+
+ 221 auto dptr = trans.get_data_ptr();
+ 222 for(
size_t i = offset; i < size; ++i, ++dptr) {
+ 223 auto bit_offs = i * 8;
+ 224 data(bit_offs + 7, bit_offs) = *dptr;
+
+ 226 strb[i] = *beptr == 0xff;
+
+
+
+
+
+ 232 auto beat_start_idx = byte_offset - offset;
+ 233 auto data_len = trans.get_data_length();
+ 234 auto dptr = trans.get_data_ptr() + beat_start_idx;
+ 235 for(
size_t i = 0; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
+ 236 auto bit_offs = i * 8;
+ 237 data(bit_offs + 7, bit_offs) = *dptr;
+
+ 239 strb[i] = *beptr == 0xff;
+
+
+
+
+
+
+ 246 auto dptr = trans.get_data_ptr() + byte_offset;
+ 247 for(
size_t i = 0; i < size; ++i, ++dptr) {
+ 248 auto bit_offs = (offset + i) * 8;
+ 249 data(bit_offs + 7, bit_offs) = *dptr;
+
+ 251 strb[offset + i] = *beptr == 0xff;
+
+
+ 254 strb[offset + i] =
true;
+
+
+ 257 this->w_data.write(data);
+ 258 this->w_strb.write(strb);
+
+ 260 this->w_id->write(ext->get_id());
+ 261 if(this->w_user.get_interface())
+ 262 this->w_user->write(ext->get_user(axi::common::id_type::DATA));
+
+
+
+
+ 267 fsm_hndl->
fsm->cb[RequestPhaseBeg] = [
this, fsm_hndl]() ->
void {
+
+ 269 outstanding_cnt[fsm_hndl->
trans->get_command()]++;
+
+ 271 auto offset = fsm_hndl->
trans->get_address() % (CFG::BUSWIDTH / 8);
+ 272 if(offset + fsm_hndl->
trans->get_data_length() > CFG::BUSWIDTH / 8) {
+ 273 SCCFATAL(SCMOD) <<
" transaction " << *fsm_hndl->
trans <<
" is not AXI4Lite compliant";
+
+
+
+ 277 fsm_hndl->
fsm->cb[BegPartReqE] = [
this, fsm_hndl]() ->
void {
+ 278 sc_assert(fsm_hndl->
trans->is_write());
+
+ 280 aw_fifo.push_back({fsm_hndl->
trans.
get(),
false});
+
+ 282 wdata_fifo.push_back({fsm_hndl->
trans.
get(),
false, wdata_fifo.num_avail()>0, fsm_hndl->
beat_count});
+ 283 if(pipelined_wrreq && !wdata_fifo.num_avail())
+ 284 schedule(EndPartReqE, fsm_hndl->
trans, sc_core::SC_ZERO_TIME);
+
+ 286 fsm_hndl->
fsm->cb[EndPartReqE] = [
this, fsm_hndl]() ->
void {
+ 287 tlm::tlm_phase phase = axi::END_PARTIAL_REQ;
+ 288 sc_core::sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : sc_core::SC_ZERO_TIME);
+ 289 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+
+
+ 292 fsm_hndl->
fsm->cb[BegReqE] = [
this, fsm_hndl]() ->
void {
+ 293 switch(fsm_hndl->
trans->get_command()) {
+ 294 case tlm::TLM_READ_COMMAND:
+ 295 ar_fifo.push_back({fsm_hndl->
trans.
get(),
false});
+
+ 297 case tlm::TLM_WRITE_COMMAND:
+
+ 299 aw_fifo.push_back({fsm_hndl->
trans.
get(),
false});
+
+ 301 wdata_fifo.push_back({fsm_hndl->
trans.
get(),
true, wdata_fifo.num_avail()>0, fsm_hndl->
beat_count});
+ 302 if(pipelined_wrreq && !wdata_fifo.num_avail())
+ 303 schedule(EndReqE, fsm_hndl->
trans, sc_core::SC_ZERO_TIME);
+
+
+ 306 fsm_hndl->
fsm->cb[EndReqE] = [
this, fsm_hndl]() ->
void {
+ 307 auto id = axi::get_axi_id(*fsm_hndl->
trans);
+ 308 switch(fsm_hndl->
trans->get_command()) {
+ 309 case tlm::TLM_READ_COMMAND:
+ 310 rd_resp_by_id[id].push_back(fsm_hndl);
+
+ 312 case tlm::TLM_WRITE_COMMAND:
+ 313 wr_resp_by_id[id].push_back(fsm_hndl);
+
+
+ 316 tlm::tlm_phase phase = tlm::END_REQ;
+ 317 sc_core::sc_time t(clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : sc_core::SC_ZERO_TIME);
+ 318 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+ 319 fsm_hndl->
trans->set_response_status(tlm::TLM_OK_RESPONSE);
+
+ 321 fsm_hndl->
fsm->cb[BegPartRespE] = [
this, fsm_hndl]() ->
void {
+
+ 323 assert(fsm_hndl->
trans->is_read());
+ 324 tlm::tlm_phase phase = axi::BEGIN_PARTIAL_RESP;
+ 325 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
+ 326 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+
+ 328 fsm_hndl->
fsm->cb[EndPartRespE] = [
this, fsm_hndl]() ->
void {
+
+ 330 r_end_resp_evt.notify();
+
+ 332 fsm_hndl->
fsm->cb[BegRespE] = [
this, fsm_hndl]() ->
void {
+
+ 334 tlm::tlm_phase phase = tlm::BEGIN_RESP;
+ 335 sc_core::sc_time t(sc_core::SC_ZERO_TIME);
+ 336 auto ret = tsckt->nb_transport_bw(*fsm_hndl->
trans, phase, t);
+
+ 338 fsm_hndl->
fsm->cb[EndRespE] = [
this, fsm_hndl]() ->
void {
+ 339 if(fsm_hndl->
trans->is_read()) {
+ 340 rd_resp_by_id[axi::get_axi_id(*fsm_hndl->
trans)].pop_front();
+ 341 r_end_resp_evt.notify();
+
+ 343 if(fsm_hndl->
trans->is_write()) {
+ 344 wr_resp_by_id[axi::get_axi_id(*fsm_hndl->
trans)].pop_front();
+ 345 w_end_resp_evt.notify();
+
+
+
+
+
+ 351 this->ar_valid.write(
false);
+ 352 wait(sc_core::SC_ZERO_TIME);
+
+ 354 auto val = ar_fifo.read();
+
+ 356 this->ar_valid.write(
true);
+
+ 358 wait(this->ar_ready.posedge_event() | clk_delayed);
+ 359 if(this->ar_ready.read())
+ 360 react(axi::fsm::protocol_time_point_e::EndReqE, val.gp);
+ 361 }
while(!this->ar_ready.read());
+ 362 wait(clk_i.posedge_event());
+ 363 this->ar_valid.write(
false);
+
+
+
+
+ 368 this->r_ready.write(
false);
+ 369 wait(sc_core::SC_ZERO_TIME);
+
+ 371 if(!this->r_valid.read())
+ 372 wait(this->r_valid.posedge_event());
+
+
+ 375 if(this->r_valid.event() || (!active_resp[tlm::TLM_READ_COMMAND] && this->r_valid.read())) {
+ 376 wait(sc_core::SC_ZERO_TIME);
+ 377 auto id = CFG::IS_LITE ? 0U : this->r_id->read().to_uint();
+ 378 auto data = this->r_data.read();
+ 379 auto resp = this->r_resp.read();
+ 380 auto& q = rd_resp_by_id[id];
+ 381 sc_assert(q.size() &&
"No transaction found for received id");
+ 382 auto* fsm_hndl = q.front();
+
+
+ 385 auto byte_offset = beat_count * size;
+ 386 auto offset = (fsm_hndl->
trans->get_address() + byte_offset) & (CFG::BUSWIDTH / 8 - 1);
+ 387 if(offset && (size + offset) > (CFG::BUSWIDTH / 8)) {
+ 388 if(beat_count == 0) {
+ 389 auto dptr = fsm_hndl->
trans->get_data_ptr();
+ 390 for(
size_t i = offset; i < size; ++i, ++dptr) {
+ 391 auto bit_offs = i * 8;
+ 392 *dptr = data(bit_offs + 7, bit_offs).to_uint();
+
+
+ 395 auto beat_start_idx = beat_count * size - offset;
+ 396 auto data_len = fsm_hndl->
trans->get_data_length();
+ 397 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_start_idx;
+ 398 for(
size_t i = offset; i < size && (beat_start_idx + i) < data_len; ++i, ++dptr) {
+ 399 auto bit_offs = i * 8;
+ 400 *dptr = data(bit_offs + 7, bit_offs).to_uint();
+
+
+
+ 404 auto dptr = fsm_hndl->
trans->get_data_ptr() + beat_count * size;
+ 405 for(
size_t i = 0; i < size; ++i, ++dptr) {
+ 406 auto bit_offs = (offset + i) * 8;
+ 407 *dptr = data(bit_offs + 7, bit_offs).to_uint();
+
+
+
+ 411 fsm_hndl->
trans->get_extension(e);
+ 412 e->
set_resp(axi::into<axi::resp_e>(resp));
+
+ 414 auto tp = CFG::IS_LITE || this->r_last->read() ? axi::fsm::protocol_time_point_e::BegRespE
+ 415 : axi::fsm::protocol_time_point_e::BegPartRespE;
+
+ 417 wait(r_end_resp_evt);
+ 418 this->r_ready.write(
true);
+ 419 wait(clk_i.posedge_event());
+ 420 this->r_ready.write(
false);
+
+
+
+
+
+ 426 this->aw_valid.write(
false);
+ 427 wait(sc_core::SC_ZERO_TIME);
+
+ 429 auto val = aw_fifo.read();
+
+ 431 this->aw_valid.write(
true);
+
+ 433 wait(this->aw_ready.posedge_event() | clk_delayed);
+ 434 }
while(!this->aw_ready.read());
+ 435 wait(clk_i.posedge_event());
+ 436 this->aw_valid.write(
false);
+
+
+
+
+ 441 this->w_valid.write(
false);
+ 442 wait(sc_core::SC_ZERO_TIME);
+
+
+ 445 this->w_last->write(
false);
+ 446 if(pipelined_wrreq) {
+ 447 while(!wdata_fifo.num_avail()) {
+ 448 wait(clk_i.posedge_event());
+
+
+ 451 wait(wdata_fifo.data_written_event());
+
+ 453 auto val = wdata_fifo.front();
+ 454 wdata_fifo.pop_front();
+ 455 write_wdata(*val.gp, val.beat_num);
+ 456 if(pipelined_wrreq && val.needs_end_req) {
+ 457 auto evt = CFG::IS_LITE || (val.last) ? axi::fsm::protocol_time_point_e::EndReqE : axi::fsm::protocol_time_point_e::EndPartReqE;
+ 458 schedule(evt, val.gp, sc_core::SC_ZERO_TIME);
+
+ 460 this->w_valid.write(
true);
+
+ 462 this->w_last->write(val.last);
+
+ 464 wait(this->w_ready.posedge_event() | clk_delayed);
+ 465 if(!pipelined_wrreq && this->w_ready.read()) {
+
+ 467 CFG::IS_LITE || (val.last) ? axi::fsm::protocol_time_point_e::EndReqE : axi::fsm::protocol_time_point_e::EndPartReqE;
+ 468 schedule(evt, val.gp, sc_core::SC_ZERO_TIME);
+
+ 470 }
while(!this->w_ready.read());
+ 471 wait(clk_i.posedge_event());
+ 472 this->w_valid.write(
false);
+
+
+
+
+ 477 this->b_ready.write(
false);
+ 478 wait(sc_core::SC_ZERO_TIME);
+
+ 480 wait(this->b_valid.posedge_event() | clk_delayed);
+ 481 if(this->b_valid.event() || (!active_resp[tlm::TLM_WRITE_COMMAND] && this->b_valid.read())) {
+ 482 auto id = !CFG::IS_LITE ? this->b_id->read().to_uint() : 0U;
+ 483 auto resp = this->b_resp.read();
+ 484 auto& q = wr_resp_by_id[id];
+
+ 486 auto* fsm_hndl = q.front();
+
+ 488 fsm_hndl->
trans->get_extension(e);
+ 489 e->
set_resp(axi::into<axi::resp_e>(resp));
+ 490 react(axi::fsm::protocol_time_point_e::BegRespE, fsm_hndl);
+ 491 wait(w_end_resp_evt);
+ 492 this->b_ready.write(
true);
+ 493 wait(clk_i.posedge_event());
+ 494 this->b_ready.write(
false);
+
+
+
+
+
+
+tlm::tlm_generic_payload * get() const noexcept
Return the stored pointer.
TLM2.0 components modeling AHB.
tlm::tlm_fw_transport_if< TYPES > axi_fw_transport_if
alias declaration for the forward interface
constexpr ULT to_int(E t)
@@ -523,7 +580,7 @@
tlm::scc::tlm_gp_shared_ptr trans
pointer to the associated AXITLM payload
size_t beat_count
beat count of this transaction
AxiProtocolFsm *const fsm
pointer to the FSM
-
+
uint8_t get_size() const
get the AxSIZE value of the transaction
void set_resp(resp_e)
set the response status as POD
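The added listing above replaces the event/active_req based write handshake with fifo_w_cb queues whose fifo_entry keeps the queued payload alive through TLM memory management. The following is a minimal, self-contained sketch of that reference-counting idea using a mock payload; the constructor bodies are not fully visible in the extracted listing, so the exact acquire/release placement is an assumption rather than a verbatim copy:

// Illustrative only: reference counting of queued payloads, modeled on the
// fifo_entry shown above, with a mock payload instead of tlm_generic_payload.
#include <cassert>
#include <cstddef>

struct mock_gp {
    int  ref{0};
    bool mm{true};
    bool has_mm() const { return mm; }
    void acquire() { ++ref; }
    void release() { assert(ref > 0); --ref; }
};

struct fifo_entry {
    mock_gp*    gp{nullptr};
    bool        last{false};
    bool        needs_end_req{false};
    std::size_t beat_num{0};

    fifo_entry(mock_gp* gp, bool last, bool needs_end_req, std::size_t beat_num)
    : gp(gp), last(last), needs_end_req(needs_end_req), beat_num(beat_num) {
        if(gp && gp->has_mm()) gp->acquire();   // keep payload alive while queued
    }
    fifo_entry(fifo_entry const& o)
    : gp(o.gp), last(o.last), needs_end_req(o.needs_end_req), beat_num(o.beat_num) {
        if(gp && gp->has_mm()) gp->acquire();
    }
    fifo_entry& operator=(fifo_entry const& o) {
        if(gp && gp->has_mm()) gp->release();   // drop the old reference first
        gp = o.gp; last = o.last; needs_end_req = o.needs_end_req; beat_num = o.beat_num;
        if(gp && gp->has_mm()) gp->acquire();
        return *this;
    }
    ~fifo_entry() {
        if(gp && gp->has_mm()) gp->release();
    }
};

int main() {
    mock_gp p;
    {
        fifo_entry a{&p, true, false, 0};
        fifo_entry b = a;                       // copying bumps the reference count
        assert(p.ref == 2);
    }
    assert(p.ref == 0);                         // all references released again
}

Holding a reference for as long as an entry sits in ar_fifo, aw_fifo or wdata_fifo appears to be what keeps the payload valid when, in the new pipelined_wrreq mode, the request phase is acknowledged before the corresponding data beat has been driven.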
diff --git a/develop/axi4__target_8h_source.html b/develop/axi4__target_8h_source.html
index 8efeb70c..146f50f1 100644
--- a/develop/axi4__target_8h_source.html
+++ b/develop/axi4__target_8h_source.html
@@ -24,7 +24,7 @@
scc
- 2022.4.0
+ 2024.03
SystemC components library
|
@@ -264,1086 +264,1079 @@
198 case axi::snoop_e::WRITE_NO_SNOOP:
199 sc_assert(axi_domain == axi::domain_e::NON_SHAREABLE || axi_domain == axi::domain_e::SYSTEM);
- 200 opcode = chi::req_optype_e::WriteNoSnpFull;
- 201 if(gp.get_data_length() < 64)
- 202 opcode = chi::req_optype_e::WriteNoSnpPtl;
-
- 204 case axi::snoop_e::WRITE_UNIQUE:
- 205 sc_assert(axi_domain == axi::domain_e::INNER_SHAREABLE || axi_domain == axi::domain_e::OUTER_SHAREABLE);
- 206 opcode = chi::req_optype_e::WriteUniquePtl;
- 207 chi_req_ext->req.set_snp_attr(cacheable);
-
- 209 case axi::snoop_e::WRITE_LINE_UNIQUE:
- 210 opcode = chi::req_optype_e::WriteUniqueFull;
-
- 212 case axi::snoop_e::WRITE_CLEAN: {
-
- 214 for(
auto i = 0; i < gp.get_byte_enable_length(); ++i) {
- 215 if(gp.get_byte_enable_ptr()[i] == 0) {
-
-
-
-
-
- 221 opcode = chi::req_optype_e::WriteCleanPtl;
-
- 223 opcode = chi::req_optype_e::WriteCleanFull;
-
-
- 226 case axi::snoop_e::WRITE_BACK:
-
- 228 gp.get_data_length() == 64 ? chi::req_optype_e::WriteBackFull : chi::req_optype_e::WriteBackPtl;
+ 200 opcode = gp.get_data_length() == 64 ? chi::req_optype_e::WriteNoSnpFull : chi::req_optype_e::WriteNoSnpPtl;
+
+ 202 case axi::snoop_e::WRITE_UNIQUE:
+ 203 sc_assert(axi_domain == axi::domain_e::INNER_SHAREABLE || axi_domain == axi::domain_e::OUTER_SHAREABLE);
+ 204 opcode = gp.get_data_length() == 64 ? chi::req_optype_e::WriteUniqueFull:chi::req_optype_e::WriteUniquePtl;
+ 205 chi_req_ext->req.set_snp_attr(cacheable);
+
+ 207 case axi::snoop_e::WRITE_LINE_UNIQUE:
+ 208 opcode = chi::req_optype_e::WriteUniqueFull;
+
+ 210 case axi::snoop_e::WRITE_CLEAN: {
+
+ 212 for(
auto i = 0; i < gp.get_byte_enable_length(); ++i) {
+ 213 if(gp.get_byte_enable_ptr()[i] == 0) {
+
+
+
+
+
+ 219 opcode = chi::req_optype_e::WriteCleanPtl;
+
+ 221 opcode = chi::req_optype_e::WriteCleanFull;
+
+
+ 224 case axi::snoop_e::WRITE_BACK:
+ 225 opcode = gp.get_data_length() == 64 ? chi::req_optype_e::WriteBackFull : chi::req_optype_e::WriteBackPtl;
+
+ 227 case axi::snoop_e::EVICT:
+ 228 opcode = chi::req_optype_e::Evict;
- 230 case axi::snoop_e::EVICT:
- 231 opcode = chi::req_optype_e::Evict;
+ 230 case axi::snoop_e::WRITE_EVICT:
+ 231 opcode = chi::req_optype_e::WriteEvictFull;
- 233 case axi::snoop_e::WRITE_EVICT:
- 234 opcode = chi::req_optype_e::WriteEvictFull;
+ 233 case axi::snoop_e::WRITE_UNIQUE_PTL_STASH:
+ 234 opcode = chi::req_optype_e::WriteUniquePtlStash;
- 236 case axi::snoop_e::WRITE_UNIQUE_PTL_STASH:
- 237 opcode = chi::req_optype_e::WriteUniquePtlStash;
+ 236 case axi::snoop_e::WRITE_UNIQUE_FULL_STASH:
+ 237 opcode = chi::req_optype_e::WriteUniqueFullStash;
- 239 case axi::snoop_e::WRITE_UNIQUE_FULL_STASH:
- 240 opcode = chi::req_optype_e::WriteUniqueFullStash;
-
- 242 case axi::snoop_e::STASH_ONCE_UNIQUE:
- 243 opcode = chi::req_optype_e::StashOnceUnique;
- 244 gp.set_command(tlm::TLM_IGNORE_COMMAND);
- 245 gp.set_data_length(0);
- 246 chi_req_ext->req.set_size(6);
-
- 248 case axi::snoop_e::STASH_ONCE_SHARED:
- 249 opcode = chi::req_optype_e::StashOnceShared;
- 250 gp.set_command(tlm::TLM_IGNORE_COMMAND);
- 251 gp.set_data_length(0);
- 252 chi_req_ext->req.set_size(6);
+ 239 case axi::snoop_e::STASH_ONCE_UNIQUE:
+ 240 opcode = chi::req_optype_e::StashOnceUnique;
+ 241 gp.set_command(tlm::TLM_IGNORE_COMMAND);
+ 242 gp.set_data_length(0);
+ 243 chi_req_ext->req.set_size(6);
+
+ 245 case axi::snoop_e::STASH_ONCE_SHARED:
+ 246 opcode = chi::req_optype_e::StashOnceShared;
+ 247 gp.set_command(tlm::TLM_IGNORE_COMMAND);
+ 248 gp.set_data_length(0);
+ 249 chi_req_ext->req.set_size(6);
+
+
+ 252 SCCWARN(name) <<
"unexpected snoop type " <<
axi::to_char(axi_snp) <<
" during write";
-
- 255 SCCWARN(name) <<
"unexpected snoop type " <<
axi::to_char(axi_snp) <<
" during write";
-
-
- 258 chi_req_ext->req.set_opcode(opcode);
-
- 260 if(axi_snp != axi::snoop_e::WRITE_NO_SNOOP) {
- 261 chi_req_ext->req.set_snp_attr(cacheable);
-
- 263 if(opcode == chi::req_optype_e::WriteUniquePtlStash || opcode == chi::req_optype_e::WriteUniqueFullStash ||
- 264 opcode == chi::req_optype_e::StashOnceUnique || opcode == chi::req_optype_e::StashOnceShared) {
- 265 if(ace_ext->is_stash_nid_en()) {
- 266 chi_req_ext->req.set_stash_n_id(ace_ext->get_stash_nid());
- 267 chi_req_ext->req.set_stash_n_id_valid(
true);
-
- 269 if(ace_ext->is_stash_lpid_en()) {
- 270 chi_req_ext->req.set_stash_lp_id(ace_ext->get_stash_lpid());
- 271 chi_req_ext->req.set_stash_lp_id_valid(
true);
-
-
-
-
- 276 SCCERR(name) <<
"Not yet implemented !!! ";
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- 310 switch(ace_ext->get_cache()) {
-
-
+
+ 255 chi_req_ext->req.set_opcode(opcode);
+
+ 257 if(axi_snp != axi::snoop_e::WRITE_NO_SNOOP) {
+ 258 chi_req_ext->req.set_snp_attr(cacheable);
+
+ 260 if(opcode == chi::req_optype_e::WriteUniquePtlStash || opcode == chi::req_optype_e::WriteUniqueFullStash ||
+ 261 opcode == chi::req_optype_e::StashOnceUnique || opcode == chi::req_optype_e::StashOnceShared) {
+ 262 if(ace_ext->is_stash_nid_en()) {
+ 263 chi_req_ext->req.set_stash_n_id(ace_ext->get_stash_nid());
+ 264 chi_req_ext->req.set_stash_n_id_valid(
true);
+
+ 266 if(ace_ext->is_stash_lpid_en()) {
+ 267 chi_req_ext->req.set_stash_lp_id(ace_ext->get_stash_lpid());
+ 268 chi_req_ext->req.set_stash_lp_id_valid(
true);
+
+
+
+
+ 273 SCCERR(name) <<
"Not yet implemented !!! ";
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 307 switch(ace_ext->get_cache()) {
+
+
+
+
+
-
-
+
+
-
-
+
+
-
-
-
-
-
- 325 mem_attr = gp.is_read() ? 0b1101 : 0b0101;
-
-
-
- 329 mem_attr = gp.is_read() ? 0b0101 : 0b1101;
-
-
-
-
+
+
+ 322 mem_attr = gp.is_read() ? 0b1101 : 0b0101;
+
+
+
+ 326 mem_attr = gp.is_read() ? 0b0101 : 0b1101;
+
+
+
+
+
+
+ 333 SCCERR(name) <<
"Unexpected AxCACHE type";
-
- 336 SCCERR(name) <<
"Unexpected AxCACHE type";
-
-
-
- 340 auto allocate = (ace_ext->is_read_other_allocate() && axi_gp_cmd == tlm::TLM_WRITE_COMMAND) ||
- 341 (ace_ext->is_write_other_allocate() && axi_gp_cmd == tlm::TLM_READ_COMMAND);
- 342 auto cachable = ace_ext->is_modifiable();
- 343 auto ewa = ace_ext->is_bufferable();
- 344 auto device = ace_ext->get_cache() < 2;
- 345 mem_attr = (allocate ? 8 : 0) + (cachable ? 4 : 0) + (device ? 2 : 0) + (ewa ? 1 : 0);
-
-
-
-
- 350 case chi::req_optype_e::ReadNoSnp:
- 351 case chi::req_optype_e::ReadNoSnpSep:
- 352 case chi::req_optype_e::ReadOnce:
- 353 case chi::req_optype_e::ReadOnceCleanInvalid:
- 354 case chi::req_optype_e::ReadOnceMakeInvalid:
- 355 case chi::req_optype_e::WriteNoSnpPtl:
- 356 case chi::req_optype_e::WriteNoSnpFull:
- 357 case chi::req_optype_e::WriteUniquePtl:
- 358 case chi::req_optype_e::WriteUniqueFull:
- 359 case chi::req_optype_e::AtomicStoreAdd:
- 360 case chi::req_optype_e::AtomicStoreClr:
- 361 case chi::req_optype_e::AtomicStoreEor:
- 362 case chi::req_optype_e::AtomicStoreSet:
- 363 case chi::req_optype_e::AtomicStoreSmax:
- 364 case chi::req_optype_e::AtomicStoreSmin:
- 365 case chi::req_optype_e::AtomicStoreUmax:
- 366 case chi::req_optype_e::AtomicStoreUmin:
- 367 case chi::req_optype_e::AtomicLoadAdd:
- 368 case chi::req_optype_e::AtomicLoadClr:
- 369 case chi::req_optype_e::AtomicLoadEor:
- 370 case chi::req_optype_e::AtomicLoadSet:
- 371 case chi::req_optype_e::AtomicLoadSmax:
- 372 case chi::req_optype_e::AtomicLoadSmin:
- 373 case chi::req_optype_e::AtomicLoadUmax:
- 374 case chi::req_optype_e::AtomicLoadUmin:
- 375 case chi::req_optype_e::AtomicSwap:
- 376 case chi::req_optype_e::AtomicCompare:
- 377 chi_req_ext->req.set_order(0b00);
-
-
-
-
-
-
- 384 chi_req_ext->req.set_mem_attr(mem_attr);
+
+
+ 337 auto allocate = (ace_ext->is_read_other_allocate() && axi_gp_cmd == tlm::TLM_WRITE_COMMAND) ||
+ 338 (ace_ext->is_write_other_allocate() && axi_gp_cmd == tlm::TLM_READ_COMMAND);
+ 339 auto cachable = ace_ext->is_modifiable();
+ 340 auto ewa = ace_ext->is_bufferable();
+ 341 auto device = ace_ext->get_cache() < 2;
+ 342 mem_attr = (allocate ? 8 : 0) + (cachable ? 4 : 0) + (device ? 2 : 0) + (ewa ? 1 : 0);
+
+
+
+
+ 347 case chi::req_optype_e::ReadNoSnp:
+ 348 case chi::req_optype_e::ReadNoSnpSep:
+ 349 case chi::req_optype_e::ReadOnce:
+ 350 case chi::req_optype_e::ReadOnceCleanInvalid:
+ 351 case chi::req_optype_e::ReadOnceMakeInvalid:
+ 352 case chi::req_optype_e::WriteNoSnpPtl:
+ 353 case chi::req_optype_e::WriteNoSnpFull:
+ 354 case chi::req_optype_e::WriteUniquePtl:
+ 355 case chi::req_optype_e::WriteUniqueFull:
+ 356 case chi::req_optype_e::AtomicStoreAdd:
+ 357 case chi::req_optype_e::AtomicStoreClr:
+ 358 case chi::req_optype_e::AtomicStoreEor:
+ 359 case chi::req_optype_e::AtomicStoreSet:
+ 360 case chi::req_optype_e::AtomicStoreSmax:
+ 361 case chi::req_optype_e::AtomicStoreSmin:
+ 362 case chi::req_optype_e::AtomicStoreUmax:
+ 363 case chi::req_optype_e::AtomicStoreUmin:
+ 364 case chi::req_optype_e::AtomicLoadAdd:
+ 365 case chi::req_optype_e::AtomicLoadClr:
+ 366 case chi::req_optype_e::AtomicLoadEor:
+ 367 case chi::req_optype_e::AtomicLoadSet:
+ 368 case chi::req_optype_e::AtomicLoadSmax:
+ 369 case chi::req_optype_e::AtomicLoadSmin:
+ 370 case chi::req_optype_e::AtomicLoadUmax:
+ 371 case chi::req_optype_e::AtomicLoadUmin:
+ 372 case chi::req_optype_e::AtomicSwap:
+ 373 case chi::req_optype_e::AtomicCompare:
+ 374 chi_req_ext->req.set_order(0b00);
+
+
+
+
+
+
+ 381 chi_req_ext->req.set_mem_attr(mem_attr);
+
+ 383 if(!chi::is_valid(chi_req_ext))
+ 384 SCCFATAL(__FUNCTION__) << "Conversion created an invalid chi request, pls. check the AXI/ACE settings";
- 386 if(!chi::is_valid(chi_req_ext))
- 387 SCCFATAL(__FUNCTION__) << "Conversion created an invalid chi request, pls. check the AXI/ACE settings";
-
-
- 390 gp.set_auto_extension(chi_req_ext);
-
- 392 gp.set_extension(chi_req_ext);
-
-
-
-
-
- 398 gp.set_extension(ace_ext);
- 399 gp.set_extension(axi4_ext);
-
-
-
- 403 switch(req_e->req.get_opcode()) {
-
- 405 case chi::req_optype_e::ReadNoSnpSep:
-
- 407 case chi::req_optype_e::Evict:
- 408 case chi::req_optype_e::StashOnceUnique:
- 409 case chi::req_optype_e::StashOnceShared:
- 410 case chi::req_optype_e::CleanShared:
- 411 case chi::req_optype_e::CleanSharedPersist:
- 412 case chi::req_optype_e::CleanSharedPersistSep:
- 413 case chi::req_optype_e::CleanInvalid:
- 414 case chi::req_optype_e::MakeInvalid:
-
- 416 case chi::req_optype_e::WriteNoSnpZero:
- 417 case chi::req_optype_e::WriteNoSnpFull:
- 418 case chi::req_optype_e::WriteNoSnpPtl:
- 419 case chi::req_optype_e::WriteUniqueZero:
- 420 case chi::req_optype_e::WriteUniquePtl:
- 421 case chi::req_optype_e::WriteUniqueFull:
- 422 case chi::req_optype_e::WriteUniqueFullStash:
- 423 case chi::req_optype_e::WriteBackFull:
- 424 case chi::req_optype_e::WriteBackPtl:
- 425 case chi::req_optype_e::WriteCleanFull:
- 426 case chi::req_optype_e::WriteCleanPtl:
-
- 428 case chi::req_optype_e::AtomicStoreAdd:
- 429 case chi::req_optype_e::AtomicStoreClr:
- 430 case chi::req_optype_e::AtomicStoreEor:
- 431 case chi::req_optype_e::AtomicStoreSet:
- 432 case chi::req_optype_e::AtomicStoreSmax:
- 433 case chi::req_optype_e::AtomicStoreSmin:
- 434 case chi::req_optype_e::AtomicStoreUmax:
- 435 case chi::req_optype_e::AtomicStoreUmin:
- 436 case chi::req_optype_e::AtomicLoadAdd:
- 437 case chi::req_optype_e::AtomicLoadClr:
- 438 case chi::req_optype_e::AtomicLoadEor:
- 439 case chi::req_optype_e::AtomicLoadSet:
- 440 case chi::req_optype_e::AtomicLoadSmax:
- 441 case chi::req_optype_e::AtomicLoadSmin:
- 442 case chi::req_optype_e::AtomicLoadUmax:
- 443 case chi::req_optype_e::AtomicLoadUmin:
- 444 case chi::req_optype_e::AtomicCompare:
- 445 case chi::req_optype_e::AtomicSwap:
-
-
-
- 449 req_e->req.set_exp_comp_ack(false);
-
-
-
- 453 case chi::req_optype_e::ReadNoSnp:
- 454 case chi::req_optype_e::ReadOnce:
- 455 case chi::req_optype_e::CleanUnique:
- 456 case chi::req_optype_e::MakeUnique:
+
+ 387 gp.set_auto_extension(chi_req_ext);
+
+ 389 gp.set_extension(chi_req_ext);
+
+
+
+
+
+ 395 gp.set_extension(ace_ext);
+ 396 gp.set_extension(axi4_ext);
+
+
+
+ 400 switch(req_e->req.get_opcode()) {
+
+ 402 case chi::req_optype_e::ReadNoSnpSep:
+
+ 404 case chi::req_optype_e::Evict:
+ 405 case chi::req_optype_e::StashOnceUnique:
+ 406 case chi::req_optype_e::StashOnceShared:
+ 407 case chi::req_optype_e::CleanShared:
+ 408 case chi::req_optype_e::CleanSharedPersist:
+ 409 case chi::req_optype_e::CleanSharedPersistSep:
+ 410 case chi::req_optype_e::CleanInvalid:
+ 411 case chi::req_optype_e::MakeInvalid:
+
+ 413 case chi::req_optype_e::WriteNoSnpZero:
+ 414 case chi::req_optype_e::WriteNoSnpFull:
+ 415 case chi::req_optype_e::WriteNoSnpPtl:
+ 416 case chi::req_optype_e::WriteUniqueZero:
+ 417 case chi::req_optype_e::WriteUniquePtl:
+ 418 case chi::req_optype_e::WriteUniqueFull:
+ 419 case chi::req_optype_e::WriteUniqueFullStash:
+ 420 case chi::req_optype_e::WriteBackFull:
+ 421 case chi::req_optype_e::WriteBackPtl:
+ 422 case chi::req_optype_e::WriteCleanFull:
+ 423 case chi::req_optype_e::WriteCleanPtl:
+
+ 425 case chi::req_optype_e::AtomicStoreAdd:
+ 426 case chi::req_optype_e::AtomicStoreClr:
+ 427 case chi::req_optype_e::AtomicStoreEor:
+ 428 case chi::req_optype_e::AtomicStoreSet:
+ 429 case chi::req_optype_e::AtomicStoreSmax:
+ 430 case chi::req_optype_e::AtomicStoreSmin:
+ 431 case chi::req_optype_e::AtomicStoreUmax:
+ 432 case chi::req_optype_e::AtomicStoreUmin:
+ 433 case chi::req_optype_e::AtomicLoadAdd:
+ 434 case chi::req_optype_e::AtomicLoadClr:
+ 435 case chi::req_optype_e::AtomicLoadEor:
+ 436 case chi::req_optype_e::AtomicLoadSet:
+ 437 case chi::req_optype_e::AtomicLoadSmax:
+ 438 case chi::req_optype_e::AtomicLoadSmin:
+ 439 case chi::req_optype_e::AtomicLoadUmax:
+ 440 case chi::req_optype_e::AtomicLoadUmin:
+ 441 case chi::req_optype_e::AtomicCompare:
+ 442 case chi::req_optype_e::AtomicSwap:
+
+
+
+ 446 req_e->req.set_exp_comp_ack(false);
+
+
+
+ 450 case chi::req_optype_e::ReadNoSnp:
+ 451 case chi::req_optype_e::ReadOnce:
+ 452 case chi::req_optype_e::CleanUnique:
+ 453 case chi::req_optype_e::MakeUnique:
+ 454 req_e->req.set_exp_comp_ack(true);
+
+
 457 req_e->req.set_exp_comp_ack(true);
-
- 460 req_e->req.set_exp_comp_ack(true);
-
-
-
-
- 465 if((req_e->req.get_opcode() == chi::req_optype_e::ReadNoSnp ||
- 466 req_e->req.get_opcode() == chi::req_optype_e::ReadOnce) &&
- 467 (req_e->req.get_order() == 0b10 || req_e->req.get_order() == 0b11)) {
- 468 req_e->req.set_exp_comp_ack(true);
-
-
-
- 472 if((req_e->req.get_opcode() >= chi::req_optype_e::WriteEvictFull &&
- 473 req_e->req.get_opcode() <= chi::req_optype_e::WriteUniquePtlStash) &&
- 474 (req_e->req.get_order() == 0b10 || req_e->req.get_order() == 0b11)) {
- 475 req_e->req.set_exp_comp_ack(true);
-
-
-
- 479 bool make_rsp_from_req(tlm::tlm_generic_payload& gp, chi::rsp_optype_e rsp_opcode) {
-
- 481 if(rsp_opcode == chi::rsp_optype_e::CompAck) {
- 482 if(is_dataless(ctrl_e) || gp.is_write()) {
- 483 ctrl_e->resp.set_tgt_id(ctrl_e->req.get_tgt_id());
- 484 ctrl_e->resp.set_trace_tag(ctrl_e->req.is_trace_tag());
- 485 if(ctrl_e->req.get_opcode() == chi::req_optype_e::MakeReadUnique) {
- 486 ctrl_e->set_txn_id(ctrl_e->resp.get_db_id());
-
-
-
- 490 ctrl_e->req.set_tgt_id(dat_e->dat.get_home_n_id());
- 491 ctrl_e->set_src_id(dat_e->get_src_id());
- 492 ctrl_e->set_qos(dat_e->get_qos());
- 493 ctrl_e->set_txn_id(dat_e->dat.get_db_id());
- 494 ctrl_e->resp.set_tgt_id(dat_e->dat.get_tgt_id());
- 495 ctrl_e->resp.set_trace_tag(dat_e->dat.is_trace_tag());
-
+
+
+
+ 462 if((req_e->req.get_opcode() == chi::req_optype_e::ReadNoSnp ||
+ 463 req_e->req.get_opcode() == chi::req_optype_e::ReadOnce) &&
+ 464 (req_e->req.get_order() == 0b10 || req_e->req.get_order() == 0b11)) {
+ 465 req_e->req.set_exp_comp_ack(true);
+
+
+
+ 469 if((req_e->req.get_opcode() >= chi::req_optype_e::WriteEvictFull &&
+ 470 req_e->req.get_opcode() <= chi::req_optype_e::WriteUniquePtlStash) &&
+ 471 (req_e->req.get_order() == 0b10 || req_e->req.get_order() == 0b11)) {
+ 472 req_e->req.set_exp_comp_ack(true);
+
+
+
+ 476 bool make_rsp_from_req(tlm::tlm_generic_payload& gp, chi::rsp_optype_e rsp_opcode) {
+
+ 478 if(rsp_opcode == chi::rsp_optype_e::CompAck) {
+ 479 if(is_dataless(ctrl_e) || gp.is_write()) {
+ 480 ctrl_e->resp.set_tgt_id(ctrl_e->req.get_tgt_id());
+ 481 ctrl_e->resp.set_trace_tag(ctrl_e->req.is_trace_tag());
+ 482 if(ctrl_e->req.get_opcode() == chi::req_optype_e::MakeReadUnique) {
+ 483 ctrl_e->set_txn_id(ctrl_e->resp.get_db_id());
+
+
+
+ 487 ctrl_e->req.set_tgt_id(dat_e->dat.get_home_n_id());
+ 488 ctrl_e->set_src_id(dat_e->get_src_id());
+ 489 ctrl_e->set_qos(dat_e->get_qos());
+ 490 ctrl_e->set_txn_id(dat_e->dat.get_db_id());
+ 491 ctrl_e->resp.set_tgt_id(dat_e->dat.get_tgt_id());
+ 492 ctrl_e->resp.set_trace_tag(dat_e->dat.is_trace_tag());
+
+ 494 ctrl_e->resp.set_opcode(rsp_opcode);
+
+
497 ctrl_e->resp.set_opcode(rsp_opcode);
-
-
- 500 ctrl_e->resp.set_opcode(rsp_opcode);
-
- 502 snp_e->resp.set_opcode(rsp_opcode);
- 503 if(rsp_opcode == chi::rsp_optype_e::CompAck) {
-
- 505 snp_e->set_src_id(dat_e->get_src_id());
- 506 snp_e->set_qos(dat_e->get_qos());
- 507 snp_e->set_txn_id(dat_e->dat.get_db_id());
- 508 snp_e->resp.set_tgt_id(dat_e->dat.get_tgt_id());
- 509 snp_e->resp.set_trace_tag(dat_e->dat.is_trace_tag());
-
-
-
-
-
-
-
-
-
-
-
- 521 chi::pe::chi_rn_initiator_b::chi_rn_initiator_b(sc_core::sc_module_name nm,
-
- 523 size_t transfer_width)
-
-
- 526 , transfer_width_in_bytes(transfer_width / 8) {
- 527 add_attribute(tgt_id);
- 528 add_attribute(src_id);
- 529 add_attribute(data_interleaving);
- 530 add_attribute(strict_income_order);
- 531 add_attribute(use_legacy_mapping);
-
-
- 534 SC_METHOD(clk_counter);
- 535 sensitive << clk_i.pos();
-
- 537 SC_THREAD(snoop_dispatch);
-
-
- 540 chi::pe::chi_rn_initiator_b::~chi_rn_initiator_b() {
- 541 if(tx_state_by_trans.size()) {
- 542 for(auto& e : tx_state_by_trans)
- 543 SCCDEBUG(SCMOD) << "unfinished transaction with ptr: " << e.first << " with access address = 0x" << std::hex << ((tlm::tlm_generic_payload*)e.first)->get_address();
- 544 SCCWARN(SCMOD) << "is still waiting for unfinished transactions with number = " << tx_state_by_trans.size();
-
-
- 547 for(auto& e : tx_state_by_trans)
-
- 549 for(auto p: tx_state_pool)
-
+
+ 499 snp_e->resp.set_opcode(rsp_opcode);
+ 500 if(rsp_opcode == chi::rsp_optype_e::CompAck) {
+
+ 502 snp_e->set_src_id(dat_e->get_src_id());
+ 503 snp_e->set_qos(dat_e->get_qos());
+ 504 snp_e->set_txn_id(dat_e->dat.get_db_id());
+ 505 snp_e->resp.set_tgt_id(dat_e->dat.get_tgt_id());
+ 506 snp_e->resp.set_trace_tag(dat_e->dat.is_trace_tag());
+
+
+
+
+
+
+
+
+
+
+
+ 518 chi::pe::chi_rn_initiator_b::chi_rn_initiator_b(sc_core::sc_module_name nm,
+
+ 520 size_t transfer_width)
+
+
+ 523 , transfer_width_in_bytes(transfer_width / 8) {
+
+
+ 526 SC_METHOD(clk_counter);
+ 527 sensitive << clk_i.pos();
+
+ 529 SC_THREAD(snoop_dispatch);
+
+
+ 532 chi::pe::chi_rn_initiator_b::~chi_rn_initiator_b() {
+ 533 if(tx_state_by_trans.size()) {
+ 534 for(auto& e : tx_state_by_trans)
+ 535 SCCDEBUG(SCMOD) << "unfinished transaction with ptr: " << e.first << " with access address = 0x" << std::hex << ((tlm::tlm_generic_payload*)e.first)->get_address();
+ 536 SCCWARN(SCMOD) << "is still waiting for unfinished transactions with number = " << tx_state_by_trans.size();
+
+
+ 539 for(auto& e : tx_state_by_trans)
+
+ 541 for(auto p: tx_state_pool)
+
+
+
+ 545 void chi::pe::chi_rn_initiator_b::b_snoop(payload_type& trans, sc_core::sc_time& t) {
+ 546 if(bw_o.get_interface()) {
+ 547 auto latency = bw_o->transport(trans);
+ 548 if(latency < std::numeric_limits<unsigned>::max())
+ 549 t += latency * (clk_if ? clk_if->period() : clk_period);
+
- 553 void chi::pe::chi_rn_initiator_b::b_snoop(payload_type& trans, sc_core::sc_time& t) {
- 554 if(bw_o.get_interface()) {
- 555 auto latency = bw_o->transport(trans);
- 556 if(latency < std::numeric_limits<unsigned>::max())
- 557 t += latency * (clk_if ? clk_if->period() : clk_period);
-
-
-
-
-
- 563 sc_assert(req_ext != nullptr);
- 564 auto it = tx_state_by_trans.find(to_id(trans));
- 565 sc_assert(it != tx_state_by_trans.end());
- 566 auto* txs = it->second;
- 567 handle_snoop_response(trans, txs);
-
-
- 570 tlm::tlm_sync_enum chi::pe::chi_rn_initiator_b::nb_transport_bw(payload_type& trans, phase_type& phase,
- 571 sc_core::sc_time& t) {
-
- 573 if(phase == tlm::BEGIN_REQ) {
-
-
- 576 snp_peq.notify(trans, t);
-
- 578 auto it = tx_state_by_trans.find(to_id(trans));
- 579 sc_assert(it != tx_state_by_trans.end());
- 580 it->second->peq.notify(std::make_tuple(&trans, phase), t);
-
-
- 583 if(phase == tlm::BEGIN_REQ) {
-
- 585 if(credit_ext->type == credit_type_e::REQ) {
- 586 SCCTRACEALL(SCMOD) << "Received " << credit_ext->count << " req "
- 587 << (credit_ext->count == 1 ? "credit" : "credits");
- 588 for(auto i = 0U; i < credit_ext->count; ++i)
-
-
- 591 phase = tlm::END_RESP;
- 592 trans.set_response_status(tlm::TLM_OK_RESPONSE);
-
- 594 t += clk_if->period() - 1_ps;
- 595 return tlm::TLM_COMPLETED;
-
- 597 SCCFATAL(SCMOD) << "Illegal transaction received from HN";
-
-
- 600 auto it = tx_state_by_trans.find(to_id(trans));
- 601 sc_assert(it != tx_state_by_trans.end());
- 602 it->second->peq.notify(std::make_tuple(&trans, phase), t);
-
-
- 605 return tlm::TLM_ACCEPTED;
-
-
- 608 void chi::pe::chi_rn_initiator_b::invalidate_direct_mem_ptr(sc_dt::uint64 start_range, sc_dt::uint64 end_range) {}
-
- 610 void chi::pe::chi_rn_initiator_b::update_data_extension(chi::chi_data_extension* data_ext, payload_type& trans) {
-
- 612 sc_assert(req_e != nullptr);
- 613 switch(req_e->req.get_opcode()) {
- 614 case chi::req_optype_e::WriteNoSnpPtl:
- 615 case chi::req_optype_e::WriteNoSnpFull:
- 616 case chi::req_optype_e::WriteUniquePtl:
- 617 case chi::req_optype_e::WriteUniqueFull:
- 618 case chi::req_optype_e::WriteUniquePtlStash:
- 619 case chi::req_optype_e::WriteUniqueFullStash:
-
- 621 case chi::req_optype_e::WriteNoSnpFullCleanSh:
- 622 case chi::req_optype_e::WriteNoSnpFullCleanInv:
- 623 case chi::req_optype_e::WriteNoSnpFullCleanShPerSep:
- 624 case chi::req_optype_e::WriteUniqueFullCleanSh:
- 625 case chi::req_optype_e::WriteUniqueFullCleanShPerSep:
- 626 case chi::req_optype_e::WriteBackFullCleanShPerSep:
- 627 case chi::req_optype_e::WriteNoSnpPtlCleanSh:
- 628 case chi::req_optype_e::WriteNoSnpPtlCleanInv:
- 629 case chi::req_optype_e::WriteNoSnpPtlCleanShPerSep:
- 630 case chi::req_optype_e::WriteUniquePtlCleanSh:
- 631 case chi::req_optype_e::WriteUniquePtlCleanShPerSep:
- 632 data_ext->dat.set_opcode(chi::dat_optype_e::NonCopyBackWrData);
-
-
- 635 case chi::req_optype_e::WriteBackFull:
- 636 case chi::req_optype_e::WriteBackPtl:
- 637 case chi::req_optype_e::WriteCleanFull:
- 638 case chi::req_optype_e::WriteCleanPtl:
-
- 640 case chi::req_optype_e::WriteBackFullCleanSh:
- 641 case chi::req_optype_e::WriteBackFullCleanInv:
- 642 case chi::req_optype_e::WriteCleanFullCleanSh:
- 643 case chi::req_optype_e::WriteCleanFullCleanShPerSep:
- 644 case chi::req_optype_e::WriteEvictFull:
- 645 data_ext->dat.set_opcode(chi::dat_optype_e::CopyBackWrData);
-
-
- 648 case chi::req_optype_e::AtomicStoreAdd:
- 649 case chi::req_optype_e::AtomicStoreClr:
- 650 case chi::req_optype_e::AtomicStoreEor:
- 651 case chi::req_optype_e::AtomicStoreSet:
- 652 case chi::req_optype_e::AtomicStoreSmax:
- 653 case chi::req_optype_e::AtomicStoreSmin:
- 654 case chi::req_optype_e::AtomicStoreUmax:
- 655 case chi::req_optype_e::AtomicStoreUmin:
- 656 data_ext->dat.set_opcode(chi::dat_optype_e::NonCopyBackWrData);
-
- 658 case chi::req_optype_e::AtomicLoadAdd:
- 659 case chi::req_optype_e::AtomicLoadClr:
- 660 case chi::req_optype_e::AtomicLoadEor:
- 661 case chi::req_optype_e::AtomicLoadSet:
- 662 case chi::req_optype_e::AtomicLoadSmax:
- 663 case chi::req_optype_e::AtomicLoadSmin:
- 664 case chi::req_optype_e::AtomicLoadUmax:
- 665 case chi::req_optype_e::AtomicLoadUmin:
- 666 case chi::req_optype_e::AtomicSwap:
- 667 case chi::req_optype_e::AtomicCompare:
- 668 data_ext->dat.set_opcode(chi::dat_optype_e::NonCopyBackWrData);
-
-
- 671 SCCWARN(SCMOD) << " Unable to match req_opcode with data_opcode in write transaction ";
-
- 673 if(data_ext->dat.get_opcode() == chi::dat_optype_e::NonCopyBackWrData) {
- 674 data_ext->dat.set_resp(chi::dat_resptype_e::NonCopyBackWrData);
- 675 } else if(data_ext->dat.get_opcode() == chi::dat_optype_e::NCBWrDataCompAck) {
- 676 data_ext->dat.set_resp(chi::dat_resptype_e::NCBWrDataCompAck);
- 677 } else if(data_ext->dat.get_opcode() == chi::dat_optype_e::CopyBackWrData) {
- 678 auto cache_ext = trans.get_extension<::cache::cache_info>();
- 679 sc_assert(cache_ext != nullptr);
- 680 auto cache_state = cache_ext->get_state();
- 681 if(cache_state == ::cache::state::IX) {
- 682 data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_I);
- 683 } else if(cache_state == ::cache::state::UC) {
- 684 data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_UC);
- 685 } else if(cache_state == ::cache::state::SC) {
- 686 data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_SC);
- 687 } else if(cache_state == ::cache::state::UD) {
- 688 data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_UD_PD);
- 689 } else if(cache_state == ::cache::state::SD) {
- 690 data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_SD_PD);
-
- 692 SCCWARN(SCMOD) << " Unable to match cache state with resptype ";
-
- 694 SCCWARN(SCMOD) << "Unable to match resptype with WriteData Responses";
-
-
- 697 auto db_id = req_e->resp.get_db_id();
- 698 data_ext->set_txn_id(db_id);
- 699 data_ext->set_src_id(req_e->resp.get_tgt_id());
- 700 data_ext->dat.set_tgt_id(req_e->get_src_id());
-
-
- 703 void chi::pe::chi_rn_initiator_b::create_data_ext(payload_type& trans) {
-
- 705 update_data_extension(data_ext, trans);
-
-
-
- 709 void chi::pe::chi_rn_initiator_b::send_packet(tlm::tlm_phase phase, payload_type& trans,
-
- 711 sc_core::sc_time delay = sc_core::SC_ZERO_TIME;
- 712 tlm::tlm_sync_enum ret = socket_fw->nb_transport_fw(trans, phase, delay);
- 713 if(ret == tlm::TLM_UPDATED) {
- 714 if(phase == chi::END_PARTIAL_DATA || phase == chi::END_DATA) {
-
-
-
-
- 719 auto entry = txs->peq.get();
- 720 sc_assert(std::get<0>(entry) == &trans &&
- 721 (std::get<1>(entry) == chi::END_PARTIAL_DATA || std::get<1>(entry) == chi::END_DATA));
-
- 723 auto timing_e = trans.get_extension<atp::timing_params>();
- 724 auto delay_in_cycles = (timing_e && timing_e->wbv) ? timing_e->wbv : 1;
- 725 while(delay_in_cycles) {
-
- 727 wait(clk_i.posedge_event());
-
-
-
-
- 732 sc_core::sc_time delay;
- 733 tlm::tlm_phase phase;
-
- 735 if(data_ext == nullptr) {
- 736 create_data_ext(trans);
-
-
-
- 740 auto beat_cnt = calculate_beats(trans);
- 741 SCCDEBUG(SCMOD) << "Starting transaction on channel WDAT : (opcode, cmd, addr, len) = ("
- 742 << to_char(data_ext->dat.get_opcode()) << ", " << trans.get_command() << ", " << std::hex
- 743 << trans.get_address() << ", " << trans.get_data_length() << ")";
+
+
+ 555 sc_assert(req_ext != nullptr);
+ 556 auto it = tx_state_by_trans.find(to_id(trans));
+ 557 sc_assert(it != tx_state_by_trans.end());
+ 558 auto* txs = it->second;
+ 559 handle_snoop_response(trans, txs);
+
+
+ 562 tlm::tlm_sync_enum chi::pe::chi_rn_initiator_b::nb_transport_bw(payload_type& trans, phase_type& phase,
+ 563 sc_core::sc_time& t) {
+
+ 565 if(phase == tlm::BEGIN_REQ) {
+
+
+ 568 snp_peq.notify(trans, t);
+
+ 570 auto it = tx_state_by_trans.find(to_id(trans));
+ 571 sc_assert(it != tx_state_by_trans.end());
+ 572 it->second->peq.notify(std::make_tuple(&trans, phase), t);
+
+
+ 575 if(phase == tlm::BEGIN_REQ) {
+
+ 577 if(credit_ext->type == credit_type_e::REQ) {
+ 578 SCCTRACEALL(SCMOD) << "Received " << credit_ext->count << " req "
+ 579 << (credit_ext->count == 1 ? "credit" : "credits");
+ 580 for(auto i = 0U; i < credit_ext->count; ++i)
+
+
+ 583 phase = tlm::END_RESP;
+ 584 trans.set_response_status(tlm::TLM_OK_RESPONSE);
+
+ 586 t += clk_if->period() - 1_ps;
+ 587 return tlm::TLM_COMPLETED;
+
+ 589 SCCFATAL(SCMOD) << "Illegal transaction received from HN";
+
+
+ 592 auto it = tx_state_by_trans.find(to_id(trans));
+ 593 sc_assert(it != tx_state_by_trans.end());
+ 594 it->second->peq.notify(std::make_tuple(&trans, phase), t);
+
+
+ 597 return tlm::TLM_ACCEPTED;
+
+
+ 600 void chi::pe::chi_rn_initiator_b::invalidate_direct_mem_ptr(sc_dt::uint64 start_range, sc_dt::uint64 end_range) {}
+
+ 602 void chi::pe::chi_rn_initiator_b::update_data_extension(chi::chi_data_extension* data_ext, payload_type& trans) {
+
+ 604 sc_assert(req_e != nullptr);
+ 605 switch(req_e->req.get_opcode()) {
+ 606 case chi::req_optype_e::WriteNoSnpPtl:
+ 607 case chi::req_optype_e::WriteNoSnpFull:
+ 608 case chi::req_optype_e::WriteUniquePtl:
+ 609 case chi::req_optype_e::WriteUniqueFull:
+ 610 case chi::req_optype_e::WriteUniquePtlStash:
+ 611 case chi::req_optype_e::WriteUniqueFullStash:
+
+ 613 case chi::req_optype_e::WriteNoSnpFullCleanSh:
+ 614 case chi::req_optype_e::WriteNoSnpFullCleanInv:
+ 615 case chi::req_optype_e::WriteNoSnpFullCleanShPerSep:
+ 616 case chi::req_optype_e::WriteUniqueFullCleanSh:
+ 617 case chi::req_optype_e::WriteUniqueFullCleanShPerSep:
+ 618 case chi::req_optype_e::WriteBackFullCleanShPerSep:
+ 619 case chi::req_optype_e::WriteNoSnpPtlCleanSh:
+ 620 case chi::req_optype_e::WriteNoSnpPtlCleanInv:
+ 621 case chi::req_optype_e::WriteNoSnpPtlCleanShPerSep:
+ 622 case chi::req_optype_e::WriteUniquePtlCleanSh:
+ 623 case chi::req_optype_e::WriteUniquePtlCleanShPerSep:
+ 624 data_ext->dat.set_opcode(chi::dat_optype_e::NonCopyBackWrData);
+
+
+ 627 case chi::req_optype_e::WriteBackFull:
+ 628 case chi::req_optype_e::WriteBackPtl:
+ 629 case chi::req_optype_e::WriteCleanFull:
+ 630 case chi::req_optype_e::WriteCleanPtl:
+
+ 632 case chi::req_optype_e::WriteBackFullCleanSh:
+ 633 case chi::req_optype_e::WriteBackFullCleanInv:
+ 634 case chi::req_optype_e::WriteCleanFullCleanSh:
+ 635 case chi::req_optype_e::WriteCleanFullCleanShPerSep:
+ 636 case chi::req_optype_e::WriteEvictFull:
+ 637 data_ext->dat.set_opcode(chi::dat_optype_e::CopyBackWrData);
+
+
+ 640 case chi::req_optype_e::AtomicStoreAdd:
+ 641 case chi::req_optype_e::AtomicStoreClr:
+ 642 case chi::req_optype_e::AtomicStoreEor:
+ 643 case chi::req_optype_e::AtomicStoreSet:
+ 644 case chi::req_optype_e::AtomicStoreSmax:
+ 645 case chi::req_optype_e::AtomicStoreSmin:
+ 646 case chi::req_optype_e::AtomicStoreUmax:
+ 647 case chi::req_optype_e::AtomicStoreUmin:
+ 648 data_ext->dat.set_opcode(chi::dat_optype_e::NonCopyBackWrData);
+
+ 650 case chi::req_optype_e::AtomicLoadAdd:
+ 651 case chi::req_optype_e::AtomicLoadClr:
+ 652 case chi::req_optype_e::AtomicLoadEor:
+ 653 case chi::req_optype_e::AtomicLoadSet:
+ 654 case chi::req_optype_e::AtomicLoadSmax:
+ 655 case chi::req_optype_e::AtomicLoadSmin:
+ 656 case chi::req_optype_e::AtomicLoadUmax:
+ 657 case chi::req_optype_e::AtomicLoadUmin:
+ 658 case chi::req_optype_e::AtomicSwap:
+ 659 case chi::req_optype_e::AtomicCompare:
+ 660 data_ext->dat.set_opcode(chi::dat_optype_e::NonCopyBackWrData);
+
+
+ 663 SCCWARN(SCMOD) << " Unable to match req_opcode with data_opcode in write transaction ";
+
+ 665 if(data_ext->dat.get_opcode() == chi::dat_optype_e::NonCopyBackWrData) {
+ 666 data_ext->dat.set_resp(chi::dat_resptype_e::NonCopyBackWrData);
+ 667 } else if(data_ext->dat.get_opcode() == chi::dat_optype_e::NCBWrDataCompAck) {
+ 668 data_ext->dat.set_resp(chi::dat_resptype_e::NCBWrDataCompAck);
+ 669 } else if(data_ext->dat.get_opcode() == chi::dat_optype_e::CopyBackWrData) {
+ 670 auto cache_ext = trans.get_extension<::cache::cache_info>();
+ 671 sc_assert(cache_ext != nullptr);
+ 672 auto cache_state = cache_ext->get_state();
+ 673 if(cache_state == ::cache::state::IX) {
+ 674 data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_I);
+ 675 } else if(cache_state == ::cache::state::UC) {
+ 676 data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_UC);
+ 677 } else if(cache_state == ::cache::state::SC) {
+ 678 data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_SC);
+ 679 } else if(cache_state == ::cache::state::UD) {
+ 680 data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_UD_PD);
+ 681 } else if(cache_state == ::cache::state::SD) {
+ 682 data_ext->dat.set_resp(chi::dat_resptype_e::CopyBackWrData_SD_PD);
+
+ 684 SCCWARN(SCMOD) << " Unable to match cache state with resptype ";
+
+ 686 SCCWARN(SCMOD) << "Unable to match resptype with WriteData Responses";
+
+
+ 689 auto db_id = req_e->resp.get_db_id();
+ 690 data_ext->set_txn_id(db_id);
+ 691 data_ext->set_src_id(req_e->resp.get_tgt_id());
+ 692 data_ext->dat.set_tgt_id(req_e->get_src_id());
+
+
+ 695 void chi::pe::chi_rn_initiator_b::create_data_ext(payload_type& trans) {
+
+ 697 update_data_extension(data_ext, trans);
+
+
+
+ 701 void chi::pe::chi_rn_initiator_b::send_packet(tlm::tlm_phase phase, payload_type& trans,
+
+ 703 sc_core::sc_time delay = sc_core::SC_ZERO_TIME;
+ 704 tlm::tlm_sync_enum ret = socket_fw->nb_transport_fw(trans, phase, delay);
+ 705 if(ret == tlm::TLM_UPDATED) {
+ 706 if(phase == chi::END_PARTIAL_DATA || phase == chi::END_DATA) {
+
+
+
+
+ 711 auto entry = txs->peq.get();
+ 712 sc_assert(std::get<0>(entry) == &trans &&
+ 713 (std::get<1>(entry) == chi::END_PARTIAL_DATA || std::get<1>(entry) == chi::END_DATA));
+
+ 715 auto timing_e = trans.get_extension<atp::timing_params>();
+ 716 auto delay_in_cycles = (timing_e && timing_e->wbv) ? timing_e->wbv : 1;
+ 717 while(delay_in_cycles) {
+
+ 719 wait(clk_i.posedge_event());
+
+
+
+
+ 724 sc_core::sc_time delay;
+ 725 tlm::tlm_phase phase;
+
+ 727 if(data_ext == nullptr) {
+ 728 create_data_ext(trans);
+
+
+
+ 732 auto beat_cnt = calculate_beats(trans);
+ 733 SCCDEBUG(SCMOD) << "Starting transaction on channel WDAT : (opcode, cmd, addr, len) = ("
+ 734 << to_char(data_ext->dat.get_opcode()) << ", " << trans.get_command() << ", " << std::hex
+ 735 << trans.get_address() << ", " << trans.get_data_length() << ")";
+
+ 737 if(!data_interleaving.get_value()) {
+
+ 739 for(auto i = 0U; i < beat_cnt; ++i) {
+
+ 741 phase = chi::BEGIN_PARTIAL_DATA;
+
+ 743 phase = chi::BEGIN_DATA;
- 745 if(!data_interleaving.value) {
-
- 747 for(auto i = 0U; i < beat_cnt; ++i) {
-
- 749 phase = chi::BEGIN_PARTIAL_DATA;
-
- 751 phase = chi::BEGIN_DATA;
-
-
- 754 data_ext->dat.set_data_id(i<<(transfer_width_in_bytes*8/128 -1));
- 755 SCCTRACE(SCMOD) << "WDAT flit with txnid " << data_ext->cmn.get_txn_id() << " data_id = " << (unsigned int)(data_ext->dat.get_data_id()) << " sent. Beat count: " << i
- 756 << ", addr: 0x" << std::hex << trans.get_address() << ", last=" << (i == (beat_cnt - 1));
- 757 send_packet(phase, trans, txs);
-
-
- 760 for(auto i = 0U; i < beat_cnt; ++i) {
-
-
-
- 764 phase = chi::BEGIN_PARTIAL_DATA;
-
- 766 phase = chi::BEGIN_DATA;
-
- 768 data_ext->dat.set_data_id(i<<(transfer_width_in_bytes*8/128 -1));
- 769 SCCTRACE(SCMOD) << "WDAT flit with txnid " << data_ext->cmn.get_txn_id() << " data_id = " << (unsigned int)(data_ext->dat.get_data_id()) << " sent. Beat count: " << i
- 770 << ", addr: 0x" << std::hex << trans.get_address()
- 771 << ", last=" << (i == (beat_cnt - 1));
- 772 send_packet(phase, trans, txs);
-
-
-
-
-
-
-
- 780 void chi::pe::chi_rn_initiator_b::send_comp_ack(payload_type& trans, tx_state*& txs) {
- 781 if(make_rsp_from_req(trans, chi::rsp_optype_e::CompAck)) {
-
- 783 SCCDEBUG(SCMOD) << "Send the CompAck response on SRSP channel, addr: 0x" << std::hex << trans.get_address();
- 784 tlm::tlm_phase phase = chi::ACK;
- 785 auto delay = SC_ZERO_TIME;
- 786 auto ret = socket_fw->nb_transport_fw(trans, phase, delay);
- 787 if(ret == tlm::TLM_UPDATED && phase == chi::ACK) {
-
-
-
- 791 auto entry = txs->peq.get();
- 792 sc_assert(std::get<0>(entry) == &trans && std::get<1>(entry) == tlm::END_RESP);
-
- 794 wait(clk_i.posedge_event());
-
-
-
-
- 799 switch(ext->req.get_opcode()){
- 800 case req_optype_e::WriteBackFullCleanSh:
- 801 case req_optype_e::WriteBackFullCleanInv:
- 802 case req_optype_e::WriteBackFullCleanShPerSep:
- 803 case req_optype_e::WriteCleanFullCleanSh:
- 804 case req_optype_e::WriteCleanFullCleanShPerSep:
- 805 case req_optype_e::WriteNoSnpFullCleanSh:
- 806 case req_optype_e::WriteNoSnpFullCleanInv:
- 807 case req_optype_e::WriteNoSnpFullCleanShPerSep:
- 808 case req_optype_e::WriteUniquePtlCleanSh:
- 809 case req_optype_e::WriteUniqueFullCleanSh:
- 810 case req_optype_e::WriteUniquePtlCleanShPerSep:
- 811 case req_optype_e::WriteUniqueFullCleanShPerSep:
-
-
-
-
-
-
- 818 switch(ext->req.get_opcode()){
- 819 case req_optype_e::WriteBackFullCleanShPerSep:
- 820 case req_optype_e::WriteCleanFullCleanShPerSep:
- 821 case req_optype_e::WriteNoSnpFullCleanShPerSep:
- 822 case req_optype_e::WriteUniquePtlCleanShPerSep:
- 823 case req_optype_e::WriteUniqueFullCleanShPerSep:
- 824 case req_optype_e::CleanSharedPersistSep:
-
-
-
-
-
- 830 enum { WAIT_CTRL=0x1, WAIT_DATA=0x2, WAIT_COMPCMO=4, WAIT_PERSIST=8};
- 831 void chi::pe::chi_rn_initiator_b::exec_read_write_protocol(const unsigned int txn_id, payload_type& trans,
-
-
- 834 sc_core::sc_time delay;
-
- 836 unsigned not_finish = WAIT_CTRL;
- 837 not_finish |= is_dataless(ctrl_ext)?0:WAIT_DATA;
- 838 not_finish |= expectCompCMO(ctrl_ext)?WAIT_COMPCMO:0;
- 839 not_finish |= expectPersist(ctrl_ext)?WAIT_PERSIST:0;
- 840 auto exp_beat_cnt = calculate_beats(trans);
-
-
-
- 844 auto entry = txs->peq.get();
- 845 sc_assert(std::get<0>(entry) == &trans);
- 846 auto phase = std::get<1>(entry);
- 847 if(phase == tlm::BEGIN_RESP) {
- 848 if(chi::is_dataless(ctrl_ext)){
- 849 switch(ctrl_ext->resp.get_opcode()) {
- 850 case chi::rsp_optype_e::Comp:
- 851 if(ctrl_ext->req.get_opcode() == chi::req_optype_e::MakeReadUnique)
- 852 not_finish &= ~WAIT_CTRL;
-
- 854 switch(ctrl_ext->resp.get_resp()) {
- 855 case chi::rsp_resptype_e::Comp_I:
- 856 case chi::rsp_resptype_e::Comp_UC:
- 857 case chi::rsp_resptype_e::Comp_SC:
- 858 not_finish &= ~WAIT_CTRL;
-
-
-
-
+
+ 746 data_ext->dat.set_data_id(i<<(transfer_width_in_bytes*8/128 -1));
+ 747 SCCTRACE(SCMOD) << "WDAT flit with txnid " << data_ext->cmn.get_txn_id() << " data_id = " << (unsigned int)(data_ext->dat.get_data_id()) << " sent. Beat count: " << i
+ 748 << ", addr: 0x" << std::hex << trans.get_address() << ", last=" << (i == (beat_cnt - 1));
+ 749 send_packet(phase, trans, txs);
+
+
+ 752 for(auto i = 0U; i < beat_cnt; ++i) {
+
+
+
+ 756 phase = chi::BEGIN_PARTIAL_DATA;
+
+ 758 phase = chi::BEGIN_DATA;
+
+ 760 data_ext->dat.set_data_id(i<<(transfer_width_in_bytes*8/128 -1));
+ 761 SCCTRACE(SCMOD) << "WDAT flit with txnid " << data_ext->cmn.get_txn_id() << " data_id = " << (unsigned int)(data_ext->dat.get_data_id()) << " sent. Beat count: " << i
+ 762 << ", addr: 0x" << std::hex << trans.get_address()
+ 763 << ", last=" << (i == (beat_cnt - 1));
+ 764 send_packet(phase, trans, txs);
+
+
+
+
+
+
+ 771 void chi::pe::chi_rn_initiator_b::send_comp_ack(payload_type& trans, tx_state*& txs) {
+ 772 if(make_rsp_from_req(trans, chi::rsp_optype_e::CompAck)) {
+
+ 774 SCCDEBUG(SCMOD) << "Send the CompAck response on SRSP channel, addr: 0x" << std::hex << trans.get_address();
+ 775 tlm::tlm_phase phase = chi::ACK;
+ 776 auto delay = SC_ZERO_TIME;
+ 777 auto ret = socket_fw->nb_transport_fw(trans, phase, delay);
+ 778 if(ret == tlm::TLM_UPDATED && phase == chi::ACK) {
+
+
+
+ 782 auto entry = txs->peq.get();
+ 783 sc_assert(std::get<0>(entry) == &trans && std::get<1>(entry) == tlm::END_RESP);
+
+ 785 wait(clk_i.posedge_event());
+
+
+
+
+ 790 switch(ext->req.get_opcode()){
+ 791 case req_optype_e::WriteBackFullCleanSh:
+ 792 case req_optype_e::WriteBackFullCleanInv:
+ 793 case req_optype_e::WriteBackFullCleanShPerSep:
+ 794 case req_optype_e::WriteCleanFullCleanSh:
+ 795 case req_optype_e::WriteCleanFullCleanShPerSep:
+ 796 case req_optype_e::WriteNoSnpFullCleanSh:
+ 797 case req_optype_e::WriteNoSnpFullCleanInv:
+ 798 case req_optype_e::WriteNoSnpFullCleanShPerSep:
+ 799 case req_optype_e::WriteUniquePtlCleanSh:
+ 800 case req_optype_e::WriteUniqueFullCleanSh:
+ 801 case req_optype_e::WriteUniquePtlCleanShPerSep:
+ 802 case req_optype_e::WriteUniqueFullCleanShPerSep:
+
+
+
+
+
+
+ 809 switch(ext->req.get_opcode()){
+ 810 case req_optype_e::WriteBackFullCleanShPerSep:
+ 811 case req_optype_e::WriteCleanFullCleanShPerSep:
+ 812 case req_optype_e::WriteNoSnpFullCleanShPerSep:
+ 813 case req_optype_e::WriteUniquePtlCleanShPerSep:
+ 814 case req_optype_e::WriteUniqueFullCleanShPerSep:
+ 815 case req_optype_e::CleanSharedPersistSep:
+
+
+
+
+
+ 821 enum { WAIT_CTRL=0x1, WAIT_DATA=0x2, WAIT_COMPCMO=4, WAIT_PERSIST=8};
+ 822 void chi::pe::chi_rn_initiator_b::exec_read_write_protocol(const unsigned int txn_id, payload_type& trans,
+
+
+ 825 sc_core::sc_time delay;
+
+ 827 unsigned not_finish = WAIT_CTRL;
+ 828 not_finish |= is_dataless(ctrl_ext)?0:WAIT_DATA;
+ 829 not_finish |= expectCompCMO(ctrl_ext)?WAIT_COMPCMO:0;
+ 830 not_finish |= expectPersist(ctrl_ext)?WAIT_PERSIST:0;
+ 831 auto exp_beat_cnt = calculate_beats(trans);
+
+
+
+ 835 auto entry = txs->peq.get();
+ 836 sc_assert(std::get<0>(entry) == &trans);
+ 837 auto phase = std::get<1>(entry);
+ 838 if(phase == tlm::BEGIN_RESP) {
+ 839 if(chi::is_dataless(ctrl_ext)){
+ 840 switch(ctrl_ext->resp.get_opcode()) {
+ 841 case chi::rsp_optype_e::Comp:
+ 842 if(ctrl_ext->req.get_opcode() == chi::req_optype_e::MakeReadUnique)
+ 843 not_finish &= ~WAIT_CTRL;
+
+ 845 switch(ctrl_ext->resp.get_resp()) {
+ 846 case chi::rsp_resptype_e::Comp_I:
+ 847 case chi::rsp_resptype_e::Comp_UC:
+ 848 case chi::rsp_resptype_e::Comp_SC:
+ 849 not_finish &= ~WAIT_CTRL;
+
+
+
+
+
+ 855 case chi::rsp_optype_e::CompDBIDResp:
+ 856 case chi::rsp_optype_e::CompPersist:
+ 857 case chi::rsp_optype_e::CompCMO:
+ 858 case chi::rsp_optype_e::CompStashDone:
+ 859 not_finish &= ~WAIT_CTRL;
+
+ 861 case chi::rsp_optype_e::Persist:
+ 862 not_finish &= ~WAIT_PERSIST;
- 864 case chi::rsp_optype_e::CompDBIDResp:
- 865 case chi::rsp_optype_e::CompPersist:
- 866 case chi::rsp_optype_e::CompCMO:
- 867 case chi::rsp_optype_e::CompStashDone:
- 868 not_finish &= ~WAIT_CTRL;
-
- 870 case chi::rsp_optype_e::Persist:
- 871 not_finish &= ~WAIT_PERSIST;
-
-
+
+
+
+ 867 not_finish &= ~WAIT_DATA;
+ 868 send_cresp_response(trans);
+ 869 }
else if(trans.is_write()) {
+ 870 switch(ctrl_ext->resp.get_opcode()) {
+ 871 case chi::rsp_optype_e::CompCMO:
+ 872 not_finish &= ~WAIT_COMPCMO;
+ 873 send_cresp_response(trans);
-
- 876 not_finish &= ~WAIT_DATA;
- 877 send_cresp_response(trans);
- 878 }
else if(trans.is_write()) {
- 879 switch(ctrl_ext->resp.get_opcode()) {
- 880 case chi::rsp_optype_e::CompCMO:
- 881 not_finish &= ~WAIT_COMPCMO;
- 882 send_cresp_response(trans);
-
- 884 case chi::rsp_optype_e::Persist:
- 885 not_finish &= ~WAIT_PERSIST;
- 886 send_cresp_response(trans);
+ 875 case chi::rsp_optype_e::Persist:
+ 876 not_finish &= ~WAIT_PERSIST;
+ 877 send_cresp_response(trans);
+
+ 879 case chi::rsp_optype_e::CompDBIDResp:
+ 880 not_finish &= ~WAIT_CTRL;
+
+ 882 case chi::rsp_optype_e::DBIDResp:
+ 883 case chi::rsp_optype_e::DBIDRespOrd:
+ 884 send_cresp_response(trans);
+ 885 send_wdata(trans, txs);
+ 886 not_finish &= ~WAIT_DATA;
- 888 case chi::rsp_optype_e::CompDBIDResp:
+ 888 case chi::rsp_optype_e::Comp:
889 not_finish &= ~WAIT_CTRL;
-
- 891 case chi::rsp_optype_e::DBIDResp:
- 892 case chi::rsp_optype_e::DBIDRespOrd:
- 893 send_cresp_response(trans);
- 894 send_wdata(trans, txs);
- 895 not_finish &= ~WAIT_DATA;
-
- 897 case chi::rsp_optype_e::Comp:
- 898 not_finish &= ~WAIT_CTRL;
- 899 send_cresp_response(trans);
-
-
- 902 SCCFATAL(SCMOD) << "Illegal opcode received: " << to_char(ctrl_ext->resp.get_opcode());
-
- 904 } else if(trans.is_read()) {
- 905 not_finish &= ~WAIT_CTRL;
- 906 send_cresp_response(trans);
-
- 908 } else if(trans.is_read() && (phase == chi::BEGIN_PARTIAL_DATA || phase == chi::BEGIN_DATA)) {
- 909 SCCTRACE(SCMOD) << "RDAT flit received. Beat count: " << beat_cnt << ", addr: 0x" << std::hex
- 910 << trans.get_address();
- 911 if(phase == chi::BEGIN_PARTIAL_DATA)
- 912 phase = chi::END_PARTIAL_DATA;
-
- 914 phase = chi::END_DATA;
- 915 delay = clk_if ? clk_if->period() - 1_ps : SC_ZERO_TIME;
- 916 socket_fw->nb_transport_fw(trans, phase, delay);
-
- 918 if(phase == chi::END_DATA) {
- 919 not_finish &= ~(WAIT_CTRL | WAIT_DATA);
- 920 if(beat_cnt != exp_beat_cnt)
- 921 SCCERR(SCMOD) << "Wrong beat count, expected " << exp_beat_cnt << ", got " << beat_cnt;
-
-
- 924 SCCFATAL(SCMOD) << "Illegal protocol state (maybe just not implemented?)";
-
-
-
-
- 929 void chi::pe::chi_rn_initiator_b::send_cresp_response(payload_type& trans) {
-
- 931 sc_assert(resp_ext != nullptr);
- 932 if(is_request_order(resp_ext))
-
- 934 auto id = (unsigned)(resp_ext->get_txn_id());
- 935 SCCDEBUG(SCMOD) << "got cresp: src_id=" << (unsigned)resp_ext->get_src_id()
- 936 << ", tgt_id=" << (unsigned)resp_ext->resp.get_tgt_id()
- 937 << ", txnid=0x" << std::hex << id << ", " << to_char(resp_ext->resp.get_opcode())
- 938 << ", resp=" << to_char(resp_ext->resp.get_resp())
- 939 << ", db_id=" << (unsigned)resp_ext->resp.get_db_id() << ", addr=0x" << std::hex
- 940 << trans.get_address() << ")";
- 941 tlm::tlm_phase phase = tlm::END_RESP;
- 942 sc_core::sc_time delay = clk_if ? clk_if->period() - 1_ps : SC_ZERO_TIME;
- 943 socket_fw->nb_transport_fw(trans, phase, delay);
- 944 wait(clk_i.posedge_event());
-
-
- 947 void chi::pe::chi_rn_initiator_b::exec_atomic_protocol(const unsigned int txn_id, payload_type& trans,
-
- 949 sc_core::sc_time delay;
-
- 951 auto entry = txs->peq.get();
- 952 sc_assert(std::get<0>(entry) == &trans);
- 953 auto phase = std::get<1>(entry);
- 954 if(phase == tlm::BEGIN_RESP) {
- 955 send_cresp_response(trans);
-
- 957 if(resp_ext->resp.get_opcode() == chi::rsp_optype_e::DBIDResp) {
- 958 SCCERR(SCMOD) << "CRESP illegal response opcode: " << to_char(resp_ext->resp.get_opcode());
-
-
- 961 SCCERR(SCMOD) << "Illegal protocol state (maybe just not implemented?) " << phase;
-
-
- 964 auto not_finish = 0b11U;
- 965 auto exp_beat_cnt = calculate_beats(trans);
- 966 auto input_beat_cnt = 0U;
- 967 auto output_beat_cnt = 0U;
-
-
-
- 973 if(output_beat_cnt < exp_beat_cnt) {
-
-
- 976 update_data_extension(data_ext, trans);
-
- 978 create_data_ext(trans);
-
-
- 981 SCCDEBUG(SCMOD) << "Atomic send data (txn_id,opcode,cmd,addr,len) = (" << txn_id << ","
-
- 983 << trans.get_command() << ",0x" << std::hex << trans.get_address() << ","
- 984 << trans.get_data_length() << "), beat=" << output_beat_cnt << "/" << exp_beat_cnt;
- 985 if(output_beat_cnt < exp_beat_cnt)
- 986 phase = chi::BEGIN_PARTIAL_DATA;
-
- 988 phase = chi::BEGIN_DATA;
- 989 send_packet(phase, trans, txs);
- 990 if(output_beat_cnt == exp_beat_cnt) {
- 991 wait(clk_i.posedge_event());
-
-
-
- 996 if(input_beat_cnt < exp_beat_cnt && txs->peq.has_next()) {
-
-
- 999 auto entry = txs->peq.get();
- 1000 sc_assert(std::get<0>(entry) == &trans);
- 1001 phase = std::get<1>(entry);
-
- 1003 if(phase == chi::BEGIN_PARTIAL_DATA || phase == chi::BEGIN_DATA) {
-
- 1005 sc_assert(data_ext);
-
- 1007 SCCDEBUG(SCMOD) << "Atomic received data (txn_id,opcode,cmd,addr,len)=(" << txn_id << ","
- 1008 << to_char(data_ext->dat.get_opcode()) << "," << trans.get_command() << ",0x"
- 1009 << std::hex << trans.get_address() << "," << trans.get_data_length()
- 1010 << "), beat=" << input_beat_cnt << "/" << exp_beat_cnt;
- 1011 if(phase == chi::BEGIN_PARTIAL_DATA)
- 1012 phase = chi::END_PARTIAL_DATA;
-
- 1014 phase = chi::END_DATA;
- 1015 delay = clk_if ? clk_if->period() - 1_ps : SC_ZERO_TIME;
- 1016 socket_fw->nb_transport_fw(trans, phase, delay);
- 1017 if(phase == chi::END_DATA) {
-
- 1019 if(input_beat_cnt != exp_beat_cnt)
- 1020 SCCERR(SCMOD) << "Wrong beat count, expected " << exp_beat_cnt << ", got " << input_beat_cnt;
-
-
- 1023 SCCERR(SCMOD) << "Illegal protocol state: " << phase;
-
- 1025 } else if(output_beat_cnt == exp_beat_cnt)
- 1026 wait(txs->peq.event());
-
-
-
-
- 1031 SCCTRACE(SCMOD) << "got transport req";
-
-
- 1034 socket_fw->b_transport(trans, t);
-
-
-
- 1038 convert_axi4ace_to_chi(trans, name(), use_legacy_mapping.value);
-
- 1040 sc_assert(req_ext != nullptr);
-
- 1042 req_ext->set_src_id(src_id.value);
- 1043 req_ext->req.set_tgt_id(tgt_id.value);
- 1044 req_ext->req.set_max_flit(calculate_beats(trans) - 1);
-
- 1046 auto it = tx_state_by_trans.find(to_id(trans));
- 1047 if(it == tx_state_by_trans.end()) {
- 1048 if(!tx_state_pool.size())
-
-
- 1051 std::tie(it, success) = tx_state_by_trans.insert({to_id(trans), tx_state_pool.back()});
- 1052 tx_state_pool.pop_back();
-
- 1054 auto& txs = it->second;
- 1055 auto const txn_id = req_ext->get_txn_id();
- 1056 if(chi::is_request_order(req_ext)) {
-
-
- 1059 if(strict_income_order.value) strict_order_sem.wait();
- 1060 sem_lock txnlck(active_tx_by_id[txn_id]);
- 1061 if(strict_income_order.value) strict_order_sem.post();
- 1062 setExpCompAck(req_ext);
- 1064 auto timing_e = trans.get_extension<atp::timing_params>();
- 1065 if(timing_e != nullptr) {
- 1066 auto delay_in_cycles = trans.is_read() ? timing_e->artv : timing_e->awtv;
- 1067 auto current_count = get_clk_cnt();
- 1068 if(current_count - m_prev_clk_cnt < delay_in_cycles) {
- 1069 unsigned delta_cycles = delay_in_cycles - (current_count - m_prev_clk_cnt);
- 1070 while(delta_cycles) {
-
- 1072 wait(clk_i.posedge_event());
-
-
-
-
-
-
-
-
-
- 1082 SCCTRACE(SCMOD) << "starting transaction with txn_id=" << txn_id;
- 1083 m_prev_clk_cnt = get_clk_cnt();
- 1084 tlm::tlm_phase phase = tlm::BEGIN_REQ;
- 1085 sc_core::sc_time delay;
- 1086 SCCTRACE(SCMOD) << "Send REQ, addr: 0x" << std::hex << trans.get_address() << ", TxnID: 0x" << std::hex
-
- 1088 tlm::tlm_sync_enum ret = socket_fw->nb_transport_fw(trans, phase, delay);
- 1089 if(ret == tlm::TLM_UPDATED) {
- 1090 sc_assert(phase == tlm::END_REQ);
-
-
- 1093 auto entry = txs->peq.get();
- 1094 sc_assert(std::get<0>(entry) == &trans && std::get<1>(entry) == tlm::END_REQ);
-
-
- 1097 wait(clk_i.posedge_event());
-
- 1099 if(credit_ext->type == credit_type_e::REQ) {
- 1100 SCCTRACEALL(SCMOD) << "Received " << credit_ext->count << " req "
- 1101 << (credit_ext->count == 1 ? "credit" : "credits");
- 1102 for(auto i = 0U; i < credit_ext->count; ++i)
-
-
-
-
-
-
- 1109 if((req_optype_e::AtomicLoadAdd <= req_ext->req.get_opcode()) &&
- 1110 (req_ext->req.get_opcode() <= req_optype_e::AtomicCompare))
- 1111 exec_atomic_protocol(txn_id, trans, txs);
-
- 1113 exec_read_write_protocol(txn_id, trans, txs);
- 1114 bool is_atomic = req_ext->req.get_opcode() >= req_optype_e::AtomicStoreAdd &&
- 1115 req_ext->req.get_opcode() <= req_optype_e::AtomicCompare;
- 1116 bool compack_allowed = true;
- 1117 switch(req_ext->req.get_opcode()) {
- 1118 case req_optype_e::WriteUniqueFullStash:
- 1119 case req_optype_e::WriteUniquePtlStash:
- 1120 case req_optype_e::StashOnceShared:
- 1121 case req_optype_e::StashOnceUnique:
- 1122 case req_optype_e::WriteBackPtl:
- 1123 case req_optype_e::WriteBackFull:
- 1124 case req_optype_e::WriteCleanFull:
- 1125 case req_optype_e::WriteCleanPtl:
- 1126 case req_optype_e::CleanSharedPersistSep:
- 1127 case req_optype_e::WriteEvictFull:
- 1128 case req_optype_e::WriteUniqueZero:
- 1129 case req_optype_e::WriteNoSnpZero:
- 1130 case req_optype_e::StashOnceSepShared:
- 1131 case req_optype_e::StashOnceSepUnique:
- 1132 case req_optype_e::WriteBackFullCleanSh:
- 1133 case req_optype_e::WriteBackFullCleanInv:
- 1134 case req_optype_e::WriteBackFullCleanShPerSep:
- 1135 case req_optype_e::WriteCleanFullCleanSh :
- 1136 case req_optype_e::WriteCleanFullCleanShPerSep:
- 1137 compack_allowed = false;
-
-
-
-
- 1142 if(!is_atomic && compack_allowed && req_ext->req.is_exp_comp_ack())
- 1143 send_comp_ack(trans, txs);
-
-
- 1146 trans.set_response_status(tlm::TLM_OK_RESPONSE);
- 1147 wait(clk_i.posedge_event());
- 1148 tx_state_pool.push_back(it->second);
- 1149 tx_state_pool.back()->peq.clear();
- 1150 tx_state_by_trans.erase(it);
- 1151 SCCTRACE(SCMOD) << "finished non-blocking protocol";
- 1152 any_tx_finished.notify(SC_ZERO_TIME);
-
-
-
-
- 1157 void chi::pe::chi_rn_initiator_b::handle_snoop_response(payload_type& trans,
-
-
- 1160 tlm::tlm_phase phase;
-
-
-
-
- 1165 sc_assert(snp_ext != nullptr);
-
- 1167 snp_ext->set_src_id(src_id.value);
- 1168 snp_ext->resp.set_tgt_id(snp_ext->get_src_id());
- 1169 snp_ext->resp.set_db_id(snp_ext->get_txn_id());
-
- 1171 phase = tlm::BEGIN_RESP;
- 1172 delay = SC_ZERO_TIME;
-
- 1174 snp_ext->resp.get_data_pull() ? 0b11U : 0b10U;
-
-
- 1177 auto ret = socket_fw->nb_transport_fw(trans, phase, delay);
- 1178 if(ret == tlm::TLM_UPDATED) {
- 1179 sc_assert(phase == tlm::END_RESP);
-
-
-
- 1183 wait(clk_i.posedge_event());
-
- 1185 if(snp_ext->resp.get_data_pull() && trans.get_data_length() < 64) {
- 1186 delete[] trans.get_data_ptr();
- 1187 trans.set_data_ptr(new uint8_t[64]);
- 1188 trans.set_data_length(64);
-
- 1190 auto exp_beat_cnt = calculate_beats(trans);
-
-
-
- 1194 auto entry = txs->peq.get();
- 1195 sc_assert(std::get<0>(entry) == &trans);
- 1196 auto phase = std::get<1>(entry);
- 1197 if(phase == tlm::END_RESP) {
-
- 1199 } else if(snp_ext->resp.get_data_pull() && (phase == chi::BEGIN_PARTIAL_DATA || phase == chi::BEGIN_DATA)) {
- 1200 SCCTRACE(SCMOD) << "RDAT packet received with phase " << phase << ". Beat count: " << beat_cnt
- 1201 << ", addr: 0x" << std::hex << trans.get_address();
-
- 1203 if(phase == chi::BEGIN_PARTIAL_DATA)
- 1204 phase = chi::END_PARTIAL_DATA;
-
- 1206 phase = chi::END_DATA;
- 1207 delay = clk_if ? clk_if->period() - 1_ps : SC_ZERO_TIME;
- 1208 socket_fw->nb_transport_fw(trans, phase, delay);
-
- 1210 if(phase == chi::END_DATA) {
-
- 1212 if(beat_cnt != exp_beat_cnt)
- 1213 SCCERR(SCMOD) << "Wrong beat count, expected " << exp_beat_cnt << ", got " << beat_cnt;
- 1214 if(bw_o.get_interface())
- 1215 bw_o->transport(trans);
-
-
-
- 1219 SCCFATAL(SCMOD) << "Illegal protocol state (maybe just not implemented?)";
-
-
- 1222 wait(clk_i.posedge_event());
- 1223 if(snp_ext->resp.get_data_pull())
- 1224 send_comp_ack(trans, txs);
-
- 1226 ext->set_src_id(src_id.value);
- 1227 send_wdata(trans, txs);
-
-
-
-
- 1232 void chi::pe::chi_rn_initiator_b::snoop_dispatch() {
- 1233 sc_core::sc_spawn_options opts;
- 1234 opts.set_stack_size(0x10000);
- 1235 payload_type* trans{nullptr};
-
- 1237 while(!(trans = snp_peq.get_next_transaction())) {
- 1238 wait(snp_peq.get_event());
-
- 1240 if(thread_avail == 0 && thread_active < 32) {
-
-
- 1243 payload_type* trans{nullptr};
-
-
-
- 1247 while(!(trans = snp_dispatch_que.get_next_transaction()))
- 1248 wait(snp_dispatch_que.get_event());
- 1249 sc_assert(thread_avail > 0);
-
- 1251 this->snoop_handler(trans);
-
-
-
-
-
- 1257 snp_dispatch_que.notify(*trans);
-
-
-
- 1261 void chi::pe::chi_rn_initiator_b::snoop_handler(payload_type* trans) {
-
- 1263 sc_assert(req_ext != nullptr);
- 1264 auto const txn_id = req_ext->get_txn_id();
-
- 1266 SCCDEBUG(SCMOD) << "Received SNOOP request: (src_id, txn_id, opcode, command, address) = " << req_ext->get_src_id()
- 1267 << ", " << txn_id << ", " << to_char(req_ext->req.get_opcode()) << ", "
- 1268 << (trans->is_read() ? "READ" : "WRITE") << ", " << std::hex << trans->get_address() << ")";
-
- 1270 auto it = tx_state_by_trans.find(to_id(trans));
- 1271 if(it == tx_state_by_trans.end()) {
- 1272 if(!tx_state_pool.size())
- 1273 tx_state_pool.push_back(new tx_state(util::strprintf("peq_%d", ++peq_cnt)));
-
- 1275 std::tie(it, success) = tx_state_by_trans.insert({to_id(trans), tx_state_pool.back()});
- 1276 tx_state_pool.pop_back();
-
- 1278 auto* txs = it->second;
-
- 1280 sc_time delay = clk_if ? clk_if->period() - 1_ps : SC_ZERO_TIME;
- 1281 tlm::tlm_phase phase = tlm::END_REQ;
- 1282 socket_fw->nb_transport_fw(*trans, phase, delay);
-
- 1284 if(bw_o.get_interface())
- 1285 cycles = bw_o->transport(*trans);
- 1286 if(cycles < std::numeric_limits<unsigned>::max()) {
-
- 1288 for(size_t i = 0; i < cycles + 1; ++i)
- 1289 wait(clk_i.posedge_event());
- 1290 handle_snoop_response(*trans, txs);
-
- 1292 tx_state_pool.push_back(it->second);
- 1293 tx_state_pool.back()->peq.clear();
- 1294 tx_state_by_trans.erase(to_id(trans));
-
-
-
-
-void transport(payload_type &trans, bool blocking) override
The forward transport function. It is blocking and re-entrant.
-void snoop_resp(payload_type &trans, bool sync=false) override
triggers a non-blocking snoop response if the snoop callback does not do so.
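As a rough, illustrative sketch (not part of this listing): the two entries above are the caller-facing API of the protocol engine, and a testbench thread could exercise transport() along the lines shown below. The header path, the rn_driver module, and the payload setup are assumptions; in practice the payload also needs the appropriate AXI/ACE or CHI request extension attached before it is handed to the engine.
#include <systemc>
#include <tlm>
#include <chi/pe/chi_rn_initiator.h> // assumed header location of chi_rn_initiator_b
// Hypothetical driver module exercising the blocking, re-entrant transport() call.
SC_MODULE(rn_driver) {
    chi::pe::chi_rn_initiator_b* pe{nullptr}; // assumed to be set up and bound elsewhere
    SC_CTOR(rn_driver) { SC_THREAD(run); }
    void run() {
        unsigned char data[64] = {};
        tlm::tlm_generic_payload trans; // extension setup omitted in this sketch
        trans.set_command(tlm::TLM_READ_COMMAND);
        trans.set_address(0x1000);
        trans.set_data_ptr(data);
        trans.set_data_length(64);
        trans.set_streaming_width(64);
        // transport() blocks this SC_THREAD until the transaction completes and may be
        // called concurrently from several threads; blocking=false selects the phased path.
        pe->transport(trans, false);
    }
};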
+ 890 send_cresp_response(trans);
+
+
+ 893 SCCFATAL(SCMOD) << "Illegal opcode received: " << to_char(ctrl_ext->resp.get_opcode());
+
+ 895 } else if(trans.is_read()) {
+ 896 not_finish &= ~WAIT_CTRL;
+ 897 send_cresp_response(trans);
+
+ 899 } else if(trans.is_read() && (phase == chi::BEGIN_PARTIAL_DATA || phase == chi::BEGIN_DATA)) {
+ 900 SCCTRACE(SCMOD) << "RDAT flit received. Beat count: " << beat_cnt << ", addr: 0x" << std::hex
+ 901 << trans.get_address();
+ 902 if(phase == chi::BEGIN_PARTIAL_DATA)
+ 903 phase = chi::END_PARTIAL_DATA;
+
+ 905 phase = chi::END_DATA;
+ 906 delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
+ 907 socket_fw->nb_transport_fw(trans, phase, delay);
+
+ 909 if(phase == chi::END_DATA) {
+ 910 not_finish &= ~(WAIT_CTRL | WAIT_DATA);
+ 911 if(beat_cnt != exp_beat_cnt)
+ 912 SCCERR(SCMOD) << "Wrong beat count, expected " << exp_beat_cnt << ", got " << beat_cnt;
+
+
+ 915 SCCFATAL(SCMOD) << "Illegal protocol state (maybe just not implemented?)";
+
+
+
+
+ 920 void chi::pe::chi_rn_initiator_b::send_cresp_response(payload_type& trans) {
+
+ 922 sc_assert(resp_ext != nullptr);
+ 923 if(is_request_order(resp_ext))
+
+ 925 auto id = (unsigned)(resp_ext->get_txn_id());
+ 926 SCCDEBUG(SCMOD) << "got cresp: src_id=" << (unsigned)resp_ext->get_src_id()
+ 927 << ", tgt_id=" << (unsigned)resp_ext->resp.get_tgt_id()
+ 928 << ", txnid=0x" << std::hex << id << ", " << to_char(resp_ext->resp.get_opcode())
+ 929 << ", resp=" << to_char(resp_ext->resp.get_resp())
+ 930 << ", db_id=" << (unsigned)resp_ext->resp.get_db_id() << ", addr=0x" << std::hex
+ 931 << trans.get_address() << ")";
+ 932 tlm::tlm_phase phase = tlm::END_RESP;
+ 933 sc_core::sc_time delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
+ 934 socket_fw->nb_transport_fw(trans, phase, delay);
+ 935 wait(clk_i.posedge_event());
+
+
+ 938 void chi::pe::chi_rn_initiator_b::exec_atomic_protocol(const unsigned int txn_id, payload_type& trans,
+
+ 940 sc_core::sc_time delay;
+
+ 942 auto entry = txs->peq.get();
+ 943 sc_assert(std::get<0>(entry) == &trans);
+ 944 auto phase = std::get<1>(entry);
+ 945 if(phase == tlm::BEGIN_RESP) {
+ 946 send_cresp_response(trans);
+
+ 948 if(resp_ext->resp.get_opcode() == chi::rsp_optype_e::DBIDResp) {
+ 949 SCCERR(SCMOD) << "CRESP illegal response opcode: " << to_char(resp_ext->resp.get_opcode());
+
+
+ 952 SCCERR(SCMOD) << "Illegal protocol state (maybe just not implemented?) " << phase;
+
+
+ 955 auto not_finish = 0b11U;
+ 956 auto exp_beat_cnt = calculate_beats(trans);
+ 957 auto input_beat_cnt = 0U;
+ 958 auto output_beat_cnt = 0U;
+
+
+
+ 964 if(output_beat_cnt < exp_beat_cnt) {
+
+
+ 967 update_data_extension(data_ext, trans);
+
+ 969 create_data_ext(trans);
+
+
+ 972 SCCDEBUG(SCMOD) << "Atomic send data (txn_id,opcode,cmd,addr,len) = (" << txn_id << ","
+
+ 974 << trans.get_command() << ",0x" << std::hex << trans.get_address() << ","
+ 975 << trans.get_data_length() << "), beat=" << output_beat_cnt << "/" << exp_beat_cnt;
+ 976 if(output_beat_cnt < exp_beat_cnt)
+ 977 phase = chi::BEGIN_PARTIAL_DATA;
+
+ 979 phase = chi::BEGIN_DATA;
+ 980 send_packet(phase, trans, txs);
+ 981 if(output_beat_cnt == exp_beat_cnt) {
+ 982 wait(clk_i.posedge_event());
+
+
+
+ 987 if(input_beat_cnt < exp_beat_cnt && txs->peq.has_next()) {
+
+
+ 990 auto entry = txs->peq.get();
+ 991 sc_assert(std::get<0>(entry) == &trans);
+ 992 phase = std::get<1>(entry);
+
+ 994 if(phase == chi::BEGIN_PARTIAL_DATA || phase == chi::BEGIN_DATA) {
+
+
+
+ 998 SCCDEBUG(SCMOD) << "Atomic received data (txn_id,opcode,cmd,addr,len)=(" << txn_id << ","
+ 999 << to_char(data_ext->dat.get_opcode()) << "," << trans.get_command() << ",0x"
+ 1000 << std::hex << trans.get_address() << "," << trans.get_data_length()
+ 1001 << "), beat=" << input_beat_cnt << "/" << exp_beat_cnt;
+ 1002 if(phase == chi::BEGIN_PARTIAL_DATA)
+ 1003 phase = chi::END_PARTIAL_DATA;
+
+ 1005 phase = chi::END_DATA;
+ 1006 delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
+ 1007 socket_fw->nb_transport_fw(trans, phase, delay);
+ 1008 if(phase == chi::END_DATA) {
+
+ 1010 if(input_beat_cnt != exp_beat_cnt)
+ 1011 SCCERR(SCMOD) << "Wrong beat count, expected " << exp_beat_cnt << ", got " << input_beat_cnt;
+
+
+ 1014 SCCERR(SCMOD) << "Illegal protocol state: " << phase;
+
+ 1016 } else if(output_beat_cnt == exp_beat_cnt)
+ 1017 wait(txs->peq.event());
+
+
+
+
+ 1022 SCCTRACE(SCMOD) << "got transport req";
+
+
+ 1025 socket_fw->b_transport(trans, t);
+
+
+
+ 1029 convert_axi4ace_to_chi(trans, name(), use_legacy_mapping.get_value());
+
+ 1031 sc_assert(req_ext != nullptr);
+
+ 1033 req_ext->set_src_id(src_id.get_value());
+ 1034 req_ext->req.set_tgt_id(tgt_id.get_value());
+ 1035 req_ext->req.set_max_flit(calculate_beats(trans) - 1);
+
+ 1037 auto it = tx_state_by_trans.find(to_id(trans));
+ 1038 if(it == tx_state_by_trans.end()) {
+ 1039 if(!tx_state_pool.size())
+
+
+ 1042 std::tie(it, success) = tx_state_by_trans.insert({to_id(trans), tx_state_pool.back()});
+ 1043 tx_state_pool.pop_back();
+
+ 1045 auto& txs = it->second;
+ 1046 auto const txn_id = req_ext->get_txn_id();
+ 1047 if(chi::is_request_order(req_ext)) {
+
+
+ 1050 if(strict_income_order.get_value()) strict_order_sem.wait();
+ 1051 sem_lock txnlck(active_tx_by_id[txn_id]);
+
+
+ 1054 if(strict_income_order.get_value()) strict_order_sem.post();
+ 1055 setExpCompAck(req_ext);
+ 1057 auto timing_e = trans.get_extension<atp::timing_params>();
+ 1058 if(timing_e != nullptr) {
+ 1059 auto delay_in_cycles = trans.is_read() ? timing_e->artv : timing_e->awtv;
+ 1060 auto current_count = get_clk_cnt();
+ 1061 if(current_count - m_prev_clk_cnt < delay_in_cycles) {
+ 1062 unsigned delta_cycles = delay_in_cycles - (current_count - m_prev_clk_cnt);
+ 1063 while(delta_cycles) {
+
+ 1065 wait(clk_i.posedge_event());
+
+
+
+
+
+
+
+
+
+ 1075 SCCTRACE(SCMOD) << "starting transaction with txn_id=" << txn_id;
+ 1076 m_prev_clk_cnt = get_clk_cnt();
+ 1077 tlm::tlm_phase phase = tlm::BEGIN_REQ;
+ 1078 sc_core::sc_time delay;
+ 1079 SCCTRACE(SCMOD) << "Send REQ, addr: 0x" << std::hex << trans.get_address() << ", TxnID: 0x" << std::hex
+
+ 1081 tlm::tlm_sync_enum ret = socket_fw->nb_transport_fw(trans, phase, delay);
+ 1082 if(ret == tlm::TLM_UPDATED) {
+ 1083 sc_assert(phase == tlm::END_REQ);
+
+
+ 1086 auto entry = txs->peq.get();
+ 1087 sc_assert(std::get<0>(entry) == &trans && std::get<1>(entry) == tlm::END_REQ);
+
+
+ 1090 wait(clk_i.posedge_event());
+
+ 1092 if(credit_ext->type == credit_type_e::REQ) {
+ 1093 SCCTRACEALL(SCMOD) << "Received " << credit_ext->count << " req "
+ 1094 << (credit_ext->count == 1 ? "credit" : "credits");
+ 1095 for(auto i = 0U; i < credit_ext->count; ++i)
+
+
+
+
+
+
+ 1102 if((req_optype_e::AtomicLoadAdd <= req_ext->req.get_opcode()) &&
+ 1103 (req_ext->req.get_opcode() <= req_optype_e::AtomicCompare))
+ 1104 exec_atomic_protocol(txn_id, trans, txs);
+
+ 1106 exec_read_write_protocol(txn_id, trans, txs);
+ 1107 bool is_atomic = req_ext->req.get_opcode() >= req_optype_e::AtomicStoreAdd &&
+ 1108 req_ext->req.get_opcode() <= req_optype_e::AtomicCompare;
+ 1109 bool compack_allowed = true;
+ 1110 switch(req_ext->req.get_opcode()) {
+ 1111 case req_optype_e::WriteUniqueFullStash:
+ 1112 case req_optype_e::WriteUniquePtlStash:
+ 1113 case req_optype_e::StashOnceShared:
+ 1114 case req_optype_e::StashOnceUnique:
+ 1115 case req_optype_e::WriteBackPtl:
+ 1116 case req_optype_e::WriteBackFull:
+ 1117 case req_optype_e::WriteCleanFull:
+ 1118 case req_optype_e::WriteCleanPtl:
+ 1119 case req_optype_e::CleanSharedPersistSep:
+ 1120 case req_optype_e::WriteEvictFull:
+ 1121 case req_optype_e::WriteUniqueZero:
+ 1122 case req_optype_e::WriteNoSnpZero:
+ 1123 case req_optype_e::StashOnceSepShared:
+ 1124 case req_optype_e::StashOnceSepUnique:
+ 1125 case req_optype_e::WriteBackFullCleanSh:
+ 1126 case req_optype_e::WriteBackFullCleanInv:
+ 1127 case req_optype_e::WriteBackFullCleanShPerSep:
+ 1128 case req_optype_e::WriteCleanFullCleanSh :
+ 1129 case req_optype_e::WriteCleanFullCleanShPerSep:
+ 1130 compack_allowed = false;
+
+
+
+
+ 1135 if(!is_atomic && compack_allowed && req_ext->req.is_exp_comp_ack())
+ 1136 send_comp_ack(trans, txs);
+
+
+ 1139 trans.set_response_status(tlm::TLM_OK_RESPONSE);
+ 1140 wait(clk_i.posedge_event());
+ 1141 tx_state_pool.push_back(it->second);
+ 1142 tx_state_pool.back()->peq.clear();
+ 1143 tx_state_by_trans.erase(it);
+ 1144 SCCTRACE(SCMOD) << "finished non-blocking protocol";
+ 1145 any_tx_finished.notify(SC_ZERO_TIME);
+
+
+
+
+ 1150 void chi::pe::chi_rn_initiator_b::handle_snoop_response(payload_type& trans,
+
+
+ 1153 tlm::tlm_phase phase;
+
+
+
+
+ 1158 sc_assert(snp_ext != nullptr);
+
+ 1160 snp_ext->set_src_id(src_id.get_value());
+ 1161 snp_ext->resp.set_tgt_id(snp_ext->get_src_id());
+ 1162 snp_ext->resp.set_db_id(snp_ext->get_txn_id());
+
+ 1164 phase = tlm::BEGIN_RESP;
+ 1165 delay = SC_ZERO_TIME;
+
+ 1167 snp_ext->resp.get_data_pull() ? 0b11U : 0b10U;
+
+
+ 1170 auto ret = socket_fw->nb_transport_fw(trans, phase, delay);
+ 1171 if(ret == tlm::TLM_UPDATED) {
+ 1172 sc_assert(phase == tlm::END_RESP);
+
+
+
+ 1176 wait(clk_i.posedge_event());
+
+ 1178 if(snp_ext->resp.get_data_pull() && trans.get_data_length() < 64) {
+ 1179 delete[] trans.get_data_ptr();
+ 1180 trans.set_data_ptr(new uint8_t[64]);
+ 1181 trans.set_data_length(64);
+
+ 1183 auto exp_beat_cnt = calculate_beats(trans);
+
+
+
+ 1187 auto entry = txs->peq.get();
+ 1188 sc_assert(std::get<0>(entry) == &trans);
+ 1189 auto phase = std::get<1>(entry);
+ 1190 if(phase == tlm::END_RESP) {
+
+ 1192 } else if(snp_ext->resp.get_data_pull() && (phase == chi::BEGIN_PARTIAL_DATA || phase == chi::BEGIN_DATA)) {
+ 1193 SCCTRACE(SCMOD) << "RDAT packet received with phase " << phase << ". Beat count: " << beat_cnt
+ 1194 << ", addr: 0x" << std::hex << trans.get_address();
+
+ 1196 if(phase == chi::BEGIN_PARTIAL_DATA)
+ 1197 phase = chi::END_PARTIAL_DATA;
+
+ 1199 phase = chi::END_DATA;
+ 1200 delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
+ 1201 socket_fw->nb_transport_fw(trans, phase, delay);
+
+ 1203 if(phase == chi::END_DATA) {
+
+ 1205 if(beat_cnt != exp_beat_cnt)
+ 1206 SCCERR(SCMOD) << "Wrong beat count, expected " << exp_beat_cnt << ", got " << beat_cnt;
+ 1207 if(bw_o.get_interface())
+ 1208 bw_o->transport(trans);
+
+
+
+ 1212 SCCFATAL(SCMOD) << "Illegal protocol state (maybe just not implemented?)";
+
+
+ 1215 wait(clk_i.posedge_event());
+ 1216 if(snp_ext->resp.get_data_pull())
+ 1217 send_comp_ack(trans, txs);
+
+ 1219 ext->set_src_id(src_id.get_value());
+ 1220 send_wdata(trans, txs);
+
+
+
+
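handle_snoop_response() widens the payload buffer to a full 64-byte cache line when DataPull is set and then counts incoming DAT beats against the value returned by calculate_beats(). A minimal sketch of how such an expected beat count can be derived, assuming a hypothetical bus width in bytes; the real helper lives elsewhere in this translation unit and may differ:

#include <cassert>

constexpr unsigned CACHELINE_SZ = 64; // bytes per cache line, as used in the listing

// Hypothetical helper: number of DAT beats needed to transfer data_length bytes
// over a data channel of bus_width_bytes, rounding up for a partial final beat.
inline unsigned calculate_beats(unsigned data_length, unsigned bus_width_bytes) {
    return (data_length + bus_width_bytes - 1) / bus_width_bytes;
}

int main() {
    // A pulled full cache line over a 32-byte (256-bit) data channel needs 2 beats.
    assert(calculate_beats(CACHELINE_SZ, 32) == 2);
    // A 16-byte partial transfer over the same channel still occupies 1 beat.
    assert(calculate_beats(16, 32) == 1);
    return 0;
}

The error branch at line 1206 of the listing fires exactly when the number of END_DATA handshakes seen does not reach this expected count.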
+ 1225 void chi::pe::chi_rn_initiator_b::snoop_dispatch() {
+ 1226 sc_core::sc_spawn_options opts;
+ 1227 opts.set_stack_size(0x10000);
+ 1228 payload_type* trans{nullptr};
+
+ 1230 while(!(trans = snp_peq.get_next_transaction())) {
+ 1231 wait(snp_peq.get_event());
+
+ 1233 if(thread_avail == 0 && thread_active < 32) {
+
+
+ 1236 payload_type* trans{nullptr};
+
+
+
+ 1240 while(!(trans = snp_dispatch_que.get_next_transaction()))
+ 1241 wait(snp_dispatch_que.get_event());
+ 1242 sc_assert(thread_avail > 0);
+
+ 1244 this->snoop_handler(trans);
+
+
+
+
+
+ 1250 snp_dispatch_que.notify(*trans);
+
+
+
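snoop_dispatch() keeps a grow-on-demand pool of handler threads: an incoming snoop is queued on snp_dispatch_que, and only when no spawned thread is idle and fewer than 32 exist is a new one spawned via sc_spawn to drain the queue. The same idea, sketched here with plain std::thread primitives purely as an analogy (this is not how the SystemC kernel schedules the spawned processes, and all names below are illustrative):

#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

struct dispatch_queue {
    static constexpr unsigned MAX_THREADS = 32; // cap taken from the listing

    void dispatch(std::function<void()> job) {
        std::unique_lock<std::mutex> lk(mtx);
        jobs.push(std::move(job));
        // mirror "if(thread_avail == 0 && thread_active < 32)": grow only when nobody is idle
        if(idle == 0 && workers.size() < MAX_THREADS)
            workers.emplace_back([this] { worker(); });
        cv.notify_one();
    }

    ~dispatch_queue() {
        {
            std::lock_guard<std::mutex> lk(mtx);
            stop = true;
        }
        cv.notify_all();
        for(auto& t : workers)
            t.join();
    }

private:
    void worker() {
        std::unique_lock<std::mutex> lk(mtx);
        while(true) {
            ++idle; // corresponds to incrementing thread_avail before blocking on the queue
            cv.wait(lk, [this] { return stop || !jobs.empty(); });
            --idle;
            if(stop && jobs.empty())
                return;
            auto job = std::move(jobs.front());
            jobs.pop();
            lk.unlock();
            job(); // corresponds to this->snoop_handler(trans)
            lk.lock();
        }
    }

    std::mutex mtx;
    std::condition_variable cv;
    std::queue<std::function<void()>> jobs;
    std::vector<std::thread> workers;
    unsigned idle{0};
    bool stop{false};
};

int main() {
    dispatch_queue q;
    for(int i = 0; i < 4; ++i)
        q.dispatch([i] { std::cout << "handling snoop " << i << "\n"; });
} // destructor drains remaining jobs and joins the workers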
+ 1254 void chi::pe::chi_rn_initiator_b::snoop_handler(payload_type* trans) {
+
+ 1256 sc_assert(req_ext != nullptr);
+ 1257 auto const txn_id = req_ext->get_txn_id();
+
+ 1259 SCCDEBUG(SCMOD) << "Received SNOOP request: (src_id, txn_id, opcode, command, address) = " << req_ext->get_src_id()
+ 1260 << ", " << txn_id << ", " << to_char(req_ext->req.get_opcode()) << ", "
+ 1261 << (trans->is_read() ? "READ" : "WRITE") << ", " << std::hex << trans->get_address() << ")";
+
+ 1263 auto it = tx_state_by_trans.find(to_id(trans));
+ 1264 if(it == tx_state_by_trans.end()) {
+ 1265 if(!tx_state_pool.size())
+ 1266 tx_state_pool.push_back(new tx_state(util::strprintf("peq_%d", ++peq_cnt)));
+
+ 1268 std::tie(it, success) = tx_state_by_trans.insert({to_id(trans), tx_state_pool.back()});
+ 1269 tx_state_pool.pop_back();
+
+ 1271 auto* txs = it->second;
+
+ 1273 sc_time delay = clk_if ? ::scc::time_to_next_posedge(clk_if) - 1_ps : SC_ZERO_TIME;
+ 1274 tlm::tlm_phase phase = tlm::END_REQ;
+ 1275 socket_fw->nb_transport_fw(*trans, phase, delay);
+
+ 1277 if(bw_o.get_interface())
+ 1278 cycles = bw_o->transport(*trans);
+ 1279 if(cycles < std::numeric_limits<unsigned>::max()) {
+
+ 1281 for(size_t i = 0; i < cycles + 1; ++i)
+ 1282 wait(clk_i.posedge_event());
+ 1283 handle_snoop_response(*trans, txs);
+
+ 1285 tx_state_pool.push_back(it->second);
+ 1286 tx_state_pool.back()->peq.clear();
+ 1287 tx_state_by_trans.erase(to_id(trans));
+
+
+
+
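snoop_handler() acknowledges the AC request with an END_REQ whose annotated delay is time_to_next_posedge(clk_if) - 1_ps, i.e. 1 ps before the next rising clock edge, and then waits cycles + 1 edges, where cycles comes from the registered backward interface. A small arithmetic sketch of that edge alignment, assuming an ideal zero-phase clock with a fixed period instead of a real clock interface:

#include <cassert>
#include <cstdint>

// All times in picoseconds; assumes a hypothetical 1 GHz clock with phase 0.
constexpr uint64_t clk_period_ps = 1000;

// Time remaining until the next rising edge, seen from 'now'.
inline uint64_t time_to_next_posedge_ps(uint64_t now_ps) {
    uint64_t into_cycle = now_ps % clk_period_ps;
    return into_cycle == 0 ? clk_period_ps : clk_period_ps - into_cycle;
}

int main() {
    // A snoop arriving 250 ps into a cycle is acknowledged at 3999 ps, 1 ps before the
    // edge at 4000 ps, mirroring 'delay = time_to_next_posedge(clk_if) - 1_ps' in the listing.
    uint64_t now = 3250;
    uint64_t end_req_time = now + time_to_next_posedge_ps(now) - 1;
    assert(end_req_time == 3999);
    return 0;
}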
+void transport(payload_type &trans, bool blocking) override
The forward transport function. It behaves blocking and is re-entrant.
+void snoop_resp(payload_type &trans, bool sync=false) override
triggers a non-blocking snoop response if the snoop callback does not do so.
@ MEMORY_BARRIER
Normal access, respecting barriers.
const char * to_char(E t)
TLM2.0 components modeling CHI.
@@ -1359,7 +1352,7 @@
unsigned int get_txn_id() const
-
+
sc_core::sc_event & event()
get the available event
diff --git a/develop/chi__rn__initiator_8h_source.html b/develop/chi__rn__initiator_8h_source.html
index 73e3e412..46b22385 100644
--- a/develop/chi__rn__initiator_8h_source.html
+++ b/develop/chi__rn__initiator_8h_source.html
@@ -24,7 +24,7 @@