diff --git a/cmake_targets/CMakeLists.txt b/cmake_targets/CMakeLists.txt index 34b868642d966cd58f1a4491169a67cfb0ee5dff..f949af4e6919f594384481508254b67133ce6d82 100644 --- a/cmake_targets/CMakeLists.txt +++ b/cmake_targets/CMakeLists.txt @@ -1365,6 +1365,7 @@ set (MAC_SRC ${MAC_DIR}/eNB_scheduler_bch.c ${MAC_DIR}/eNB_scheduler_primitives.c ${MAC_DIR}/eNB_scheduler_RA.c + ${MAC_DIR}/eNB_scheduler_phytest.c ${MAC_DIR}/pre_processor.c ${MAC_DIR}/config.c ${MAC_DIR}/config_ue.c diff --git a/common/config/libconfig/config_libconfig.c b/common/config/libconfig/config_libconfig.c index a73a0737f25849fba1f59d820cc229f5547a063a..cfb0a214cfac14c337f894d67a5f64427d0f685e 100644 --- a/common/config/libconfig/config_libconfig.c +++ b/common/config/libconfig/config_libconfig.c @@ -130,6 +130,7 @@ int config_libconfig_get(paramdef_t *cfgoptions,int numoptions, char *prefix ) { case TYPE_STRING: +printf("call config_lookup_string for '%s' %p\n", cfgpath, &(libconfig_privdata.cfg)); fflush(stdout); if ( config_lookup_string(&(libconfig_privdata.cfg),cfgpath, (const char **)&str)) { if ( cfgoptions[i].numelt > 0 && str != NULL && strlen(str) >= cfgoptions[i].numelt ) { fprintf(stderr,"[LIBCONFIG] %s: %s exceeds maximum length of %i bytes, value truncated\n", diff --git a/common/utils/itti/intertask_interface.c b/common/utils/itti/intertask_interface.c index 1d38a3692b358d4cd52ef951cf9108cee905d188..21a3304e202b6910ce27145b5c807c72b4552912 100644 --- a/common/utils/itti/intertask_interface.c +++ b/common/utils/itti/intertask_interface.c @@ -72,6 +72,9 @@ const int itti_debug = (ITTI_DEBUG_ISSUES | ITTI_DEBUG_MP_STATISTICS); /* Global message size */ #define MESSAGE_SIZE(mESSAGEiD) (sizeof(MessageHeader) + itti_desc.messages_info[mESSAGEiD].size) + +extern int emulate_rf; + typedef enum task_state_s { TASK_STATE_NOT_CONFIGURED, TASK_STATE_STARTING, TASK_STATE_READY, TASK_STATE_ENDED, TASK_STATE_MAX, } task_state_t; @@ -342,14 +345,16 @@ int itti_send_msg_to_task(task_id_t destination_task_id, instance_t instance, Me destination_task_id, itti_get_task_name(destination_task_id)); } else { - /* We cannot send a message if the task is not running */ - AssertFatal (itti_desc.threads[destination_thread_id].task_state == TASK_STATE_READY, - "Task %s Cannot send message %s (%d) to thread %d, it is not in ready state (%d)!\n", - itti_get_task_name(origin_task_id), - itti_desc.messages_info[message_id].name, - message_id, - destination_thread_id, - itti_desc.threads[destination_thread_id].task_state); + if(!emulate_rf){ + /* We cannot send a message if the task is not running */ + AssertFatal (itti_desc.threads[destination_thread_id].task_state == TASK_STATE_READY, + "Task %s Cannot send message %s (%d) to thread %d, it is not in ready state (%d)!\n", + itti_get_task_name(origin_task_id), + itti_desc.messages_info[message_id].name, + message_id, + destination_thread_id, + itti_desc.threads[destination_thread_id].task_state); + } /* Allocate new list element */ new = (message_list_t *) itti_malloc (origin_task_id, destination_task_id, sizeof(struct message_list_s)); diff --git a/nfapi/oai_integration/nfapi_vnf.c b/nfapi/oai_integration/nfapi_vnf.c index 07c3f1aa91fda58261b36dd8c90df3cbaf12d781..e77aa2b3765abd743c7831b7f4fd5443f569677f 100644 --- a/nfapi/oai_integration/nfapi_vnf.c +++ b/nfapi/oai_integration/nfapi_vnf.c @@ -201,8 +201,8 @@ void oai_create_enb(void) { eNB->CC_id = bodge_counter; eNB->abstraction_flag = 0; eNB->single_thread_flag = 0;//single_thread_flag; - eNB->td = 
ulsch_decoding_data;//(single_thread_flag==1) ? ulsch_decoding_data_2thread : ulsch_decoding_data; - eNB->te = dlsch_encoding;//(single_thread_flag==1) ? dlsch_encoding_2threads : dlsch_encoding; + eNB->td = ulsch_decoding_data_all;//(single_thread_flag==1) ? ulsch_decoding_data_2thread : ulsch_decoding_data; + eNB->te = dlsch_encoding_all;//(single_thread_flag==1) ? dlsch_encoding_2threads : dlsch_encoding; RC.nb_CC[bodge_counter] = 1; diff --git a/openair1/PHY/INIT/lte_init.c b/openair1/PHY/INIT/lte_init.c index ff36b0a5e2f4597de8275ff59bad2a286d1d27ac..474207a407ae1e54db3dc425e46d7de77b58fdff 100644 --- a/openair1/PHY/INIT/lte_init.c +++ b/openair1/PHY/INIT/lte_init.c @@ -738,6 +738,7 @@ void phy_config_cba_rnti (module_id_t Mod_id,int CC_id,eNB_flag_t eNB_flag, uin } } + int phy_init_lte_eNB(PHY_VARS_eNB *eNB, unsigned char is_secondary_eNB, unsigned char abstraction_flag) diff --git a/openair1/PHY/LTE_TRANSPORT/dci.c b/openair1/PHY/LTE_TRANSPORT/dci.c index ded49d1c73a8c53d0574dfdc8f43aa46a3e001e0..71cc8130b2da66552fc4bf12df5e4a514717bb20 100755 --- a/openair1/PHY/LTE_TRANSPORT/dci.c +++ b/openair1/PHY/LTE_TRANSPORT/dci.c @@ -2276,7 +2276,7 @@ uint8_t generate_dci_top(uint8_t num_pdcch_symbols, /* clear all bits, the above code may generate too much false detections * (not sure about this, to be checked somehow) */ - // memset(e, 0, DCI_BITS_MAX); + //memset(e, 0, DCI_BITS_MAX); e_ptr = e; @@ -2298,6 +2298,7 @@ uint8_t generate_dci_top(uint8_t num_pdcch_symbols, #endif if (dci_alloc[i].firstCCE>=0) { +//printf("generate DCI .%d rnti %d length %d\n", subframe, dci_alloc[i].rnti, dci_alloc[i].dci_length); e_ptr = generate_dci0(dci_alloc[i].dci_pdu, e+(72*dci_alloc[i].firstCCE), dci_alloc[i].dci_length, diff --git a/openair1/PHY/LTE_TRANSPORT/dci_tools.c b/openair1/PHY/LTE_TRANSPORT/dci_tools.c index 4b73de6fcc72864fa1a55d7924819dab46cd5e14..f37e7432c5b1882cd4a80093f31340261a8a5716 100644 --- a/openair1/PHY/LTE_TRANSPORT/dci_tools.c +++ b/openair1/PHY/LTE_TRANSPORT/dci_tools.c @@ -2183,6 +2183,7 @@ void fill_dci_and_dlsch(PHY_VARS_eNB *eNB,int frame,int subframe,eNB_rxtx_proc_t #endif +//printf("DCI %d.%d rnti %d harq %d TBS %d\n", frame, subframe, rel8->rnti, rel8->harq_process, dlsch0_harq->TBS); #if T_TRACER if (dlsch0->active) T(T_ENB_PHY_DLSCH_UE_DCI, T_INT(0), T_INT(frame), T_INT(subframe), diff --git a/openair1/PHY/LTE_TRANSPORT/dlsch_coding.c b/openair1/PHY/LTE_TRANSPORT/dlsch_coding.c index e91665e04eb19e23b7544ba94c1aefb2ab3806fc..49db69cff483840ec5eab89e7d1ed36c44558196 100644 --- a/openair1/PHY/LTE_TRANSPORT/dlsch_coding.c +++ b/openair1/PHY/LTE_TRANSPORT/dlsch_coding.c @@ -42,6 +42,7 @@ #include "UTIL/LOG/vcd_signal_dumper.h" #include "UTIL/LOG/log.h" #include <syscall.h> +#include "targets/RT/USER/rt_wrapper.h" //#define DEBUG_DLSCH_CODING //#define DEBUG_DLSCH_FREE 1 @@ -52,7 +53,13 @@ ((pilots==1)&&(first_pilot==0)&&(((re<3))||((re>5)&&(re<9)))) \ */ #define is_not_pilot(pilots,first_pilot,re) (1) +/*extern void thread_top_init(char *thread_name, + int affinity, + uint64_t runtime, + uint64_t deadline, + uint64_t period);*/ +extern int codingw; void free_eNB_dlsch(LTE_eNB_DLSCH_t *dlsch) { @@ -244,11 +251,15 @@ void clean_eNb_dlsch(LTE_eNB_DLSCH_t *dlsch) } + + int dlsch_encoding_2threads0(te_params *tep) { LTE_eNB_DLSCH_t *dlsch = tep->dlsch; unsigned int G = tep->G; unsigned char harq_pid = tep->harq_pid; + unsigned int total_worker = tep->total_worker; + unsigned int current_worker = tep->current_worker; unsigned short iind; @@ -261,7 +272,7 @@ int 
dlsch_encoding_2threads0(te_params *tep) { if (dlsch->harq_processes[harq_pid]->round == 0) { // this is a new packet - for (r=0; r<dlsch->harq_processes[harq_pid]->C>>1; r++) { + for (r=(dlsch->harq_processes[harq_pid]->C/(total_worker+1))*current_worker; r<(dlsch->harq_processes[harq_pid]->C/(total_worker+1))*(current_worker+1); r++) { if (r<dlsch->harq_processes[harq_pid]->Cminus) Kr = dlsch->harq_processes[harq_pid]->Kminus; @@ -304,21 +315,34 @@ int dlsch_encoding_2threads0(te_params *tep) { // Fill in the "e"-sequence from 36-212, V8.6 2009-03, p. 16-17 (for each "e") and concatenate the // outputs for each code segment, see Section 5.1.5 p.20 - for (r=0; r<dlsch->harq_processes[harq_pid]->C>>1; r++) { - r_offset += lte_rate_matching_turbo(dlsch->harq_processes[harq_pid]->RTC[r], - G, //G - dlsch->harq_processes[harq_pid]->w[r], - dlsch->harq_processes[harq_pid]->e+r_offset, - dlsch->harq_processes[harq_pid]->C, // C - dlsch->Nsoft, // Nsoft, - dlsch->Mdlharq, - dlsch->Kmimo, - dlsch->harq_processes[harq_pid]->rvidx, - dlsch->harq_processes[harq_pid]->Qm, - dlsch->harq_processes[harq_pid]->Nl, - r, - nb_rb); - // m); // r + for (r=0,r_offset=0; r<(dlsch->harq_processes[harq_pid]->C/(total_worker+1))*(current_worker+1); r++) { + if(r<(dlsch->harq_processes[harq_pid]->C/(total_worker+1))*(current_worker)){ + int Nl=dlsch->harq_processes[harq_pid]->Nl; + int Qm=dlsch->harq_processes[harq_pid]->Qm; + int C = dlsch->harq_processes[harq_pid]->C; + int Gp = G/Nl/Qm; + int GpmodC = Gp%C; + if (r < (C-(GpmodC))) + r_offset += Nl*Qm * (Gp/C); + else + r_offset += Nl*Qm * ((GpmodC==0?0:1) + (Gp/C)); + } + else{ + r_offset += lte_rate_matching_turbo(dlsch->harq_processes[harq_pid]->RTC[r], + G, //G + dlsch->harq_processes[harq_pid]->w[r], + dlsch->harq_processes[harq_pid]->e+r_offset, + dlsch->harq_processes[harq_pid]->C, // C + dlsch->Nsoft, // Nsoft, + dlsch->Mdlharq, + dlsch->Kmimo, + dlsch->harq_processes[harq_pid]->rvidx, + dlsch->harq_processes[harq_pid]->Qm, + dlsch->harq_processes[harq_pid]->Nl, + r, + nb_rb); + // m); // r + } } VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_ENB_DLSCH_ENCODING_W, VCD_FUNCTION_OUT); @@ -326,32 +350,46 @@ int dlsch_encoding_2threads0(te_params *tep) { return(0); } + extern int oai_exit; void *te_thread(void *param) { + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + + thread_top_init("te_thread",1,200000,250000,500000); pthread_setname_np( pthread_self(),"te processing"); - LOG_I(PHY,"thread te created id=%ld", syscall(__NR_gettid)); + LOG_I(PHY,"thread te created id=%ld\n", syscall(__NR_gettid)); + - eNB_proc_t *proc = &((te_params *)param)->eNB->proc; + te_params *tep = (te_params *)param; + + //wait_sync("te_thread"); + while (!oai_exit) { + if (wait_on_condition(&tep->mutex_te,&tep->cond_te,&tep->instance_cnt_te,"te thread")<0) break; + if(oai_exit) break; - if (wait_on_condition(&proc->mutex_te,&proc->cond_te,&proc->instance_cnt_te,"te thread")<0) break; + dlsch_encoding_2threads0(tep); - dlsch_encoding_2threads0((te_params*)param); + if (release_thread(&tep->mutex_te,&tep->instance_cnt_te,"te thread")<0) break; - - if (release_thread(&proc->mutex_te,&proc->instance_cnt_te,"te thread")<0) break; - - if (pthread_cond_signal(&proc->cond_te) != 0) { + if (pthread_cond_signal(&tep->cond_te) != 0) { printf("[eNB] ERROR pthread_cond_signal for te thread exit\n"); exit_fun( "ERROR pthread_cond_signal" ); return(NULL); } + /*if(opp_enabled == 1 && te_wakeup_stats0->diff_now>50*3000){ + print_meas_now(te_wakeup_stats0,"coding_wakeup",stderr); + 
printf("te_thread0 delay for waking up in frame_rx: %d subframe_rx: %d \n",proc->frame_rx,proc->subframe_rx); + }*/ } return(NULL); } + + int dlsch_encoding_2threads(PHY_VARS_eNB *eNB, unsigned char *a, uint8_t num_pdcch_symbols, @@ -360,9 +398,16 @@ int dlsch_encoding_2threads(PHY_VARS_eNB *eNB, uint8_t subframe, time_stats_t *rm_stats, time_stats_t *te_stats, - time_stats_t *i_stats) + time_stats_t *te_wait_stats, + time_stats_t *te_main_stats, + time_stats_t *te_wakeup_stats0, + time_stats_t *te_wakeup_stats1, + time_stats_t *i_stats, + int worker_num) { + //start_meas(&eNB->dlsch_turbo_encoding_preperation_stats); + LTE_DL_FRAME_PARMS *frame_parms = &eNB->frame_parms; eNB_proc_t *proc = &eNB->proc; unsigned int G; @@ -382,19 +427,22 @@ int dlsch_encoding_2threads(PHY_VARS_eNB *eNB, mod_order = dlsch->harq_processes[harq_pid]->Qm; G = get_G(frame_parms,nb_rb,dlsch->harq_processes[harq_pid]->rb_alloc,mod_order,dlsch->harq_processes[harq_pid]->Nl,num_pdcch_symbols,frame,subframe,dlsch->harq_processes[harq_pid]->mimo_mode==TM7?7:0); - if (dlsch->harq_processes[harq_pid]->round == 0) { // this is a new packet + start_meas(&eNB->dlsch_turbo_encoding_preperation_stats); // Add 24-bit crc (polynomial A) to payload crc = crc24a(a, A)>>8; + stop_meas(&eNB->dlsch_turbo_encoding_preperation_stats); a[A>>3] = ((uint8_t*)&crc)[2]; a[1+(A>>3)] = ((uint8_t*)&crc)[1]; a[2+(A>>3)] = ((uint8_t*)&crc)[0]; dlsch->harq_processes[harq_pid]->B = A+24; memcpy(dlsch->harq_processes[harq_pid]->b,a,(A/8)+4); + //stop_meas(&eNB->dlsch_turbo_encoding_preperation_stats); + start_meas(&eNB->dlsch_turbo_encoding_segmentation_stats); if (lte_segmentation(dlsch->harq_processes[harq_pid]->b, dlsch->harq_processes[harq_pid]->c, dlsch->harq_processes[harq_pid]->B, @@ -406,34 +454,41 @@ int dlsch_encoding_2threads(PHY_VARS_eNB *eNB, &dlsch->harq_processes[harq_pid]->F)<0) return(-1); - - - if (proc->instance_cnt_te==0) { - printf("[eNB] TE thread busy\n"); - exit_fun("TE thread busy"); - pthread_mutex_unlock( &proc->mutex_te ); - return(-1); - } - - VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_ENB_DLSCH_ENCODING, VCD_FUNCTION_OUT); - ++proc->instance_cnt_te; - - proc->tep.eNB = eNB; - proc->tep.dlsch = dlsch; - proc->tep.G = G; - proc->tep.harq_pid = harq_pid; - - // wakeup worker to do second half segments - if (pthread_cond_signal(&proc->cond_te) != 0) { - printf("[eNB] ERROR pthread_cond_signal for te thread exit\n"); - exit_fun( "ERROR pthread_cond_signal" ); - return (-1); + stop_meas(&eNB->dlsch_turbo_encoding_segmentation_stats); + + start_meas(&eNB->dlsch_turbo_encoding_signal_stats); + for(int i=0;i<worker_num;i++) + { + proc->tep[i].eNB = eNB; + proc->tep[i].dlsch = dlsch; + proc->tep[i].G = G; + proc->tep[i].harq_pid = harq_pid; + proc->tep[i].total_worker = worker_num; + proc->tep[i].current_worker = i; + + pthread_mutex_lock( &proc->tep[i].mutex_te ); + if (proc->tep[i].instance_cnt_te==0) { + printf("[eNB] TE thread busy\n"); + exit_fun("TE thread busy"); + pthread_mutex_unlock( &proc->tep[i].mutex_te ); + return(-1); + } + + ++proc->tep[i].instance_cnt_te; + + // wakeup worker to do segments + if (pthread_cond_signal(&proc->tep[i].cond_te) != 0) { + printf("[eNB] ERROR pthread_cond_signal for te thread %d exit\n",i); + exit_fun( "ERROR pthread_cond_signal" ); + return (-1); + } + + pthread_mutex_unlock( &proc->tep[i].mutex_te ); } - pthread_mutex_unlock( &proc->mutex_te ); - - VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_ENB_DLSCH_ENCODING, VCD_FUNCTION_IN); - for 
(r=dlsch->harq_processes[harq_pid]->C>>1; r<dlsch->harq_processes[harq_pid]->C; r++) { + stop_meas(&eNB->dlsch_turbo_encoding_signal_stats); + start_meas(te_main_stats); + for (r=(dlsch->harq_processes[harq_pid]->C/(worker_num+1))*worker_num; r<dlsch->harq_processes[harq_pid]->C; r++) { if (r<dlsch->harq_processes[harq_pid]->Cminus) Kr = dlsch->harq_processes[harq_pid]->Kminus; @@ -478,15 +533,18 @@ int dlsch_encoding_2threads(PHY_VARS_eNB *eNB, } else { - proc->tep.eNB = eNB; - proc->tep.dlsch = dlsch; - proc->tep.G = G; - - // wakeup worker to do second half segments - if (pthread_cond_signal(&proc->cond_te) != 0) { + for(int i=0;i<worker_num;i++) + { + proc->tep[i].eNB = eNB; + proc->tep[i].dlsch = dlsch; + proc->tep[i].G = G; + proc->tep[i].total_worker = worker_num; + proc->tep[i].current_worker = i; + if (pthread_cond_signal(&proc->tep[i].cond_te) != 0) { printf("[eNB] ERROR pthread_cond_signal for te thread exit\n"); exit_fun( "ERROR pthread_cond_signal" ); return (-1); + } } } @@ -496,7 +554,7 @@ int dlsch_encoding_2threads(PHY_VARS_eNB *eNB, for (r=0,r_offset=0; r<dlsch->harq_processes[harq_pid]->C; r++) { // get information for E for the segments that are handled by the worker thread - if (r<(dlsch->harq_processes[harq_pid]->C>>1)) { + if (r<(dlsch->harq_processes[harq_pid]->C/(worker_num+1))*worker_num) { int Nl=dlsch->harq_processes[harq_pid]->Nl; int Qm=dlsch->harq_processes[harq_pid]->Qm; int C = dlsch->harq_processes[harq_pid]->C; @@ -526,16 +584,138 @@ int dlsch_encoding_2threads(PHY_VARS_eNB *eNB, stop_meas(rm_stats); } } + stop_meas(te_main_stats); - // wait for worker to finish - - wait_on_busy_condition(&proc->mutex_te,&proc->cond_te,&proc->instance_cnt_te,"te thread"); + start_meas(te_wait_stats); + if(worker_num == 1) + { + wait_on_busy_condition(&proc->tep[0].mutex_te,&proc->tep[0].cond_te,&proc->tep[0].instance_cnt_te,"te thread 0"); + } + else if(worker_num == 2) + { + wait_on_busy_condition(&proc->tep[0].mutex_te,&proc->tep[0].cond_te,&proc->tep[0].instance_cnt_te,"te thread 0"); + wait_on_busy_condition(&proc->tep[1].mutex_te,&proc->tep[1].cond_te,&proc->tep[1].instance_cnt_te,"te thread 1"); + } + else + { + wait_on_busy_condition(&proc->tep[0].mutex_te,&proc->tep[0].cond_te,&proc->tep[0].instance_cnt_te,"te thread 0"); + wait_on_busy_condition(&proc->tep[1].mutex_te,&proc->tep[1].cond_te,&proc->tep[1].instance_cnt_te,"te thread 1"); + wait_on_busy_condition(&proc->tep[2].mutex_te,&proc->tep[2].cond_te,&proc->tep[2].instance_cnt_te,"te thread 2"); + } + stop_meas(te_wait_stats); + + /*if(opp_enabled == 1 && te_wait_stats->diff_now>100*3000){ + print_meas_now(te_wait_stats,"coding_wait",stderr); + printf("coding delay in wait on codition in frame_rx: %d \n",proc->frame_rx); + }*/ VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_ENB_DLSCH_ENCODING, VCD_FUNCTION_OUT); return(0); } +int dlsch_encoding_all(PHY_VARS_eNB *eNB, + unsigned char *a, + uint8_t num_pdcch_symbols, + LTE_eNB_DLSCH_t *dlsch, + int frame, + uint8_t subframe, + time_stats_t *rm_stats, + time_stats_t *te_stats, + time_stats_t *te_wait_stats, + time_stats_t *te_main_stats, + time_stats_t *te_wakeup_stats0, + time_stats_t *te_wakeup_stats1, + time_stats_t *i_stats) +{ + int encoding_return = 0; + unsigned int L,C,B; + B = dlsch->harq_processes[dlsch->harq_ids[subframe]]->B; + if(B<=6144) + { + L=0; + C=1; + } + else + { + L=24; + C = B/(6144-L); + if((6144-L)*C < B) + { + C = C+1; + } + } + + if(C >= 8 && get_nprocs()>=16 && codingw)//one main three worker + { + encoding_return = 
+ dlsch_encoding_2threads(eNB, + a, + num_pdcch_symbols, + dlsch, + frame, + subframe, + rm_stats, + te_stats, + te_wait_stats, + te_main_stats, + te_wakeup_stats0, + te_wakeup_stats1, + i_stats, + 3); + } + else if(C >= 6 && get_nprocs()>=8 && codingw)//one main two worker + { + encoding_return = + dlsch_encoding_2threads(eNB, + a, + num_pdcch_symbols, + dlsch, + frame, + subframe, + rm_stats, + te_stats, + te_wait_stats, + te_main_stats, + te_wakeup_stats0, + te_wakeup_stats1, + i_stats, + 2); + } + else if(C >= 4 && get_nprocs()>=4 && codingw)//one main one worker + { + encoding_return = + dlsch_encoding_2threads(eNB, + a, + num_pdcch_symbols, + dlsch, + frame, + subframe, + rm_stats, + te_stats, + te_wait_stats, + te_main_stats, + te_wakeup_stats0, + te_wakeup_stats1, + i_stats, + 1); + } + else + { + encoding_return = + dlsch_encoding(eNB, + a, + num_pdcch_symbols, + dlsch, + frame, + subframe, + rm_stats, + te_stats, + i_stats); + } + return encoding_return; +} + int dlsch_encoding(PHY_VARS_eNB *eNB, unsigned char *a, diff --git a/openair1/PHY/LTE_TRANSPORT/dlsch_modulation.c b/openair1/PHY/LTE_TRANSPORT/dlsch_modulation.c index f3a9a05ae5be7f7efe889d6fdafd34b4ad0b4345..3bd174c3b39343c0334e0af42235884937711c4c 100644 --- a/openair1/PHY/LTE_TRANSPORT/dlsch_modulation.c +++ b/openair1/PHY/LTE_TRANSPORT/dlsch_modulation.c @@ -38,7 +38,8 @@ #include "defs.h" #include "UTIL/LOG/vcd_signal_dumper.h" -//#define DEBUG_DLSCH_MODULATION 1 +//#define DEBUG_DLSCH_MODULATION +#define NEW_ALLOC_RE //#define is_not_pilot(pilots,re,nushift,use2ndpilots) ((pilots==0) || ((re!=nushift) && (re!=nushift+6)&&((re!=nushift+3)||(use2ndpilots==1))&&((re!=nushift+9)||(use2ndpilots==1)))?1:0) @@ -120,6 +121,17 @@ void generate_16qam_table(void) } } +void generate_qpsk_table(void) +{ + + int a,index; + + for (a=-1; a<=1; a+=2) { + index = (1+a)/2; + qpsk_table[index] = -a*QPSK; + } +} + @@ -157,6 +169,158 @@ void layer1prec2A(int32_t *antenna0_sample, int32_t *antenna1_sample, uint8_t pr uint32_t FOUR[2]={0,4}; uint32_t TWO[2]={0,2}; +int allocate_REs_in_RB_no_pilots_QPSK_siso(PHY_VARS_eNB* phy_vars_eNB, + int **txdataF, + uint32_t *jj, + uint32_t *jj2, + uint16_t re_offset, + uint32_t symbol_offset, + LTE_DL_eNB_HARQ_t *dlsch0_harq, + LTE_DL_eNB_HARQ_t *dlsch1_harq, + uint8_t pilots, + int16_t amp, + uint8_t precoder_index, + int16_t *qam_table_s0, + int16_t *qam_table_s1, + uint32_t *re_allocated, + uint8_t skip_dc, + uint8_t skip_half, + uint8_t lprime, + uint8_t mprime, + uint8_t Ns, + int *P1_SHIFT, + int *P2_SHIFT) +{ + + LTE_DL_FRAME_PARMS *frame_parms = &phy_vars_eNB->frame_parms; + uint8_t *x0 = dlsch0_harq->e; + uint32_t qpsk_table_offset_re = 0; + uint32_t qpsk_table_offset_im = 0; + + uint32_t tti_offset; + uint8_t re; + uint8_t *x0p; + + if (skip_dc == 0) { + for (x0p=&x0[*jj],tti_offset=symbol_offset+re_offset,re=0; + re<12; + re++,x0p+=2,tti_offset++) { + + qpsk_table_offset_re=x0p[0]; + qpsk_table_offset_im=x0p[1]; + ((int16_t *)&txdataF[0][tti_offset])[0]=qam_table_s0[qpsk_table_offset_re]; + ((int16_t *)&txdataF[0][tti_offset])[1]=qam_table_s0[qpsk_table_offset_im]; + } + } + else { + // 1st half of PRB + for (x0p=&x0[*jj],tti_offset=symbol_offset+re_offset,re=0; + re<6; + re++,x0p+=2,tti_offset++) { + + qpsk_table_offset_re=x0p[0]; + qpsk_table_offset_im=x0p[1]; + ((int16_t *)&txdataF[0][tti_offset])[0]=qam_table_s0[qpsk_table_offset_re]; + ((int16_t *)&txdataF[0][tti_offset])[1]=qam_table_s0[qpsk_table_offset_im]; + } + // 2nd half of PRB + for 
(tti_offset=symbol_offset+re_offset-frame_parms->ofdm_symbol_size+7; + re<12; + re++,x0p+=2,tti_offset++) { + + qpsk_table_offset_re=x0p[0]; + qpsk_table_offset_im=x0p[1]; + ((int16_t *)&txdataF[0][tti_offset])[0]=qam_table_s0[qpsk_table_offset_re]; + ((int16_t *)&txdataF[0][tti_offset])[1]=qam_table_s0[qpsk_table_offset_im]; + } + } + *re_allocated = *re_allocated + 12; + *jj=*jj + 24; + + return(0); +} + +int allocate_REs_in_RB_pilots_QPSK_siso(PHY_VARS_eNB* phy_vars_eNB, + int **txdataF, + uint32_t *jj, + uint32_t *jj2, + uint16_t re_offset, + uint32_t symbol_offset, + LTE_DL_eNB_HARQ_t *dlsch0_harq, + LTE_DL_eNB_HARQ_t *dlsch1_harq, + uint8_t pilots, + int16_t amp, + uint8_t precoder_index, + int16_t *qam_table_s0, + int16_t *qam_table_s1, + uint32_t *re_allocated, + uint8_t skip_dc, + uint8_t skip_half, + uint8_t lprime, + uint8_t mprime, + uint8_t Ns, + int *P1_SHIFT, + int *P2_SHIFT) +{ + + + LTE_DL_FRAME_PARMS *frame_parms=&phy_vars_eNB->frame_parms; + + uint8_t *x0 = dlsch0_harq->e; + uint32_t qpsk_table_offset_re = 0; + uint32_t qpsk_table_offset_im = 0; + + uint32_t tti_offset; + uint8_t re; + uint8_t *x0p; + + + if (skip_dc == 0) { + // printf("pilots: P1_SHIFT[0] %d\n",P1_SHIFT[0]); + for (x0p=&x0[*jj],tti_offset=symbol_offset+re_offset+P1_SHIFT[0],re=P1_SHIFT[0]; + re<12; + x0p+=2) { + + qpsk_table_offset_re=x0p[0]; + qpsk_table_offset_im=x0p[1]; + ((int16_t *)&txdataF[0][tti_offset])[0]=qam_table_s0[qpsk_table_offset_re]; + ((int16_t *)&txdataF[0][tti_offset])[1]=qam_table_s0[qpsk_table_offset_im]; + // printf("pilots: re %d, tti_offset %d, P1_SHIFT %d\n",re,tti_offset,P1_SHIFT[re+1]); + tti_offset+=P1_SHIFT[re+1]; + re+=P1_SHIFT[re+1]; + } + } + else { + for (x0p=&x0[*jj],tti_offset=symbol_offset+re_offset+P1_SHIFT[0],re=P1_SHIFT[0]; + re<6; + x0p+=2) { + + qpsk_table_offset_re+=x0p[0]; + qpsk_table_offset_im+=x0p[1]; + ((int16_t *)&txdataF[0][tti_offset])[0]=qam_table_s0[qpsk_table_offset_re]; + ((int16_t *)&txdataF[0][tti_offset])[1]=qam_table_s0[qpsk_table_offset_im]; + tti_offset+=P1_SHIFT[re+1]; + re+=P1_SHIFT[re+1]; + } + + for (tti_offset=symbol_offset+re_offset-frame_parms->ofdm_symbol_size+6+P1_SHIFT[6]; + re<12; + x0p+=2) { + + qpsk_table_offset_re+=x0p[0]; + qpsk_table_offset_im+=x0p[1]; + ((int16_t *)&txdataF[0][tti_offset])[0]=qam_table_s0[qpsk_table_offset_re]; + ((int16_t *)&txdataF[0][tti_offset])[1]=qam_table_s0[qpsk_table_offset_im]; + tti_offset+=P1_SHIFT[re+1]; + re+=P1_SHIFT[re+1]; + } + } + *re_allocated = *re_allocated + 10; + *jj=*jj + 20; + + return(0); +} + int allocate_REs_in_RB_no_pilots_16QAM_siso(PHY_VARS_eNB* phy_vars_eNB, int **txdataF, uint32_t *jj, @@ -2011,11 +2175,11 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB, uint8_t mod_order0 = 0; uint8_t mod_order1 = 0; int16_t amp_rho_a, amp_rho_b; - int16_t qam16_table_a0[4],qam64_table_a0[8],qam16_table_b0[4],qam64_table_b0[8]; - int16_t qam16_table_a1[4],qam64_table_a1[8],qam16_table_b1[4],qam64_table_b1[8]; + int16_t qam16_table_a0[4],qam64_table_a0[8],qam16_table_b0[4],qam64_table_b0[8];//qpsk_table_a0[2],qpsk_table_b0[2] + int16_t qam16_table_a1[4],qam64_table_a1[8],qam16_table_b1[4],qam64_table_b1[8];//qpsk_table_a1[2],qpsk_table_b1[2] int16_t *qam_table_s0=NULL,*qam_table_s1=NULL; -#if 0 +#ifdef NEW_ALLOC_RE /* TODO: variable to be removed? 
*/ int (*allocate_REs)(PHY_VARS_eNB*, int **, @@ -2125,8 +2289,15 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB, amp_rho_b = (int16_t)(((int32_t)amp*dlsch1->sqrt_rho_b)>>13); } - - if (mod_order0 == 4) + /*if(mod_order0 == 2) + { + for(i=0;i<2;i++) + { + qpsk_table_a0[i] = (int16_t)(((int32_t)qpsk_table[i]*amp_rho_a)>>15); + qpsk_table_b0[i] = (int16_t)(((int32_t)qpsk_table[i]*amp_rho_b)>>15); + } + } + else*/ if (mod_order0 == 4) for (i=0;i<4; i++) { qam16_table_a0[i] = (int16_t)(((int32_t)qam16_table[i]*amp_rho_a)>>15); qam16_table_b0[i] = (int16_t)(((int32_t)qam16_table[i]*amp_rho_b)>>15); @@ -2137,7 +2308,14 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB, qam64_table_b0[i] = (int16_t)(((int32_t)qam64_table[i]*amp_rho_b)>>15); } - if (mod_order1 == 4) + /*if (mod_order1 == 2) + { + for (i=0; i<2; i++) { + qpsk_table_a1[i] = (int16_t)(((int32_t)qpsk_table[i]*amp_rho_a)>>15); + qpsk_table_b1[i] = (int16_t)(((int32_t)qpsk_table[i]*amp_rho_b)>>15); + } + } + else*/ if (mod_order1 == 4) for (i=0; i<4; i++) { qam16_table_a1[i] = (int16_t)(((int32_t)qam16_table[i]*amp_rho_a)>>15); qam16_table_b1[i] = (int16_t)(((int32_t)qam16_table[i]*amp_rho_b)>>15); @@ -2259,7 +2437,7 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB, re_offset = frame_parms->first_carrier_offset; symbol_offset = (uint32_t)frame_parms->ofdm_symbol_size*(l+(subframe_offset*nsymb)); -#if 0 +#ifdef NEW_ALLOC_RE /* TODO: remove this code? */ allocate_REs = allocate_REs_in_RB; #endif @@ -2267,11 +2445,30 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB, switch (mod_order0) { case 2: qam_table_s0 = NULL; + /*if (pilots) { + qam_table_s0 = qpsk_table_b0; +#ifdef NEW_ALLOC_RE + // TODO: remove this code? // + allocate_REs = (dlsch0->harq_processes[harq_pid]->mimo_mode == SISO) ? + allocate_REs_in_RB_pilots_QPSK_siso : + allocate_REs_in_RB; +#endif + } + else { + qam_table_s0 = qpsk_table_a0; +#ifdef NEW_ALLOC_RE + // TODO: remove this code? // + allocate_REs = (dlsch0->harq_processes[harq_pid]->mimo_mode == SISO) ? + allocate_REs_in_RB_no_pilots_QPSK_siso : + allocate_REs_in_RB; +#endif + + }*/ break; case 4: if (pilots) { qam_table_s0 = qam16_table_b0; -#if 0 +#ifdef NEW_ALLOC_RE /* TODO: remove this code? */ allocate_REs = (dlsch0->harq_processes[harq_pid]->mimo_mode == SISO) ? allocate_REs_in_RB_pilots_16QAM_siso : @@ -2280,7 +2477,7 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB, } else { qam_table_s0 = qam16_table_a0; -#if 0 +#ifdef NEW_ALLOC_RE /* TODO: remove this code? */ allocate_REs = (dlsch0->harq_processes[harq_pid]->mimo_mode == SISO) ? allocate_REs_in_RB_no_pilots_16QAM_siso : @@ -2293,7 +2490,7 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB, case 6: if (pilots) { qam_table_s0 = qam64_table_b0; -#if 0 +#ifdef NEW_ALLOC_RE /* TODO: remove this code? */ allocate_REs = (dlsch0->harq_processes[harq_pid]->mimo_mode == SISO) ? allocate_REs_in_RB_pilots_64QAM_siso : @@ -2302,7 +2499,7 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB, } else { qam_table_s0 = qam64_table_a0; -#if 0 +#ifdef NEW_ALLOC_RE /* TODO: remove this code? */ allocate_REs = (dlsch0->harq_processes[harq_pid]->mimo_mode == SISO) ? allocate_REs_in_RB_no_pilots_64QAM_siso : @@ -2316,10 +2513,16 @@ int dlsch_modulation(PHY_VARS_eNB* phy_vars_eNB, switch (mod_order1) { case 2: qam_table_s1 = NULL; -#if 0 +#ifdef NEW_ALLOC_RE /* TODO: remove this code? 
*/ allocate_REs = allocate_REs_in_RB; #endif + /*if (pilots) { + qam_table_s1 = qpsk_table_b1; + } + else { + qam_table_s1 = qpsk_table_a1; + }*/ break; case 4: if (pilots) { diff --git a/openair1/PHY/LTE_TRANSPORT/extern.h b/openair1/PHY/LTE_TRANSPORT/extern.h index e791bec9a0c27f45fd047acc46f510d590684f69..7ab9031010ba0fa16341899e31a81e17f7b69210 100644 --- a/openair1/PHY/LTE_TRANSPORT/extern.h +++ b/openair1/PHY/LTE_TRANSPORT/extern.h @@ -30,7 +30,7 @@ extern short *ul_ref_sigs_rx[30][2][33]; extern unsigned short dftsizes[33]; extern unsigned short ref_primes[33]; -extern int qam64_table[8],qam16_table[4]; +extern int qam64_table[8],qam16_table[4],qpsk_table[2]; extern unsigned char cs_ri_normal[4]; extern unsigned char cs_ri_extended[4]; diff --git a/openair1/PHY/LTE_TRANSPORT/proto.h b/openair1/PHY/LTE_TRANSPORT/proto.h index 0e1a60c0b9ac1bc5105c73ebfe5b253769eb9d94..e706bbbaeb2658fba754ce367e9f248d09f0e490 100644 --- a/openair1/PHY/LTE_TRANSPORT/proto.h +++ b/openair1/PHY/LTE_TRANSPORT/proto.h @@ -127,6 +127,20 @@ int32_t dlsch_encoding(PHY_VARS_eNB *eNB, time_stats_t *rm_stats, time_stats_t *te_stats, time_stats_t *i_stats); + +int32_t dlsch_encoding_all(PHY_VARS_eNB *eNB, + uint8_t *a, + uint8_t num_pdcch_symbols, + LTE_eNB_DLSCH_t *dlsch, + int frame, + uint8_t subframe, + time_stats_t *rm_stats, + time_stats_t *te_stats, + time_stats_t *te_wait_stats, + time_stats_t *te_main_stats, + time_stats_t *te_wakeup_stats0, + time_stats_t *te_wakeup_stats1, + time_stats_t *i_stats); int32_t dlsch_encoding_SIC(PHY_VARS_UE *ue, uint8_t *a, @@ -171,7 +185,12 @@ int32_t dlsch_encoding_2threads(PHY_VARS_eNB *eNB, uint8_t subframe, time_stats_t *rm_stats, time_stats_t *te_stats, - time_stats_t *i_stats); + time_stats_t *te_wait_stats, + time_stats_t *te_main_stats, + time_stats_t *te_wakeup_stats0, + time_stats_t *te_wakeup_stats1, + time_stats_t *i_stats, + int worker_num); void dlsch_encoding_emul(PHY_VARS_eNB *phy_vars_eNB, uint8_t *DLSCH_pdu, @@ -1488,6 +1507,7 @@ uint8_t generate_dci_top_emul(PHY_VARS_eNB *phy_vars_eNB, void generate_64qam_table(void); void generate_16qam_table(void); +void generate_qpsk_table(void); uint16_t extract_crc(uint8_t *dci,uint8_t DCI_LENGTH); @@ -1897,6 +1917,11 @@ unsigned int ulsch_decoding(PHY_VARS_eNB *phy_vars_eNB, uint8_t Nbundled, uint8_t llr8_flag); +int ulsch_decoding_data_all(PHY_VARS_eNB *eNB, + int UE_id, + int harq_pid, + int llr8_flag); + /*! \brief Decoding of ULSCH data component from 36-212. This one spawns 1 worker thread in parallel,half of the segments in each thread. 
@param phy_vars_eNB Pointer to eNB top-level descriptor diff --git a/openair1/PHY/LTE_TRANSPORT/ulsch_decoding.c b/openair1/PHY/LTE_TRANSPORT/ulsch_decoding.c index 0d9bf8fd7611b89703fcfed195490a30294570ed..2f4cfd23442a022d694606f8d199a27c1eb43306 100644 --- a/openair1/PHY/LTE_TRANSPORT/ulsch_decoding.c +++ b/openair1/PHY/LTE_TRANSPORT/ulsch_decoding.c @@ -31,7 +31,7 @@ */ //#include "defs.h" - +#include <syscall.h> #include "PHY/defs.h" #include "PHY/extern.h" #include "PHY/CODING/extern.h" @@ -46,6 +46,9 @@ #include "UTIL/LOG/vcd_signal_dumper.h" //#define DEBUG_ULSCH_DECODING +#include "targets/RT/USER/rt_wrapper.h" + +extern int codingw; void free_eNB_ulsch(LTE_eNB_ULSCH_t *ulsch) { @@ -221,8 +224,6 @@ uint8_t extract_cqi_crc(uint8_t *cqi,uint8_t CQI_LENGTH) - - int ulsch_decoding_data_2thread0(td_params* tdp) { PHY_VARS_eNB *eNB = tdp->eNB; @@ -414,13 +415,20 @@ int ulsch_decoding_data_2thread0(td_params* tdp) { extern int oai_exit; void *td_thread(void *param) { - pthread_setname_np( pthread_self(), "td processing"); PHY_VARS_eNB *eNB = ((td_params*)param)->eNB; eNB_proc_t *proc = &eNB->proc; + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + + thread_top_init("td_thread",1,200000,250000,500000); + pthread_setname_np( pthread_self(),"td processing"); + LOG_I(PHY,"thread td created id=%ld\n", syscall(__NR_gettid)); + //wait_sync("td_thread"); while (!oai_exit) { if (wait_on_condition(&proc->mutex_td,&proc->cond_td,&proc->instance_cnt_td,"td thread")<0) break; + if(oai_exit) break; ((td_params*)param)->ret = ulsch_decoding_data_2thread0((td_params*)param); @@ -623,6 +631,7 @@ int ulsch_decoding_data_2thread(PHY_VARS_eNB *eNB,int UE_id,int harq_pid,int llr break; } stop_meas(&eNB->ulsch_turbo_decoding_stats); + //printf("/////////////////////////////////////////**************************loop for %d time in ulsch_decoding main\n",r); } // wait for worker to finish @@ -782,6 +791,20 @@ int ulsch_decoding_data(PHY_VARS_eNB *eNB,int UE_id,int harq_pid,int llr8_flag) return(ret); } +int ulsch_decoding_data_all(PHY_VARS_eNB *eNB,int UE_id,int harq_pid,int llr8_flag) +{ + int ret = 0; + /*if(codingw) + { + ret = ulsch_decoding_data_2thread(eNB,UE_id,harq_pid,llr8_flag); + } + else*/ + { + ret = ulsch_decoding_data(eNB,UE_id,harq_pid,llr8_flag); + } + return ret; +} + static inline unsigned int lte_gold_unscram(unsigned int *x1, unsigned int *x2, unsigned char reset) __attribute__((always_inline)); static inline unsigned int lte_gold_unscram(unsigned int *x1, unsigned int *x2, unsigned char reset) { diff --git a/openair1/PHY/LTE_TRANSPORT/vars.h b/openair1/PHY/LTE_TRANSPORT/vars.h index 4b6fa920ea76b863d1e6d2b0f5b8dae19c9c9ea1..41ee40b64af7a3df0b484706e67d08f8da45a26f 100644 --- a/openair1/PHY/LTE_TRANSPORT/vars.h +++ b/openair1/PHY/LTE_TRANSPORT/vars.h @@ -62,7 +62,7 @@ unsigned char ue_power_offsets[25] = {14,11,9,8,7,6,6,5,4,4,4,3,3,3,2,2,2,1,1,1, short conjugate[8]__attribute__((aligned(16))) = {-1,1,-1,1,-1,1,-1,1}; short conjugate2[8]__attribute__((aligned(16))) = {1,-1,1,-1,1,-1,1,-1}; -int qam64_table[8],qam16_table[4]; +int qam64_table[8],qam16_table[4],qpsk_table[2]; unsigned char cs_ri_normal[4] = {1,4,7,10}; unsigned char cs_ri_extended[4] = {0,3,5,8}; diff --git a/openair1/PHY/TOOLS/time_meas.c b/openair1/PHY/TOOLS/time_meas.c index d80832e243c9b0af7cea98e1d3da9fca0ff01b67..cb7775904099a769091795d62d23cc1b1d950b25 100644 --- a/openair1/PHY/TOOLS/time_meas.c +++ b/openair1/PHY/TOOLS/time_meas.c @@ -53,7 +53,7 @@ void print_meas_now(time_stats_t *ts, const char* name, FILE* file_name){ if 
(ts->trials>0) { //fprintf(file_name,"Name %25s: Processing %15.3f ms for SF %d, diff_now %15.3f \n", name,(ts->diff_now/(cpu_freq_GHz*1000000.0)),subframe,ts->diff_now); - fprintf(file_name,"%15.3f ms, diff_now %15.3f \n",(ts->diff_now/(cpu_freq_GHz*1000000.0)),(double)ts->diff_now); + fprintf(file_name,"%15.3f us, diff_now %15.3f \n",(ts->diff_now/(cpu_freq_GHz*1000.0)),(double)ts->diff_now); } } diff --git a/openair1/PHY/defs.h b/openair1/PHY/defs.h index 9ca33c9404d610cc29d4c1eac6f9210147db6790..1d2b622e7e748db1610f3bf96a30f26177df7c8c 100644 --- a/openair1/PHY/defs.h +++ b/openair1/PHY/defs.h @@ -270,6 +270,8 @@ typedef struct { pthread_mutex_t mutex_rxtx; /// scheduling parameters for RXn-TXnp4 thread struct sched_param sched_param_rxtx; + /// pipeline ready state + int pipe_ready; } eNB_rxtx_proc_t; typedef struct { @@ -285,6 +287,20 @@ typedef struct { LTE_eNB_DLSCH_t *dlsch; int G; int harq_pid; + int total_worker; + int current_worker; + /// \internal This variable is protected by \ref mutex_te. + int instance_cnt_te; + /// pthread attributes for parallel turbo-encoder thread + pthread_attr_t attr_te; + /// scheduling parameters for parallel turbo-encoder thread + struct sched_param sched_param_te; + /// pthread structure for parallel turbo-encoder thread + pthread_t pthread_te; + /// condition variable for parallel turbo-encoder thread + pthread_cond_t cond_te; + /// mutex for parallel turbo-encoder thread + pthread_mutex_t mutex_te; } te_params; typedef struct RU_proc_t_s { @@ -321,6 +337,7 @@ typedef struct RU_proc_t_s { /// \brief Instance count for FH processing thread. /// \internal This variable is protected by \ref mutex_FH. int instance_cnt_FH; + int instance_cnt_FH1; /// \internal This variable is protected by \ref mutex_prach. int instance_cnt_prach; #ifdef Rel14 @@ -336,10 +353,13 @@ typedef struct RU_proc_t_s { int instance_cnt_asynch_rxtx; /// \internal This variable is protected by \ref mutex_fep int instance_cnt_fep; - /// \internal This variable is protected by \ref mutex_fep + /// \internal This variable is protected by \ref mutex_feptx int instance_cnt_feptx; + /// This varible is protected by \ref mutex_emulatedRF + int instance_cnt_emulateRF; /// pthread structure for RU FH processing thread pthread_t pthread_FH; + pthread_t pthread_FH1; /// pthread structure for RU prach processing thread pthread_t pthread_prach; #ifdef Rel14 @@ -350,8 +370,10 @@ typedef struct RU_proc_t_s { pthread_t pthread_synch; /// pthread struct for RU RX FEP worker thread pthread_t pthread_fep; - /// pthread struct for RU RX FEPTX worker thread + /// pthread struct for RU TX FEP worker thread pthread_t pthread_feptx; + /// pthread struct for emulated RF + pthread_t pthread_emulateRF; /// pthread structure for asychronous RX/TX processing thread pthread_t pthread_asynch_rxtx; /// flag to indicate first RX acquisition @@ -360,6 +382,7 @@ typedef struct RU_proc_t_s { int first_tx; /// pthread attributes for RU FH processing thread pthread_attr_t attr_FH; + pthread_attr_t attr_FH1; /// pthread attributes for RU prach pthread_attr_t attr_prach; #ifdef Rel14 @@ -374,8 +397,11 @@ typedef struct RU_proc_t_s { pthread_attr_t attr_fep; /// pthread attributes for worker feptx thread pthread_attr_t attr_feptx; + /// pthread attributes for emulated RF + pthread_attr_t attr_emulateRF; /// scheduling parameters for RU FH thread struct sched_param sched_param_FH; + struct sched_param sched_param_FH1; /// scheduling parameters for RU prach thread struct sched_param sched_param_prach; #ifdef Rel14 @@ 
-388,6 +414,7 @@ typedef struct RU_proc_t_s { struct sched_param sched_param_asynch_rxtx; /// condition variable for RU FH thread pthread_cond_t cond_FH; + pthread_cond_t cond_FH1; /// condition variable for RU prach thread pthread_cond_t cond_prach; #ifdef Rel14 @@ -398,14 +425,17 @@ typedef struct RU_proc_t_s { pthread_cond_t cond_synch; /// condition variable for asynch RX/TX thread pthread_cond_t cond_asynch_rxtx; - /// condition varaible for RU RX FEP thread + /// condition varible for RU RX FEP thread pthread_cond_t cond_fep; - /// condition varaible for RU RX FEPTX thread + /// condition varible for RU TX FEP thread pthread_cond_t cond_feptx; + /// condition varible for emulated RF + pthread_cond_t cond_emulateRF; /// condition variable for eNB signal pthread_cond_t cond_eNBs; /// mutex for RU FH pthread_mutex_t mutex_FH; + pthread_mutex_t mutex_FH1; /// mutex for RU prach pthread_mutex_t mutex_prach; #ifdef Rel14 @@ -422,12 +452,17 @@ typedef struct RU_proc_t_s { pthread_mutex_t mutex_fep; /// mutex for fep TX worker thread pthread_mutex_t mutex_feptx; + /// mutex for emulated RF thread + pthread_mutex_t mutex_emulateRF; /// symbol mask for IF4p5 reception per subframe uint32_t symbol_mask[10]; /// number of slave threads int num_slaves; /// array of pointers to slaves struct RU_proc_t_s **slave_proc; + /// pipeline ready state + int ru_rx_ready; + int ru_tx_ready; } RU_proc_t; /// Context data structure for eNB subframe processing @@ -461,7 +496,7 @@ typedef struct eNB_proc_t_s { /// \internal This variable is protected by \ref mutex_td. int instance_cnt_td; /// \internal This variable is protected by \ref mutex_te. - int instance_cnt_te; + //int instance_cnt_te[3]; /// \internal This variable is protected by \ref mutex_prach. int instance_cnt_prach; #ifdef Rel14 @@ -483,7 +518,7 @@ typedef struct eNB_proc_t_s { /// pthread attributes for parallel turbo-decoder thread pthread_attr_t attr_td; /// pthread attributes for parallel turbo-encoder thread - pthread_attr_t attr_te; + //pthread_attr_t attr_te[3]; /// pthread attributes for single eNB processing thread pthread_attr_t attr_single; /// pthread attributes for prach processing thread @@ -497,7 +532,7 @@ typedef struct eNB_proc_t_s { /// scheduling parameters for parallel turbo-decoder thread struct sched_param sched_param_td; /// scheduling parameters for parallel turbo-encoder thread - struct sched_param sched_param_te; + //struct sched_param sched_param_te[3]; /// scheduling parameters for single eNB thread struct sched_param sched_param_single; /// scheduling parameters for prach thread @@ -511,7 +546,7 @@ typedef struct eNB_proc_t_s { /// pthread structure for parallel turbo-decoder thread pthread_t pthread_td; /// pthread structure for parallel turbo-encoder thread - pthread_t pthread_te; + //pthread_t pthread_te[3]; /// pthread structure for PRACH thread pthread_t pthread_prach; #ifdef Rel14 @@ -521,7 +556,7 @@ typedef struct eNB_proc_t_s { /// condition variable for parallel turbo-decoder thread pthread_cond_t cond_td; /// condition variable for parallel turbo-encoder thread - pthread_cond_t cond_te; + //pthread_cond_t cond_te[3]; /// condition variable for PRACH processing thread; pthread_cond_t cond_prach; #ifdef Rel14 @@ -533,7 +568,7 @@ typedef struct eNB_proc_t_s { /// mutex for parallel turbo-decoder thread pthread_mutex_t mutex_td; /// mutex for parallel turbo-encoder thread - pthread_mutex_t mutex_te; + //pthread_mutex_t mutex_te[3]; /// mutex for PRACH thread pthread_mutex_t mutex_prach; #ifdef Rel14 @@ -559,9 
+594,13 @@ typedef struct eNB_proc_t_s { /// parameters for turbo-decoding worker thread td_params tdp; /// parameters for turbo-encoding worker thread - te_params tep; + te_params tep[3]; /// set of scheduling variables RXn-TXnp4 threads eNB_rxtx_proc_t proc_rxtx[2]; + /// stats thread pthread descriptor + pthread_t process_stats_thread; + /// for waking up tx procedure + RU_proc_t *ru_proc; } eNB_proc_t; @@ -663,6 +702,7 @@ typedef enum { REMOTE_IF4p5 =3, REMOTE_IF1pp =4, MAX_RU_IF_TYPES =5 + //EMULATE_RF =6 } RU_if_south_t; typedef struct RU_t_s{ @@ -757,11 +797,19 @@ typedef struct RU_t_s{ void (*wakeup_prach_eNB_br)(struct PHY_VARS_eNB_s *eNB,struct RU_t_s *ru,int frame,int subframe); #endif /// function pointer to eNB entry routine - void (*eNB_top)(struct PHY_VARS_eNB_s *eNB, int frame_rx, int subframe_rx, char *string); + void (*eNB_top)(struct PHY_VARS_eNB_s *eNB, int frame_rx, int subframe_rx, char *string,struct RU_t_s *ru); /// Timing statistics time_stats_t ofdm_demod_stats; /// Timing statistics (TX) time_stats_t ofdm_mod_stats; + /// Timing wait statistics + time_stats_t ofdm_demod_wait_stats; + /// Timing wakeup statistics + time_stats_t ofdm_demod_wakeup_stats; + /// Timing wait statistics (TX) + time_stats_t ofdm_mod_wait_stats; + /// Timing wakeup statistics (TX) + time_stats_t ofdm_mod_wakeup_stats; /// Timing statistics (RX Fronthaul + Compression) time_stats_t rx_fhaul; /// Timing statistics (TX Fronthaul + Compression) @@ -955,7 +1003,7 @@ typedef struct PHY_VARS_eNB_s { eth_params_t eth_params; int rx_total_gain_dB; int (*td)(struct PHY_VARS_eNB_s *eNB,int UE_id,int harq_pid,int llr8_flag); - int (*te)(struct PHY_VARS_eNB_s *,uint8_t *,uint8_t,LTE_eNB_DLSCH_t *,int,uint8_t,time_stats_t *,time_stats_t *,time_stats_t *); + int (*te)(struct PHY_VARS_eNB_s *,uint8_t *,uint8_t,LTE_eNB_DLSCH_t *,int,uint8_t,time_stats_t *,time_stats_t *,time_stats_t *,time_stats_t *,time_stats_t *,time_stats_t *,time_stats_t *); int (*start_if)(struct RU_t_s *ru,struct PHY_VARS_eNB_s *eNB); uint8_t local_flag; LTE_DL_FRAME_PARMS frame_parms; @@ -1134,7 +1182,14 @@ typedef struct PHY_VARS_eNB_s { time_stats_t dlsch_modulation_stats; time_stats_t dlsch_scrambling_stats; time_stats_t dlsch_rate_matching_stats; + time_stats_t dlsch_turbo_encoding_preperation_stats; + time_stats_t dlsch_turbo_encoding_segmentation_stats; time_stats_t dlsch_turbo_encoding_stats; + time_stats_t dlsch_turbo_encoding_waiting_stats; + time_stats_t dlsch_turbo_encoding_signal_stats; + time_stats_t dlsch_turbo_encoding_main_stats; + time_stats_t dlsch_turbo_encoding_wakeup_stats0; + time_stats_t dlsch_turbo_encoding_wakeup_stats1; time_stats_t dlsch_interleaving_stats; time_stats_t rx_dft_stats; diff --git a/openair1/PHY/impl_defs_top.h b/openair1/PHY/impl_defs_top.h index 60d3fec5f4da660076ae16cc68fdf8e2c145aff3..2697dc55789078aadc22ed4d701156381ef15a55 100644 --- a/openair1/PHY/impl_defs_top.h +++ b/openair1/PHY/impl_defs_top.h @@ -203,6 +203,9 @@ // QAM amplitude definitions +/// Amplitude for QPSK (\f$ 2^15 \times 1/\sqrt{2}\f$) +#define QPSK 23170 + /// First Amplitude for QAM16 (\f$ 2^{15} \times 2/\sqrt{10}\f$) #define QAM16_n1 20724 /// Second Amplitude for QAM16 (\f$ 2^{15} \times 1/\sqrt{10}\f$) diff --git a/openair1/SCHED/fapi_l1.c b/openair1/SCHED/fapi_l1.c index df01fad13622307d0c14c65fbd302744d23453e2..1f52f189439c870435362136912552477557ff8a 100644 --- a/openair1/SCHED/fapi_l1.c +++ b/openair1/SCHED/fapi_l1.c @@ -201,8 +201,8 @@ void handle_nfapi_dlsch_pdu(PHY_VARS_eNB *eNB,int frame,int 
subframe,eNB_rxtx_pr dlsch0_harq->pdsch_start = eNB->pdcch_vars[subframe & 1].num_pdcch_symbols; if (dlsch0_harq->round==0) { //get pointer to SDU if this a new SDU - AssertFatal(sdu!=NULL,"NFAPI: SFN/SF:%04d%d proc:TX:[frame %d subframe %d]: programming dlsch for round 0, rnti %x, UE_id %d, harq_pid %d : sdu is null for pdu_index %d dlsch0_harq[round:%d SFN/SF:%d%d pdu:%p mcs:%d ndi:%d pdschstart:%d]\n", - frame,subframe, + AssertFatal(sdu!=NULL,"sdu==%d, NFAPI: SFN/SF:%04d%d proc:TX:[frame %d subframe %d]: programming dlsch for round 0, rnti %x, UE_id %d, harq_pid %d : sdu is null for pdu_index %d dlsch0_harq[round:%d SFN/SF:%d%d pdu:%p mcs:%d ndi:%d pdschstart:%d]\n", + *sdu,frame,subframe, proc->frame_tx,proc->subframe_tx,rel8->rnti,UE_id,harq_pid, dl_config_pdu->dlsch_pdu.dlsch_pdu_rel8.pdu_index,dlsch0_harq->round,dlsch0_harq->frame,dlsch0_harq->subframe,dlsch0_harq->pdu,dlsch0_harq->mcs,dlsch0_harq->ndi,dlsch0_harq->pdsch_start); if (rel8->rnti != 0xFFFF) LOG_D(PHY,"NFAPI: SFN/SF:%04d%d proc:TX:[frame %d, subframe %d]: programming dlsch for round 0, rnti %x, UE_id %d, harq_pid %d\n", diff --git a/openair1/SCHED/phy_procedures_lte_eNb.c b/openair1/SCHED/phy_procedures_lte_eNb.c index 780d3a1a6f830591cab59c0bb975c59f50d41fec..ecdf8e5756d66d7b24575578a682d6056bb21001 100644 --- a/openair1/SCHED/phy_procedures_lte_eNb.c +++ b/openair1/SCHED/phy_procedures_lte_eNb.c @@ -346,14 +346,24 @@ void pdsch_procedures(PHY_VARS_eNB *eNB, start_meas(&eNB->dlsch_encoding_stats); eNB->te(eNB, - dlsch_harq->pdu, - dlsch_harq->pdsch_start, - dlsch, - frame,subframe, - &eNB->dlsch_rate_matching_stats, - &eNB->dlsch_turbo_encoding_stats, - &eNB->dlsch_interleaving_stats); + dlsch_harq->pdu, + dlsch_harq->pdsch_start, + dlsch, + frame, + subframe, + &eNB->dlsch_rate_matching_stats, + &eNB->dlsch_turbo_encoding_stats, + &eNB->dlsch_turbo_encoding_waiting_stats, + &eNB->dlsch_turbo_encoding_main_stats, + &eNB->dlsch_turbo_encoding_wakeup_stats0, + &eNB->dlsch_turbo_encoding_wakeup_stats1, + &eNB->dlsch_interleaving_stats); stop_meas(&eNB->dlsch_encoding_stats); + //////////////////////////////////////////////////******************************************* + if(eNB->dlsch_encoding_stats.diff_now>500*3000 && opp_enabled == 1) + { + print_meas_now(&eNB->dlsch_encoding_stats,"total coding",stderr); + } // 36-211 start_meas(&eNB->dlsch_scrambling_stats); dlsch_scrambling(fp, @@ -546,6 +556,7 @@ void phy_procedures_eNB_TX(PHY_VARS_eNB *eNB, else { // generate pdsch + pdsch_procedures(eNB, proc, harq_pid, @@ -1359,37 +1370,63 @@ extern int oai_exit; extern void *td_thread(void*); -void init_td_thread(PHY_VARS_eNB *eNB,pthread_attr_t *attr_td) { +void init_td_thread(PHY_VARS_eNB *eNB) { eNB_proc_t *proc = &eNB->proc; proc->tdp.eNB = eNB; proc->instance_cnt_td = -1; - + + pthread_attr_init( &proc->attr_td); pthread_mutex_init( &proc->mutex_td, NULL); pthread_cond_init( &proc->cond_td, NULL); + + pthread_create(&proc->pthread_td, &proc->attr_td, td_thread, (void*)&proc->tdp); - pthread_create(&proc->pthread_td, attr_td, td_thread, (void*)&proc->tdp); +} +void kill_td_thread(PHY_VARS_eNB *eNB) { + eNB_proc_t *proc = &eNB->proc; + proc->instance_cnt_td = 0; + pthread_cond_signal(&proc->cond_td); + + pthread_join(proc->pthread_td, NULL); + pthread_mutex_destroy( &proc->mutex_td ); + pthread_cond_destroy( &proc->cond_td ); } extern void *te_thread(void*); -void init_te_thread(PHY_VARS_eNB *eNB,pthread_attr_t *attr_te) { +void init_te_thread(PHY_VARS_eNB *eNB) { eNB_proc_t *proc = &eNB->proc; - proc->tep.eNB = eNB; - 
proc->instance_cnt_te = -1; + for(int i=0; i<3 ;i++){ + proc->tep[i].eNB = eNB; + proc->tep[i].instance_cnt_te = -1; + + pthread_mutex_init( &proc->tep[i].mutex_te, NULL); + pthread_cond_init( &proc->tep[i].cond_te, NULL); + pthread_attr_init( &proc->tep[i].attr_te); - pthread_mutex_init( &proc->mutex_te, NULL); - pthread_cond_init( &proc->cond_te, NULL); + printf("Creating te_thread 0\n"); + pthread_create(&proc->tep[i].pthread_te, &proc->tep[i].attr_te, te_thread, (void*)&proc->tep[i]); + } +} +void kill_te_thread(PHY_VARS_eNB *eNB) { - printf("Creating te_thread\n"); - pthread_create(&proc->pthread_te, attr_te, te_thread, (void*)&proc->tep); + eNB_proc_t *proc = &eNB->proc; + for(int i=0; i<3 ;i++){ + proc->tep[i].instance_cnt_te = 0; + pthread_cond_signal(&proc->tep[i].cond_te); + pthread_join(proc->tep[i].pthread_te, NULL); + pthread_mutex_destroy( &proc->tep[i].mutex_te); + pthread_cond_destroy( &proc->tep[i].cond_te); + } } + void fill_rx_indication(PHY_VARS_eNB *eNB,int UE_id,int frame,int subframe) { nfapi_rx_indication_pdu_t *pdu; @@ -1624,6 +1661,7 @@ void fill_ulsch_harq_indication(PHY_VARS_eNB *eNB,LTE_UL_eNB_HARQ_t *ulsch_harq, // release DLSCH if needed release_harq(eNB,UE_id,i,frame,subframe,0xffff, ulsch_harq->o_ACK[i] == 1); + #if T_TRACER /* TODO: get correct harq pid */ if (ulsch_harq->o_ACK[i] != 1) @@ -1722,6 +1760,7 @@ void fill_uci_harq_indication(PHY_VARS_eNB *eNB, // release DLSCH if needed release_harq(eNB,UE_id,0,frame,subframe,0xffff, harq_ack[0] == 1); + #if T_TRACER if (harq_ack[0] != 1) T(T_ENB_PHY_DLSCH_UE_NACK, T_INT(0), T_INT(frame), T_INT(subframe), diff --git a/openair1/SCHED/ru_procedures.c b/openair1/SCHED/ru_procedures.c index 534023632c2cdc9354efcdaa0f617d9f79bf4fab..257fef6e31150f2cd21d4c4a2c6880c39b622359 100644 --- a/openair1/SCHED/ru_procedures.c +++ b/openair1/SCHED/ru_procedures.c @@ -138,12 +138,22 @@ static void *feptx_thread(void *param) { RU_t *ru = (RU_t *)param; RU_proc_t *proc = &ru->proc; + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + + thread_top_init("feptx_thread",1,85000,120000,500000); + pthread_setname_np( pthread_self(),"feptx processing"); + LOG_I(PHY,"thread feptx created id=%ld\n", syscall(__NR_gettid)); + //CPU_SET(6, &cpuset); + //pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset); + //wait_sync("feptx_thread"); - thread_top_init("feptx_thread",0,870000,1000000,1000000); + while (!oai_exit) { if (wait_on_condition(&proc->mutex_feptx,&proc->cond_feptx,&proc->instance_cnt_feptx,"feptx thread")<0) break; + //stop_meas(&ru->ofdm_mod_wakeup_stats); feptx0(ru,1); if (release_thread(&proc->mutex_feptx,&proc->instance_cnt_feptx,"feptx thread")<0) break; @@ -152,6 +162,10 @@ static void *feptx_thread(void *param) { exit_fun( "ERROR pthread_cond_signal" ); return NULL; } + /*if(opp_enabled == 1 && ru->ofdm_mod_wakeup_stats.diff_now>30*3000){ + print_meas_now(&ru->ofdm_mod_wakeup_stats,"fep wakeup",stderr); + printf("delay in fep wakeup in frame_tx: %d subframe_rx: %d \n",proc->frame_tx,proc->subframe_tx); + }*/ } @@ -168,7 +182,7 @@ void feptx_ofdm_2thread(RU_t *ru) { wait.tv_sec=0; wait.tv_nsec=5000000L; - + VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_RU_FEPTX_OFDM , 1 ); start_meas(&ru->ofdm_mod_stats); if (subframe_select(fp,subframe) == SF_UL) return; @@ -198,17 +212,25 @@ void feptx_ofdm_2thread(RU_t *ru) { exit_fun( "ERROR pthread_cond_signal" ); return; } + //start_meas(&ru->ofdm_mod_wakeup_stats); pthread_mutex_unlock( &proc->mutex_feptx ); } // call first slot in this thread 
feptx0(ru,0); + start_meas(&ru->ofdm_mod_wait_stats); wait_on_busy_condition(&proc->mutex_feptx,&proc->cond_feptx,&proc->instance_cnt_feptx,"feptx thread"); + stop_meas(&ru->ofdm_mod_wait_stats); + /*if(opp_enabled == 1 && ru->ofdm_mod_wait_stats.diff_now>30*3000){ + print_meas_now(&ru->ofdm_mod_wait_stats,"fep wakeup",stderr); + printf("delay in feptx wait on codition in frame_rx: %d subframe_rx: %d \n",proc->frame_tx,proc->subframe_tx); + }*/ VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_RU_FEPTX_OFDM , 0 ); stop_meas(&ru->ofdm_mod_stats); + VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_RU_FEPTX_OFDM , 0 ); } @@ -426,12 +448,23 @@ static void *fep_thread(void *param) { RU_t *ru = (RU_t *)param; RU_proc_t *proc = &ru->proc; - thread_top_init("fep_thread",0,870000,1000000,1000000); + thread_top_init("fep_thread",1,100000,120000,5000000); + pthread_setname_np( pthread_self(),"fep processing"); + LOG_I(PHY,"thread fep created id=%ld\n", syscall(__NR_gettid)); + + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + //CPU_SET(2, &cpuset); + //pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset); + //wait_sync("fep_thread"); while (!oai_exit) { - if (wait_on_condition(&proc->mutex_fep,&proc->cond_fep,&proc->instance_cnt_fep,"fep thread")<0) break; + if (wait_on_condition(&proc->mutex_fep,&proc->cond_fep,&proc->instance_cnt_fep,"fep thread")<0) break; + //stop_meas(&ru->ofdm_demod_wakeup_stats); + VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_RU_FEPRX1, 1 ); fep0(ru,0); + VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_RU_FEPRX1, 0 ); if (release_thread(&proc->mutex_fep,&proc->instance_cnt_fep,"fep thread")<0) break; if (pthread_cond_signal(&proc->cond_fep) != 0) { @@ -439,6 +472,10 @@ static void *fep_thread(void *param) { exit_fun( "ERROR pthread_cond_signal" ); return NULL; } + /*if(opp_enabled == 1 && ru->ofdm_demod_wakeup_stats.diff_now>30*3000){ + print_meas_now(&ru->ofdm_demod_wakeup_stats,"fep wakeup",stderr); + printf("delay in fep wakeup in frame_rx: %d subframe_rx: %d \n",proc->frame_rx,proc->subframe_rx); + }*/ } @@ -489,6 +526,7 @@ void ru_fep_full_2thread(RU_t *ru) { wait.tv_sec=0; wait.tv_nsec=5000000L; + VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_RU_FEPRX, 1 ); start_meas(&ru->ofdm_demod_stats); if (pthread_mutex_timedlock(&proc->mutex_fep,&wait) != 0) { @@ -512,15 +550,23 @@ void ru_fep_full_2thread(RU_t *ru) { exit_fun( "ERROR pthread_cond_signal" ); return; } + //start_meas(&ru->ofdm_demod_wakeup_stats); pthread_mutex_unlock( &proc->mutex_fep ); // call second slot in this symbol fep0(ru,1); + start_meas(&ru->ofdm_demod_wait_stats); wait_on_busy_condition(&proc->mutex_fep,&proc->cond_fep,&proc->instance_cnt_fep,"fep thread"); + stop_meas(&ru->ofdm_demod_wait_stats); + if(opp_enabled == 1 && ru->ofdm_demod_wakeup_stats.diff_now>30*3000){ + print_meas_now(&ru->ofdm_demod_wakeup_stats,"fep wakeup",stderr); + printf("delay in fep wait on codition in frame_rx: %d subframe_rx: %d \n",proc->frame_rx,proc->subframe_rx); + } stop_meas(&ru->ofdm_demod_stats); + VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_RU_FEPRX, 0 ); } diff --git a/openair2/ENB_APP/enb_config.c b/openair2/ENB_APP/enb_config.c index aa5bbdb3c73b71760a5f28ff18c6e430a99125a2..19c168503fbea71b190eb870de1ce7a4c7c2b5d1 100644 --- a/openair2/ENB_APP/enb_config.c +++ b/openair2/ENB_APP/enb_config.c 
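The ru_procedures.c hunks above keep the existing fork/join split between the caller and the fep/feptx worker: the caller raises the instance counter (instance_cnt_fep or instance_cnt_feptx), signals the worker through the condition variable so it processes one of the two slots, handles the other slot itself, and then blocks in wait_on_busy_condition() until the worker drops the counter back to idle. Below is a minimal, self-contained sketch of that pattern; the names worker_ctx_t, do_slot() and worker_thread() are hypothetical stand-ins for fep0()/feptx0() and OAI's wait_on_condition()/release_thread() wrappers, so treat it as an illustration under those assumptions rather than the actual implementation.

#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mutex;
  pthread_cond_t  cond;
  int instance_cnt;   /* -1: idle, 0: busy; mirrors proc->instance_cnt_fep */
  int exit_flag;
} worker_ctx_t;

static worker_ctx_t w = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, -1, 0 };

static void do_slot(int slot)          /* stands in for fep0()/feptx0() */
{
  printf("processing slot %d on %s thread\n", slot, slot ? "worker" : "main");
}

static void *worker_thread(void *arg)  /* role of fep_thread()/feptx_thread() */
{
  (void)arg;
  for (;;) {
    pthread_mutex_lock(&w.mutex);      /* wait_on_condition() equivalent */
    while (w.instance_cnt < 0 && !w.exit_flag)
      pthread_cond_wait(&w.cond, &w.mutex);
    if (w.exit_flag) { pthread_mutex_unlock(&w.mutex); break; }
    pthread_mutex_unlock(&w.mutex);

    do_slot(1);                        /* worker handles one of the two slots */

    pthread_mutex_lock(&w.mutex);      /* release_thread() + signal back */
    w.instance_cnt = -1;
    pthread_cond_signal(&w.cond);
    pthread_mutex_unlock(&w.mutex);
  }
  return NULL;
}

int main(void)
{
  pthread_t tid;
  pthread_create(&tid, NULL, worker_thread, NULL);

  pthread_mutex_lock(&w.mutex);        /* "fork": hand the worker its slot */
  w.instance_cnt = 0;
  pthread_cond_signal(&w.cond);
  pthread_mutex_unlock(&w.mutex);

  do_slot(0);                          /* caller processes the other slot */

  pthread_mutex_lock(&w.mutex);        /* "join": wait_on_busy_condition() */
  while (w.instance_cnt == 0)
    pthread_cond_wait(&w.cond, &w.mutex);
  pthread_mutex_unlock(&w.mutex);

  pthread_mutex_lock(&w.mutex);        /* tell the worker to exit */
  w.exit_flag = 1;
  pthread_cond_signal(&w.cond);
  pthread_mutex_unlock(&w.mutex);
  pthread_join(tid, NULL);
  return 0;
}
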
@@ -297,6 +297,9 @@ void RCconfig_macrlc() { mac_top_init_eNB(); for (j=0;j<RC.nb_macrlc_inst;j++) { + //RC.mac[j]->phy_test = *(MacRLC_ParamList.paramarray[j][MACRLC_PHY_TEST_IDX].iptr); + //printf("PHY_TEST = %d,%d\n", RC.mac[j]->phy_test, j); + if (strcmp(*(MacRLC_ParamList.paramarray[j][MACRLC_TRANSPORT_N_PREFERENCE_IDX].strptr), "local_RRC") == 0) { // check number of instances is same as RRC/PDCP diff --git a/openair2/ENB_APP/enb_paramdef.h b/openair2/ENB_APP/enb_paramdef.h index a8379c259f948882e6a17c4b98b5daccbfe2067f..fe2b5a8515c164681841375b1b00576255fcc59e 100755 --- a/openair2/ENB_APP/enb_paramdef.h +++ b/openair2/ENB_APP/enb_paramdef.h @@ -687,4 +687,43 @@ typedef enum { #define CONFIG_STRING_MACRLC_CONFIG "macrlc_config" - +/* MACRLC configuration parameters names */ +#define CONFIG_STRING_MACRLC_CC "num_cc" +#define CONFIG_STRING_MACRLC_TRANSPORT_N_PREFERENCE "tr_n_preference" +#define CONFIG_STRING_MACRLC_LOCAL_N_IF_NAME "local_n_if_name" +#define CONFIG_STRING_MACRLC_LOCAL_N_ADDRESS "local_n_address" +#define CONFIG_STRING_MACRLC_REMOTE_N_ADDRESS "remote_n_address" +#define CONFIG_STRING_MACRLC_LOCAL_N_PORTC "local_n_portc" +#define CONFIG_STRING_MACRLC_REMOTE_N_PORTC "remote_n_portc" +#define CONFIG_STRING_MACRLC_LOCAL_N_PORTD "local_n_portd" +#define CONFIG_STRING_MACRLC_REMOTE_N_PORTD "remote_n_portd" +#define CONFIG_STRING_MACRLC_TRANSPORT_S_PREFERENCE "tr_s_preference" +#define CONFIG_STRING_MACRLC_LOCAL_S_IF_NAME "local_s_if_name" +#define CONFIG_STRING_MACRLC_LOCAL_S_ADDRESS "local_s_address" +#define CONFIG_STRING_MACRLC_REMOTE_S_ADDRESS "remote_s_address" +#define CONFIG_STRING_MACRLC_LOCAL_S_PORTC "local_s_portc" +#define CONFIG_STRING_MACRLC_REMOTE_S_PORTC "remote_s_portc" +#define CONFIG_STRING_MACRLC_LOCAL_S_PORTD "local_s_portd" +#define CONFIG_STRING_MACRLC_REMOTE_S_PORTD "remote_s_portd" +#define CONFIG_STRING_MACRLC_PHY_TEST_MODE "phy_test_mode" + + +#define MACRLC_CC_IDX 0 +#define MACRLC_TRANSPORT_N_PREFERENCE_IDX 1 +#define MACRLC_LOCAL_N_IF_NAME_IDX 2 +#define MACRLC_LOCAL_N_ADDRESS_IDX 3 +#define MACRLC_REMOTE_N_ADDRESS_IDX 4 +#define MACRLC_LOCAL_N_PORTC_IDX 5 +#define MACRLC_REMOTE_N_PORTC_IDX 6 +#define MACRLC_LOCAL_N_PORTD_IDX 7 +#define MACRLC_REMOTE_N_PORTD_IDX 8 +#define MACRLC_TRANSPORT_S_PREFERENCE_IDX 9 +#define MACRLC_LOCAL_S_IF_NAME_IDX 10 +#define MACRLC_LOCAL_S_ADDRESS_IDX 11 +#define MACRLC_REMOTE_S_ADDRESS_IDX 12 +#define MACRLC_LOCAL_S_PORTC_IDX 13 +#define MACRLC_REMOTE_S_PORTC_IDX 14 +#define MACRLC_LOCAL_S_PORTD_IDX 15 +#define MACRLC_REMOTE_S_PORTD_IDX 16 +#define MACRLC_PHY_TEST_IDX 17 +/*---------------------------------------------------------------------------------------------------------------------------------------------------------*/ diff --git a/openair2/LAYER2/MAC/defs.h b/openair2/LAYER2/MAC/defs.h index e3f5d954f1aacd5d6b958a30c52e55f46092fe29..107483b35a071e65a5a8118127b08b483ff78cf2 100644 --- a/openair2/LAYER2/MAC/defs.h +++ b/openair2/LAYER2/MAC/defs.h @@ -1071,6 +1071,8 @@ typedef struct eNB_MAC_INST_s { COMMON_channels_t common_channels[MAX_NUM_CCs]; /// current PDU index (BCH,MCH,DLSCH) uint16_t pdu_index[MAX_NUM_CCs]; + /// flag to enable phy-test (disables the scheduler) + uint16_t phy_test; /// NFAPI Config Request Structure nfapi_config_request_t config[MAX_NUM_CCs]; diff --git a/openair2/LAYER2/MAC/eNB_scheduler.c b/openair2/LAYER2/MAC/eNB_scheduler.c index 8dab2a9b71857a027ded133e2f932df1a1be1d60..3acd921fb8629f9854b4992db55b261fb20908cd 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler.c +++ 
b/openair2/LAYER2/MAC/eNB_scheduler.c @@ -64,6 +64,7 @@ #define DEBUG_eNB_SCHEDULER 1 extern RAN_CONTEXT_t RC; +extern int phy_test; uint16_t pdcch_order_table[6] = { 31, 31, 511, 2047, 2047, 8191 }; @@ -640,26 +641,33 @@ eNB_dlsch_ulsch_scheduler(module_id_t module_idP, frame_t frameP, #endif // This schedules MIB + if ((subframeP == 0) && (frameP & 3) == 0) schedule_mib(module_idP, frameP, subframeP); - // This schedules SI for legacy LTE and eMTC starting in subframeP - schedule_SI(module_idP, frameP, subframeP); - // This schedules Paging in subframeP - schedule_PCH(module_idP,frameP,subframeP); - // This schedules Random-Access for legacy LTE and eMTC starting in subframeP - schedule_RA(module_idP, frameP, subframeP); - // copy previously scheduled UL resources (ULSCH + HARQ) - copy_ulreq(module_idP, frameP, subframeP); - // This schedules SRS in subframeP - schedule_SRS(module_idP, frameP, subframeP); - // This schedules ULSCH in subframeP (dci0) - schedule_ulsch(module_idP, frameP, subframeP); - // This schedules UCI_SR in subframeP - schedule_SR(module_idP, frameP, subframeP); - // This schedules UCI_CSI in subframeP - schedule_CSI(module_idP, frameP, subframeP); - // This schedules DLSCH in subframeP - schedule_dlsch(module_idP, frameP, subframeP, mbsfn_status); + if (phy_test == 0){ + // This schedules SI for legacy LTE and eMTC starting in subframeP + schedule_SI(module_idP, frameP, subframeP); + // This schedules Paging in subframeP + schedule_PCH(module_idP,frameP,subframeP); + // This schedules Random-Access for legacy LTE and eMTC starting in subframeP + schedule_RA(module_idP, frameP, subframeP); + // copy previously scheduled UL resources (ULSCH + HARQ) + copy_ulreq(module_idP, frameP, subframeP); + // This schedules SRS in subframeP + schedule_SRS(module_idP, frameP, subframeP); + // This schedules ULSCH in subframeP (dci0) + schedule_ulsch(module_idP, frameP, subframeP); + // This schedules UCI_SR in subframeP + schedule_SR(module_idP, frameP, subframeP); + // This schedules UCI_CSI in subframeP + schedule_CSI(module_idP, frameP, subframeP); + // This schedules DLSCH in subframeP + schedule_dlsch(module_idP, frameP, subframeP, mbsfn_status); + } + else{ + schedule_ulsch_phy_test(module_idP,frameP,subframeP); + schedule_ue_spec_phy_test(module_idP,frameP,subframeP,mbsfn_status); + } if (RC.flexran[module_idP]->enabled) flexran_agent_send_update_stats(module_idP); diff --git a/openair2/LAYER2/MAC/eNB_scheduler_bch.c b/openair2/LAYER2/MAC/eNB_scheduler_bch.c index de4641fac6dc499dd54cf9b77ca8fc121e76a451..f12702fbb8c554d75784e3aa7472f586dd6817af 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_bch.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_bch.c @@ -787,6 +787,7 @@ schedule_SI(module_id_t module_idP, frame_t frameP, sub_frame_t subframeP) eNB->eNB_stats[CC_id].bcch_buffer = bcch_sdu_length; eNB->eNB_stats[CC_id].total_bcch_buffer += bcch_sdu_length; eNB->eNB_stats[CC_id].bcch_mcs = mcs; +//printf("SI %d.%d\n", frameP, subframeP);/////////////////////////////////////////****************************** } else { //LOG_D(MAC,"[eNB %d] Frame %d : BCCH not active \n",Mod_id,frame); diff --git a/openair2/LAYER2/MAC/eNB_scheduler_phytest.c b/openair2/LAYER2/MAC/eNB_scheduler_phytest.c new file mode 100644 index 0000000000000000000000000000000000000000..e6258593245ea368c65d386e26dc6cc0aa5a362f --- /dev/null +++ b/openair2/LAYER2/MAC/eNB_scheduler_phytest.c @@ -0,0 +1,353 @@ +/* + * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more + * contributor license 
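/* eNB_scheduler.c above now branches on a global "phy_test" flag: when it is set, the regular
   SI/PCH/RA/SRS/ULSCH/SR/CSI/DLSCH chain is skipped and the two fixed-allocation test schedulers
   run instead. The configuration side is only half wired up: enb_paramdef.h gains
   CONFIG_STRING_MACRLC_PHY_TEST_MODE / MACRLC_PHY_TEST_IDX and eNB_MAC_INST gains a phy_test
   field, but the read in RCconfig_macrlc() is left commented out. A sketch of what that read
   could look like once the matching entry exists in the MACRLC paramdef descriptor table (not
   shown in this diff); treat it as an assumption, not committed behaviour. */

for (j = 0; j < RC.nb_macrlc_inst; j++) {
  /* copy the per-instance phy_test_mode value into the MAC instance, so the
     scheduler could branch on RC.mac[j]->phy_test instead of a global flag */
  RC.mac[j]->phy_test = *(MacRLC_ParamList.paramarray[j][MACRLC_PHY_TEST_IDX].iptr);
  LOG_I(MAC, "MACRLC instance %d: phy_test_mode = %d\n", j, RC.mac[j]->phy_test);
}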
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The OpenAirInterface Software Alliance licenses this file to You under + * the OAI Public License, Version 1.0 (the "License"); you may not use this file + * except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.openairinterface.org/?page_id=698 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------------------- + * For more information about the OpenAirInterface (OAI) Software Alliance: + * contact@openairinterface.org + */ + +/*! \file eNB_scheduler_dlsch.c + * \brief procedures related to eNB for the DLSCH transport channel + * \author Navid Nikaein and Raymond Knopp + * \date 2010 - 2014 + * \email: navid.nikaein@eurecom.fr + * \version 1.0 + * @ingroup _mac + + */ + +#include "assertions.h" +#include "PHY/defs.h" +#include "PHY/extern.h" + +#include "SCHED/defs.h" +#include "SCHED/extern.h" + +#include "LAYER2/MAC/defs.h" +#include "LAYER2/MAC/proto.h" +#include "LAYER2/MAC/extern.h" +#include "UTIL/LOG/log.h" +#include "UTIL/LOG/vcd_signal_dumper.h" +#include "UTIL/OPT/opt.h" +#include "OCG.h" +#include "OCG_extern.h" + +#include "SIMULATION/TOOLS/defs.h" // for taus + +#include "T.h" + +extern RAN_CONTEXT_t RC; + +//------------------------------------------------------------------------------ +void +schedule_ue_spec_phy_test( + module_id_t module_idP, + frame_t frameP, + sub_frame_t subframeP, + int* mbsfn_flag +) +//------------------------------------------------------------------------------ +{ + uint8_t CC_id; + int UE_id=0; + uint16_t N_RB_DL; + uint16_t TBS; + uint16_t nb_rb; + + unsigned char harq_pid = subframeP%5; + uint16_t rnti = 0x1235; + uint32_t rb_alloc = 0x1FFFFFFF; + int32_t tpc = 1; + int32_t mcs = 28; + int32_t cqi = 15; + int32_t ndi = subframeP/5; + int32_t dai = 0; + + eNB_MAC_INST *eNB = RC.mac[module_idP]; + COMMON_channels_t *cc = eNB->common_channels; + nfapi_dl_config_request_body_t *dl_req; + nfapi_dl_config_request_pdu_t *dl_config_pdu; + + N_RB_DL = to_prb(cc->mib->message.dl_Bandwidth); + + for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) { + LOG_D(MAC, "doing schedule_ue_spec for CC_id %d\n",CC_id); + + dl_req = &eNB->DL_req[CC_id].dl_config_request_body; + + if (mbsfn_flag[CC_id]>0) + continue; + + nb_rb = conv_nprb(0,rb_alloc,N_RB_DL); + //printf("////////////////////////////////////*************************nb_rb = %d\n",nb_rb); + TBS = get_TBS_DL(mcs,nb_rb); + + LOG_D(PHY,"schedule_ue_spec_phy_test: subframe %d/%d: nb_rb=%d, TBS=%d, mcs=%d (rb_alloc=%x, N_RB_DL=%d)\n",frameP,subframeP,nb_rb,TBS,mcs,rb_alloc,N_RB_DL); + + dl_config_pdu = &dl_req->dl_config_pdu_list[dl_req->number_pdu]; + memset((void*)dl_config_pdu,0,sizeof(nfapi_dl_config_request_pdu_t)); + dl_config_pdu->pdu_type = NFAPI_DL_CONFIG_DCI_DL_PDU_TYPE; + dl_config_pdu->pdu_size = (uint8_t)(2+sizeof(nfapi_dl_config_dci_dl_pdu)); + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.dci_format = NFAPI_DL_DCI_FORMAT_1; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level = get_aggregation(get_bw_index(module_idP,CC_id),cqi,format1); + 
dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_allocation_type = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.virtual_resource_block_assignment_flag = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.resource_block_coding = rb_alloc; + + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti = rnti; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.rnti_type = 1; // CRNTI : see Table 4-10 from SCF082 - nFAPI specifications + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.transmission_power = 6000; // equal to RS power + + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.harq_process = harq_pid; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.tpc = tpc; // dont adjust power when retransmitting + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.new_data_indicator_1 = ndi; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_1 = mcs; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_1 = 0; + //deactivate second codeword + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.mcs_2 = 0; + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.redundancy_version_2 = 1; + + if (cc[CC_id].tdd_Config != NULL) { //TDD + dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.downlink_assignment_index = dai; + LOG_D(MAC,"[eNB %d] Initial transmission CC_id %d : harq_pid %d, dai %d, mcs %d\n", + module_idP,CC_id,harq_pid,dai,mcs); + } else { + LOG_D(MAC,"[eNB %d] Initial transmission CC_id %d : harq_pid %d, mcs %d\n", + module_idP,CC_id,harq_pid,mcs); + + } + LOG_D(MAC,"Checking feasibility pdu %d (new sdu)\n",dl_req->number_pdu); + if (!CCE_allocation_infeasible(module_idP,CC_id,1,subframeP,dl_config_pdu->dci_dl_pdu.dci_dl_pdu_rel8.aggregation_level,rnti)) { + + + //ue_sched_ctl->round[CC_id][harq_pid] = 0; + dl_req->number_dci++; + dl_req->number_pdu++; + + // Toggle NDI for next time + /* + LOG_D(MAC,"CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n", + CC_id, frameP,subframeP,UE_id, + rnti,harq_pid,UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]); + + UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]=1-UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]; + UE_list->UE_template[CC_id][UE_id].oldmcs1[harq_pid] = mcs; + UE_list->UE_template[CC_id][UE_id].oldmcs2[harq_pid] = 0; + AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated!=NULL,"physicalConfigDedicated is NULL\n"); + AssertFatal(UE_list->UE_template[CC_id][UE_id].physicalConfigDedicated->pdsch_ConfigDedicated!=NULL,"physicalConfigDedicated->pdsch_ConfigDedicated is NULL\n"); + */ + + + + fill_nfapi_dlsch_config(eNB, + dl_req, + TBS, + eNB->pdu_index[CC_id], + rnti, + 0, // type 0 allocation from 7.1.6 in 36.213 + 0, // virtual_resource_block_assignment_flag + rb_alloc, // resource_block_coding + getQm(mcs), + 0, // redundancy version + 1, // transport blocks + 0, // transport block to codeword swap flag + cc[CC_id].p_eNB == 1 ? 0 : 1, // transmission_scheme + 1, // number of layers + 1, // number of subbands + // uint8_t codebook_index, + 4, // UE category capacity + PDSCH_ConfigDedicated__p_a_dB0, + 0, // delta_power_offset for TM5 + 0, // ngap + 0, // nprb + cc[CC_id].p_eNB == 1 ? 
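/* schedule_ue_spec_phy_test() above builds the downlink DCI from fixed test parameters
   (RNTI 0x1235, MCS 28, type-0 allocation 0x1FFFFFFF) and derives the HARQ process and NDI
   from the subframe number alone. A standalone sketch of that derivation; the struct and
   function names are illustrative, only the constants and arithmetic come from the scheduler
   above, and the TBS lookup is omitted because it needs the 36.213 tables. */

#include <stdint.h>
#include <stdio.h>

struct dl_test_alloc {
  uint16_t rnti;
  uint32_t rb_alloc;   /* type-0 RBG bitmap passed as resource_block_coding */
  uint8_t  mcs;
  uint8_t  harq_pid;
  uint8_t  ndi;
};

static struct dl_test_alloc dl_test_alloc_for_subframe(int subframe)
{
  struct dl_test_alloc a;
  a.rnti     = 0x1235;
  a.rb_alloc = 0x1FFFFFFF;
  a.mcs      = 28;
  a.harq_pid = subframe % 5;   /* HARQ processes 0..4, reused every 5 subframes */
  a.ndi      = subframe / 5;   /* 0 in the first half of the frame, 1 in the second */
  return a;
}

int main(void)
{
  for (int sf = 0; sf < 10; sf++) {
    struct dl_test_alloc a = dl_test_alloc_for_subframe(sf);
    printf("subframe %d: harq_pid %u ndi %u\n", sf, a.harq_pid, a.ndi);
  }
  return 0;
}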
1 : 2, // transmission mode + 0, //number of PRBs treated as one subband, not used here + 0 // number of beamforming vectors, not used here + ); + + eNB->TX_req[CC_id].sfn_sf = fill_nfapi_tx_req(&eNB->TX_req[CC_id].tx_request_body, + (frameP*10)+subframeP, + TBS, + eNB->pdu_index[CC_id], + eNB->UE_list.DLSCH_pdu[CC_id][0][(unsigned char)UE_id].payload[0]); + } + else { + LOG_W(MAC,"[eNB_scheduler_phytest] DCI allocation infeasible!\n"); + } + } +} + +void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t subframeP) +{ + uint16_t first_rb[MAX_NUM_CCs]; + int UE_id = 0; + uint8_t aggregation = 2; + rnti_t rnti = 0x1235; + uint8_t mcs = 0; + uint8_t harq_pid = 0; + uint32_t cqi_req = 0,cshift,ndi,tpc = 1; + int32_t normalized_rx_power; + int32_t target_rx_power= 178; + int CC_id = 0; + int nb_rb = 96; + eNB_MAC_INST *eNB = RC.mac[module_idP]; + COMMON_channels_t *cc = eNB->common_channels; + UE_list_t *UE_list=&eNB->UE_list; + UE_TEMPLATE *UE_template; + UE_sched_ctrl *UE_sched_ctrl; + int sched_frame=frameP; + int sched_subframe = (subframeP+4)%10; + + if (sched_subframe<subframeP) sched_frame++; + + nfapi_hi_dci0_request_body_t *hi_dci0_req = &eNB->HI_DCI0_req[CC_id].hi_dci0_request_body; + nfapi_hi_dci0_request_pdu_t *hi_dci0_pdu; + + //nfapi_ul_config_request_pdu_t *ul_config_pdu = &ul_req->ul_config_pdu_list[0];; + nfapi_ul_config_request_body_t *ul_req = &eNB->UL_req[CC_id].ul_config_request_body; + + + eNB->UL_req[CC_id].sfn_sf = (sched_frame<<4) + sched_subframe; + eNB->HI_DCI0_req[CC_id].sfn_sf = (frameP<<4)+subframeP; + + for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) { + //rnti = UE_RNTI(module_idP,UE_id); + //leave out first RB for PUCCH + first_rb[CC_id] = 1; + // loop over all active UEs + + // if (eNB_UE_stats->mode == PUSCH) { // ue has a ulsch channel + + UE_template = &UE_list->UE_template[CC_id][UE_id]; + UE_sched_ctrl = &UE_list->UE_sched_ctrl[UE_id]; + harq_pid = subframe2harqpid(&cc[CC_id],sched_frame,sched_subframe); + + LOG_D(MAC,"Scheduling for frame %d, subframe %d => harq_pid %d\n",sched_frame,sched_subframe,harq_pid); + + RC.eNB[module_idP][CC_id]->pusch_stats_BO[UE_id][(frameP*10)+subframeP] = UE_template->TBS_UL[harq_pid]; + + + + //power control + //compute the expected ULSCH RX power (for the stats) + + // this is the normalized RX power and this should be constant (regardless of mcs + normalized_rx_power = UE_sched_ctrl->pusch_snr[CC_id]; + + // new transmission + + ndi = 1-UE_template->oldNDI_UL[harq_pid]; + UE_template->oldNDI_UL[harq_pid]=ndi; + UE_list->eNB_UE_stats[CC_id][UE_id].normalized_rx_power=normalized_rx_power; + UE_list->eNB_UE_stats[CC_id][UE_id].target_rx_power=target_rx_power; + UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1 = mcs; + UE_template->mcs_UL[harq_pid] = mcs;//cmin (UE_template->pre_assigned_mcs_ul, openair_daq_vars.target_ue_ul_mcs); // adjust, based on user-defined MCS + UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs2 = mcs; + // buffer_occupancy = UE_template->ul_total_buffer; + + + + UE_template->TBS_UL[harq_pid] = get_TBS_UL(mcs,nb_rb); + UE_list->eNB_UE_stats[CC_id][UE_id].total_rbs_used_rx += nb_rb; + UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_TBS = get_TBS_UL(mcs,nb_rb); + // buffer_occupancy -= TBS; + + + + // bad indices : 20 (40 PRB), 21 (45 PRB), 22 (48 PRB) + //store for possible retransmission + UE_template->nb_rb_ul[harq_pid] = nb_rb; + UE_template->first_rb_ul[harq_pid] = first_rb[CC_id]; + + UE_sched_ctrl->ul_scheduled |= (1<<harq_pid); + + // adjust total UL buffer status by TBS, wait for UL sdus 
to do final update + //UE_template->ul_total_buffer = UE_template->TBS_UL[harq_pid]; + // Cyclic shift for DM RS + cshift = 0;// values from 0 to 7 can be used for mapping the cyclic shift (36.211 , Table 5.5.2.1.1-1) + // save it for a potential retransmission + UE_template->cshift[harq_pid] = cshift; + + hi_dci0_pdu = &hi_dci0_req->hi_dci0_pdu_list[eNB->HI_DCI0_req[CC_id].hi_dci0_request_body.number_of_dci+eNB->HI_DCI0_req[CC_id].hi_dci0_request_body.number_of_hi]; + memset((void*)hi_dci0_pdu,0,sizeof(nfapi_hi_dci0_request_pdu_t)); + hi_dci0_pdu->pdu_type = NFAPI_HI_DCI0_DCI_PDU_TYPE; + hi_dci0_pdu->pdu_size = 2+sizeof(nfapi_hi_dci0_dci_pdu); + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.dci_format = NFAPI_UL_DCI_FORMAT_0; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.aggregation_level = aggregation; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.rnti = rnti; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.transmission_power = 6000; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.resource_block_start = first_rb[CC_id]; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.number_of_resource_block = nb_rb; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.mcs_1 = mcs; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.cyclic_shift_2_for_drms = cshift; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.frequency_hopping_enabled_flag = 0; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.new_data_indication_1 = ndi; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.tpc = tpc; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.cqi_csi_request = cqi_req; + hi_dci0_pdu->dci_pdu.dci_pdu_rel8.dl_assignment_index = UE_template->DAI_ul[sched_subframe]; + + + eNB->HI_DCI0_req[CC_id].hi_dci0_request_body.number_of_dci++; + + + // Add UL_config PDUs + fill_nfapi_ulsch_config_request_rel8(&ul_req->ul_config_pdu_list[ul_req->number_of_pdus], + cqi_req, + cc, + 0,//UE_template->physicalConfigDedicated, + get_tmode(module_idP,CC_id,UE_id), + eNB->ul_handle, + rnti, + first_rb[CC_id], // resource_block_start + nb_rb, // number_of_resource_blocks + mcs, + cshift, // cyclic_shift_2_for_drms + 0, // frequency_hopping_enabled_flag + 0, // frequency_hopping_bits + ndi, // new_data_indication + 0, // redundancy_version + harq_pid, // harq_process_number + 0, // ul_tx_mode + 0, // current_tx_nb + 0, // n_srs + get_TBS_UL(mcs,nb_rb) + ); +#ifdef Rel14 + if (UE_template->rach_resource_type>0) { // This is a BL/CE UE allocation + fill_nfapi_ulsch_config_request_emtc(&ul_req->ul_config_pdu_list[ul_req->number_of_pdus], + UE_template->rach_resource_type>2 ? 2 : 1, + 1, //total_number_of_repetitions + 1, //repetition_number + (frameP*10)+subframeP); + } +#endif + ul_req->number_of_pdus = 1; + eNB->ul_handle++; + + + + add_ue_ulsch_info(module_idP, + CC_id, + UE_id, + subframeP, + S_UL_SCHEDULED); + + // increment first rb for next UE allocation + first_rb[CC_id]+= nb_rb; + + + } // loop of CC_id +} diff --git a/openair2/LAYER2/MAC/proto.h b/openair2/LAYER2/MAC/proto.h index 2586c94f432887276f6359e5039b55770dae3bd9..64daede100d723348bf707ca698cf12df7915896 100644 --- a/openair2/LAYER2/MAC/proto.h +++ b/openair2/LAYER2/MAC/proto.h @@ -150,6 +150,9 @@ void schedule_dlsch(module_id_t module_idP, frame_t frameP, void schedule_ue_spec(module_id_t module_idP, slice_id_t slice_idP, frame_t frameP,sub_frame_t subframe, int *mbsfn_flag); +void schedule_ue_spec_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t subframe,int *mbsfn_flag); +void schedule_ulsch_phy_test(module_id_t module_idP,frame_t frameP,sub_frame_t subframeP); + /** \brief Function for UE/PHY to compute PUSCH transmit power in power-control procedure. 
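/* schedule_ulsch_phy_test() above sends the UL grant (DCI format 0) in subframe n for a PUSCH
   four subframes later, and packs the target frame/subframe into the nFAPI sfn_sf word as
   (frame << 4) + subframe. The same timing computation, spelled out as a standalone example;
   the SFN mask at 1024 is added here for completeness (the hunk above only increments). */

#include <stdint.h>
#include <stdio.h>

static uint16_t nfapi_sfn_sf(int frame, int subframe)
{
  return (uint16_t)((frame << 4) + subframe);   /* packing used for UL_req / HI_DCI0_req above */
}

int main(void)
{
  int frame = 1023, subframe = 8;

  int sched_subframe = (subframe + 4) % 10;     /* PUSCH is granted 4 subframes after the DCI0 */
  int sched_frame    = frame;
  if (sched_subframe < subframe)                /* wrapped past subframe 9 */
    sched_frame = (sched_frame + 1) & 1023;     /* SFN range is 0..1023 */

  printf("DCI0 at %d.%d -> PUSCH at %d.%d, UL_req sfn_sf = 0x%04x\n",
         frame, subframe, sched_frame, sched_subframe,
         nfapi_sfn_sf(sched_frame, sched_subframe));
  return 0;
}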
@param Mod_id Module id of UE diff --git a/openair2/UTIL/LOG/vcd_signal_dumper.c b/openair2/UTIL/LOG/vcd_signal_dumper.c index b18d2a25ffd714c778e16a96a971db1e081694d9..a026cfb081a70e1ffd8ab833dd711c329b6ee33e 100644 --- a/openair2/UTIL/LOG/vcd_signal_dumper.c +++ b/openair2/UTIL/LOG/vcd_signal_dumper.c @@ -191,6 +191,9 @@ const char* eurecomVariablesNames[] = { "ue0_trx_write_ns", "ue0_trx_read_ns_missing", "ue0_trx_write_ns_missing", + "enb_thread_rxtx_CPUID", + "ru_thread_CPUID", + "ru_thread_tx_CPUID" }; const char* eurecomFunctionsNames[] = { diff --git a/openair2/UTIL/LOG/vcd_signal_dumper.h b/openair2/UTIL/LOG/vcd_signal_dumper.h index a4c8cd2476e08f0b234e47540b54b89b1d6b52fd..a47cdc059d1a8236b78255b28d457ad8979613e5 100644 --- a/openair2/UTIL/LOG/vcd_signal_dumper.h +++ b/openair2/UTIL/LOG/vcd_signal_dumper.h @@ -167,6 +167,9 @@ typedef enum { VCD_SIGNAL_DUMPER_VARIABLES_UE0_TRX_WRITE_NS, VCD_SIGNAL_DUMPER_VARIABLES_UE0_TRX_READ_NS_MISSING, VCD_SIGNAL_DUMPER_VARIABLES_UE0_TRX_WRITE_NS_MISSING, + VCD_SIGNAL_DUMPER_VARIABLES_CPUID_ENB_THREAD_RXTX, + VCD_SIGNAL_DUMPER_VARIABLES_CPUID_RU_THREAD, + VCD_SIGNAL_DUMPER_VARIABLES_CPUID_RU_THREAD_TX, VCD_SIGNAL_DUMPER_VARIABLES_END } vcd_signal_dump_variables; diff --git a/targets/ARCH/COMMON/common_lib.c b/targets/ARCH/COMMON/common_lib.c index d74b1c6d4fed2d65640c1d5989e2ec846c642dcf..7ff3b820dca5e121f585bd3b9c5a5827ddea1e95 100644 --- a/targets/ARCH/COMMON/common_lib.c +++ b/targets/ARCH/COMMON/common_lib.c @@ -117,7 +117,7 @@ int openair0_device_load(openair0_device *device, openair0_config_t *openair0_cf rc=load_lib(device, openair0_cfg, NULL,RAU_LOCAL_RADIO_HEAD ); if ( rc >= 0) { - if ( set_device(device) < 0) { + if ( set_device(device) < 0) { fprintf(stderr, "%s %d:Unsupported radio head\n",__FILE__, __LINE__); return -1; } diff --git a/targets/ARCH/USRP/USERSPACE/LIB/usrp_lib.cpp b/targets/ARCH/USRP/USERSPACE/LIB/usrp_lib.cpp index 9b1755954ae84655a75cbe25ba0570291d01097f..a99c9d7a5d01d95060c9909486bf6528be2b547c 100644 --- a/targets/ARCH/USRP/USERSPACE/LIB/usrp_lib.cpp +++ b/targets/ARCH/USRP/USERSPACE/LIB/usrp_lib.cpp @@ -982,15 +982,20 @@ extern "C" { // workaround for an api problem, master clock has to be set with the constructor not via set_master_clock_rate args += boost::str(boost::format(",master_clock_rate=%f") % usrp_master_clock); -// args += ",num_send_frames=256,num_recv_frames=256, send_frame_size=4096, recv_frame_size=4096"; - // args += ",num_send_frames=256,num_recv_frames=256, send_frame_size=4096, recv_frame_size=4096"; uhd::device_addrs_t device_adds = uhd::device::find(args); if(device_adds.size() == 0) { - std::cerr<<"No USRP Device Found. " << std::endl; - free(s); - return -1; + args += ",addr=192.168.30.2"; + + uhd::device_addrs_t device_adds = uhd::device::find(args); + + if(device_adds.size() == 0) { + + std::cerr<<"No USRP Device Found. 
" << std::endl; + free(s); + return -1; + } } LOG_I(PHY,"Found USRP X300\n"); s->usrp = uhd::usrp::multi_usrp::make(args); @@ -1016,6 +1021,20 @@ extern "C" { LOG_I(PHY,"%s() sample_rate:%u\n", __FUNCTION__, (int)openair0_cfg[0].sample_rate); switch ((int)openair0_cfg[0].sample_rate) { + case 122880000: + // from usrp_time_offset + //openair0_cfg[0].samples_per_packet = 2048; + openair0_cfg[0].tx_sample_advance = 15; //to be checked + openair0_cfg[0].tx_bw = 80e6; + openair0_cfg[0].rx_bw = 80e6; + break; + case 61440000: + // from usrp_time_offset + //openair0_cfg[0].samples_per_packet = 2048; + openair0_cfg[0].tx_sample_advance = 15; + openair0_cfg[0].tx_bw = 40e6; + openair0_cfg[0].rx_bw = 40e6; + break; case 30720000: // from usrp_time_offset //openair0_cfg[0].samples_per_packet = 2048; diff --git a/targets/COMMON/create_tasks.c b/targets/COMMON/create_tasks.c index 1efe6f73eb74a8d518a81015f7c1a3ee0decefbf..04c503cb361d38c8d953ea89a4681200beef4ec2 100644 --- a/targets/COMMON/create_tasks.c +++ b/targets/COMMON/create_tasks.c @@ -40,6 +40,8 @@ # endif # include "enb_app.h" +extern int emulate_rf; + int create_tasks(uint32_t enb_nb) { LOG_D(ENB_APP, "%s(enb_nb:%d\n", __FUNCTION__, enb_nb); @@ -69,10 +71,11 @@ int create_tasks(uint32_t enb_nb) LOG_E(S1AP, "Create task for S1AP failed\n"); return -1; } - - if (itti_create_task (TASK_UDP, udp_eNB_task, NULL) < 0) { - LOG_E(UDP_, "Create task for UDP failed\n"); - return -1; + if(!emulate_rf){ + if (itti_create_task (TASK_UDP, udp_eNB_task, NULL) < 0) { + LOG_E(UDP_, "Create task for UDP failed\n"); + return -1; + } } if (itti_create_task (TASK_GTPV1_U, >pv1u_eNB_task, NULL) < 0) { diff --git a/targets/PROJECTS/GENERIC-LTE-EPC/CONF/enb.band7.tm1.100PRB.usrpx310.conf b/targets/PROJECTS/GENERIC-LTE-EPC/CONF/enb.band7.tm1.100PRB.usrpx310.conf new file mode 100644 index 0000000000000000000000000000000000000000..6c7c319a11239189d7c314f3d01c829441301cf2 --- /dev/null +++ b/targets/PROJECTS/GENERIC-LTE-EPC/CONF/enb.band7.tm1.100PRB.usrpx310.conf @@ -0,0 +1,209 @@ +Active_eNBs = ( "eNB_Eurecom_LTEBox"); +# Asn1_verbosity, choice in: none, info, annoying +Asn1_verbosity = "none"; + +eNBs = +( + { + ////////// Identification parameters: + eNB_ID = 0xe00; + + cell_type = "CELL_MACRO_ENB"; + + eNB_name = "eNB_Eurecom_LTEBox"; + + // Tracking area code, 0x0000 and 0xfffe are reserved values + tracking_area_code = "1"; + + mobile_country_code = "208"; + + mobile_network_code = "93"; + + tr_s_preference = "local_mac" + + ////////// Physical parameters: + + component_carriers = ( + { + node_function = "eNodeB_3GPP"; + node_timing = "synch_to_ext_device"; + node_synch_ref = 0; + frame_type = "FDD"; + tdd_config = 3; + tdd_config_s = 0; + prefix_type = "NORMAL"; + eutra_band = 7; + downlink_frequency = 2685000000L; + uplink_frequency_offset = -120000000; + Nid_cell = 0; + N_RB_DL = 100; + Nid_cell_mbsfn = 0; + nb_antenna_ports = 1; + nb_antennas_tx = 1; + nb_antennas_rx = 1; + tx_gain = 90; + rx_gain = 125; + pbch_repetition = "FALSE"; + prach_root = 0; + prach_config_index = 0; + prach_high_speed = "DISABLE"; + prach_zero_correlation = 1; + prach_freq_offset = 2; + pucch_delta_shift = 1; + pucch_nRB_CQI = 1; + pucch_nCS_AN = 0; + pucch_n1_AN = 32; + pdsch_referenceSignalPower = -27; + pdsch_p_b = 0; + pusch_n_SB = 1; + pusch_enable64QAM = "DISABLE"; + pusch_hoppingMode = "interSubFrame"; + pusch_hoppingOffset = 0; + pusch_groupHoppingEnabled = "ENABLE"; + pusch_groupAssignment = 0; + pusch_sequenceHoppingEnabled = "DISABLE"; + pusch_nDMRS1 = 1; + 
phich_duration = "NORMAL"; + phich_resource = "ONESIXTH"; + srs_enable = "DISABLE"; + /* srs_BandwidthConfig =; + srs_SubframeConfig =; + srs_ackNackST =; + srs_MaxUpPts =;*/ + + pusch_p0_Nominal = -96; + pusch_alpha = "AL1"; + pucch_p0_Nominal = -104; + msg3_delta_Preamble = 6; + pucch_deltaF_Format1 = "deltaF2"; + pucch_deltaF_Format1b = "deltaF3"; + pucch_deltaF_Format2 = "deltaF0"; + pucch_deltaF_Format2a = "deltaF0"; + pucch_deltaF_Format2b = "deltaF0"; + + rach_numberOfRA_Preambles = 64; + rach_preamblesGroupAConfig = "DISABLE"; + /* + rach_sizeOfRA_PreamblesGroupA = ; + rach_messageSizeGroupA = ; + rach_messagePowerOffsetGroupB = ; + */ + rach_powerRampingStep = 4; + rach_preambleInitialReceivedTargetPower = -108; + rach_preambleTransMax = 10; + rach_raResponseWindowSize = 10; + rach_macContentionResolutionTimer = 48; + rach_maxHARQ_Msg3Tx = 4; + + pcch_default_PagingCycle = 128; + pcch_nB = "oneT"; + bcch_modificationPeriodCoeff = 2; + ue_TimersAndConstants_t300 = 1000; + ue_TimersAndConstants_t301 = 1000; + ue_TimersAndConstants_t310 = 1000; + ue_TimersAndConstants_t311 = 10000; + ue_TimersAndConstants_n310 = 20; + ue_TimersAndConstants_n311 = 1; + ue_TransmissionMode = 1; + } + ); + + + srb1_parameters : + { + # timer_poll_retransmit = (ms) [5, 10, 15, 20,... 250, 300, 350, ... 500] + timer_poll_retransmit = 80; + + # timer_reordering = (ms) [0,5, ... 100, 110, 120, ... ,200] + timer_reordering = 35; + + # timer_reordering = (ms) [0,5, ... 250, 300, 350, ... ,500] + timer_status_prohibit = 0; + + # poll_pdu = [4, 8, 16, 32 , 64, 128, 256, infinity(>10000)] + poll_pdu = 4; + + # poll_byte = (kB) [25,50,75,100,125,250,375,500,750,1000,1250,1500,2000,3000,infinity(>10000)] + poll_byte = 99999; + + # max_retx_threshold = [1, 2, 3, 4 , 6, 8, 16, 32] + max_retx_threshold = 4; + } + + # ------- SCTP definitions + SCTP : + { + # Number of streams to use in input/output + SCTP_INSTREAMS = 2; + SCTP_OUTSTREAMS = 2; + }; + + + ////////// MME parameters: + mme_ip_address = ( { ipv4 = "192.168.12.26"; + ipv6 = "192:168:30::17"; + active = "yes"; + preference = "ipv4"; + } + ); + + NETWORK_INTERFACES : + { + + ENB_INTERFACE_NAME_FOR_S1_MME = "eth6"; + ENB_IPV4_ADDRESS_FOR_S1_MME = "192.168.12.111/24"; + ENB_INTERFACE_NAME_FOR_S1U = "eth6"; + ENB_IPV4_ADDRESS_FOR_S1U = "192.168.12.111/24"; + ENB_PORT_FOR_S1U = 2152; # Spec 2152 + }; + } +); + +MACRLCs = ( + { + num_cc = 1; + tr_s_preference = "local_L1"; + tr_n_preference = "local_RRC"; + } +); + +L1s = ( + { + num_cc = 1; + tr_n_preference = "local_mac"; + } +); + +RUs = ( + { + local_rf = "yes" + nb_tx = 1 + nb_rx = 1 + att_tx = 0 + att_rx = 0; + bands = [7]; + max_pdschReferenceSignalPower = -27; + max_rxgain = 116; + eNB_instances = [0]; + + } +); + + log_config : + { + global_log_level ="info"; + global_log_verbosity ="medium"; + hw_log_level ="info"; + hw_log_verbosity ="medium"; + phy_log_level ="info"; + phy_log_verbosity ="medium"; + mac_log_level ="info"; + mac_log_verbosity ="medium"; + rlc_log_level ="info"; + rlc_log_verbosity ="medium"; + pdcp_log_level ="info"; + pdcp_log_verbosity ="medium"; + rrc_log_level ="info"; + rrc_log_verbosity ="medium"; + }; + diff --git a/targets/PROJECTS/GENERIC-LTE-EPC/CONF/enb.band7.tm1.50PRB.usrpb210.conf b/targets/PROJECTS/GENERIC-LTE-EPC/CONF/enb.band7.tm1.50PRB.usrpb210.conf index c4307f8ff2333d9bcbe202e719917912dbb947d9..cfbfa51cab275669015604756f7e00f9436121e1 100644 --- a/targets/PROJECTS/GENERIC-LTE-EPC/CONF/enb.band7.tm1.50PRB.usrpb210.conf +++ 
b/targets/PROJECTS/GENERIC-LTE-EPC/CONF/enb.band7.tm1.50PRB.usrpb210.conf @@ -151,9 +151,9 @@ eNBs = { ENB_INTERFACE_NAME_FOR_S1_MME = "eth0"; - ENB_IPV4_ADDRESS_FOR_S1_MME = "192.168.12.19/24"; + ENB_IPV4_ADDRESS_FOR_S1_MME = "192.168.12.111/24"; ENB_INTERFACE_NAME_FOR_S1U = "eth0"; - ENB_IPV4_ADDRESS_FOR_S1U = "192.168.12.19/24"; + ENB_IPV4_ADDRESS_FOR_S1U = "192.168.12.111/24"; ENB_PORT_FOR_S1U = 2152; # Spec 2152 }; } @@ -164,6 +164,7 @@ MACRLCs = ( num_cc = 1; tr_s_preference = "local_L1"; tr_n_preference = "local_RRC"; + phy_test_mode = 1; } ); diff --git a/targets/RT/USER/eNB_usrp.gtkw b/targets/RT/USER/eNB_usrp.gtkw index 838ec54383f4de0dc6f6165fc374013a3f80e3b8..e50c1831b69ba1e7e49ab33f79f0df523cb15ec0 100644 --- a/targets/RT/USER/eNB_usrp.gtkw +++ b/targets/RT/USER/eNB_usrp.gtkw @@ -1,19 +1,19 @@ [*] [*] GTKWave Analyzer v3.3.58 (w)1999-2014 BSI -[*] Tue Jul 25 20:26:12 2017 +[*] Thu Feb 22 14:46:40 2018 [*] [dumpfile] "/tmp/openair_dump_eNB.vcd" -[dumpfile_mtime] "Tue Jul 25 20:11:55 2017" -[dumpfile_size] 19201475 -[savefile] "/home/papillon/openairinterface5g/targets/RT/USER/eNB_usrp.gtkw" -[timestart] 29023604000 -[size] 1236 578 -[pos] 309 0 -*-20.793451 29026062100 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 +[dumpfile_mtime] "Thu Feb 22 14:44:26 2018" +[dumpfile_size] 3482761 +[savefile] "/homes/wangts/openairinterface5g/targets/RT/USER/eNB_usrp.gtkw" +[timestart] 4525000000 +[size] 1920 1018 +[pos] 0 22 +*-21.506693 4530514310 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 [sst_width] 386 -[signals_width] 262 +[signals_width] 344 [sst_expanded] 1 -[sst_vpaned_height] 146 +[sst_vpaned_height] 303 @28 functions.trx_read functions.trx_write @@ -25,12 +25,42 @@ functions.eNB_thread_rxtx0 @24 variables.frame_number_RX0_RU[63:0] variables.subframe_number_RX0_RU[63:0] -@25 variables.frame_number_TX0_RU[63:0] -@24 variables.subframe_number_TX0_RU[63:0] @28 +functions.mac_schedule_dlsch +functions.macxface_eNB_dlsch_ulsch_scheduler +functions.macxface_ue_scheduler +functions.phy_eNB_ofdm_mod_l +@24 +variables.frame_number_RX0_eNB[63:0] +@25 +variables.subframe_number_RX0_eNB[63:0] +@24 +variables.frame_number_TX0_eNB[63:0] +variables.subframe_number_TX0_eNB[63:0] +variables.frame_number_RX1_eNB[63:0] +variables.subframe_number_RX1_eNB[63:0] +variables.frame_number_TX1_eNB[63:0] +variables.subframe_number_TX1_eNB[63:0] +@28 +functions.phy_eNB_dlsch_modulation +functions.phy_eNB_dlsch_encoding +functions.phy_eNB_dlsch_scrambling +functions.phy_eNB_beam_precoding +functions.phy_enb_pdcch_tx +functions.phy_enb_prach_rx +functions.phy_procedures_ru_feprx0 +functions.phy_procedures_eNb_rx_uespec0 +functions.phy_procedures_eNb_rx_uespec1 +functions.phy_enb_sfgen functions.phy_procedures_eNb_tx0 +functions.phy_procedures_eNb_tx1 +functions.phy_procedures_ru_feprx1 +functions.phy_procedures_ru_feptx_ofdm0 +functions.phy_procedures_ru_feptx_ofdm1 +functions.phy_procedures_ru_feptx_prec0 +functions.phy_procedures_ru_feptx_prec1 functions.eNB_thread_rxtx1 functions.phy_enb_sfgen functions.phy_enb_prach_rx diff --git a/targets/RT/USER/lte-enb.c b/targets/RT/USER/lte-enb.c index e1c5d10de561435bae4171bbf1714f6730b63470..7dcfd432de82387e1010543292ec1e5a1679323b 100644 --- a/targets/RT/USER/lte-enb.c +++ b/targets/RT/USER/lte-enb.c @@ -148,11 +148,13 @@ void exit_fun(const char* s); void init_eNB(int,int); void stop_eNB(int nb_inst); - +int wakeup_tx(PHY_VARS_eNB *eNB,RU_proc_t *ru_proc); +int wakeup_txfh(eNB_rxtx_proc_t 
*proc,RU_proc_t *ru_proc); void wakeup_prach_eNB(PHY_VARS_eNB *eNB,RU_t *ru,int frame,int subframe); #ifdef Rel14 void wakeup_prach_eNB_br(PHY_VARS_eNB *eNB,RU_t *ru,int frame,int subframe); #endif +extern int codingw; extern uint8_t nfapi_mode; extern void oai_subframe_ind(uint16_t sfn, uint16_t sf); @@ -220,6 +222,15 @@ static inline int rxtx(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc, char *thread_nam if (nfapi_mode == 0 || nfapi_mode == 1) { phy_procedures_eNB_uespec_RX(eNB, proc, no_relay ); } + VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_ENB_DLSCH_ULSCH_SCHEDULER , 1 ); + + if(get_nprocs() >= 8){ + if(wait_on_condition(&proc[1].mutex_rxtx,&proc[1].cond_rxtx,&proc[1].pipe_ready,"wakeup_tx")<0) { + LOG_E(PHY,"Frame %d, subframe %d: TX1 not ready\n",proc[1].frame_rx,proc[1].subframe_rx); + return(-1); + } + if (release_thread(&proc[1].mutex_rxtx,&proc[1].pipe_ready,"wakeup_tx")<0) return(-1); + } pthread_mutex_lock(&eNB->UL_INFO_mutex); @@ -231,17 +242,13 @@ static inline int rxtx(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc, char *thread_nam eNB->if_inst->UL_indication(&eNB->UL_INFO); pthread_mutex_unlock(&eNB->UL_INFO_mutex); - - // ***************************************** - // TX processing for subframe n+sf_ahead - // run PHY TX procedures the one after the other for all CCs to avoid race conditions - // (may be relaxed in the future for performance reasons) - // ***************************************** - //if (wait_CCs(proc)<0) return(-1); - - if (oai_exit) return(-1); - phy_procedures_eNB_TX(eNB, proc, no_relay, NULL, 1); + VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_ENB_DLSCH_ULSCH_SCHEDULER , 0 ); + if(oai_exit) return(-1); + if(get_nprocs() <= 4){ + phy_procedures_eNB_TX(eNB, proc, no_relay, NULL, 1); + } + stop_meas( &softmodem_stats_rxtx_sf ); @@ -293,6 +300,50 @@ static inline int rxtx(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc, char *thread_nam } +static void* tx_thread(void* param) { + + eNB_proc_t *eNB_proc = (eNB_proc_t*)param; + eNB_rxtx_proc_t *proc = &eNB_proc->proc_rxtx[1]; + PHY_VARS_eNB *eNB = RC.eNB[0][proc->CC_id]; + + char thread_name[100]; + sprintf(thread_name,"TXnp4_%d\n",&eNB->proc.proc_rxtx[0] == proc ? 
0 : 1); + thread_top_init(thread_name,1,470000,500000,500000); + + //wait_sync("tx_thread"); + + while (!oai_exit) { + + + if (wait_on_condition(&proc->mutex_rxtx,&proc->cond_rxtx,&proc->instance_cnt_rxtx,thread_name)<0) break; + if (oai_exit) break; + // ***************************************** + // TX processing for subframe n+4 + // run PHY TX procedures the one after the other for all CCs to avoid race conditions + // (may be relaxed in the future for performance reasons) + // ***************************************** + VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX1_ENB,proc->subframe_tx); + VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_RX1_ENB,proc->subframe_rx); + VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX1_ENB,proc->frame_tx); + VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX1_ENB,proc->frame_rx); + + phy_procedures_eNB_TX(eNB, proc, no_relay, NULL, 1); + if (release_thread(&proc->mutex_rxtx,&proc->instance_cnt_rxtx,thread_name)<0) break; + + pthread_mutex_lock( &proc->mutex_rxtx ); + proc->pipe_ready++; + // the thread can now be woken up + if (pthread_cond_signal(&proc->cond_rxtx) != 0) { + LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB TXnp4 thread\n"); + exit_fun( "ERROR pthread_cond_signal" ); + } + pthread_mutex_unlock( &proc->mutex_rxtx ); + wakeup_txfh(proc,eNB_proc->ru_proc); + } + + return 0; +} + /*! * \brief The RX UE-specific and TX thread of eNB. * \param param is a \ref eNB_proc_t structure which contains the info what to process. @@ -302,36 +353,66 @@ static inline int rxtx(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc, char *thread_nam static void* eNB_thread_rxtx( void* param ) { static int eNB_thread_rxtx_status; - - eNB_rxtx_proc_t *proc = (eNB_rxtx_proc_t*)param; + eNB_proc_t *eNB_proc = (eNB_proc_t*)param; + eNB_rxtx_proc_t *proc = &eNB_proc->proc_rxtx[0]; PHY_VARS_eNB *eNB = RC.eNB[0][proc->CC_id]; + //RU_proc_t *ru_proc = NULL; char thread_name[100]; + cpu_set_t cpuset; + CPU_ZERO(&cpuset); // set default return value eNB_thread_rxtx_status = 0; - sprintf(thread_name,"RXn_TXnp4_%d",&eNB->proc.proc_rxtx[0] == proc ? 0 : 1); - thread_top_init(thread_name,1,850000L,1000000L,2000000L); + sprintf(thread_name,"RXn_TXnp4_%d\n",&eNB->proc.proc_rxtx[0] == proc ? 
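/* tx_thread() and the reworked eNB_thread_rxtx() above are started with
   thread_top_init(name, affinity, 470000, 500000, 500000) instead of the previous
   850000/1000000/2000000. Per the prototype these arguments are (runtime, deadline, period)
   deadline-scheduler budgets; if interpreted as nanoseconds, as in struct sched_attr, that is
   a 470 us budget every 500 us. A hedged, standalone sketch of how such a SCHED_DEADLINE
   reservation is requested on Linux; OAI's real wrapper is thread_top_init() in
   targets/RT/USER/rt_wrapper.c and this is not a copy of it (needs a >= 3.14 kernel and the
   right privileges). */

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

struct sched_attr_local {        /* mirrors the kernel's struct sched_attr */
  uint32_t size;
  uint32_t sched_policy;
  uint64_t sched_flags;
  int32_t  sched_nice;
  uint32_t sched_priority;
  uint64_t sched_runtime;        /* ns */
  uint64_t sched_deadline;       /* ns */
  uint64_t sched_period;         /* ns */
};

static int set_deadline(uint64_t runtime_ns, uint64_t deadline_ns, uint64_t period_ns)
{
  struct sched_attr_local attr;
  memset(&attr, 0, sizeof(attr));
  attr.size           = sizeof(attr);
  attr.sched_policy   = SCHED_DEADLINE;
  attr.sched_runtime  = runtime_ns;
  attr.sched_deadline = deadline_ns;
  attr.sched_period   = period_ns;
  return (int)syscall(SYS_sched_setattr, 0, &attr, 0);   /* pid 0 = calling thread */
}

int main(void)
{
  if (set_deadline(470000, 500000, 500000) != 0)          /* values used by the new rxtx/tx threads */
    perror("sched_setattr");
  else
    puts("SCHED_DEADLINE reservation installed");
  return 0;
}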
0 : 1); + thread_top_init(thread_name,1,470000,500000,500000); + pthread_setname_np( pthread_self(),"rxtx processing"); + LOG_I(PHY,"thread rxtx created id=%ld\n", syscall(__NR_gettid)); + + while (!oai_exit) { + + VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_eNB_PROC_RXTX0+(proc->subframe_rx&1), 0 ); + T(T_ENB_MASTER_TICK, T_INT(0), T_INT(proc->frame_rx), T_INT(proc->subframe_rx)); if (wait_on_condition(&proc->mutex_rxtx,&proc->cond_rxtx,&proc->instance_cnt_rxtx,thread_name)<0) break; + VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_CPUID_ENB_THREAD_RXTX,sched_getcpu()); VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_eNB_PROC_RXTX0+(proc->subframe_rx&1), 1 ); + + VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX0_ENB,proc->subframe_tx); + VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_RX0_ENB,proc->subframe_rx); + VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_ENB,proc->frame_tx); + VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_ENB,proc->frame_rx); + if (oai_exit) break; if (eNB->CC_id==0) { if (rxtx(eNB,proc,thread_name) < 0) break; - } if (release_thread(&proc->mutex_rxtx,&proc->instance_cnt_rxtx,thread_name)<0) break; + pthread_mutex_lock( &proc->mutex_rxtx ); + proc->pipe_ready++; + // the thread can now be woken up + if (pthread_cond_signal(&proc->cond_rxtx) != 0) { + LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB TXnp4 thread\n"); + exit_fun( "ERROR pthread_cond_signal" ); + } + pthread_mutex_unlock( &proc->mutex_rxtx ); + if(get_nprocs() >= 8) wakeup_tx(eNB,eNB->proc.ru_proc); + else + { + phy_procedures_eNB_TX(eNB, proc, no_relay, NULL, 1); + wakeup_txfh(proc,eNB->proc.ru_proc); + } } // while !oai_exit @@ -367,10 +448,12 @@ static void wait_system_ready (char *message, volatile int *start_flag) { -void eNB_top(PHY_VARS_eNB *eNB, int frame_rx, int subframe_rx, char *string) +void eNB_top(PHY_VARS_eNB *eNB, int frame_rx, int subframe_rx, char *string,RU_t *ru) { eNB_proc_t *proc = &eNB->proc; eNB_rxtx_proc_t *proc_rxtx = &proc->proc_rxtx[0]; + LTE_DL_FRAME_PARMS *fp = &ru->frame_parms; + RU_proc_t *ru_proc=&ru->proc; proc->frame_rx = frame_rx; proc->subframe_rx = subframe_rx; @@ -378,23 +461,113 @@ void eNB_top(PHY_VARS_eNB *eNB, int frame_rx, int subframe_rx, char *string) if (!oai_exit) { T(T_ENB_MASTER_TICK, T_INT(0), T_INT(proc->frame_rx), T_INT(proc->subframe_rx)); - proc_rxtx->subframe_rx = proc->subframe_rx; - proc_rxtx->frame_rx = proc->frame_rx; - proc_rxtx->subframe_tx = (proc->subframe_rx+sf_ahead)%10; - proc_rxtx->frame_tx = (proc->subframe_rx>(9-sf_ahead)) ? (1+proc->frame_rx)&1023 : proc->frame_rx; - proc->frame_tx = proc_rxtx->frame_tx; - proc_rxtx->timestamp_tx = proc->timestamp_tx; + proc_rxtx->timestamp_tx = ru_proc->timestamp_rx + (sf_ahead*fp->samples_per_tti); + proc_rxtx->frame_rx = ru_proc->frame_rx; + proc_rxtx->subframe_rx = ru_proc->subframe_rx; + proc_rxtx->frame_tx = (proc_rxtx->subframe_rx > (9-sf_ahead)) ? 
(proc_rxtx->frame_rx+1)&1023 : proc_rxtx->frame_rx; + proc_rxtx->subframe_tx = (proc_rxtx->subframe_rx + sf_ahead)%10; if (rxtx(eNB,proc_rxtx,string) < 0) LOG_E(PHY,"eNB %d CC_id %d failed during execution\n",eNB->Mod_id,eNB->CC_id); + ru_proc->timestamp_tx = proc_rxtx->timestamp_tx; + ru_proc->subframe_tx = proc_rxtx->subframe_tx; + ru_proc->frame_tx = proc_rxtx->frame_tx; } } +int wakeup_txfh(eNB_rxtx_proc_t *proc,RU_proc_t *ru_proc) { + + struct timespec wait; + wait.tv_sec=0; + wait.tv_nsec=5000000L; + + + if(wait_on_condition(&ru_proc->mutex_eNBs,&ru_proc->cond_eNBs,&ru_proc->ru_tx_ready,"wakeup_txfh")<0) { + LOG_E(PHY,"Frame %d, subframe %d: TX FH not ready\n", ru_proc->frame_tx, ru_proc->subframe_tx); + return(-1); + } + if (release_thread(&ru_proc->mutex_eNBs,&ru_proc->ru_tx_ready,"wakeup_txfh")<0) return(-1); + + if (ru_proc->instance_cnt_eNBs == 0) { + LOG_E(PHY,"Frame %d, subframe %d: TX FH thread busy, dropping Frame %d, subframe %d\n", ru_proc->frame_tx, ru_proc->subframe_tx, proc->frame_rx, proc->subframe_rx); + return(-1); + } + if (pthread_mutex_timedlock(&ru_proc->mutex_eNBs,&wait) != 0) { + LOG_E( PHY, "[eNB] ERROR pthread_mutex_lock for eNB TX1 thread %d (IC %d)\n", ru_proc->subframe_rx&1,ru_proc->instance_cnt_eNBs ); + exit_fun( "error locking mutex_eNB" ); + return(-1); + } + + ++ru_proc->instance_cnt_eNBs; + ru_proc->timestamp_tx = proc->timestamp_tx; + ru_proc->subframe_tx = proc->subframe_tx; + ru_proc->frame_tx = proc->frame_tx; + + // the thread can now be woken up + if (pthread_cond_signal(&ru_proc->cond_eNBs) != 0) { + LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB TXnp4 thread\n"); + exit_fun( "ERROR pthread_cond_signal" ); + return(-1); + } + + pthread_mutex_unlock( &ru_proc->mutex_eNBs ); + + return(0); +} + +int wakeup_tx(PHY_VARS_eNB *eNB,RU_proc_t *ru_proc) { + + eNB_proc_t *proc=&eNB->proc; + + eNB_rxtx_proc_t *proc_rxtx1=&proc->proc_rxtx[1];//*proc_rxtx=&proc->proc_rxtx[proc->frame_rx&1]; + eNB_rxtx_proc_t *proc_rxtx0=&proc->proc_rxtx[0]; + + + struct timespec wait; + wait.tv_sec=0; + wait.tv_nsec=5000000L; + + + + if (proc_rxtx1->instance_cnt_rxtx == 0) { + LOG_E(PHY,"Frame %d, subframe %d: TX1 thread busy, dropping\n",proc_rxtx1->frame_rx,proc_rxtx1->subframe_rx); + return(-1); + } + + if (pthread_mutex_timedlock(&proc_rxtx1->mutex_rxtx,&wait) != 0) { + LOG_E( PHY, "[eNB] ERROR pthread_mutex_lock for eNB TX1 thread %d (IC %d)\n", proc_rxtx1->subframe_rx&1,proc_rxtx1->instance_cnt_rxtx ); + exit_fun( "error locking mutex_tx" ); + return(-1); + } + + ++proc_rxtx1->instance_cnt_rxtx; + + + proc_rxtx1->subframe_rx = proc_rxtx0->subframe_rx; + proc_rxtx1->frame_rx = proc_rxtx0->frame_rx; + proc_rxtx1->subframe_tx = proc_rxtx0->subframe_tx; + proc_rxtx1->frame_tx = proc_rxtx0->frame_tx; + proc_rxtx1->timestamp_tx = proc_rxtx0->timestamp_tx; + + // the thread can now be woken up + if (pthread_cond_signal(&proc_rxtx1->cond_rxtx) != 0) { + LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB TXnp4 thread\n"); + exit_fun( "ERROR pthread_cond_signal" ); + return(-1); + } + + pthread_mutex_unlock( &proc_rxtx1->mutex_rxtx ); + + return(0); +} int wakeup_rxtx(PHY_VARS_eNB *eNB,RU_t *ru) { eNB_proc_t *proc=&eNB->proc; + RU_proc_t *ru_proc=&ru->proc; - eNB_rxtx_proc_t *proc_rxtx=&proc->proc_rxtx[proc->frame_rx&1]; + eNB_rxtx_proc_t *proc_rxtx0=&proc->proc_rxtx[0];//*proc_rxtx=&proc->proc_rxtx[proc->frame_rx&1]; + //eNB_rxtx_proc_t *proc_rxtx1=&proc->proc_rxtx[1]; + LTE_DL_FRAME_PARMS *fp = &eNB->frame_parms; @@ -426,25 +599,28 @@ int wakeup_rxtx(PHY_VARS_eNB 
*eNB,RU_t *ru) { wait.tv_sec=0; wait.tv_nsec=5000000L; - /* accept some delay in processing - up to 5ms */ - for (i = 0; i < 10 && proc_rxtx->instance_cnt_rxtx == 0; i++) { - LOG_W( PHY,"[eNB] Frame %d Subframe %d, eNB RXn-TXnp4 thread busy!! (cnt_rxtx %i)\n", proc_rxtx->frame_tx, proc_rxtx->subframe_tx, proc_rxtx->instance_cnt_rxtx); - usleep(500); + + if(wait_on_condition(&proc_rxtx0->mutex_rxtx,&proc_rxtx0->cond_rxtx,&proc_rxtx0->pipe_ready,"wakeup_rxtx")<0) { + LOG_E(PHY,"Frame %d, subframe %d: RXTX0 not ready\n",proc_rxtx0->frame_rx,proc_rxtx0->subframe_rx); + return(-1); } - if (proc_rxtx->instance_cnt_rxtx == 0) { - exit_fun( "TX thread busy" ); + if (release_thread(&proc_rxtx0->mutex_rxtx,&proc_rxtx0->pipe_ready,"wakeup_rxtx")<0) return(-1); + + if (proc_rxtx0->instance_cnt_rxtx == 0) { + LOG_E(PHY,"Frame %d, subframe %d: RXTX0 thread busy, dropping\n",proc_rxtx0->frame_rx,proc_rxtx0->subframe_rx); return(-1); } // wake up TX for subframe n+sf_ahead // lock the TX mutex and make sure the thread is ready - if (pthread_mutex_timedlock(&proc_rxtx->mutex_rxtx,&wait) != 0) { - LOG_E( PHY, "[eNB] ERROR pthread_mutex_lock for eNB RXTX thread %d (IC %d)\n", proc_rxtx->subframe_rx&1,proc_rxtx->instance_cnt_rxtx ); + if (pthread_mutex_timedlock(&proc_rxtx0->mutex_rxtx,&wait) != 0) { + LOG_E( PHY, "[eNB] ERROR pthread_mutex_lock for eNB RXTX thread %d (IC %d)\n", proc_rxtx0->subframe_rx&1,proc_rxtx0->instance_cnt_rxtx ); exit_fun( "error locking mutex_rxtx" ); return(-1); } - ++proc_rxtx->instance_cnt_rxtx; + + ++proc_rxtx0->instance_cnt_rxtx; // We have just received and processed the common part of a subframe, say n. // TS_rx is the last received timestamp (start of 1st slot), TS_tx is the desired @@ -452,20 +628,20 @@ int wakeup_rxtx(PHY_VARS_eNB *eNB,RU_t *ru) { // The last (TS_rx mod samples_per_frame) was n*samples_per_tti, // we want to generate subframe (n+sf_ahead), so TS_tx = TX_rx+sf_ahead*samples_per_tti, // and proc->subframe_tx = proc->subframe_rx+sf_ahead - proc_rxtx->timestamp_tx = proc->timestamp_rx + (sf_ahead*fp->samples_per_tti); - proc_rxtx->frame_rx = proc->frame_rx; - proc_rxtx->subframe_rx = proc->subframe_rx; - proc_rxtx->frame_tx = (proc_rxtx->subframe_rx > (9-sf_ahead)) ? (proc_rxtx->frame_rx+1)&1023 : proc_rxtx->frame_rx; - proc_rxtx->subframe_tx = (proc_rxtx->subframe_rx + sf_ahead)%10; + proc_rxtx0->timestamp_tx = ru_proc->timestamp_rx + (sf_ahead*fp->samples_per_tti); + proc_rxtx0->frame_rx = ru_proc->frame_rx; + proc_rxtx0->subframe_rx = ru_proc->subframe_rx; + proc_rxtx0->frame_tx = (proc_rxtx0->subframe_rx > (9-sf_ahead)) ? 
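/* wakeup_tx() and wakeup_txfh() above share one pattern: if the target thread has not yet
   dropped its instance counter back to idle the subframe is abandoned with an error log;
   otherwise the mutex is taken with a 5 ms timeout, the counter is raised, the
   frame/subframe/timestamp fields are copied and the condition variable is signalled. A
   self-contained sketch of that "hand over or drop" step with illustrative names; note that
   pthread_mutex_timedlock() expects an absolute CLOCK_REALTIME deadline, so the sketch
   derives one with clock_gettime(). */

#include <pthread.h>
#include <stdio.h>
#include <time.h>

typedef struct {
  pthread_mutex_t mutex;
  pthread_cond_t  cond;
  int instance_cnt;        /* -1 = worker idle, >= 0 = still busy with the previous job */
  int frame_tx, subframe_tx;
} tx_slot_t;

static int hand_over_subframe(tx_slot_t *s, int frame, int subframe)
{
  struct timespec abs_timeout;

  if (s->instance_cnt == 0) {                    /* worker still on the previous subframe: drop */
    fprintf(stderr, "TX thread busy, dropping frame %d subframe %d\n", frame, subframe);
    return -1;
  }

  clock_gettime(CLOCK_REALTIME, &abs_timeout);
  abs_timeout.tv_nsec += 5000000L;               /* allow up to 5 ms to take the mutex */
  if (abs_timeout.tv_nsec >= 1000000000L) { abs_timeout.tv_sec++; abs_timeout.tv_nsec -= 1000000000L; }
  if (pthread_mutex_timedlock(&s->mutex, &abs_timeout) != 0) {
    fprintf(stderr, "could not lock TX mutex for frame %d subframe %d\n", frame, subframe);
    return -1;
  }

  ++s->instance_cnt;                             /* mark a job as pending */
  s->frame_tx    = frame;                        /* copy the timing the worker will use */
  s->subframe_tx = subframe;
  pthread_cond_signal(&s->cond);                 /* wake the TXnp4 / ru_thread_tx worker */
  pthread_mutex_unlock(&s->mutex);
  return 0;
}

int main(void)
{
  tx_slot_t s = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, -1, 0, 0 };
  return hand_over_subframe(&s, 0, 4) == 0 ? 0 : 1;
}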
(proc_rxtx0->frame_rx+1)&1023 : proc_rxtx0->frame_rx; + proc_rxtx0->subframe_tx = (proc_rxtx0->subframe_rx + sf_ahead)%10; // the thread can now be woken up - if (pthread_cond_signal(&proc_rxtx->cond_rxtx) != 0) { + if (pthread_cond_signal(&proc_rxtx0->cond_rxtx) != 0) { LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB RXn-TXnp4 thread\n"); exit_fun( "ERROR pthread_cond_signal" ); return(-1); } - pthread_mutex_unlock( &proc_rxtx->mutex_rxtx ); + pthread_mutex_unlock( &proc_rxtx0->mutex_rxtx ); return(0); } @@ -605,8 +781,9 @@ static void* eNB_thread_prach( void* param ) { // set default return value eNB_thread_prach_status = 0; - thread_top_init("eNB_thread_prach",1,500000L,1000000L,20000000L); + thread_top_init("eNB_thread_prach",1,500000,1000000,20000000); + //wait_sync("eNB_thread_prach"); while (!oai_exit) { @@ -647,7 +824,7 @@ static void* eNB_thread_prach_br( void* param ) { // set default return value eNB_thread_prach_status = 0; - thread_top_init("eNB_thread_prach_br",1,500000L,1000000L,20000000L); + thread_top_init("eNB_thread_prach_br",1,500000,1000000,20000000); while (!oai_exit) { @@ -671,8 +848,40 @@ static void* eNB_thread_prach_br( void* param ) { #endif -extern void init_td_thread(PHY_VARS_eNB *, pthread_attr_t *); -extern void init_te_thread(PHY_VARS_eNB *, pthread_attr_t *); + +extern void init_td_thread(PHY_VARS_eNB *); +extern void init_te_thread(PHY_VARS_eNB *); +extern void kill_td_thread(PHY_VARS_eNB *); +extern void kill_te_thread(PHY_VARS_eNB *); +//////////////////////////////////////need to modified////////////////***** + +static void* process_stats_thread(void* param) { + + PHY_VARS_eNB *eNB = (PHY_VARS_eNB*)param; + + wait_sync("process_stats_thread"); + + while (!oai_exit) { + sleep(1); + if (opp_enabled == 1) { + if (eNB->td) print_meas(&eNB->ulsch_decoding_stats,"ulsch_decoding",NULL,NULL); + if (eNB->te) + { + print_meas(&eNB->dlsch_turbo_encoding_preperation_stats,"dlsch_coding_crc",NULL,NULL); + print_meas(&eNB->dlsch_turbo_encoding_segmentation_stats,"dlsch_segmentation",NULL,NULL); + print_meas(&eNB->dlsch_encoding_stats,"dlsch_encoding",NULL,NULL); + print_meas(&eNB->dlsch_turbo_encoding_signal_stats,"coding_signal",NULL,NULL); + print_meas(&eNB->dlsch_turbo_encoding_main_stats,"coding_main",NULL,NULL); + print_meas(&eNB->dlsch_turbo_encoding_waiting_stats,"coding_wait",NULL,NULL); + print_meas(&eNB->dlsch_turbo_encoding_wakeup_stats0,"coding_worker_0",NULL,NULL); + print_meas(&eNB->dlsch_turbo_encoding_wakeup_stats1,"coding_worker_1",NULL,NULL); + } + print_meas(&eNB->dlsch_modulation_stats,"dlsch_modulation",NULL,NULL); + } + } + return(NULL); +} + void init_eNB_proc(int inst) { @@ -682,7 +891,6 @@ void init_eNB_proc(int inst) { eNB_proc_t *proc; eNB_rxtx_proc_t *proc_rxtx; pthread_attr_t *attr0=NULL,*attr1=NULL,*attr_prach=NULL; - //*attr_td=NULL,*attr_te=NULL; #ifdef Rel14 pthread_attr_t *attr_prach_br=NULL; #endif @@ -699,8 +907,11 @@ void init_eNB_proc(int inst) { proc_rxtx = proc->proc_rxtx; proc_rxtx[0].instance_cnt_rxtx = -1; proc_rxtx[1].instance_cnt_rxtx = -1; + proc_rxtx[0].pipe_ready = 0; + proc_rxtx[1].pipe_ready = 0; proc->instance_cnt_prach = -1; proc->instance_cnt_asynch_rxtx = -1; + proc->instance_cnt_synch = -1; proc->CC_id = CC_id; proc->first_rx=1; @@ -724,8 +935,6 @@ void init_eNB_proc(int inst) { pthread_attr_init( &proc->attr_prach); pthread_attr_init( &proc->attr_asynch_rxtx); - // pthread_attr_init( &proc->attr_td); - // pthread_attr_init( &proc->attr_te); pthread_attr_init( &proc_rxtx[0].attr_rxtx); pthread_attr_init( 
&proc_rxtx[1].attr_rxtx); #ifdef Rel14 @@ -747,6 +956,15 @@ void init_eNB_proc(int inst) { // attr_td = &proc->attr_td; // attr_te = &proc->attr_te; #endif + //////////////////////////////////////need to modified////////////////***** + if(get_nprocs() > 2 && codingw) + { + init_te_thread(eNB); + init_td_thread(eNB); + } + //////////////////////////////////////need to modified////////////////***** + pthread_create( &proc_rxtx[0].pthread_rxtx, attr0, eNB_thread_rxtx, proc ); + pthread_create( &proc_rxtx[1].pthread_rxtx, attr1, tx_thread, proc); LOG_I(PHY,"eNB->single_thread_flag:%d\n", eNB->single_thread_flag); @@ -767,6 +985,9 @@ void init_eNB_proc(int inst) { } AssertFatal(proc->instance_cnt_prach == -1,"instance_cnt_prach = %d\n",proc->instance_cnt_prach); + + + if (opp_enabled == 1) pthread_create(&proc->process_stats_thread,NULL,process_stats_thread,(void*)eNB); } @@ -792,6 +1013,8 @@ void init_eNB_proc(int inst) { pthread_mutex_init(&sync_phy_proc.mutex_phy_proc_tx, NULL); pthread_cond_init(&sync_phy_proc.cond_phy_proc_tx, NULL); sync_phy_proc.phy_proc_CC_id = 0; + + } @@ -805,21 +1028,22 @@ void kill_eNB_proc(int inst) { PHY_VARS_eNB *eNB; eNB_proc_t *proc; eNB_rxtx_proc_t *proc_rxtx; + int i; for (int CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) { eNB=RC.eNB[inst][CC_id]; proc = &eNB->proc; proc_rxtx = &proc->proc_rxtx[0]; + kill_td_thread(eNB); + kill_te_thread(eNB); LOG_I(PHY, "Killing TX CC_id %d inst %d\n", CC_id, inst ); - - if (eNB->single_thread_flag==0) { - pthread_mutex_lock(&proc_rxtx[0].mutex_rxtx); - proc_rxtx[0].instance_cnt_rxtx = 0; - pthread_mutex_unlock(&proc_rxtx[0].mutex_rxtx); - pthread_mutex_lock(&proc_rxtx[1].mutex_rxtx); - proc_rxtx[1].instance_cnt_rxtx = 0; - pthread_mutex_unlock(&proc_rxtx[1].mutex_rxtx); + for (i=0; i<2; i++) { + pthread_mutex_lock(&proc_rxtx[i].mutex_rxtx); + proc_rxtx[i].instance_cnt_rxtx = 0; + proc_rxtx[i].pipe_ready = 0; + pthread_cond_signal(&proc_rxtx[i].cond_rxtx); + pthread_mutex_unlock(&proc_rxtx[i].mutex_rxtx); } proc->instance_cnt_prach = 0; pthread_cond_signal( &proc->cond_prach ); @@ -842,15 +1066,12 @@ void kill_eNB_proc(int inst) { #endif LOG_I(PHY, "Destroying UL_INFO mutex\n"); pthread_mutex_destroy(&eNB->UL_INFO_mutex); - int i; - if (eNB->single_thread_flag==0) { - for (i=0;i<2;i++) { - LOG_I(PHY, "Joining rxtx[%d] mutex/cond\n",i); - pthread_join( proc_rxtx[i].pthread_rxtx, (void**)&status ); - LOG_I(PHY, "Destroying rxtx[%d] mutex/cond\n",i); - pthread_mutex_destroy( &proc_rxtx[i].mutex_rxtx ); - pthread_cond_destroy( &proc_rxtx[i].cond_rxtx ); - } + for (i=0;i<2;i++) { + LOG_I(PHY, "Joining rxtx[%d] mutex/cond\n",i); + pthread_join( proc_rxtx[i].pthread_rxtx, (void**)&status ); + LOG_I(PHY, "Destroying rxtx[%d] mutex/cond\n",i); + pthread_mutex_destroy( &proc_rxtx[i].mutex_rxtx ); + pthread_cond_destroy( &proc_rxtx[i].cond_rxtx ); } } } @@ -1094,8 +1315,8 @@ void init_eNB(int single_thread_flag,int wait_for_sync) { #endif - eNB->td = ulsch_decoding_data;//(single_thread_flag==1) ? ulsch_decoding_data_2thread : ulsch_decoding_data; - eNB->te = dlsch_encoding;//(single_thread_flag==1) ? dlsch_encoding_2threads : dlsch_encoding; + eNB->td = ulsch_decoding_data_all;//(get_nprocs()<=4) ? ulsch_decoding_data : ulsch_decoding_data_2thread; + eNB->te = dlsch_encoding_all;//(get_nprocs()<=4) ? 
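/* process_stats_thread() above (created in init_eNB_proc() when opp_enabled is set) wakes up
   once per second and prints the coding/decoding time_stats. A stripped-down sketch of that
   reporting loop, with a plain counter standing in for the time_stats_t measurements: */

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static volatile int oai_exit_flag = 0;               /* stand-in for oai_exit */
static volatile int opp_enabled_flag = 1;            /* measurement switch, as in the code above */
static volatile unsigned long encoded_blocks = 0;    /* stand-in for the dlsch_encoding stats */

static void *stats_thread(void *arg)
{
  (void)arg;
  while (!oai_exit_flag) {
    sleep(1);
    if (opp_enabled_flag)
      printf("dlsch_encoding: %lu transport blocks processed so far\n", encoded_blocks);
  }
  return NULL;
}

int main(void)
{
  pthread_t tid;
  pthread_create(&tid, NULL, stats_thread, NULL);
  for (int i = 0; i < 3; i++) { encoded_blocks += 100; sleep(1); }
  oai_exit_flag = 1;
  pthread_join(tid, NULL);
  return 0;
}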
dlsch_encoding : dlsch_encoding_2threads; LOG_I(PHY,"Registering with MAC interface module\n"); diff --git a/targets/RT/USER/lte-ru.c b/targets/RT/USER/lte-ru.c index d9bda6a037993a4771c58eb4d9791f1ed9df40ee..bcc0e1c014c3bd8a3511da40116f127b98af8217 100644 --- a/targets/RT/USER/lte-ru.c +++ b/targets/RT/USER/lte-ru.c @@ -117,7 +117,11 @@ static int DEFENBS[] = {0}; #include "T.h" + extern volatile int oai_exit; +extern int emulate_rf; +extern int numerology; +extern int fepw; extern void phy_init_RU(RU_t*); @@ -679,6 +683,34 @@ void fh_if4p5_north_out(RU_t *ru) { } +static void* emulatedRF_thread(void* param) { + RU_proc_t *proc = (RU_proc_t *) param; + int microsec = 500; // length of time to sleep, in miliseconds + struct timespec req = {0}; + req.tv_sec = 0; + req.tv_nsec = (numerology>0)? ((microsec * 1000L)/numerology):(microsec * 1000L)*2; + cpu_set_t cpuset; + CPU_SET(1,&cpuset); + pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset); + + int policy; + struct sched_param sparam; + memset(&sparam, 0, sizeof(sparam)); + sparam.sched_priority = sched_get_priority_max(SCHED_FIFO); + policy = SCHED_FIFO ; + pthread_setschedparam(pthread_self(), policy, &sparam); + + wait_sync("emulatedRF_thread"); + while(!oai_exit){ + nanosleep(&req, (struct timespec *)NULL); + pthread_mutex_lock(&proc->mutex_emulateRF); + ++proc->instance_cnt_emulateRF; + pthread_mutex_unlock(&proc->mutex_emulateRF); + pthread_cond_signal(&proc->cond_emulateRF); + } + return 0; +} + void rx_rf(RU_t *ru,int *frame,int *subframe) { RU_proc_t *proc = &ru->proc; @@ -686,7 +718,7 @@ void rx_rf(RU_t *ru,int *frame,int *subframe) { void *rxp[ru->nb_rx]; unsigned int rxs; int i; - openair0_timestamp ts,old_ts; + openair0_timestamp ts=0,old_ts=0; for (i=0; i<ru->nb_rx; i++) rxp[i] = (void*)&ru->common.rxdata[i][*subframe*fp->samples_per_tti]; @@ -694,12 +726,18 @@ void rx_rf(RU_t *ru,int *frame,int *subframe) { VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_TRX_READ, 1 ); old_ts = proc->timestamp_rx; - - rxs = ru->rfdevice.trx_read_func(&ru->rfdevice, + if(emulate_rf){ + wait_on_condition(&proc->mutex_emulateRF,&proc->cond_emulateRF,&proc->instance_cnt_emulateRF,"emulatedRF_thread"); + release_thread(&proc->mutex_emulateRF,&proc->instance_cnt_emulateRF,"emulatedRF_thread"); + rxs = fp->samples_per_tti; + } + else{ + rxs = ru->rfdevice.trx_read_func(&ru->rfdevice, &ts, rxp, fp->samples_per_tti, ru->nb_rx); + } VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_TRX_READ, 0 ); @@ -715,7 +753,7 @@ void rx_rf(RU_t *ru,int *frame,int *subframe) { } else { if (proc->timestamp_rx - old_ts != fp->samples_per_tti) { - LOG_I(PHY,"rx_rf: rfdevice timing drift of %"PRId64" samples (ts_off %"PRId64")\n",proc->timestamp_rx - old_ts - fp->samples_per_tti,ru->ts_offset); + //LOG_I(PHY,"rx_rf: rfdevice timing drift of %"PRId64" samples (ts_off %"PRId64")\n",proc->timestamp_rx - old_ts - fp->samples_per_tti,ru->ts_offset); ru->ts_offset += (proc->timestamp_rx - old_ts - fp->samples_per_tti); proc->timestamp_rx = ts-ru->ts_offset; } @@ -725,9 +763,9 @@ void rx_rf(RU_t *ru,int *frame,int *subframe) { proc->subframe_rx = (proc->timestamp_rx / fp->samples_per_tti)%10; // synchronize first reception to frame 0 subframe 0 - proc->timestamp_tx = proc->timestamp_rx+(sf_ahead*fp->samples_per_tti); - proc->subframe_tx = (proc->subframe_rx+sf_ahead)%10; - proc->frame_tx = (proc->subframe_rx>(9-sf_ahead)) ? 
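/* emulatedRF_thread() above replaces the radio with a periodic timer: every tick it raises
   instance_cnt_emulateRF and signals, and rx_rf() (when emulate_rf is set) waits for that tick
   and reports a full subframe of samples instead of calling trx_read_func(). The tick length is
   derived from the "numerology" option with a 500 us base value (despite the "miliseconds"
   wording in the comment). The same computation as a standalone example: */

#include <stdio.h>

static long emulated_tick_ns(int numerology)
{
  const long base_ns = 500 * 1000L;              /* 500 us */
  return (numerology > 0) ? base_ns / numerology /* 500 us for mu=1, 250 us for mu=2, ... */
                          : 2 * base_ns;         /* 1 ms, one LTE subframe, for mu=0 */
}

int main(void)
{
  for (int mu = 0; mu <= 2; mu++)
    printf("numerology %d -> emulated RF tick of %ld ns\n", mu, emulated_tick_ns(mu));
  return 0;
}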
       (proc->frame_rx+1)&1023 : proc->frame_rx;
+  //proc->timestamp_tx = proc->timestamp_rx+(sf_ahead*fp->samples_per_tti);
+  //proc->subframe_tx  = (proc->subframe_rx+sf_ahead)%10;
+  //proc->frame_tx     = (proc->subframe_rx>(9-sf_ahead)) ? (proc->frame_rx+1)&1023 : proc->frame_rx;
 
   LOG_D(PHY,"RU %d/%d TS %llu (off %d), frame %d, subframe %d\n",
        ru->idx,
@@ -739,8 +777,6 @@ void rx_rf(RU_t *ru,int *frame,int *subframe) {
   if (ru == RC.ru[0]) {
     VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_RU, proc->frame_rx );
     VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_RX0_RU, proc->subframe_rx );
-    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_RU, proc->frame_tx );
-    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX0_RU, proc->subframe_tx );
   }
 
   if (proc->first_rx == 0) {
@@ -815,6 +851,8 @@ void tx_rf(RU_t *ru) {
     for (i=0; i<ru->nb_tx; i++)
       txp[i] = (void*)&ru->common.txdata[i][(proc->subframe_tx*fp->samples_per_tti)-sf_extension];
 
+    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_RU, proc->frame_tx );
+    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX0_RU, proc->subframe_tx );
     VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME( VCD_SIGNAL_DUMPER_VARIABLES_TRX_TST, (proc->timestamp_tx-ru->openair0_cfg.tx_sample_advance)&0xffffffff );
     VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_TRX_WRITE, 1 );
@@ -855,7 +893,7 @@ static void* ru_thread_asynch_rxtx( void* param ) {
   int subframe=0, frame=0;
 
-  thread_top_init("ru_thread_asynch_rxtx",1,870000L,1000000L,1000000L);
+  thread_top_init("ru_thread_asynch_rxtx",1,870000,1000000,1000000);
 
   // wait for top-level synchronization and do one acquisition to get timestamp for setting frame/subframe
@@ -881,7 +919,7 @@ static void* ru_thread_asynch_rxtx( void* param ) {
       subframe++;
     }
     LOG_D(PHY,"ru_thread_asynch_rxtx: Waiting on incoming fronthaul\n");
-    // asynchronous receive from south (Mobipass)
+    // asynchronous receive from south (Mobipass)
     if (ru->fh_south_asynch_in) ru->fh_south_asynch_in(ru,&frame,&subframe);
     // asynchronous receive from north (RRU IF4/IF5)
     else if (ru->fh_north_asynch_in) {
@@ -907,6 +945,7 @@ void wakeup_slaves(RU_proc_t *proc) {
   wait.tv_nsec=5000000L;
 
   for (i=0;i<proc->num_slaves;i++) {
+    //printf("calling the slave FH threads\n");
     RU_proc_t *slave_proc = proc->slave_proc[i];
     // wake up slave FH thread
     // lock the FH mutex and make sure the thread is ready
@@ -954,7 +993,8 @@ static void* ru_thread_prach( void* param ) {
   // set default return value
   ru_thread_prach_status = 0;
 
-  thread_top_init("ru_thread_prach",1,500000L,1000000L,20000000L);
+  thread_top_init("ru_thread_prach",1,500000,1000000,20000000);
+  //wait_sync("ru_thread_prach");
 
   while (RC.ru_mask>0) {
     usleep(1e6);
@@ -1009,7 +1049,8 @@ static void* ru_thread_prach_br( void* param ) {
   // set default return value
   ru_thread_prach_status = 0;
 
-  thread_top_init("ru_thread_prach_br",1,500000L,1000000L,20000000L);
+  thread_top_init("ru_thread_prach_br",1,500000,1000000,20000000);
+  //wait_sync("ru_thread_prach_br");
 
   while (!oai_exit) {
@@ -1139,13 +1180,14 @@ void wakeup_eNBs(RU_t *ru) {
 
   LOG_D(PHY,"wakeup_eNBs (num %d) for RU %d ru->eNB_top:%p\n",ru->num_eNB,ru->idx, ru->eNB_top);
 
-  if (ru->num_eNB==1 && ru->eNB_top!=0) {
-    // call eNB function directly
+  if (ru->num_eNB==1 && ru->eNB_top!=0 && get_nprocs() <= 4) {
+    // call eNB function directly
     char string[20];
     sprintf(string,"Incoming RU %d",ru->idx);
     LOG_D(PHY,"RU %d Call eNB_top\n",ru->idx);
-    ru->eNB_top(eNB_list[0],ru->proc.frame_rx,ru->proc.subframe_rx,string);
+    ru->eNB_top(eNB_list[0],ru->proc.frame_rx,ru->proc.subframe_rx,string,ru);
   }
   else {
@@ -1154,7 +1196,7 @@ void wakeup_eNBs(RU_t *ru) {
     for (i=0;i<ru->num_eNB;i++) {
       LOG_D(PHY,"ru->wakeup_rxtx:%p\n", ru->wakeup_rxtx);
-
+      eNB_list[i]->proc.ru_proc = &ru->proc;
       if (ru->wakeup_rxtx!=0 && ru->wakeup_rxtx(eNB_list[i],ru) < 0) {
        LOG_E(PHY,"could not wakeup eNB rxtx process for subframe %d\n", ru->proc.subframe_rx);
@@ -1231,20 +1273,39 @@ void fill_rf_config(RU_t *ru, char *rf_config_file) {
   LTE_DL_FRAME_PARMS *fp   = &ru->frame_parms;
   openair0_config_t *cfg   = &ru->openair0_cfg;
 
+  //printf("numerology in config = %d\n",numerology);
   if(fp->N_RB_DL == 100) {
-    if (fp->threequarter_fs) {
-      cfg->sample_rate=23.04e6;
-      cfg->samples_per_frame = 230400;
-      cfg->tx_bw = 10e6;
-      cfg->rx_bw = 10e6;
-    }
-    else {
-      cfg->sample_rate=30.72e6;
+    if(numerology == 0){
+      if (fp->threequarter_fs) {
+        cfg->sample_rate=23.04e6;
+        cfg->samples_per_frame = 230400;
+        cfg->tx_bw = 10e6;
+        cfg->rx_bw = 10e6;
+      }
+      else {
+        cfg->sample_rate=30.72e6;
+        cfg->samples_per_frame = 307200;
+        cfg->tx_bw = 10e6;
+        cfg->rx_bw = 10e6;
+      }
+    }else if(numerology == 1){
+      cfg->sample_rate=61.44e6;
+      cfg->samples_per_frame = 614400;  // one 10 ms frame at 61.44 Msps
+      cfg->tx_bw = 20e6;
+      cfg->rx_bw = 20e6;
+    }else if(numerology == 2){
+      cfg->sample_rate=122.88e6;
+      cfg->samples_per_frame = 1228800; // one 10 ms frame at 122.88 Msps
+      cfg->tx_bw = 40e6;
+      cfg->rx_bw = 40e6;
+    }else{
+      printf("Unsupported numerology %d, falling back to the 30.72 Msps (20 MHz, normal CP) configuration\n",numerology);
+      cfg->sample_rate=30.72e6;
       cfg->samples_per_frame = 307200;
       cfg->tx_bw = 10e6;
       cfg->rx_bw = 10e6;
-    }
+    }
   } else if(fp->N_RB_DL == 50) {
     cfg->sample_rate=15.36e6;
     cfg->samples_per_frame = 153600;
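/* [Editor's illustration, not part of the patch] The numerology branches above
 * scale the 20 MHz LTE baseline (30.72 Msps) by 2^numerology, and
 * samples_per_frame should remain sample_rate * 10 ms.  A minimal stand-alone
 * sketch of that relation (the helper name and the strict rate-to-frame
 * coupling are my assumptions, not the author's code): */
#include <stdio.h>

static void numerology_rates(int mu, double *sample_rate, unsigned int *samples_per_frame)
{
  double rate = 30.72e6;                        /* N_RB_DL == 100, numerology 0 */
  for (int i = 0; i < mu; i++)
    rate *= 2.0;                                /* mu=1 -> 61.44 Msps, mu=2 -> 122.88 Msps */
  *sample_rate = rate;
  *samples_per_frame = (unsigned int)(rate * 0.01);  /* one 10 ms radio frame */
}

int main(void)
{
  double sr; unsigned int spf;
  for (int mu = 0; mu <= 2; mu++) {
    numerology_rates(mu, &sr, &spf);
    printf("numerology %d: %.2f Msps, %u samples per frame\n", mu, sr / 1e6, spf);
  }
  return 0;
}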
@@ -1363,7 +1424,7 @@ static void* ru_stats_thread(void* param) {
   while (!oai_exit) {
     sleep(1);
-    if (opp_enabled == 1) {
+    if (opp_enabled == 1 && fepw) {
       if (ru->feprx) print_meas(&ru->ofdm_demod_stats,"feprx",NULL,NULL);
       if (ru->feptx_ofdm) print_meas(&ru->ofdm_mod_stats,"feptx_ofdm",NULL,NULL);
       if (ru->fh_north_asynch_in) print_meas(&ru->rx_fhaul,"rx_fhaul",NULL,NULL);
@@ -1377,6 +1438,59 @@ static void* ru_stats_thread(void* param) {
   return(NULL);
 }
 
+static void* ru_thread_tx( void* param ) {
+  RU_t *ru        = (RU_t*)param;
+  RU_proc_t *proc = &ru->proc;
+  cpu_set_t cpuset;
+  CPU_ZERO(&cpuset);
+
+  thread_top_init("ru_thread_tx",1,400000,500000,500000);
+
+  //CPU_SET(5, &cpuset);
+  //pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
+  //wait_sync("ru_thread_tx");
+
+  wait_on_condition(&proc->mutex_FH1,&proc->cond_FH1,&proc->instance_cnt_FH1,"ru_thread_tx");
+
+  printf( "ru_thread_tx ready\n");
+  while (!oai_exit) {
+
+    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_CPUID_RU_THREAD_TX,sched_getcpu());
+    if (oai_exit) break;
+
+    LOG_D(PHY,"ru_thread_tx: Waiting for TX processing\n");
+    // wait until eNBs are finished subframe RX n and TX n+4
+    wait_on_condition(&proc->mutex_eNBs,&proc->cond_eNBs,&proc->instance_cnt_eNBs,"ru_thread_tx");
+    if (oai_exit) break;
+
+    // do TX front-end processing if needed (precoding and/or IDFTs)
+    if (ru->feptx_prec) ru->feptx_prec(ru);
+
+    // do OFDM if needed
+    if ((ru->fh_north_asynch_in == NULL) && (ru->feptx_ofdm)) ru->feptx_ofdm(ru);
+    if(!emulate_rf){
+      // do outgoing fronthaul (south) if needed
+      if ((ru->fh_north_asynch_in == NULL) && (ru->fh_south_out)) ru->fh_south_out(ru);
+
+      if (ru->fh_north_out) ru->fh_north_out(ru);
+    }
+    release_thread(&proc->mutex_eNBs,&proc->instance_cnt_eNBs,"ru_thread_tx");
+
+    pthread_mutex_lock( &proc->mutex_eNBs );
+    proc->ru_tx_ready++;
+    // the thread can now be woken up
+    if (pthread_cond_signal(&proc->cond_eNBs) != 0) {
+      LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB TXnp4 thread\n");
+      exit_fun( "ERROR pthread_cond_signal" );
+    }
+    pthread_mutex_unlock( &proc->mutex_eNBs );
+  }
+  release_thread(&proc->mutex_FH1,&proc->instance_cnt_FH1,"ru_thread_tx");
+  return 0;
+}
+
 static void* ru_thread( void* param ) {
 
   static int ru_thread_status;
@@ -1387,35 +1501,54 @@ static void* ru_thread( void* param ) {
   int ret;
   int subframe =9;
   int frame    =1023;
+  cpu_set_t cpuset;
+  CPU_ZERO(&cpuset);
 
   // set default return value
   ru_thread_status = 0;
   // set default return value
-  thread_top_init("ru_thread",0,870000,1000000,1000000);
+  thread_top_init("ru_thread",1,400000,500000,500000);
 
-  LOG_I(PHY,"Starting RU %d (%s,%s),\n",ru->idx,eNB_functions[ru->function],eNB_timing[ru->if_timing]);
+  //CPU_SET(1, &cpuset);
+  //pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
+  pthread_setname_np( pthread_self(),"ru thread");
+  LOG_I(PHY,"thread ru created id=%ld\n", syscall(__NR_gettid));
+
+  LOG_I(PHY,"Starting RU %d (%s,%s),\n",ru->idx,eNB_functions[ru->function],eNB_timing[ru->if_timing]);
 
-  // Start IF device if any
-  if (ru->start_if) {
-    LOG_I(PHY,"Starting IF interface for RU %d\n",ru->idx);
-    AssertFatal(ru->start_if(ru,NULL) == 0, "Could not start the IF device\n");
-    if (ru->if_south == LOCAL_RF) ret = connect_rau(ru);
-    else ret = attach_rru(ru);
-    AssertFatal(ret==0,"Cannot connect to radio\n");
-  }
-  if (ru->if_south == LOCAL_RF) { // configure RF parameters only
-    fill_rf_config(ru,ru->rf_config_file);
-    init_frame_parms(&ru->frame_parms,1);
-    phy_init_RU(ru);
-
-    ret = openair0_device_load(&ru->rfdevice,&ru->openair0_cfg);
+  if(emulate_rf){
+    fill_rf_config(ru,ru->rf_config_file);
+    init_frame_parms(&ru->frame_parms,1);
+    phy_init_RU(ru);
+    if (setup_RU_buffers(ru)!=0) {
+      printf("Exiting, cannot initialize RU Buffers\n");
+      exit(-1);
+    }
   }
-  if (setup_RU_buffers(ru)!=0) {
-    printf("Exiting, cannot initialize RU Buffers\n");
-    exit(-1);
+  else{
+    // Start IF device if any
+    if (ru->start_if) {
+      LOG_I(PHY,"Starting IF interface for RU %d\n",ru->idx);
+      AssertFatal(ru->start_if(ru,NULL) == 0, "Could not start the IF device\n");
+      if (ru->if_south == LOCAL_RF) ret = connect_rau(ru);
+      else ret = attach_rru(ru);
+      AssertFatal(ret==0,"Cannot connect to radio\n");
+    }
+    if (ru->if_south == LOCAL_RF) { // configure RF parameters only
+      fill_rf_config(ru,ru->rf_config_file);
+      init_frame_parms(&ru->frame_parms,1);
+      phy_init_RU(ru);
+
+      ret = openair0_device_load(&ru->rfdevice,&ru->openair0_cfg);
+    }
+    if (setup_RU_buffers(ru)!=0) {
+      printf("Exiting, cannot initialize RU Buffers\n");
+      exit(-1);
+    }
   }
 
   LOG_I(PHY, "Signaling main thread that RU %d is ready\n",ru->idx);
@@ -1424,38 +1557,46 @@ static void* ru_thread( void* param ) {
   pthread_cond_signal(&RC.ru_cond);
   pthread_mutex_unlock(&RC.ru_mutex);
 
-  wait_sync("ru_thread");
+  pthread_mutex_lock(&proc->mutex_FH1);
+  proc->instance_cnt_FH1 = 0;
+  pthread_mutex_unlock(&proc->mutex_FH1);
+  pthread_cond_signal(&proc->cond_FH1);
+  wait_sync("ru_thread");
 
-
-  // Start RF device if any
-  if (ru->start_rf) {
-    if (ru->start_rf(ru) != 0)
-      LOG_E(HW,"Could not start the RF device\n");
-    else LOG_I(PHY,"RU %d rf device ready\n",ru->idx);
+  if(!emulate_rf){
+    // Start RF device if any
+    if (ru->start_rf) {
+      if (ru->start_rf(ru) != 0)
+        LOG_E(HW,"Could not start the RF device\n");
+      else LOG_I(PHY,"RU %d rf device ready\n",ru->idx);
+    }
+    else LOG_I(PHY,"RU %d no rf device\n",ru->idx);
+
+    // if an asynch_rxtx thread exists
+    // wakeup the thread because the devices are ready at this point
+
+    if ((ru->fh_south_asynch_in)||(ru->fh_north_asynch_in)) {
+      pthread_mutex_lock(&proc->mutex_asynch_rxtx);
+      proc->instance_cnt_asynch_rxtx=0;
+      pthread_mutex_unlock(&proc->mutex_asynch_rxtx);
+      pthread_cond_signal(&proc->cond_asynch_rxtx);
+    }
+    else LOG_I(PHY,"RU %d no asynch_south interface\n",ru->idx);
+
+    // if this is a slave RRU, try to synchronize on the DL frequency
+    if ((ru->is_slave) && (ru->if_south == LOCAL_RF)) do_ru_synch(ru);
   }
-  else LOG_I(PHY,"RU %d no rf device\n",ru->idx);
-
-  // if an asnych_rxtx thread exists
-  // wakeup the thread because the devices are ready at this point
-
-  if ((ru->fh_south_asynch_in)||(ru->fh_north_asynch_in)) {
-    pthread_mutex_lock(&proc->mutex_asynch_rxtx);
-    proc->instance_cnt_asynch_rxtx=0;
-    pthread_mutex_unlock(&proc->mutex_asynch_rxtx);
-    pthread_cond_signal(&proc->cond_asynch_rxtx);
-  }
-  else LOG_I(PHY,"RU %d no asynch_south interface\n",ru->idx);
 
-  // if this is a slave RRU, try to synchronize on the DL frequency
-  if ((ru->is_slave) && (ru->if_south == LOCAL_RF)) do_ru_synch(ru);
 
   // This is a forever while loop, it loops over subframes which are scheduled by incoming samples from HW devices
   while (!oai_exit) {
 
+    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_CPUID_RU_THREAD,sched_getcpu());
+
     // these are local subframe/frame counters to check that we are in synch with the fronthaul timing.
     // They are set on the first rx/tx in the underlying FH routines.
     if (subframe==9) {
@@ -1505,20 +1646,21 @@ static void* ru_thread( void* param ) {
     // wakeup all eNB processes waiting for this RU
     if (ru->num_eNB>0) wakeup_eNBs(ru);
+
+    if(get_nprocs() <= 4){
+      // do TX front-end processing if needed (precoding and/or IDFTs)
+      if (ru->feptx_prec) ru->feptx_prec(ru);
+
+      // do OFDM if needed
+      if ((ru->fh_north_asynch_in == NULL) && (ru->feptx_ofdm)) ru->feptx_ofdm(ru);
+      if(!emulate_rf){
+        // do outgoing fronthaul (south) if needed
+        if ((ru->fh_north_asynch_in == NULL) && (ru->fh_south_out)) ru->fh_south_out(ru);
+
+        if (ru->fh_north_out) ru->fh_north_out(ru);
+      }
+    }
 
-    // wait until eNBs are finished subframe RX n and TX n+sf_ahead
-    wait_on_condition(&proc->mutex_eNBs,&proc->cond_eNBs,&proc->instance_cnt_eNBs,"ru_thread");
-
-
-    // do TX front-end processing if needed (precoding and/or IDFTs)
-    if (ru->feptx_prec) ru->feptx_prec(ru);
-
-    // do OFDM if needed
-    if ((ru->fh_north_asynch_in == NULL) && (ru->feptx_ofdm)) ru->feptx_ofdm(ru);
-    // do outgoing fronthaul (south) if needed
-    if ((ru->fh_north_asynch_in == NULL) && (ru->fh_south_out)) ru->fh_south_out(ru);
-
-    if (ru->fh_north_out) ru->fh_north_out(ru);
   }
 
@@ -1641,7 +1783,7 @@ void init_RU_proc(RU_t *ru) {
   int i=0;
   RU_proc_t *proc;
-  pthread_attr_t *attr_FH=NULL,*attr_prach=NULL,*attr_asynch=NULL,*attr_synch=NULL;
+  pthread_attr_t *attr_FH=NULL,*attr_FH1=NULL,*attr_prach=NULL,*attr_asynch=NULL,*attr_synch=NULL,*attr_emulateRF=NULL;
   //pthread_attr_t *attr_fep=NULL;
 #ifdef Rel14
   pthread_attr_t *attr_prach_br=NULL;
@@ -1656,14 +1798,19 @@ void init_RU_proc(RU_t *ru) {
   proc->ru                       = ru;
   proc->instance_cnt_prach       = -1;
-  proc->instance_cnt_synch       = -1;     ;
+  proc->instance_cnt_synch       = -1;
   proc->instance_cnt_FH          = -1;
+  proc->instance_cnt_FH1         = -1;
+  proc->instance_cnt_emulateRF   = -1;
   proc->instance_cnt_asynch_rxtx = -1;
+  proc->instance_cnt_eNBs        = -1;
   proc->first_rx                 = 1;
   proc->first_tx                 = 1;
   proc->frame_offset             = 0;
   proc->num_slaves               = 0;
   proc->frame_tx_unwrap          = 0;
+  proc->ru_rx_ready              = 0;
+  proc->ru_tx_ready              = 0;
 
   for (i=0;i<10;i++) proc->symbol_mask[i]=0;
@@ -1671,15 +1818,21 @@ void init_RU_proc(RU_t *ru) {
   pthread_mutex_init( &proc->mutex_asynch_rxtx, NULL);
   pthread_mutex_init( &proc->mutex_synch,NULL);
   pthread_mutex_init( &proc->mutex_FH,NULL);
+  pthread_mutex_init( &proc->mutex_FH1,NULL);
+  pthread_mutex_init( &proc->mutex_emulateRF,NULL);
   pthread_mutex_init( &proc->mutex_eNBs, NULL);
 
   pthread_cond_init( &proc->cond_prach, NULL);
   pthread_cond_init( &proc->cond_FH, NULL);
+  pthread_cond_init( &proc->cond_FH1, NULL);
+  pthread_cond_init( &proc->cond_emulateRF, NULL);
   pthread_cond_init( &proc->cond_asynch_rxtx, NULL);
   pthread_cond_init( &proc->cond_synch,NULL);
   pthread_cond_init( &proc->cond_eNBs, NULL);
 
   pthread_attr_init( &proc->attr_FH);
+  pthread_attr_init( &proc->attr_FH1);
+  pthread_attr_init( &proc->attr_emulateRF);
   pthread_attr_init( &proc->attr_prach);
   pthread_attr_init( &proc->attr_synch);
   pthread_attr_init( &proc->attr_asynch_rxtx);
@@ -1694,15 +1847,22 @@ void init_RU_proc(RU_t *ru) {
 #ifndef DEADLINE_SCHEDULER
   attr_FH     = &proc->attr_FH;
+  attr_FH1    = &proc->attr_FH1;
   attr_prach  = &proc->attr_prach;
   attr_synch  = &proc->attr_synch;
   attr_asynch = &proc->attr_asynch_rxtx;
+  attr_emulateRF = &proc->attr_emulateRF;
 #ifdef Rel14
   attr_prach_br = &proc->attr_prach_br;
 #endif
 #endif
 
   pthread_create( &proc->pthread_FH, attr_FH, ru_thread, (void*)ru );
+  if(emulate_rf)
+    pthread_create( &proc->pthread_emulateRF, attr_emulateRF, emulatedRF_thread, (void*)proc );
+
+  if (get_nprocs() > 4)
+    pthread_create( &proc->pthread_FH1, attr_FH1, ru_thread_tx, (void*)ru );
 
   if (ru->function == NGFI_RRU_IF4p5) {
     pthread_create( &proc->pthread_prach, attr_prach, ru_thread_prach, (void*)ru );
@@ -1714,7 +1874,10 @@ void init_RU_proc(RU_t *ru) {
   if ((ru->if_timing == synch_to_other) ||
       (ru->function == NGFI_RRU_IF5) ||
-      (ru->function == NGFI_RRU_IF4p5)) pthread_create( &proc->pthread_asynch_rxtx, attr_asynch, ru_thread_asynch_rxtx, (void*)ru );
+      (ru->function == NGFI_RRU_IF4p5))
+  {
+    pthread_create( &proc->pthread_asynch_rxtx, attr_asynch, ru_thread_asynch_rxtx, (void*)ru );
+  }
 
   snprintf( name, sizeof(name), "ru_thread_FH %d", ru->idx );
   pthread_setname_np( proc->pthread_FH, name );
@@ -1725,7 +1888,7 @@ void init_RU_proc(RU_t *ru) {
     pthread_create( &proc->pthread_prach, attr_prach, ru_thread_prach, (void*)ru );
   }
 
-  if (get_nprocs()>=2) {
+  if (get_nprocs()> 2 && fepw) {
     if (ru->feprx) init_fep_thread(ru,NULL);
     if (ru->feptx_ofdm) init_feptx_thread(ru,NULL);
   }
@@ -1740,38 +1903,46 @@ void kill_RU_proc(int inst)
 
   pthread_mutex_lock(&proc->mutex_FH);
   proc->instance_cnt_FH = 0;
-  pthread_mutex_unlock(&proc->mutex_FH);
   pthread_cond_signal(&proc->cond_FH);
+  pthread_mutex_unlock(&proc->mutex_FH);
+
+  pthread_mutex_lock(&proc->mutex_FH1);
+  proc->instance_cnt_FH1 = 0;
+  pthread_cond_signal(&proc->cond_FH1);
+  pthread_mutex_unlock(&proc->mutex_FH1);
 
   pthread_mutex_lock(&proc->mutex_prach);
   proc->instance_cnt_prach = 0;
-  pthread_mutex_unlock(&proc->mutex_prach);
   pthread_cond_signal(&proc->cond_prach);
+  pthread_mutex_unlock(&proc->mutex_prach);
 
 #ifdef Rel14
   pthread_mutex_lock(&proc->mutex_prach_br);
   proc->instance_cnt_prach_br = 0;
-  pthread_mutex_unlock(&proc->mutex_prach_br);
   pthread_cond_signal(&proc->cond_prach_br);
+  pthread_mutex_unlock(&proc->mutex_prach_br);
 #endif
 
   pthread_mutex_lock(&proc->mutex_synch);
   proc->instance_cnt_synch = 0;
-  pthread_mutex_unlock(&proc->mutex_synch);
   pthread_cond_signal(&proc->cond_synch);
+  pthread_mutex_unlock(&proc->mutex_synch);
 
   pthread_mutex_lock(&proc->mutex_eNBs);
+  proc->ru_tx_ready = 0;
   proc->instance_cnt_eNBs = 0;
-  pthread_mutex_unlock(&proc->mutex_eNBs);
   pthread_cond_signal(&proc->cond_eNBs);
+  pthread_mutex_unlock(&proc->mutex_eNBs);
 
   pthread_mutex_lock(&proc->mutex_asynch_rxtx);
   proc->instance_cnt_asynch_rxtx = 0;
-  pthread_mutex_unlock(&proc->mutex_asynch_rxtx);
   pthread_cond_signal(&proc->cond_asynch_rxtx);
+  pthread_mutex_unlock(&proc->mutex_asynch_rxtx);
 
   LOG_D(PHY, "Joining pthread_FH\n");
   pthread_join(proc->pthread_FH, NULL);
+  LOG_D(PHY, "Joining pthread_FHTX\n");
+  pthread_join(proc->pthread_FH1, NULL);
   if (ru->function == NGFI_RRU_IF4p5) {
     LOG_D(PHY, "Joining pthread_prach\n");
     pthread_join(proc->pthread_prach, NULL);
@@ -1791,7 +1962,7 @@ void kill_RU_proc(int inst)
       pthread_join(proc->pthread_asynch_rxtx, NULL);
     }
   }
-  if (get_nprocs() >= 2) {
+  if (get_nprocs() > 2 && fepw) {
     if (ru->feprx) {
       pthread_mutex_lock(&proc->mutex_fep);
       proc->instance_cnt_fep = 0;
@@ -1822,15 +1993,18 @@ void kill_RU_proc(int inst)
   pthread_mutex_destroy(&proc->mutex_asynch_rxtx);
   pthread_mutex_destroy(&proc->mutex_synch);
   pthread_mutex_destroy(&proc->mutex_FH);
+  pthread_mutex_destroy(&proc->mutex_FH1);
   pthread_mutex_destroy(&proc->mutex_eNBs);
 
   pthread_cond_destroy(&proc->cond_prach);
   pthread_cond_destroy(&proc->cond_FH);
+  pthread_cond_destroy(&proc->cond_FH1);
   pthread_cond_destroy(&proc->cond_asynch_rxtx);
   pthread_cond_destroy(&proc->cond_synch);
   pthread_cond_destroy(&proc->cond_eNBs);
 
   pthread_attr_destroy(&proc->attr_FH);
+  pthread_attr_destroy(&proc->attr_FH1);
   pthread_attr_destroy(&proc->attr_prach);
   pthread_attr_destroy(&proc->attr_synch);
   pthread_attr_destroy(&proc->attr_asynch_rxtx);
@@ -2083,8 +2257,8 @@ void set_function_spec_param(RU_t *ru)
   }
   else if (ru->function == eNodeB_3GPP) {
     ru->do_prach       = 0;                       // no prach processing in RU
-    ru->feprx          = (get_nprocs()<=2) ? fep_full : ru_fep_full_2thread;            // RX DFTs
-    ru->feptx_ofdm     = (get_nprocs()<=2) ? feptx_ofdm : feptx_ofdm_2thread;           // this is fep with idft and precoding
+    ru->feprx          = (get_nprocs()<=2 || !fepw) ? fep_full : ru_fep_full_2thread;   // RX DFTs
+    ru->feptx_ofdm     = (get_nprocs()<=2 || !fepw) ? feptx_ofdm : feptx_ofdm_2thread;  // this is fep with idft and precoding
     ru->feptx_prec     = feptx_prec;              // this is fep with idft and precoding
     ru->fh_north_in    = NULL;                    // no incoming fronthaul from north
     ru->fh_north_out   = NULL;                    // no outgoing fronthaul to north
@@ -2245,7 +2419,7 @@ void init_RU(char *rf_config_file) {
        }
      }
    }
-    //    LOG_I(PHY,"Initializing RRU descriptor %d : (%s,%s,%d)\n",ru_id,ru_if_types[ru->if_south],eNB_timing[ru->if_timing],ru->function);
+    LOG_I(PHY,"Initializing RRU descriptor %d : (%s,%s,%d)\n",ru_id,ru_if_types[ru->if_south],eNB_timing[ru->if_timing],ru->function);
 
     set_function_spec_param(ru);
     LOG_I(PHY,"Starting ru_thread %d\n",ru_id);
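/* [Editor's illustration, not part of the patch] ru_thread, the new ru_thread_tx
 * and the kill_RU_proc changes above all rely on the same handshake: an
 * instance counter protected by a mutex plus a condition variable, with the
 * signal now issued before the unlock.  A stand-alone sketch of that pattern
 * (names are mine; in OAI the equivalent logic sits behind
 * wait_on_condition()/release_thread()): */
#include <pthread.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cnd = PTHREAD_COND_INITIALIZER;
static int instance_cnt = -1;             /* -1 means "nothing to process" */

static void wake_up(void)                 /* producer side, e.g. wakeup_eNBs() */
{
  pthread_mutex_lock(&mtx);
  instance_cnt = 0;
  pthread_cond_signal(&cnd);              /* signal while still holding the mutex */
  pthread_mutex_unlock(&mtx);
}

static void wait_and_process(void)        /* consumer side, e.g. ru_thread_tx() */
{
  pthread_mutex_lock(&mtx);
  while (instance_cnt < 0)                /* loop guards against spurious wakeups */
    pthread_cond_wait(&cnd, &mtx);
  /* ... TX front-end processing would run here ... */
  instance_cnt = -1;                      /* mark the work as consumed */
  pthread_mutex_unlock(&mtx);
}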
diff --git a/targets/RT/USER/lte-softmodem.c b/targets/RT/USER/lte-softmodem.c
index 676ea8988482048fc2f0480306f37b0b437b77b0..8e469d526030c052bff0cac3efe7d5f595b6ad69 100644
--- a/targets/RT/USER/lte-softmodem.c
+++ b/targets/RT/USER/lte-softmodem.c
@@ -217,6 +217,10 @@ extern PHY_VARS_UE* init_ue_vars(LTE_DL_FRAME_PARMS *frame_parms,
 extern void init_eNB_afterRU(void);
 
 int transmission_mode=1;
+int emulate_rf = 0;
+int numerology = 0;
+int codingw = 0;
+int fepw = 0;
 
@@ -632,6 +636,7 @@ void init_openair0(void) {
   int card;
   int i;
 
+
   for (card=0; card<MAX_CARDS; card++) {
 
@@ -639,6 +644,8 @@ void init_openair0(void) {
     openair0_cfg[card].configFilename = NULL;
 
     if(frame_parms[0]->N_RB_DL == 100) {
+      if(numerology == 0)
+      {
       if (frame_parms[0]->threequarter_fs) {
        openair0_cfg[card].sample_rate=23.04e6;
        openair0_cfg[card].samples_per_frame = 230400;
@@ -650,6 +657,22 @@ void init_openair0(void) {
        openair0_cfg[card].tx_bw = 10e6;
        openair0_cfg[card].rx_bw = 10e6;
       }
+      }else if(numerology == 1)
+      {
+        openair0_cfg[card].sample_rate=61.44e6;
+        openair0_cfg[card].samples_per_frame = 614400;  // one 10 ms frame at 61.44 Msps
+        openair0_cfg[card].tx_bw = 20e6;
+        openair0_cfg[card].rx_bw = 20e6;
+      }else if(numerology == 2)
+      {
+        openair0_cfg[card].sample_rate=122.88e6;
+        openair0_cfg[card].samples_per_frame = 1228800; // one 10 ms frame at 122.88 Msps
+        openair0_cfg[card].tx_bw = 20e6;
+        openair0_cfg[card].rx_bw = 20e6;
+      }else
+      {
+        printf("Unsupported numerology\n");
+      }
     } else if(frame_parms[0]->N_RB_DL == 50) {
       openair0_cfg[card].sample_rate=15.36e6;
       openair0_cfg[card].samples_per_frame = 153600;
diff --git a/targets/RT/USER/lte-softmodem.h b/targets/RT/USER/lte-softmodem.h
index e8b9d9249023d13a4d2047668b2de8eb12b28d0c..aff969908f2d738114afe6bfef26fb8e588c5498 100644
--- a/targets/RT/USER/lte-softmodem.h
+++ b/targets/RT/USER/lte-softmodem.h
@@ -85,6 +85,12 @@
 #define CONFIG_HLP_TPORT      "tracer port\n"
 #define CONFIG_HLP_NOTWAIT    "don't wait for tracer, start immediately\n"
 #define CONFIG_HLP_TNOFORK    "to ease debugging with gdb\n"
+
+#define CONFIG_HLP_NUMEROLOGY "5G numerology index (default 0)\n"
+#define CONFIG_HLP_CODINGW    "enable the coding worker thread (disabled by default)\n"
+#define CONFIG_HLP_FEPW       "enable the FEP worker thread (disabled by default)\n"
+#define CONFIG_HLP_EMULATE_RF "enable emulated RF (disabled by default)\n"
+
 #define CONFIG_HLP_DISABLNBIOT "disable nb-iot, even if defined in config\n"
 
 /***************************************************************************************************************************************/
@@ -139,35 +145,39 @@ extern int16_t dlsch_demod_shift;
 /*   optname                 helpstr              paramflags       XXXptr                               defXXXval            type        numelt   */
 /*---------------------------------------------------------------------------------------------------------------------------------------------------------------------------*/
 #define CMDLINE_PARAMS_DESC {  \
-{"rf-config-file",        CONFIG_HLP_RFCFGF,  0,              strptr:(char **)&rf_config_file,      defstrval:NULL,              TYPE_STRING,  sizeof(rf_config_file)}, \
-{"ulsch-max-errors",      CONFIG_HLP_ULMAXE,  0,              uptr:&ULSCH_max_consecutive_errors,   defuintval:0,                TYPE_UINT,    0}, \
-{"phy-test",              CONFIG_HLP_PHYTST,  PARAMFLAG_BOOL, iptr:&phy_test,                       defintval:0,                 TYPE_INT,     0}, \
-{"usim-test",             CONFIG_HLP_USIM,    PARAMFLAG_BOOL, u8ptr:&usim_test,                     defintval:0,                 TYPE_UINT8,   0}, \
-{"mmapped-dma",           CONFIG_HLP_DMAMAP,  PARAMFLAG_BOOL, uptr:&mmapped_dma,                    defintval:0,                 TYPE_INT,     0}, \
-{"external-clock",        CONFIG_HLP_EXCCLK,  PARAMFLAG_BOOL, uptr:&clock_source,                   defintval:0,                 TYPE_INT,     0}, \
-{"wait-for-sync",         NULL,               PARAMFLAG_BOOL, iptr:&wait_for_sync,                  defintval:0,                 TYPE_INT,     0}, \
-{"single-thread-disable", CONFIG_HLP_NOSNGLT, PARAMFLAG_BOOL, iptr:&single_thread_flag,             defintval:1,                 TYPE_INT,     0}, \
-{"threadIQ",              NULL,               0,              iptr:&(threads.iq),                   defintval:1,                 TYPE_INT,     0}, \
-{"threadOneSubframe",     NULL,               0,              iptr:&(threads.one),                  defintval:1,                 TYPE_INT,     0}, \
-{"threadTwoSubframe",     NULL,               0,              iptr:&(threads.two),                  defintval:1,                 TYPE_INT,     0}, \
-{"threadThreeSubframe",   NULL,               0,              iptr:&(threads.three),                defintval:1,                 TYPE_INT,     0}, \
-{"threadSlot1ProcOne",    NULL,               0,              iptr:&(threads.slot1_proc_one),       defintval:1,                 TYPE_INT,     0}, \
-{"threadSlot1ProcTwo",    NULL,               0,              iptr:&(threads.slot1_proc_two),       defintval:1,                 TYPE_INT,     0}, \
-{"dlsch-demod-shift",     CONFIG_HLP_DLSHIFT, 0,              iptr:(int32_t *)&dlsch_demod_shift,   defintval:0,                 TYPE_INT,     0}, \
-{"A" ,                    CONFIG_HLP_TADV,    0,              uptr:&timing_advance,                 defintval:0,                 TYPE_UINT,    0}, \
-{"C" ,                    CONFIG_HLP_DLF,     0,              uptr:&(downlink_frequency[0][0]),     defuintval:DEFAULT_DLF,      TYPE_UINT,    0}, \
-{"a" ,                    CONFIG_HLP_CHOFF,   0,              iptr:&chain_offset,                   defintval:0,                 TYPE_INT,     0}, \
-{"d" ,                    CONFIG_HLP_SOFTS,   PARAMFLAG_BOOL, uptr:(uint32_t *)&do_forms,           defintval:0,                 TYPE_INT8,    0}, \
-{"E" ,                    CONFIG_HLP_TQFS,    PARAMFLAG_BOOL, i8ptr:&threequarter_fs,               defintval:0,                 TYPE_INT8,    0}, \
-{"K" ,                    CONFIG_HLP_ITTIL,   PARAMFLAG_NOFREE, strptr:&itti_dump_file,             defstrval:"/tmp/itti.dump",  TYPE_STRING,  0}, \
-{"m" ,                    CONFIG_HLP_DLMCS,   0,              uptr:&target_dl_mcs,                  defintval:0,                 TYPE_UINT,    0}, \
-{"t" ,                    CONFIG_HLP_ULMCS,   0,              uptr:&target_ul_mcs,                  defintval:0,                 TYPE_UINT,    0}, \
-{"W" ,                    CONFIG_HLP_L2MONW,  0,              strptr:(char **)&in_ip,               defstrval:"127.0.0.1",       TYPE_STRING,  sizeof(in_ip)}, \
-{"P" ,                    CONFIG_HLP_L2MONP,  0,              strptr:(char **)&in_path,             defstrval:"/tmp/oai_opt.pcap", TYPE_STRING, sizeof(in_path)}, \
-{"V" ,                    CONFIG_HLP_VCD,     PARAMFLAG_BOOL, iptr:&ouput_vcd,                      defintval:0,                 TYPE_INT,     0}, \
-{"q" ,                    CONFIG_HLP_STMON,   PARAMFLAG_BOOL, iptr:&opp_enabled,                    defintval:0,                 TYPE_INT,     0}, \
-{"S" ,                    CONFIG_HLP_MSLOTS,  PARAMFLAG_BOOL, u8ptr:&exit_missed_slots,             defintval:1,                 TYPE_UINT8,   0}, \
-{"T" ,                    CONFIG_HLP_TDD,     PARAMFLAG_BOOL, iptr:&tddflag,                        defintval:0,                 TYPE_INT,     0}, \
+{"rf-config-file",        CONFIG_HLP_RFCFGF,  0,              strptr:(char **)&rf_config_file,      defstrval:NULL,              TYPE_STRING,  sizeof(rf_config_file)},\
+{"ulsch-max-errors",      CONFIG_HLP_ULMAXE,  0,              uptr:&ULSCH_max_consecutive_errors,   defuintval:0,                TYPE_UINT,    0}, \
+{"phy-test",              CONFIG_HLP_PHYTST,  PARAMFLAG_BOOL, iptr:&phy_test,                       defintval:0,                 TYPE_INT,     0}, \
+{"usim-test",             CONFIG_HLP_USIM,    PARAMFLAG_BOOL, u8ptr:&usim_test,                     defintval:0,                 TYPE_UINT8,   0}, \
+{"mmapped-dma",           CONFIG_HLP_DMAMAP,  PARAMFLAG_BOOL, uptr:&mmapped_dma,                    defintval:0,                 TYPE_INT,     0}, \
+{"external-clock",        CONFIG_HLP_EXCCLK,  PARAMFLAG_BOOL, uptr:&clock_source,                   defintval:0,                 TYPE_INT,     0}, \
+{"wait-for-sync",         NULL,               PARAMFLAG_BOOL, iptr:&wait_for_sync,                  defintval:0,                 TYPE_INT,     0}, \
+{"single-thread-disable", CONFIG_HLP_NOSNGLT, PARAMFLAG_BOOL, iptr:&single_thread_flag,             defintval:1,                 TYPE_INT,     0}, \
+{"threadIQ",              NULL,               0,              iptr:&(threads.iq),                   defintval:1,                 TYPE_INT,     0}, \
+{"threadOneSubframe",     NULL,               0,              iptr:&(threads.one),                  defintval:1,                 TYPE_INT,     0}, \
+{"threadTwoSubframe",     NULL,               0,              iptr:&(threads.two),                  defintval:1,                 TYPE_INT,     0}, \
+{"threadThreeSubframe",   NULL,               0,              iptr:&(threads.three),                defintval:1,                 TYPE_INT,     0}, \
+{"threadSlot1ProcOne",    NULL,               0,              iptr:&(threads.slot1_proc_one),       defintval:1,                 TYPE_INT,     0}, \
+{"threadSlot1ProcTwo",    NULL,               0,              iptr:&(threads.slot1_proc_two),       defintval:1,                 TYPE_INT,     0}, \
+{"dlsch-demod-shift",     CONFIG_HLP_DLSHIFT, 0,              iptr:(int32_t *)&dlsch_demod_shift,   defintval:0,                 TYPE_INT,     0}, \
+{"A" ,                    CONFIG_HLP_TADV,    0,              uptr:&timing_advance,                 defintval:0,                 TYPE_UINT,    0}, \
+{"C" ,                    CONFIG_HLP_DLF,     0,              uptr:&(downlink_frequency[0][0]),     defuintval:2680000000,       TYPE_UINT,    0}, \
+{"a" ,                    CONFIG_HLP_CHOFF,   0,              iptr:&chain_offset,                   defintval:0,                 TYPE_INT,     0}, \
+{"d" ,                    CONFIG_HLP_SOFTS,   PARAMFLAG_BOOL, uptr:(uint32_t *)&do_forms,           defintval:0,                 TYPE_INT8,    0}, \
+{"E" ,                    CONFIG_HLP_TQFS,    PARAMFLAG_BOOL, i8ptr:&threequarter_fs,               defintval:0,                 TYPE_INT8,    0}, \
+{"K" ,                    CONFIG_HLP_ITTIL,   PARAMFLAG_NOFREE, strptr:&itti_dump_file,             defstrval:"/tmp/itti.dump",  TYPE_STRING,  0}, \
+{"m" ,                    CONFIG_HLP_DLMCS,   0,              uptr:&target_dl_mcs,                  defintval:0,                 TYPE_UINT,    0}, \
+{"t" ,                    CONFIG_HLP_ULMCS,   0,              uptr:&target_ul_mcs,                  defintval:0,                 TYPE_UINT,    0}, \
+{"W" ,                    CONFIG_HLP_L2MONW,  0,              strptr:(char **)&in_ip,               defstrval:"127.0.0.1",       TYPE_STRING,  sizeof(in_ip)}, \
+{"P" ,                    CONFIG_HLP_L2MONP,  0,              strptr:(char **)&in_path,             defstrval:"/tmp/oai_opt.pcap", TYPE_STRING, sizeof(in_path)}, \
+{"V" ,                    CONFIG_HLP_VCD,     PARAMFLAG_BOOL, iptr:&ouput_vcd,                      defintval:0,                 TYPE_INT,     0}, \
+{"q" ,                    CONFIG_HLP_STMON,   PARAMFLAG_BOOL, iptr:&opp_enabled,                    defintval:0,                 TYPE_INT,     0}, \
+{"S" ,                    CONFIG_HLP_MSLOTS,  PARAMFLAG_BOOL, u8ptr:&exit_missed_slots,             defintval:1,                 TYPE_UINT8,   0}, \
+{"T" ,                    CONFIG_HLP_TDD,     PARAMFLAG_BOOL, iptr:&tddflag,                        defintval:0,                 TYPE_INT,     0}, \
+{"numerology" ,           CONFIG_HLP_NUMEROLOGY, PARAMFLAG_BOOL, iptr:&numerology,                  defintval:0,                 TYPE_INT,     0}, \
+{"emulate-rf" ,           CONFIG_HLP_EMULATE_RF, PARAMFLAG_BOOL, iptr:&emulate_rf,                  defintval:0,                 TYPE_INT,     0}, \
+{"codingw" ,              CONFIG_HLP_CODINGW, PARAMFLAG_BOOL, iptr:&codingw,                        defintval:0,                 TYPE_INT,     0}, \
+{"fepw" ,                 CONFIG_HLP_FEPW,    PARAMFLAG_BOOL, iptr:&fepw,                           defintval:0,                 TYPE_INT,     0}, \
 {"nbiot-disable",         CONFIG_HLP_DISABLNBIOT, PARAMFLAG_BOOL, iptr:&nonbiotflag,                defintval:0,                 TYPE_INT,     0} \
 }
 
@@ -262,8 +272,10 @@ extern void reset_opp_meas(void);
 extern void print_opp_meas(void);
 
 extern void init_fep_thread(PHY_VARS_eNB *, pthread_attr_t *);
-extern void init_td_thread(PHY_VARS_eNB *, pthread_attr_t *);
-extern void init_te_thread(PHY_VARS_eNB *, pthread_attr_t *);
+extern void init_td_thread(PHY_VARS_eNB *);
+extern void init_te_thread(PHY_VARS_eNB *);
+extern void kill_td_thread(PHY_VARS_eNB *);
+extern void kill_te_thread(PHY_VARS_eNB *);
 
 PHY_VARS_UE* init_ue_vars(LTE_DL_FRAME_PARMS *frame_parms,
                           uint8_t UE_id,
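/* [Editor's illustration, not part of the patch] The four new table entries are
 * plain integer switches bound to the emulate_rf/numerology/codingw/fepw globals
 * defined in lte-softmodem.c, e.g. a hypothetical invocation:
 *   ./lte-softmodem -O enb.conf --emulate-rf --fepw
 * lte-ru.c then gates the worker threads on core count plus the flag; that check
 * reduces to something like this sketch (function and variable names are mine): */
#include <sys/sysinfo.h>                 /* get_nprocs() */

static int fepw_flag = 0;                /* stands in for the fepw global set by --fepw */

static int use_fep_workers(void)
{
  /* parallel front-end processing is only enabled when the machine has more
   * than two cores and the user explicitly asked for it */
  return (get_nprocs() > 2) && fepw_flag;
}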
diff --git a/targets/RT/USER/rt_wrapper.c b/targets/RT/USER/rt_wrapper.c
index c1e5996d44375f4d40f3eeb433ebced1279810f9..49fbed625672f9b4b9cc1e1defcb83381ca235c9 100644
--- a/targets/RT/USER/rt_wrapper.c
+++ b/targets/RT/USER/rt_wrapper.c
@@ -43,6 +43,7 @@
 #include <getopt.h>
 #include <sys/sysinfo.h>
 #include "rt_wrapper.h"
+#include <errno.h>
 
 #include "openair1/PHY/defs.h"
 
@@ -283,6 +284,7 @@ void thread_top_init(char *thread_name,
 
   if (sched_setattr(0, &attr, flags) < 0 ) {
     perror("[SCHED] eNB tx thread: sched_setattr failed\n");
+    fprintf(stderr,"sched_setattr Error = %s\n",strerror(errno));
     exit(1);
   }
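/* [Editor's illustration, not part of the patch] The added fprintf() reports why
 * sched_setattr() rejected the SCHED_DEADLINE attributes, alongside perror()'s
 * message.  When reporting errno this way it is safest to snapshot it first,
 * since later library calls can overwrite it.  Minimal stand-alone pattern
 * (function name is mine): */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int report_failure(const char *what)
{
  int err = errno;                        /* capture errno before anything else */
  fprintf(stderr, "%s failed: %s (errno=%d)\n", what, strerror(err), err);
  return -err;
}

/* typical use:
 *   if (sched_setattr(0, &attr, flags) < 0)
 *     return report_failure("sched_setattr");
 */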