diff --git a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.c b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.c
index b9cad28278ba60fab4a54d0ce5535d6f5ea5c6d1..e353f5fde72d65725154084e1520cec23bb1e6b7 100644
--- a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.c
+++ b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac.c
@@ -30,6 +30,7 @@
 #include "flexran_agent_extern.h"
 #include "flexran_agent_common.h"
 #include "flexran_agent_mac_internal.h"
+#include "flexran_agent_net_comm.h"
 
 #include "LAYER2/MAC/proto.h"
 #include "LAYER2/MAC/flexran_agent_mac_proto.h"
@@ -60,14 +61,11 @@ int flexran_agent_mac_handle_stats(mid_t mod_id, const void *params, Protocol__F
   // TODO: Must resolve conflicts among stats requests
 
   int i;
-  void *buffer;
-  int size;
   err_code_t err_code;
   xid_t xid;
   uint32_t usec_interval, sec_interval;
 
   //TODO: We do not deal with multiple CCs at the moment and eNB id is 0
-  int cc_id = 0;
   int enb_id = mod_id;
 
   //eNB_MAC_INST *eNB = &eNB_mac_inst[enb_id];
@@ -250,15 +248,15 @@ int flexran_agent_mac_stats_request(mid_t mod_id,
   Protocol__FlexHeader *header;
   int i;
 
-  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_STATS_REQUEST, &header) != 0)
-    goto error;
-
   Protocol__FlexStatsRequest *stats_request_msg;
   stats_request_msg = malloc(sizeof(Protocol__FlexStatsRequest));
   if(stats_request_msg == NULL)
     goto error;
-
   protocol__flex_stats_request__init(stats_request_msg);
+
+  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_STATS_REQUEST, &header) != 0)
+    goto error;
+
   stats_request_msg->header = header;
 
   stats_request_msg->type = report_config->report_type;
@@ -375,20 +373,19 @@ int flexran_agent_mac_stats_reply(mid_t mod_id,
 				  Protocol__FlexranMessage **msg) {
   Protocol__FlexHeader *header;
   int i, j, k;
-  int cc_id = 0;
   int enb_id = mod_id;
   //eNB_MAC_INST *eNB = &eNB_mac_inst[enb_id];
   //UE_list_t *eNB_UE_list=  &eNB->UE_list;
 
-
-  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_STATS_REPLY, &header) != 0)
-    goto error;
-
   Protocol__FlexStatsReply *stats_reply_msg;
   stats_reply_msg = malloc(sizeof(Protocol__FlexStatsReply));
   if (stats_reply_msg == NULL)
     goto error;
   protocol__flex_stats_reply__init(stats_reply_msg);
+
+  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_STATS_REPLY, &header) != 0)
+    goto error;
+
   stats_reply_msg->header = header;
 
   stats_reply_msg->n_ue_report = report_config->nr_ue;
@@ -419,7 +416,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id,
 	elem = (uint32_t *) malloc(sizeof(uint32_t)*ue_report[i]->n_bsr);
 	if (elem == NULL)
 	  goto error;
-	for (j = 0; j++; j < ue_report[i]->n_bsr) {
+	for (j = 0; j < ue_report[i]->n_bsr; j++) {
 	  // NN: we need to know the cc_id here, consider the first one
 	  elem[j] = flexran_get_ue_bsr (enb_id, i, j); 
 	}
@@ -472,7 +469,7 @@ int flexran_agent_mac_stats_reply(mid_t mod_id,
       /* Check flag for creation of MAC CE buffer status report */
       if (report_config->ue_report_type[i].ue_report_flags & PROTOCOL__FLEX_UE_STATS_TYPE__FLUST_MAC_CE_BS) {
 	// TODO: Fill in the actual MAC CE buffer status report
-	ue_report[i]->pending_mac_ces = (flexran_get_MAC_CE_bitmap_TA(enb_id,i,0) | (0 << 1) | (0 << 2) | (0 << 3)) & 15; /* Use as bitmap. Set one or more of the; /* Use as bitmap. Set one or more of the
+	ue_report[i]->pending_mac_ces = (flexran_get_MAC_CE_bitmap_TA(enb_id,i,0) | (0 << 1) | (0 << 2) | (0 << 3)) & 15;  /* Use as bitmap. Set one or more of the
 					       PROTOCOL__FLEX_CE_TYPE__FLPCET_ values
 					       found in stats_common.pb-c.h. See
 					       flex_ce_type in FlexRAN specification */
@@ -802,6 +799,8 @@ int flexran_agent_mac_destroy_stats_reply(Protocol__FlexranMessage *msg) {
 	  }
 	  free(dl_report->csi_report[j]->a31csi->sb_cqi);
 	  break;
+	default:
+	  break;
 	}
 
 	free(dl_report->csi_report[j]);
@@ -856,8 +855,6 @@ int flexran_agent_mac_sr_info(mid_t mod_id, const void *params, Protocol__Flexra
   Protocol__FlexHeader *header;
   int i;
   const int xid = *((int *)params);
-  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_UL_SR_INFO, &header) != 0)
-    goto error;
 
   Protocol__FlexUlSrInfo *ul_sr_info_msg;
   ul_sr_info_msg = malloc(sizeof(Protocol__FlexUlSrInfo));
@@ -866,6 +863,9 @@ int flexran_agent_mac_sr_info(mid_t mod_id, const void *params, Protocol__Flexra
   }
   protocol__flex_ul_sr_info__init(ul_sr_info_msg);
 
+  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_UL_SR_INFO, &header) != 0)
+    goto error;
+
   ul_sr_info_msg->header = header;
   ul_sr_info_msg->has_sfn_sf = 1;
   ul_sr_info_msg->sfn_sf = flexran_get_sfn_sf(mod_id);
@@ -923,8 +923,6 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle
   Protocol__FlexHeader *header;
   int i,j;
   const int xid = *((int *)params);
-  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_SF_TRIGGER, &header) != 0)
-    goto error;
 
   Protocol__FlexSfTrigger *sf_trigger_msg;
   sf_trigger_msg = malloc(sizeof(Protocol__FlexSfTrigger));
@@ -933,6 +931,9 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle
   }
   protocol__flex_sf_trigger__init(sf_trigger_msg);
 
+  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_SF_TRIGGER, &header) != 0)
+    goto error;
+
   frame_t frame;
   sub_frame_t subframe;
 
@@ -975,8 +976,8 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle
       dl_info[i]->rnti = flexran_get_ue_crnti(mod_id, i);
       dl_info[i]->has_rnti = 1;
       /*Fill in the right id of this round's HARQ process for this UE*/
-      int harq_id;
-      int harq_status;
+      unsigned char harq_id;
+      unsigned char harq_status;
       flexran_get_harq(mod_id, UE_PCCID(mod_id,i), i, frame, subframe, &harq_id, &harq_status);
       dl_info[i]->harq_process_id = harq_id;
       dl_info[i]->has_harq_process_id = 1;
@@ -1060,10 +1061,7 @@ int flexran_agent_mac_sf_trigger(mid_t mod_id, const void *params, Protocol__Fle
     for (i = 0; i < sf_trigger_msg->n_dl_info; i++) {
       free(sf_trigger_msg->dl_info[i]->harq_status);
     }
-    free(sf_trigger_msg->dl_info);
-    for (i = 0; i < sf_trigger_msg->n_ul_info; i++) {
-      free(sf_trigger_msg->ul_info[i]->reception_status);
-    }
+    free(sf_trigger_msg->dl_info);
     free(sf_trigger_msg->ul_info);
     free(sf_trigger_msg);
   }
@@ -1230,9 +1228,9 @@ int flexran_agent_mac_handle_dl_mac_config(mid_t mod_id, const void *params, Pro
   *msg = NULL;
   return 2;
 
- error:
-  *msg = NULL;
-  return -1;
+  // error:
+  //*msg = NULL;
+  //return -1;
 }
 
 void flexran_agent_init_mac_agent(mid_t mod_id) {
@@ -1252,7 +1250,7 @@ void flexran_agent_send_sr_info(mid_t mod_id) {
   int size;
   Protocol__FlexranMessage *msg;
   void *data;
-  int priority;
+  int priority = 0;
   err_code_t err_code;
 
   int xid = 0;
@@ -1282,7 +1280,7 @@ void flexran_agent_send_sf_trigger(mid_t mod_id) {
   int size;
   Protocol__FlexranMessage *msg;
   void *data;
-  int priority;
+  int priority = 0;
   err_code_t err_code;
 
   int xid = 0;
@@ -1310,13 +1308,11 @@ void flexran_agent_send_sf_trigger(mid_t mod_id) {
 
 void flexran_agent_send_update_mac_stats(mid_t mod_id) {
 
-  Protocol__FlexranMessage *current_report = NULL, *msg;
+  Protocol__FlexranMessage *current_report = NULL;
   void *data;
   int size;
   err_code_t err_code;
-  int priority;
-
-  mac_stats_updates_context_t stats_context = mac_stats_context[mod_id];
+  int priority = 0;
   
   if (pthread_mutex_lock(mac_stats_context[mod_id].mutex)) {
     goto error;
@@ -1437,7 +1433,7 @@ err_code_t flexran_agent_destroy_cont_mac_stats_update(mid_t mod_id) {
   flexran_agent_destroy_flexran_message(mac_stats_context[mod_id].prev_stats_reply);
   free(mac_stats_context[mod_id].mutex);
 
-  mac_agent_registered[mod_id] = NULL;
+  mac_agent_registered[mod_id] = 0;
   return 1;
 }
 
diff --git a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_defs.h b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_defs.h
index bb6aed1d8e9fadda1a37339fa993635b39de7956..9ec8594f88e567643f566145bb4dc2a0268a616c 100644
--- a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_defs.h
+++ b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_defs.h
@@ -63,8 +63,8 @@ typedef struct {
 
   /// Notify the controller for a state change of a particular UE, by sending the proper
   /// UE state change message (ACTIVATION, DEACTIVATION, HANDOVER)
-  void (*flexran_agent_notify_ue_state_change)(mid_t mod_id, uint32_t rnti,
-					       uint32_t state_change);
+  int (*flexran_agent_notify_ue_state_change)(mid_t mod_id, uint32_t rnti,
+					       uint8_t state_change);
   
   
   void *dl_scheduler_loaded_lib;
diff --git a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.c b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.c
index a63225e657cc725f42124ccf60fcfafb228c7f45..1fa9852487d6922ba211be4e7affddcedbf3339a 100644
--- a/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.c
+++ b/openair2/ENB_APP/CONTROL_MODULES/MAC/flexran_agent_mac_internal.c
@@ -29,6 +29,7 @@
 #include <string.h>
 #include <dlfcn.h>
 
+#include "flexran_agent_common_internal.h"
 #include "flexran_agent_mac_internal.h"
 
 Protocol__FlexranMessage * flexran_agent_generate_diff_mac_stats_report(Protocol__FlexranMessage *new_message,
@@ -49,11 +50,6 @@ Protocol__FlexranMessage * flexran_agent_generate_diff_mac_stats_report(Protocol
   old_report = old_message->stats_reply_msg;
   new_report = new_message->stats_reply_msg;
 
-  /*Flags to designate changes in various levels of the message*/
-  int stats_had_changes = 0;
-  int ue_had_change = 0;
-  int cell_had_change = 0;
-
   /*See how many and which UE reports should be included in the final stats message*/
   int n_ue_report = 0;
   int ue_found = 0;
@@ -101,23 +97,6 @@ Protocol__FlexranMessage * flexran_agent_generate_diff_mac_stats_report(Protocol
     }
     cell_found = 0;
   }
-
-  /*TODO: create the reply message based on the findings*/
-  /*Create ue report list*/
-  if (n_ue_report > 0) {
-    ue_report = malloc(sizeof(Protocol__FlexUeStatsReport *));
-    for (i = 0; i<n_ue_report; i++) {
-      ue_report[i] = tmp_ue_report[i];
-    }
-  }
-
-  /*Create cell report list*/
-  if (n_cell_report > 0) {
-    cell_report = malloc(sizeof(Protocol__FlexCellStatsReport *));
-    for (i = 0; i<n_cell_report; i++) {
-      cell_report[i] = tmp_cell_report[i];
-    }
-  }
   
   if (n_cell_report > 0 || n_ue_report > 0) {
     /*Create header*/
@@ -128,11 +107,30 @@ Protocol__FlexranMessage * flexran_agent_generate_diff_mac_stats_report(Protocol
     }
     stats_reply_msg = malloc(sizeof(Protocol__FlexStatsReply));
     protocol__flex_stats_reply__init(stats_reply_msg);
+
     stats_reply_msg->header = header;
+    
+    /*TODO: create the reply message based on the findings*/
+    /*Create ue report list*/
     stats_reply_msg->n_ue_report = n_ue_report;
-    stats_reply_msg->ue_report = ue_report;
+    if (n_ue_report > 0) {
+      ue_report = malloc(sizeof(Protocol__FlexUeStatsReport *) * n_ue_report);
+      for (i = 0; i<n_ue_report; i++) {
+	ue_report[i] = tmp_ue_report[i];
+      }
+      stats_reply_msg->ue_report = ue_report;
+    }
+    
+    /*Create cell report list*/
     stats_reply_msg->n_cell_report = n_cell_report;
-    stats_reply_msg->cell_report = cell_report;
+    if (n_cell_report > 0) {
+      cell_report = malloc(sizeof(Protocol__FlexCellStatsReport *) * n_cell_report);
+      for (i = 0; i<n_cell_report; i++) {
+	cell_report[i] = tmp_cell_report[i];
+      }
+      stats_reply_msg->cell_report = cell_report;
+    }
+
     msg = malloc(sizeof(Protocol__FlexranMessage));
     if(msg == NULL)
       goto error;
@@ -270,7 +268,7 @@ Protocol__FlexUlCqiReport * copy_ul_cqi_report(Protocol__FlexUlCqiReport * origi
   ul_report = malloc(sizeof(Protocol__FlexUlCqi *) * full_ul_report->n_cqi_meas);
   if(ul_report == NULL)
     goto error;
-  for(i = 0; i++; i < full_ul_report->n_cqi_meas) {
+  for(i = 0; i < full_ul_report->n_cqi_meas; i++) {
     ul_report[i] = malloc(sizeof(Protocol__FlexUlCqi));
     if(ul_report[i] == NULL)
       goto error;
@@ -597,22 +595,22 @@ int parse_mac_config(mid_t mod_id, yaml_parser_t *parser) {
 	goto error;
       }
       // Check the types of subsystems offered and handle their values accordingly
-      if (strcmp(event.data.scalar.value, "dl_scheduler") == 0) {
+      if (strcmp((char *) event.data.scalar.value, "dl_scheduler") == 0) {
 	LOG_D(ENB_APP, "This is for the dl_scheduler subsystem\n");
 	// Call the proper handler
 	if (parse_dl_scheduler_config(mod_id, parser) == -1) {
 	  LOG_D(ENB_APP, "An error occured\n");
 	  goto error;
 	}
-      } else if (strcmp(event.data.scalar.value, "ul_scheduler") == 0) {
+      } else if (strcmp((char *) event.data.scalar.value, "ul_scheduler") == 0) {
 	// Call the proper handler
 	LOG_D(ENB_APP, "This is for the ul_scheduler subsystem\n");
 	goto error;
 	// TODO
-      } else if (strcmp(event.data.scalar.value, "ra_scheduler") == 0) {
+      } else if (strcmp((char *) event.data.scalar.value, "ra_scheduler") == 0) {
 	// Call the proper handler
 	// TODO
-      } else if (strcmp(event.data.scalar.value, "page_scheduler") == 0) {
+      } else if (strcmp((char *) event.data.scalar.value, "page_scheduler") == 0) {
 	// Call the proper handler
 	// TODO
       } else {
@@ -665,20 +663,20 @@ int parse_dl_scheduler_config(mid_t mod_id, yaml_parser_t *parser) {
 	goto error;
       }
       // Check what key needs to be set
-      if (strcmp(event.data.scalar.value, "behavior") == 0) {
+      if (strcmp((char *) event.data.scalar.value, "behavior") == 0) {
 	LOG_I(ENB_APP, "Time to set the behavior attribute\n");
 	yaml_event_delete(&event);
 	if (!yaml_parser_parse(parser, &event)) {
 	  goto error;
 	}
 	if (event.type == YAML_SCALAR_EVENT) {
-	  if (load_dl_scheduler_function(mod_id, event.data.scalar.value) == -1) {
+	  if (load_dl_scheduler_function(mod_id, (char *) event.data.scalar.value) == -1) {
 	    goto error;
 	  } 
 	} else {
 	  goto error;
 	}
-      } else if (strcmp(event.data.scalar.value, "parameters") == 0) {
+      } else if (strcmp((char *) event.data.scalar.value, "parameters") == 0) {
 	LOG_D(ENB_APP, "Now it is time to set the parameters for this subsystem\n");
 	if (parse_dl_scheduler_parameters(mod_id, parser) == -1) {
 	  goto error;
@@ -731,7 +729,7 @@ int parse_dl_scheduler_parameters(mid_t mod_id, yaml_parser_t *parser) {
       if (mac_agent_registered[mod_id]) {
 	LOG_D(ENB_APP, "Setting parameter %s\n", event.data.scalar.value);
 	param = dlsym(agent_mac_xface[mod_id]->dl_scheduler_loaded_lib,
-		      event.data.scalar.value);
+		      (char *) event.data.scalar.value);
 	if (param == NULL) {
 	  goto error;
 	}
diff --git a/openair2/ENB_APP/enb_config.c b/openair2/ENB_APP/enb_config.c
index fb4c6266cae811568d6b1e132ebcf8c04fb7b2ef..1841d8e9a90631517f995bf73f78ea57134666a6 100644
--- a/openair2/ENB_APP/enb_config.c
+++ b/openair2/ENB_APP/enb_config.c
@@ -344,7 +344,7 @@ void enb_config_display(void)
 #if defined(FLEXRAN_AGENT_SB_IF)
     printf( "\nFLEXRAN AGENT CONFIG : \n\n");
     printf( "\tInterface name:           \t%s:\n",enb_properties.properties[i]->flexran_agent_interface_name);
-    printf( "\tInterface IP Address:     \t%s:\n",enb_properties.properties[i]->flexran_agent_ipv4_address);
+    //    printf( "\tInterface IP Address:     \t%s:\n",enb_properties.properties[i]->flexran_agent_ipv4_address);
     printf( "\tInterface PORT:           \t%d:\n\n",enb_properties.properties[i]->flexran_agent_port);
     printf( "\tCache directory:          \t%s:\n",enb_properties.properties[i]->flexran_agent_cache);
     
@@ -2494,10 +2494,10 @@ const Enb_properties_array_t *enb_config_init(char* lib_config_file_name_pP)
               enb_properties.properties[enb_properties_index]->flexran_agent_interface_name = strdup(flexran_agent_interface_name);
               cidr = flexran_agent_ipv4_address;
               address = strtok(cidr, "/");
-	      enb_properties.properties[enb_properties_index]->flexran_agent_ipv4_address = strdup(address);
-	      /*  if (address) {
+	      //enb_properties.properties[enb_properties_index]->flexran_agent_ipv4_address = strdup(address);
+	      if (address) {
                 IPV4_STR_ADDR_TO_INT_NWBO (address, enb_properties.properties[enb_properties_index]->flexran_agent_ipv4_address, "BAD IP ADDRESS FORMAT FOR eNB Agent !\n" );
-		}*/
+	      }
 
               enb_properties.properties[enb_properties_index]->flexran_agent_port = flexran_agent_port;
 	      enb_properties.properties[enb_properties_index]->flexran_agent_cache = strdup(flexran_agent_cache);
diff --git a/openair2/ENB_APP/flexran_agent.c b/openair2/ENB_APP/flexran_agent.c
index beca1062bd6aa0fbd82a20ad5a0c39c0d0dfa899..0dcacd4b81a6d64da85524cb42619c4937330583 100644
--- a/openair2/ENB_APP/flexran_agent.c
+++ b/openair2/ENB_APP/flexran_agent.c
@@ -30,6 +30,8 @@
 #include "log.h"
 #include "flexran_agent.h"
 #include "flexran_agent_mac_defs.h"
+#include "flexran_agent_mac.h"
+#include "flexran_agent_mac_internal.h"
 
 #include "flexran_agent_extern.h"
 
@@ -38,6 +40,8 @@
 #include "flexran_agent_net_comm.h"
 #include "flexran_agent_async.h"
 
+#include <arpa/inet.h>
+
 //#define TEST_TIMER
 
 flexran_agent_instance_t flexran_agent[NUM_MAX_ENB];
@@ -64,11 +68,10 @@ void *flexran_agent_task(void *args){
   void *data;
   int size;
   err_code_t err_code;
-  int                   priority;
+  int                   priority = 0;
 
   MessageDef                     *msg_p           = NULL;
   const char                     *msg_name        = NULL;
-  instance_t                      instance;
   int                             result;
   struct flexran_agent_timer_element_s * elem = NULL;
 
@@ -79,7 +82,6 @@ void *flexran_agent_task(void *args){
     itti_receive_msg (TASK_FLEXRAN_AGENT, &msg_p);
     DevAssert(msg_p != NULL);
     msg_name = ITTI_MSG_NAME (msg_p);
-    instance = ITTI_MSG_INSTANCE (msg_p);
 
     switch (ITTI_MSG_ID(msg_p)) {
     case TERMINATE_MESSAGE:
@@ -212,9 +214,8 @@ int flexran_agent_start(mid_t mod_id, const Enb_properties_array_t* enb_properti
     strcpy(local_cache, DEFAULT_FLEXRAN_AGENT_CACHE);
   }
   
-  if (enb_properties->properties[mod_id]->flexran_agent_ipv4_address != NULL) {
-    strncpy(in_ip, enb_properties->properties[mod_id]->flexran_agent_ipv4_address, sizeof(in_ip) );
-    in_ip[sizeof(in_ip) - 1] = 0; // terminate string
+  if (enb_properties->properties[mod_id]->flexran_agent_ipv4_address != 0) {
+    inet_ntop(AF_INET, &(enb_properties->properties[mod_id]->flexran_agent_ipv4_address), in_ip, INET_ADDRSTRLEN);
   } else {
     strcpy(in_ip, DEFAULT_FLEXRAN_AGENT_IPv4_ADDRESS ); 
   }
@@ -237,7 +238,7 @@ int flexran_agent_start(mid_t mod_id, const Enb_properties_array_t* enb_properti
     channel_container_init = 1;
   }
   /*Create the async channel info*/
-  flexran_agent_instance_t *channel_info = flexran_agent_async_channel_info(mod_id, in_ip, in_port);
+  flexran_agent_async_channel_t *channel_info = flexran_agent_async_channel_info(mod_id, in_ip, in_port);
 
   /*Create a channel using the async channel info*/
   channel_id = flexran_agent_create_channel((void *) channel_info, 
diff --git a/openair2/ENB_APP/flexran_agent_common.c b/openair2/ENB_APP/flexran_agent_common.c
index 2ccca6602b5d8f2fd60a4f3f37b3f71dd01b6476..22793f16b490d46ec9eec1da737c6e78ad3121b2 100644
--- a/openair2/ENB_APP/flexran_agent_common.c
+++ b/openair2/ENB_APP/flexran_agent_common.c
@@ -32,6 +32,7 @@
 #include "flexran_agent_common.h"
 #include "flexran_agent_common_internal.h"
 #include "flexran_agent_extern.h"
+#include "flexran_agent_net_comm.h"
 #include "PHY/extern.h"
 #include "log.h"
 
@@ -110,14 +111,16 @@ int flexran_agent_hello(mid_t mod_id, const void *params, Protocol__FlexranMessa
   Protocol__FlexHeader *header;
   /*TODO: Need to set random xid or xid from received hello message*/
   xid_t xid = 1;
-  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_HELLO, &header) != 0)
-    goto error;
 
   Protocol__FlexHello *hello_msg;
   hello_msg = malloc(sizeof(Protocol__FlexHello));
   if(hello_msg == NULL)
     goto error;
   protocol__flex_hello__init(hello_msg);
+
+  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_HELLO, &header) != 0)
+    goto error;
+
   hello_msg->header = header;
 
   *msg = malloc(sizeof(Protocol__FlexranMessage));
@@ -163,14 +166,16 @@ int flexran_agent_echo_request(mid_t mod_id, const void* params, Protocol__Flexr
   Protocol__FlexHeader *header;
   /*TODO: Need to set a random xid*/
   xid_t xid = 1;
-  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_ECHO_REQUEST, &header) != 0)
-    goto error;
 
-  Protocol__FlexEchoRequest *echo_request_msg;
+  Protocol__FlexEchoRequest *echo_request_msg = NULL;
   echo_request_msg = malloc(sizeof(Protocol__FlexEchoRequest));
   if(echo_request_msg == NULL)
     goto error;
   protocol__flex_echo_request__init(echo_request_msg);
+
+  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_ECHO_REQUEST, &header) != 0)
+    goto error;
+
   echo_request_msg->header = header;
 
   *msg = malloc(sizeof(Protocol__FlexranMessage));
@@ -217,15 +222,16 @@ int flexran_agent_echo_reply(mid_t mod_id, const void *params, Protocol__Flexran
   Protocol__FlexEchoRequest *echo_req = input->echo_request_msg;
   xid = (echo_req->header)->xid;
 
-  Protocol__FlexHeader *header;
-  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_ECHO_REPLY, &header) != 0)
-    goto error;
-
-  Protocol__FlexEchoReply *echo_reply_msg;
+  Protocol__FlexEchoReply *echo_reply_msg = NULL;
   echo_reply_msg = malloc(sizeof(Protocol__FlexEchoReply));
   if(echo_reply_msg == NULL)
     goto error;
   protocol__flex_echo_reply__init(echo_reply_msg);
+
+  Protocol__FlexHeader *header;
+  if (flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_ECHO_REPLY, &header) != 0)
+    goto error;
+
   echo_reply_msg->header = header;
 
   *msg = malloc(sizeof(Protocol__FlexranMessage));
@@ -299,7 +305,7 @@ int flexran_agent_destroy_ue_config_reply(Protocol__FlexranMessage *msg) {
   if(msg->msg_case != PROTOCOL__FLEXRAN_MESSAGE__MSG_UE_CONFIG_REPLY_MSG)
     goto error;
   free(msg->ue_config_reply_msg->header);
-  int i, j;
+  int i;
   Protocol__FlexUeConfigReply *reply = msg->ue_config_reply_msg;
   
   for(i = 0; i < reply->n_ue_config;i++){
@@ -376,7 +382,7 @@ int flexran_agent_destroy_lc_config_request(Protocol__FlexranMessage *msg) {
 }
 
 // call this function to start a nanosecond-resolution timer
-struct timespec timer_start(){
+struct timespec timer_start(void) {
     struct timespec start_time;
     clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start_time);
     return start_time;
@@ -395,11 +401,7 @@ int flexran_agent_control_delegation(mid_t mod_id, const void *params, Protocol_
   Protocol__FlexranMessage *input = (Protocol__FlexranMessage *)params;
   Protocol__FlexControlDelegation *control_delegation_msg = input->control_delegation_msg;
 
-  uint32_t delegation_type = control_delegation_msg->delegation_type;
-
-  int i;
-
-  struct timespec vartime = timer_start();
+  //  struct timespec vartime = timer_start();
   
   //Write the payload lib into a file in the cache and load the lib
   char lib_name[120];
@@ -413,16 +415,15 @@ int flexran_agent_control_delegation(mid_t mod_id, const void *params, Protocol_
   fwrite(control_delegation_msg->payload.data, control_delegation_msg->payload.len, 1, f);
   fclose(f);
 
-  long time_elapsed_nanos = timer_end(vartime);
+  //  long time_elapsed_nanos = timer_end(vartime);
   *msg = NULL;
   return 0;
 
- error:
-  return -1;
 }
 
 int flexran_agent_destroy_control_delegation(Protocol__FlexranMessage *msg) {
   /*TODO: Dealocate memory for a dynamically allocated control delegation message*/
+  return 0;
 }
 
 int flexran_agent_reconfiguration(mid_t mod_id, const void *params, Protocol__FlexranMessage **msg) {
@@ -437,6 +438,7 @@ int flexran_agent_reconfiguration(mid_t mod_id, const void *params, Protocol__Fl
 
 int flexran_agent_destroy_agent_reconfiguration(Protocol__FlexranMessage *msg) {
   /*TODO: Dealocate memory for a dynamically allocated agent reconfiguration message*/
+  return 0;
 }
 
 
@@ -474,7 +476,7 @@ int flexran_get_current_time_ms (mid_t mod_id, int subframe_flag){
 
 unsigned int flexran_get_current_frame (mid_t mod_id) {
 
-  #warning "SFN will not be in [0-1023] when oaisim is used"
+  //  #warning "SFN will not be in [0-1023] when oaisim is used"
   return ((eNB_MAC_INST *)enb[mod_id])->frame;
   
 }
@@ -637,7 +639,6 @@ int flexran_get_active_CC(mid_t mod_id, mid_t ue_id) {
 int flexran_get_current_RI(mid_t mod_id, mid_t ue_id, int CC_id) {
 	LTE_eNB_UE_stats	*eNB_UE_stats = NULL;
 
-	int			 pCCid	      = UE_PCCID(mod_id,ue_id);
 	rnti_t			 rnti	      = flexran_get_ue_crnti(mod_id,ue_id);
 
 	eNB_UE_stats			      = mac_xface->get_eNB_UE_stats(mod_id,CC_id,rnti);
@@ -679,7 +680,8 @@ int flexran_get_tpc(mid_t mod_id, mid_t ue_id) {
 	return tpc;
 }
 
-int flexran_get_harq(const mid_t mod_id, const uint8_t CC_id, const mid_t ue_id, const int frame, const uint8_t subframe, int *id, int *round)	{ //flag_id_status = 0 then id, else status
+int flexran_get_harq(const mid_t mod_id, const uint8_t CC_id, const mid_t ue_id, const int frame, const uint8_t subframe, 
+		     unsigned char *id, unsigned char *round)	{ //flag_id_status = 0 then id, else status
 	/*TODO: Add int TB in function parameters to get the status of the second TB. This can be done to by editing in
 	 * get_ue_active_harq_pid function in line 272 file: phy_procedures_lte_eNB.c to add
 	 * DLSCH_ptr = PHY_vars_eNB_g[Mod_id][CC_id]->dlsch_eNB[(uint32_t)UE_id][1];*/
@@ -701,7 +703,7 @@ int flexran_get_harq(const mid_t mod_id, const uint8_t CC_id, const mid_t ue_id,
   /* } */
 
   /* return 0; */
-  return round;
+  return *round;
 }
 
 int flexran_get_p0_pucch_dbm(mid_t mod_id, mid_t ue_id, int CC_id) {
@@ -922,15 +924,11 @@ int flexran_get_special_subframe_assignment(mid_t mod_id, int CC_id) {
 }
 
 int flexran_get_ra_ResponseWindowSize(mid_t mod_id, int CC_id) {
-	Enb_properties_array_t *enb_properties;
-	enb_properties = enb_config_get();
-	return enb_properties->properties[mod_id]->rach_raResponseWindowSize[CC_id];
+  return enb_config_get()->properties[mod_id]->rach_raResponseWindowSize[CC_id];
 }
 
 int flexran_get_mac_ContentionResolutionTimer(mid_t mod_id, int CC_id) {
-	Enb_properties_array_t *enb_properties;
-	enb_properties = enb_config_get();
-	return enb_properties->properties[mod_id]->rach_macContentionResolutionTimer[CC_id];
+  return enb_config_get()->properties[mod_id]->rach_macContentionResolutionTimer[CC_id];
 }
 
 int flexran_get_duplex_mode(mid_t mod_id, int CC_id) {
@@ -969,16 +967,19 @@ int flexran_get_num_pdcch_symb(mid_t mod_id, int CC_id) {
 
 
 int flexran_get_time_alignment_timer(mid_t mod_id, mid_t ue_id) {
-	struct rrc_eNB_ue_context_s* ue_context_p = NULL;
-	uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
-
-	ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
-	if(ue_context_p != NULL) {
-	  if(ue_context_p->ue_context.mac_MainConfig != NULL)
-	    return ue_context_p->ue_context.mac_MainConfig->timeAlignmentTimerDedicated;
-	}
-	else
-	  return -1;
+  struct rrc_eNB_ue_context_s* ue_context_p = NULL;
+  uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
+  
+  ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
+  if(ue_context_p != NULL) {
+    if(ue_context_p->ue_context.mac_MainConfig != NULL) {
+      return ue_context_p->ue_context.mac_MainConfig->timeAlignmentTimerDedicated;
+    } else {
+      return -1;
+    }
+  } else {
+    return -1;
+  }
 }
 
 int flexran_get_meas_gap_config(mid_t mod_id, mid_t ue_id) {
@@ -1040,12 +1041,14 @@ int flexran_get_half_duplex(mid_t ue_id) {
 		//	halfduplex = 1;
 	//}
 	//return halfduplex;
+  return 0;
 }
 
 int flexran_get_intra_sf_hopping(mid_t ue_id) {
 	//TODO:Get proper value
 	//temp = (((UE_RRC_INST *)enb_ue_rrc[ue_id])->UECap->UE_EUTRA_Capability->featureGroupIndicators->buf);
 	//return (0 & ( 1 << (31)));
+  return 0;
 }
 
 int flexran_get_type2_sb_1(mid_t ue_id) {
@@ -1053,11 +1056,13 @@ int flexran_get_type2_sb_1(mid_t ue_id) {
 	//uint8_t temp = 0;
 	//temp = (((UE_RRC_INST *)enb_ue_rrc[ue_id])->UECap->UE_EUTRA_Capability->featureGroupIndicators->buf);
 	//return (temp & ( 1 << (11)));
+  return 0;
 }
 
 int flexran_get_ue_category(mid_t ue_id) {
 	//TODO:Get proper value
 	//return (((UE_RRC_INST *)enb_ue_rrc[ue_id])->UECap->UE_EUTRA_Capability->ue_Category);
+  return 0;
 }
 
 int flexran_get_res_alloc_type1(mid_t ue_id) {
@@ -1065,35 +1070,41 @@ int flexran_get_res_alloc_type1(mid_t ue_id) {
 	//uint8_t temp = 0;
 	//temp = (((UE_RRC_INST *)enb_ue_rrc[ue_id])->UECap->UE_EUTRA_Capability->featureGroupIndicators->buf);
 	//return (temp & ( 1 << (30)));
+  return 0;
 }
 
 int flexran_get_ue_transmission_mode(mid_t mod_id, mid_t ue_id) {
-	struct rrc_eNB_ue_context_s* ue_context_p = NULL;
-	uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
-
-	ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
-
-	if(ue_context_p != NULL) {
-	  if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
-	    return ue_context_p->ue_context.physicalConfigDedicated->antennaInfo->choice.explicitValue.transmissionMode;
-	  }
-	}
-	else
-	  return -1;
+  struct rrc_eNB_ue_context_s* ue_context_p = NULL;
+  uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
+  
+  ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
+  
+  if(ue_context_p != NULL) {
+    if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
+      return ue_context_p->ue_context.physicalConfigDedicated->antennaInfo->choice.explicitValue.transmissionMode;
+    } else {
+      return -1;
+    }
+  } else {
+    return -1;
+  }
 }
 
 int flexran_get_tti_bundling(mid_t mod_id, mid_t ue_id) {
-	struct rrc_eNB_ue_context_s* ue_context_p = NULL;
-	uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
-
-	ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
-	if(ue_context_p != NULL) {
-	  if(ue_context_p->ue_context.mac_MainConfig != NULL){
-	    return ue_context_p->ue_context.mac_MainConfig->ul_SCH_Config->ttiBundling;
-	  }
-	}
-	else
-	  return -1;
+  struct rrc_eNB_ue_context_s* ue_context_p = NULL;
+  uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
+  
+  ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
+  if(ue_context_p != NULL) {
+    if(ue_context_p->ue_context.mac_MainConfig != NULL){
+      return ue_context_p->ue_context.mac_MainConfig->ul_SCH_Config->ttiBundling;
+    } else {
+      return -1;
+    }
+  }
+  else {
+    return -1;
+  }
 }
 
 int flexran_get_maxHARQ_TX(mid_t mod_id, mid_t ue_id) {
@@ -1110,45 +1121,52 @@ int flexran_get_maxHARQ_TX(mid_t mod_id, mid_t ue_id) {
 }
 
 int flexran_get_beta_offset_ack_index(mid_t mod_id, mid_t ue_id) {
-	struct rrc_eNB_ue_context_s* ue_context_p = NULL;
-	uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
-
-	ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
-	if(ue_context_p != NULL) {
-	  if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
-	    return ue_context_p->ue_context.physicalConfigDedicated->pusch_ConfigDedicated->betaOffset_ACK_Index;
-	  }
-	}
-	else
-	  return -1;
+  struct rrc_eNB_ue_context_s* ue_context_p = NULL;
+  uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
+  
+  ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
+  if(ue_context_p != NULL) {
+    if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
+      return ue_context_p->ue_context.physicalConfigDedicated->pusch_ConfigDedicated->betaOffset_ACK_Index;
+    } else {
+      return -1;
+    } 
+  } else {
+    return -1;
+  }
 }
 
 int flexran_get_beta_offset_ri_index(mid_t mod_id, mid_t ue_id) {
-	struct rrc_eNB_ue_context_s* ue_context_p = NULL;
-	uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
-
-	ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
-	if(ue_context_p != NULL) {
-	  if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
-	    return ue_context_p->ue_context.physicalConfigDedicated->pusch_ConfigDedicated->betaOffset_RI_Index;
-	  }
-	}
-	else
-	  return -1;
+  struct rrc_eNB_ue_context_s* ue_context_p = NULL;
+  uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
+  
+  ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
+  if(ue_context_p != NULL) {
+    if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
+      return ue_context_p->ue_context.physicalConfigDedicated->pusch_ConfigDedicated->betaOffset_RI_Index;
+    } else {
+      return -1;
+    }
+  } else {
+    return -1;
+  }
 }
 
 int flexran_get_beta_offset_cqi_index(mid_t mod_id, mid_t ue_id) {
-	struct rrc_eNB_ue_context_s* ue_context_p = NULL;
-	uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
-
-	ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
-	if(ue_context_p != NULL) {
-	  if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
-	    return ue_context_p->ue_context.physicalConfigDedicated->pusch_ConfigDedicated->betaOffset_CQI_Index;
-	  }
-	}
-	else
-	  return -1;
+  struct rrc_eNB_ue_context_s* ue_context_p = NULL;
+  uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
+  
+  ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
+  if(ue_context_p != NULL) {
+    if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
+      return ue_context_p->ue_context.physicalConfigDedicated->pusch_ConfigDedicated->betaOffset_CQI_Index;
+    } else {
+      return -1;
+    }
+  }
+  else {
+    return -1;
+  }
 }
 
 int flexran_get_simultaneous_ack_nack_cqi(mid_t mod_id, mid_t ue_id) {
@@ -1185,32 +1203,39 @@ int flexran_get_aperiodic_cqi_rep_mode(mid_t mod_id,mid_t ue_id) {
 }
 
 int flexran_get_tdd_ack_nack_feedback(mid_t mod_id, mid_t ue_id) {
-	struct rrc_eNB_ue_context_s* ue_context_p = NULL;
-	uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
-
-	ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
+  // TODO: This needs fixing
+  return -1;
 
-	if(ue_context_p != NULL) {
-	  if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
-	    return ue_context_p->ue_context.physicalConfigDedicated->pucch_ConfigDedicated->tdd_AckNackFeedbackMode;
-	  }
-	}
-	else
-	  return -1;
+  /* struct rrc_eNB_ue_context_s* ue_context_p = NULL; */
+  /* uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id); */
+  
+  /* ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP); */
+  
+  /* if(ue_context_p != NULL) { */
+  /*   if(ue_context_p->ue_context.physicalConfigDedicated != NULL){ */
+  /*     return ue_context_p->ue_context.physicalConfigDedicated->pucch_ConfigDedicated->tdd_AckNackFeedbackMode; */
+  /*   } else { */
+  /*     return -1; */
+  /*   } */
+  /* } else { */
+  /*   return -1; */
+  /* } */
 }
 
 int flexran_get_ack_nack_repetition_factor(mid_t mod_id, mid_t ue_id) {
-	struct rrc_eNB_ue_context_s* ue_context_p = NULL;
-	uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
-
-	ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
-	if(ue_context_p != NULL) {
-	  if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
-	    return ue_context_p->ue_context.physicalConfigDedicated->pucch_ConfigDedicated->ackNackRepetition.choice.setup.repetitionFactor;
-	  }
-	}
-	else
-	  return -1;
+  struct rrc_eNB_ue_context_s* ue_context_p = NULL;
+  uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
+  
+  ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
+  if(ue_context_p != NULL) {
+    if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
+      return ue_context_p->ue_context.physicalConfigDedicated->pucch_ConfigDedicated->ackNackRepetition.choice.setup.repetitionFactor;
+    } else {
+      return -1;
+    }
+  } else {
+    return -1;
+  }
 }
 
 int flexran_get_extended_bsr_size(mid_t mod_id, mid_t ue_id) {
@@ -1233,23 +1258,26 @@ int flexran_get_extended_bsr_size(mid_t mod_id, mid_t ue_id) {
 }
 
 int flexran_get_ue_transmission_antenna(mid_t mod_id, mid_t ue_id) {
-	struct rrc_eNB_ue_context_s* ue_context_p = NULL;
-	uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
-
-	ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
-
-	if(ue_context_p != NULL) {
-	  if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
-	    if(ue_context_p->ue_context.physicalConfigDedicated->antennaInfo->choice.explicitValue.ue_TransmitAntennaSelection.choice.setup == AntennaInfoDedicated__ue_TransmitAntennaSelection__setup_closedLoop)
-	      return 2;
-	    else if(ue_context_p->ue_context.physicalConfigDedicated->antennaInfo->choice.explicitValue.ue_TransmitAntennaSelection.choice.setup == AntennaInfoDedicated__ue_TransmitAntennaSelection__setup_openLoop)
-	      return 1;
-	    else
-	      return 0;
-	  }
-	}
-	else
-	  return -1;
+  struct rrc_eNB_ue_context_s* ue_context_p = NULL;
+  uint32_t rntiP = flexran_get_ue_crnti(mod_id,ue_id);
+  
+  ue_context_p = rrc_eNB_get_ue_context(&eNB_rrc_inst[mod_id],rntiP);
+  
+  if(ue_context_p != NULL) {
+    if(ue_context_p->ue_context.physicalConfigDedicated != NULL){
+      if(ue_context_p->ue_context.physicalConfigDedicated->antennaInfo->choice.explicitValue.ue_TransmitAntennaSelection.choice.setup == AntennaInfoDedicated__ue_TransmitAntennaSelection__setup_closedLoop) {
+	return 2;
+      } else if(ue_context_p->ue_context.physicalConfigDedicated->antennaInfo->choice.explicitValue.ue_TransmitAntennaSelection.choice.setup == AntennaInfoDedicated__ue_TransmitAntennaSelection__setup_openLoop) {
+	return 1;
+      } else {
+	return 0;
+      }
+    } else {
+      return -1;
+    }
+  } else {
+    return -1;
+  }
 }
 
 int flexran_get_lcg(mid_t ue_id, mid_t lc_id) {
@@ -1265,10 +1293,13 @@ int flexran_get_lcg(mid_t ue_id, mid_t lc_id) {
 
 int flexran_get_direction(mid_t ue_id, mid_t lc_id) {
 	/*TODO: fill with the value for the rest of LCID*/
-  if(lc_id == DCCH | lc_id == DCCH1)
+  if(lc_id == DCCH || lc_id == DCCH1) {
     return 2;
-  else if(lc_id == DTCH)
+  } else if(lc_id == DTCH) {
     return 1;
+  } else {
+    return -1;
+  }
 }
 
 int flexran_agent_ue_state_change(mid_t mod_id, uint32_t rnti, uint8_t state_change) {
@@ -1276,8 +1307,7 @@ int flexran_agent_ue_state_change(mid_t mod_id, uint32_t rnti, uint8_t state_cha
   Protocol__FlexranMessage *msg;
   Protocol__FlexHeader *header;
   void *data;
-  int priority;
-  err_code_t err_code;
+  int priority = 0;
 
   int xid = 0;
 
@@ -1461,14 +1491,14 @@ int flexran_agent_ue_state_change(mid_t mod_id, uint32_t rnti, uint8_t state_cha
   data = flexran_agent_pack_message(msg, &size);
   /*Send sr info using the MAC channel of the eNB*/
   if (flexran_agent_msg_send(mod_id, FLEXRAN_AGENT_DEFAULT, data, size, priority)) {
-    err_code = PROTOCOL__FLEXRAN_ERR__MSG_ENQUEUING;
     goto error;
   }
 
   LOG_D(FLEXRAN_AGENT,"sent message with size %d\n", size);
-  return;
+  return 0;
  error:
   LOG_D(FLEXRAN_AGENT, "Could not send UE state message\n");
+  return -1;
 }
 
 
@@ -1481,15 +1511,17 @@ int flexran_agent_lc_config_reply(mid_t mod_id, const void *params, Protocol__Fl
   xid = (lc_config_request_msg->header)->xid;
 
   int i, j;
-  Protocol__FlexHeader *header;
-  if(flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_GET_LC_CONFIG_REPLY, &header) != 0)
-    goto error;
 
   Protocol__FlexLcConfigReply *lc_config_reply_msg;
   lc_config_reply_msg = malloc(sizeof(Protocol__FlexLcConfigReply));
   if(lc_config_reply_msg == NULL)
     goto error;
   protocol__flex_lc_config_reply__init(lc_config_reply_msg);
+
+  Protocol__FlexHeader *header;
+  if(flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_GET_LC_CONFIG_REPLY, &header) != 0)
+    goto error;
+
   lc_config_reply_msg->header = header;
 
   lc_config_reply_msg->n_lc_ue_config = flexran_get_num_ues(mod_id);
@@ -1606,15 +1638,16 @@ int flexran_agent_ue_config_reply(mid_t mod_id, const void *params, Protocol__Fl
 
   int i;
 
-  Protocol__FlexHeader *header;
-  if(flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_GET_UE_CONFIG_REPLY, &header) != 0)
-    goto error;
-
   Protocol__FlexUeConfigReply *ue_config_reply_msg;
   ue_config_reply_msg = malloc(sizeof(Protocol__FlexUeConfigReply));
   if(ue_config_reply_msg == NULL)
     goto error;
   protocol__flex_ue_config_reply__init(ue_config_reply_msg);
+
+  Protocol__FlexHeader *header;
+  if(flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_GET_UE_CONFIG_REPLY, &header) != 0)
+    goto error;
+
   ue_config_reply_msg->header = header;
 
   ue_config_reply_msg->n_ue_config = flexran_get_num_ues(mod_id);
@@ -1805,16 +1838,16 @@ int flexran_agent_enb_config_request(mid_t mod_id, const void* params, Protocol_
 
 	Protocol__FlexHeader *header;
 	xid_t xid = 1;
-	if(flexran_create_header(xid,PROTOCOL__FLEX_TYPE__FLPT_GET_ENB_CONFIG_REQUEST, &header) != 0)
-	  goto error;
 
 	Protocol__FlexEnbConfigRequest *enb_config_request_msg;
 	enb_config_request_msg = malloc(sizeof(Protocol__FlexEnbConfigRequest));
-
 	if(enb_config_request_msg == NULL)
 	  goto error;
-
 	protocol__flex_enb_config_request__init(enb_config_request_msg);
+	
+	if(flexran_create_header(xid,PROTOCOL__FLEX_TYPE__FLPT_GET_ENB_CONFIG_REQUEST, &header) != 0)
+	  goto error;
+
 	enb_config_request_msg->header = header;
 
 	*msg = malloc(sizeof(Protocol__FlexranMessage));
@@ -1846,19 +1879,19 @@ int flexran_agent_enb_config_reply(mid_t mod_id, const void *params, Protocol__F
   Protocol__FlexEnbConfigRequest *enb_config_req_msg = input->enb_config_request_msg;
   xid = (enb_config_req_msg->header)->xid;
   
-  int i, j, k;
-  int cc_id = 0;
+  int i, j;
   int enb_id = mod_id;
   
-  Protocol__FlexHeader *header;
-  if(flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_GET_ENB_CONFIG_REPLY, &header) != 0)
-    goto error;
-  
-   Protocol__FlexEnbConfigReply *enb_config_reply_msg;
+  Protocol__FlexEnbConfigReply *enb_config_reply_msg;
   enb_config_reply_msg = malloc(sizeof(Protocol__FlexEnbConfigReply));
   if(enb_config_reply_msg == NULL)
     goto error;
   protocol__flex_enb_config_reply__init(enb_config_reply_msg);
+
+  Protocol__FlexHeader *header;
+  if(flexran_create_header(xid, PROTOCOL__FLEX_TYPE__FLPT_GET_ENB_CONFIG_REPLY, &header) != 0)
+    goto error;
+  
   enb_config_reply_msg->header = header;
   
   enb_config_reply_msg->enb_id = mod_id;
diff --git a/openair2/ENB_APP/flexran_agent_common.h b/openair2/ENB_APP/flexran_agent_common.h
index 56a52cd0b674480425fbace461cd23a838780615..a4ee53067cd2731d30337c42a58e50a7c39dbee3 100644
--- a/openair2/ENB_APP/flexran_agent_common.h
+++ b/openair2/ENB_APP/flexran_agent_common.h
@@ -72,7 +72,7 @@ int flexran_agent_deserialize_message(void *data, int size, Protocol__FlexranMes
 /* Serialize message and then destroy the input flexran msg. Should be called when protocol
    message is created dynamically */
 void * flexran_agent_pack_message(Protocol__FlexranMessage *msg, 
-			      uint32_t * size);
+			      int * size);
 
 /* Calls destructor of the given message */
 err_code_t flexran_agent_destroy_flexran_message(Protocol__FlexranMessage *msg);
@@ -271,7 +271,7 @@ int flexran_get_tpc(mid_t mod_id, mid_t ue_id);
    a designated frame and subframe. Returns 0 for success. The id and the 
    status of the HARQ process are stored in id and status respectively */
 int flexran_get_harq(const mid_t mod_id, const uint8_t CC_id, const mid_t ue_id,
-		     const int frame, const uint8_t subframe, int *id, int *round);
+		     const int frame, const uint8_t subframe, unsigned char *id, unsigned char *round);
 
 /* Uplink power control management*/
 int flexran_get_p0_pucch_dbm(mid_t mod_id, mid_t ue_id, int CC_id);
diff --git a/openair2/ENB_APP/flexran_agent_common_internal.c b/openair2/ENB_APP/flexran_agent_common_internal.c
index 3026190863e4cd32469b84d03f8c3874b52d2b6a..e735b2748b955c98c4666fe68828360229d1fc89 100644
--- a/openair2/ENB_APP/flexran_agent_common_internal.c
+++ b/openair2/ENB_APP/flexran_agent_common_internal.c
@@ -38,13 +38,12 @@ int apply_reconfiguration_policy(mid_t mod_id, const char *policy, size_t policy
   yaml_event_t event;
 
   int done = 0;
-  int mapping_started = 0;
 
   LOG_I(ENB_APP, "Time to apply a new policy \n");
 
   yaml_parser_initialize(&parser);
 
-  yaml_parser_set_input_string(&parser, policy, strlen(policy));
+  yaml_parser_set_input_string(&parser, (unsigned char *) policy, strlen(policy));
 
   while (!done) {
     if (!yaml_parser_parse(&parser, &event))
@@ -52,39 +51,40 @@ int apply_reconfiguration_policy(mid_t mod_id, const char *policy, size_t policy
  
     switch (event.type) {
     case YAML_STREAM_START_EVENT:
+      break;
     case YAML_STREAM_END_EVENT:
+      break;
     case YAML_DOCUMENT_START_EVENT:
+      break;
     case YAML_DOCUMENT_END_EVENT:
       break;
     case YAML_MAPPING_START_EVENT:
-      mapping_started = 1;
       break;
     case YAML_MAPPING_END_EVENT:
-      mapping_started = 0;
       break;
     case YAML_SCALAR_EVENT:
       // Check the system name and call the proper handler
-      if (strcmp(event.data.scalar.value, "mac") == 0) {
+      if (strcmp((char *) event.data.scalar.value, "mac") == 0) {
 	LOG_D(ENB_APP, "This is intended for the mac system\n");
 	// Call the mac handler
 	if (parse_mac_config(mod_id, &parser) == -1) {
 	  goto error;
 	}
-      } else if (strcmp(event.data.scalar.value, "rlc") == 0) {
+      } else if (strcmp((char *) event.data.scalar.value, "rlc") == 0) {
 	// Call the RLC handler
 	LOG_D(ENB_APP, "This is intended for the rlc system\n");
 	// TODO : Just skip it for now
 	if (skip_system_section(&parser) == -1) {
 	  goto error;
 	}
-      } else if (strcmp(event.data.scalar.value, "pdcp") == 0) {
+      } else if (strcmp((char *) event.data.scalar.value, "pdcp") == 0) {
 	// Call the PDCP handler
 	LOG_D(ENB_APP, "This is intended for the pdcp system\n");
 	// TODO : Just skip it for now
 	if (skip_system_section(&parser) == -1) {
 	  goto error;
 	}
-      } else if (strcmp(event.data.scalar.value, "rrc") == 0) {
+      } else if (strcmp((char *) event.data.scalar.value, "rrc") == 0) {
 	// Call the RRC handler
 	LOG_D(ENB_APP, "This is intended for the rrc system\n");
 	// TODO : Just skip it for now
@@ -159,8 +159,9 @@ int skip_system_section(yaml_parser_t *parser) {
       if (skip_subsystem_section(parser) == -1) {
 	goto error;
       }
+    default:
+      break;
     }
-    
     done = (event.type == YAML_SEQUENCE_END_EVENT);
 
     yaml_event_delete(&event);
@@ -199,7 +200,7 @@ int skip_subsystem_section(yaml_parser_t *parser) {
 	goto error;
       }
       // Check what key needs to be set
-      if (strcmp(event.data.scalar.value, "behavior") == 0) {
+      if (strcmp((char *) event.data.scalar.value, "behavior") == 0) {
 	LOG_D(ENB_APP, "Skipping the behavior attribute\n");
 	yaml_event_delete(&event);
 	if (!yaml_parser_parse(parser, &event)) {
@@ -210,7 +211,7 @@ int skip_subsystem_section(yaml_parser_t *parser) {
 	} else {
 	  goto error;
 	}
-      } else if (strcmp(event.data.scalar.value, "parameters") == 0) {
+      } else if (strcmp((char *) event.data.scalar.value, "parameters") == 0) {
 	LOG_D(ENB_APP, "Skipping the parameters for this subsystem\n");
 	if (skip_subsystem_parameters_config(parser) == -1) {
 	  goto error;
@@ -235,8 +236,6 @@ int skip_subsystem_section(yaml_parser_t *parser) {
 int skip_subsystem_parameters_config(yaml_parser_t *parser) {
   yaml_event_t event;
   
-  void *param;
-  
   int done = 0;
   int mapping_started = 0;
 
@@ -299,10 +298,10 @@ int skip_parameter_modification(yaml_parser_t *parser) {
       is_array = 1;
       break;
     case YAML_SCALAR_EVENT:
-      if ((strcmp(event.data.scalar.tag, YAML_INT_TAG) == 0) ||
-	  (strcmp(event.data.scalar.tag, YAML_FLOAT_TAG) == 0) ||
-	  (strcmp(event.data.scalar.tag, YAML_STR_TAG) == 0) ||
-	  (strcmp(event.data.scalar.tag, YAML_BOOL_TAG) == 0)) {
+      if ((strcmp((char *) event.data.scalar.tag, YAML_INT_TAG) == 0) ||
+	  (strcmp((char *) event.data.scalar.tag, YAML_FLOAT_TAG) == 0) ||
+	  (strcmp((char *) event.data.scalar.tag, YAML_STR_TAG) == 0) ||
+	  (strcmp((char *) event.data.scalar.tag, YAML_BOOL_TAG) == 0)) {
 	// Do nothing
       } else {
 	// No other type is supported at the moment, so it should be considered an error
@@ -351,14 +350,14 @@ int apply_parameter_modification(void *parameter, yaml_parser_t *parser) {
       is_array = 1;
       break;
     case YAML_SCALAR_EVENT:
-      if (strcmp(event.data.scalar.tag, YAML_INT_TAG) == 0) {
-	((int *) parameter)[i] = strtol(event.data.scalar.value, &endptr, 10);
-      } else if (strcmp(event.data.scalar.tag, YAML_FLOAT_TAG) == 0) {
-	((float *) parameter)[i] = strtof(event.data.scalar.value, &endptr);
-      } else if (strcmp(event.data.scalar.tag, YAML_STR_TAG) == 0) {
-	strncpy(&((char *) parameter)[i], event.data.scalar.value, event.data.scalar.length);
-      } else if (strcmp(event.data.scalar.tag, YAML_BOOL_TAG) == 0) {
-	if (strcmp(event.data.scalar.value, "true") == 0) {
+      if (strcmp((char *) event.data.scalar.tag, YAML_INT_TAG) == 0) {
+	((int *) parameter)[i] = strtol((char *) event.data.scalar.value, &endptr, 10);
+      } else if (strcmp((char *) event.data.scalar.tag, YAML_FLOAT_TAG) == 0) {
+	((float *) parameter)[i] = strtof((char *) event.data.scalar.value, &endptr);
+      } else if (strcmp((char *) event.data.scalar.tag, YAML_STR_TAG) == 0) {
+	strncpy(&((char *) parameter)[i], (char *) event.data.scalar.value, event.data.scalar.length);
+      } else if (strcmp((char *) event.data.scalar.tag, YAML_BOOL_TAG) == 0) {
+	if (strcmp((char *) event.data.scalar.value, "true") == 0) {
 	  ((int *) parameter)[i] = 1;
 	} else {
 	  ((int *) parameter)[i] = 0;
diff --git a/openair2/ENB_APP/flexran_agent_handler.c b/openair2/ENB_APP/flexran_agent_handler.c
index 4967e4692334d0b8fe86a379be929c99e854f5ac..4326ab6be03241df516c82d6f2f27129f2a2599a 100644
--- a/openair2/ENB_APP/flexran_agent_handler.c
+++ b/openair2/ENB_APP/flexran_agent_handler.c
@@ -73,12 +73,12 @@ flexran_agent_message_destruction_callback message_destruction_callback[] = {
   flexran_agent_destroy_agent_reconfiguration,
 };
 
-static const char *flexran_agent_direction2String[] = {
-  "", /* not_set  */
-  "originating message", /* originating message */
-  "successfull outcome", /* successfull outcome */
-  "unsuccessfull outcome", /* unsuccessfull outcome */
-};
+/* static const char *flexran_agent_direction2String[] = { */
+/*   "", /\* not_set  *\/ */
+/*   "originating message", /\* originating message *\/ */
+/*   "successfull outcome", /\* successfull outcome *\/ */
+/*   "unsuccessfull outcome", /\* unsuccessfull outcome *\/ */
+/* }; */
 
 
 Protocol__FlexranMessage* flexran_agent_handle_message (mid_t mod_id,
@@ -123,7 +123,7 @@ error:
 
 
 void * flexran_agent_pack_message(Protocol__FlexranMessage *msg, 
-				  uint32_t * size){
+				  int * size){
 
   void * buffer;
   err_code_t err_code = PROTOCOL__FLEXRAN_ERR__NO_ERR;
@@ -181,7 +181,7 @@ Protocol__FlexranMessage* flexran_agent_process_timeout(long timer_id, void* tim
 
  error:
   LOG_E(FLEXRAN_AGENT, "can't get the timer element\n");
-  return TIMER_ELEMENT_NOT_FOUND;
+  return NULL;
 }
 
 err_code_t flexran_agent_destroy_flexran_message(Protocol__FlexranMessage *msg) {
diff --git a/openair2/LAYER2/MAC/defs.h b/openair2/LAYER2/MAC/defs.h
index 15c0f3fb0e826dcbba8486d31483ceb0decdca61..63501128ef41359cdddb3f9dadaf87eeb67c3fdf 100644
--- a/openair2/LAYER2/MAC/defs.h
+++ b/openair2/LAYER2/MAC/defs.h
@@ -145,6 +145,9 @@
 /*!\brief minimum MAC data needed for transmitting 1 min RLC PDU size + 1 byte MAC subHeader */
 #define MIN_MAC_HDR_RLC_SIZE    (1 + MIN_RLC_PDU_SIZE)
 
+/*!\brief maximum number of slices / groups */
+#define MAX_NUM_SLICES 4
+
 /* 
  * eNB part 
  */ 
diff --git a/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c b/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c
index 93dc9937119390e893d7ea084c20878b62c203ce..45e79930e524688f107d092019961a8a975d4ab0 100644
--- a/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c
+++ b/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c
@@ -442,7 +442,7 @@ schedule_ue_spec(
   //  unsigned char         rballoc_sub_UE[MAX_NUM_CCs][NUMBER_OF_UE_MAX][N_RBG_MAX];
   //  uint16_t              pre_nb_available_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
   int                   mcs;
-  uint16_t              min_rb_unit[MAX_NUM_CCs];
+  int              min_rb_unit[MAX_NUM_CCs];
   eNB_MAC_INST         *eNB      = &eNB_mac_inst[module_idP];
   UE_list_t            *UE_list  = &eNB->UE_list;
   LTE_DL_FRAME_PARMS   *frame_parms[MAX_NUM_CCs];
diff --git a/openair2/LAYER2/MAC/flexran_agent_mac_proto.h b/openair2/LAYER2/MAC/flexran_agent_mac_proto.h
index 060e8ab6ca3f2114b4d98ace3b2cafcee58789e2..6757500dc2cc7175e48ea11c0ccb93d310904bf7 100644
--- a/openair2/LAYER2/MAC/flexran_agent_mac_proto.h
+++ b/openair2/LAYER2/MAC/flexran_agent_mac_proto.h
@@ -21,7 +21,7 @@
 
 /*! \file flexran_agent_mac_proto.h
  * \brief MAC functions for FlexRAN agent
- * \author Xenofon Foukas
+ * \author Xenofon Foukas and Navid Nikaein
  * \date 2016
  * \email: x.foukas@sms.ed.ac.uk
  * \version 0.1
@@ -36,6 +36,145 @@
 #include "header.pb-c.h"
 #include "flexran.pb-c.h"
 
+/*
+ * slice specific scheduler 
+ */
+typedef void (*slice_scheduler)(module_id_t mod_id, 
+				int slice_id, 
+				uint32_t frame, 
+				uint32_t subframe,
+				int *mbsfn_flag,
+				Protocol__FlexranMessage **dl_info);
+
+
+
+/*
+ * top level flexran scheduler used by the eNB scheduler
+ */
+void flexran_schedule_ue_spec_default(mid_t mod_id, 
+				      uint32_t frame, 
+				      uint32_t subframe,
+				      int *mbsfn_flag, 
+				      Protocol__FlexranMessage **dl_info);
+/*
+ * slice specific scheduler for embb
+ */
+void
+flexran_schedule_ue_spec_embb(mid_t   mod_id,
+			      int       slice_id, 
+			      uint32_t      frame,
+			      uint32_t      subframe,
+			      int           *mbsfn_flag,
+			      Protocol__FlexranMessage **dl_info);
+/*
+ * slice specific scheduler for urllc
+ */
+void
+flexran_schedule_ue_spec_urllc(mid_t   mod_id,
+			      int       slice_id, 
+			      uint32_t      frame,
+			      uint32_t      subframe,
+			      int           *mbsfn_flag,
+			      Protocol__FlexranMessage **dl_info);
+
+/*
+ * slice specific scheduler for mmtc
+ */
+void
+flexran_schedule_ue_spec_mmtc(mid_t   mod_id,
+			      int       slice_id, 
+			      uint32_t      frame,
+			      uint32_t      subframe,
+			      int           *mbsfn_flag,
+			      Protocol__FlexranMessage **dl_info);
+/*
+ * slice specific scheduler for best effort traffic 
+ */
+void
+flexran_schedule_ue_spec_be(mid_t   mod_id,
+			    int       slice_id, 
+			    uint32_t      frame,
+			    uint32_t      subframe,
+			    int           *mbsfn_flag,
+			    Protocol__FlexranMessage **dl_info);
+
+/*
+ * common flexran scheduler function
+ */
+void
+flexran_schedule_ue_spec_common(mid_t   mod_id,
+				int       slice_id, 
+				uint32_t      frame,
+				uint32_t      subframe,
+				int           *mbsfn_flag,
+				Protocol__FlexranMessage **dl_info);
+
+uint16_t flexran_nb_rbs_allowed_slice(float rb_percentage, 
+				      int total_rbs);
+
+int flexran_slice_member(int UE_id, 
+			 int slice_id);
+
+int flexran_slice_maxmcs(int slice_id) ;
+
+void _store_dlsch_buffer (module_id_t Mod_id,
+			  int         slice_id,
+			  frame_t     frameP,
+			  sub_frame_t subframeP);
+
+
+void _assign_rbs_required (module_id_t Mod_id,
+			   int         slice_id,
+			   frame_t     frameP,
+			   sub_frame_t subframe,
+			   uint16_t    nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+			   uint16_t    nb_rbs_allowed_slice[MAX_NUM_CCs][MAX_NUM_SLICES], 
+			   int         min_rb_unit[MAX_NUM_CCs]);
+
+int _maxround(module_id_t Mod_id,
+	      uint16_t rnti,
+	      int frame,
+	      sub_frame_t subframe,
+	      uint8_t ul_flag );
+
+int _maxcqi(module_id_t Mod_id,
+	    int32_t UE_id);
+
+void _sort_UEs (module_id_t Mod_idP,
+		int         frameP,
+		sub_frame_t subframeP);
+
+void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
+				     int           slice_id,
+				     frame_t       frameP,
+				     sub_frame_t   subframeP,
+				     int           N_RBG[MAX_NUM_CCs],
+				     int           *mbsfn_flag);
+
+void _dlsch_scheduler_pre_processor_reset (int module_idP,
+					   int UE_id,
+					   uint8_t  CC_id,
+					   int frameP,
+					   int subframeP,					  
+					   int N_RBG,
+					   uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+					   uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+					   uint16_t nb_rbs_allowed_slice[MAX_NUM_CCs][MAX_NUM_SLICES],
+					   unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
+					   unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX]);
+
+void _dlsch_scheduler_pre_processor_allocate (module_id_t   Mod_id,
+					      int           UE_id,
+					      uint8_t       CC_id,
+					      int           N_RBG,
+					      int           transmission_mode,
+					      int           min_rb_unit,
+					      uint8_t       N_RB_DL,
+					      uint16_t      nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+					      uint16_t      nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+					      unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
+					      unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX]);
+
 /*
  * Default scheduler used by the eNB agent
  */
@@ -46,17 +185,17 @@ void flexran_schedule_ue_spec_default(mid_t mod_id, uint32_t frame, uint32_t sub
  * Data plane function for applying the DL decisions of the scheduler
  */
 void flexran_apply_dl_scheduling_decisions(mid_t mod_id, uint32_t frame, uint32_t subframe, int *mbsfn_flag,
-					   const Protocol__FlexranMessage *dl_scheduling_info);
+					   Protocol__FlexranMessage *dl_scheduling_info);
 
 /*
  * Data plane function for applying the UE specific DL decisions of the scheduler
  */
 void flexran_apply_ue_spec_scheduling_decisions(mid_t mod_id, uint32_t frame, uint32_t subframe, int *mbsfn_flag,
-						uint32_t n_dl_ue_data, const Protocol__FlexDlData **dl_ue_data);
+						uint32_t n_dl_ue_data, Protocol__FlexDlData **dl_ue_data);
 
 /*
  * Data plane function for filling the DCI structure
  */
-void flexran_fill_oai_dci(mid_t mod_id, uint32_t CC_id, uint32_t rnti, const Protocol__FlexDlDci *dl_dci);
+void flexran_fill_oai_dci(mid_t mod_id, uint32_t CC_id, uint32_t rnti, Protocol__FlexDlDci *dl_dci);
 
 #endif
diff --git a/openair2/LAYER2/MAC/flexran_agent_scheduler_dataplane.c b/openair2/LAYER2/MAC/flexran_agent_scheduler_dataplane.c
index ff3ef32bd6db47dca8b8e8d90ad0d3a8b1a199f3..c9956bd9ce937c055c4b75913e3e72226a87ace6 100644
--- a/openair2/LAYER2/MAC/flexran_agent_scheduler_dataplane.c
+++ b/openair2/LAYER2/MAC/flexran_agent_scheduler_dataplane.c
@@ -56,12 +56,11 @@
 
 #include "SIMULATION/TOOLS/defs.h" // for taus
 
-
 void flexran_apply_dl_scheduling_decisions(mid_t mod_id,
 					   uint32_t frame,
 					   uint32_t subframe,
 					   int *mbsfn_flag,
-					   const Protocol__FlexranMessage *dl_scheduling_info) {
+					   Protocol__FlexranMessage *dl_scheduling_info) {
 
   Protocol__FlexDlMacConfig *mac_config = dl_scheduling_info->dl_mac_config_msg;
 
@@ -89,33 +88,26 @@ void flexran_apply_ue_spec_scheduling_decisions(mid_t mod_id,
 						uint32_t subframe,
 						int *mbsfn_flag,
 						uint32_t n_dl_ue_data,
-						const Protocol__FlexDlData **dl_ue_data) {
+						Protocol__FlexDlData **dl_ue_data) {
 
   uint8_t               CC_id;
   int                   UE_id;
-  int                   N_RBG[MAX_NUM_CCs];
-  unsigned char         aggregation;
   mac_rlc_status_resp_t rlc_status;
   unsigned char         ta_len=0;
   unsigned char         header_len = 0, header_len_tmp = 0;
   unsigned char         sdu_lcids[11],offset,num_sdus=0;
-  uint16_t              nb_rb,nb_rb_temp,total_nb_available_rb[MAX_NUM_CCs],nb_available_rb;
+  uint16_t              nb_rb;
   uint16_t              TBS,j,sdu_lengths[11],rnti,padding=0,post_padding=0;
   unsigned char         dlsch_buffer[MAX_DLSCH_PAYLOAD_BYTES];
   unsigned char         round            = 0;
   unsigned char         harq_pid         = 0;
-  
+  //  LTE_DL_FRAME_PARMS   *frame_parms[MAX_NUM_CCs];
   LTE_eNB_UE_stats     *eNB_UE_stats     = NULL;
   uint16_t              sdu_length_total = 0;
-  int                   mcs;
-  uint16_t              min_rb_unit[MAX_NUM_CCs];
   short                 ta_update        = 0;
   eNB_MAC_INST         *eNB      = &eNB_mac_inst[mod_id];
   UE_list_t            *UE_list  = &eNB->UE_list;
-  LTE_DL_FRAME_PARMS   *frame_parms[MAX_NUM_CCs];
-  int32_t                 normalized_rx_power, target_rx_power;
-  int32_t                 tpc=1;
-  static int32_t          tpc_accumulated=0;
+  //  static int32_t          tpc_accumulated=0;
   UE_sched_ctrl           *ue_sched_ctl;
 
   int last_sdu_header_len = 0;
@@ -135,7 +127,7 @@ void flexran_apply_ue_spec_scheduling_decisions(mid_t mod_id,
     dl_dci = dl_data->dl_dci;
 
     CC_id = dl_data->serv_cell_index;
-    frame_parms[CC_id] = mac_xface->get_lte_frame_parms(mod_id, CC_id);
+    //    frame_parms[CC_id] = mac_xface->get_lte_frame_parms(mod_id, CC_id);
     
     rnti = dl_data->rnti;
     UE_id = find_ue(rnti, PHY_vars_eNB_g[mod_id][CC_id]);
@@ -357,20 +349,19 @@ void flexran_apply_ue_spec_scheduling_decisions(mid_t mod_id,
     eNB_UE_stats->dlsch_mcs1 = dl_dci->mcs[0];
 
     //Fill the proper DCI of OAI
-    fill_oai_dci(mod_id, CC_id, rnti, dl_dci);
+    flexran_fill_oai_dci(mod_id, CC_id, rnti, dl_dci);
   }
 }
-
-void fill_oai_dci(mid_t mod_id, uint32_t CC_id, uint32_t rnti,
-		  const Protocol__FlexDlDci *dl_dci) {
+void flexran_fill_oai_dci(mid_t mod_id, uint32_t CC_id, uint32_t rnti,
+		  Protocol__FlexDlDci *dl_dci) {
 
   void         *DLSCH_dci        = NULL;
   DCI_PDU      *DCI_pdu;
 
-  unsigned char         round            = 0;
   unsigned char         harq_pid         = 0;
+  //  unsigned char round = 0;
   LTE_DL_FRAME_PARMS   *frame_parms[MAX_NUM_CCs];
-  int           size_bits, size_bytes;
+  int           size_bits = 0, size_bytes = 0;
   eNB_MAC_INST         *eNB      = &eNB_mac_inst[mod_id];
   UE_list_t            *UE_list  = &eNB->UE_list;
   LTE_eNB_UE_stats *eNB_UE_stats = NULL;
@@ -380,7 +371,7 @@ void fill_oai_dci(mid_t mod_id, uint32_t CC_id, uint32_t rnti,
   uint32_t format;
 
   harq_pid = dl_dci->harq_process;
-  round = dl_dci->rv[0];
+  //  round = dl_dci->rv[0];
   
   // Note this code is for a specific DCI format
   DLSCH_dci = (void *)UE_list->UE_template[CC_id][UE_id].DLSCH_DCI[harq_pid];
diff --git a/openair2/LAYER2/MAC/flexran_agent_scheduler_dlsch_ue.c b/openair2/LAYER2/MAC/flexran_agent_scheduler_dlsch_ue.c
index 8975465e77b11ade441f6da867c9621c4048cbe8..3af246a5d5be3e547ce533b5655ee7266d648c53 100644
--- a/openair2/LAYER2/MAC/flexran_agent_scheduler_dlsch_ue.c
+++ b/openair2/LAYER2/MAC/flexran_agent_scheduler_dlsch_ue.c
@@ -56,6 +56,7 @@
 #include "header.pb-c.h"
 #include "flexran.pb-c.h"
 #include "flexran_agent_mac.h"
+#include <dlfcn.h>
 
 #include "SIMULATION/TOOLS/defs.h" // for taus
 
@@ -65,915 +66,292 @@
 
 #define ENABLE_MAC_PAYLOAD_DEBUG
 
+/**
+ * Local variables to support slicing
+ * 
+ */
 
-//------------------------------------------------------------------------------
-void
-flexran_schedule_ue_spec_default(mid_t   mod_id,
-				 uint32_t      frame,
-				 uint32_t      subframe,
-				 int           *mbsfn_flag,
-				 Protocol__FlexranMessage **dl_info)
-//------------------------------------------------------------------------------
-{
-  uint8_t               CC_id;
-  int                   UE_id;
-  int                   N_RBG[MAX_NUM_CCs];
-  unsigned char         aggregation;
-  mac_rlc_status_resp_t rlc_status;
-  unsigned char         header_len = 0, header_len_tmp = 0, ta_len = 0;
-  uint16_t              nb_rb, nb_rb_temp, total_nb_available_rb[MAX_NUM_CCs], nb_available_rb;
-  uint16_t              TBS, j, rnti, padding=0, post_padding=0;
-  unsigned char         round            = 0;
-  unsigned char         harq_pid         = 0;
-  void                 *DLSCH_dci        = NULL;
-  uint16_t              sdu_length_total = 0;
-  int                   mcs, mcs_tmp;
-  uint16_t              min_rb_unit[MAX_NUM_CCs];
-  eNB_MAC_INST         *eNB      = &eNB_mac_inst[mod_id];
-  /* TODO: Must move the helper structs to scheduler implementation */
-  UE_list_t            *UE_list  = &eNB->UE_list;
-  int32_t                 normalized_rx_power, target_rx_power;
-  int32_t                 tpc = 1;
-  static int32_t          tpc_accumulated=0;
-  UE_sched_ctrl           *ue_sched_ctl;
 
-  Protocol__FlexDlData *dl_data[NUM_MAX_UE];
-  int num_ues_added = 0;
-  int channels_added = 0;
+/*!\brief strategies for mapping UEs to slices*/
+typedef enum {
+  MIN_SLICE_STRATEGY = 0,
+  SLICE_MASK,
+  UEID_TO_SLICEID,
+  MAX_SLICE_STRATEGY
+} SLICING_STRATEGY;
 
-  Protocol__FlexDlDci *dl_dci;
-  Protocol__FlexRlcPdu *rlc_pdus[11];
-  uint32_t *ce_bitmap;
-  Protocol__FlexRlcPdu **rlc_pdu;
-  int num_tb;
-  uint32_t ce_flags = 0;
+// this assumes a max of 16 UEs per eNB/CC
+#define SLICE0_MASK 0x000f 
+#define SLICE1_MASK 0x00f0
+#define SLICE2_MASK 0x0f00
+#define SLICE3_MASK 0xf000
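+/* Assumed interpretation of the masks (to be confirmed against
+ * flexran_slice_member): when slicing_strategy == SLICE_MASK, bit UE_id of a
+ * slice's mask selects membership, e.g. UE_id 5 would belong to slice 1
+ * because bit 5 is set in SLICE1_MASK (0x00f0). */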
 
-  uint8_t            rballoc_sub[25];
-  int i;
-  uint32_t data_to_request;
-  uint32_t dci_tbs;
-  uint8_t ue_has_transmission = 0;
-  uint32_t ndi;
-  
-  flexran_agent_mac_create_empty_dl_config(mod_id, dl_info);
-  
-  if (UE_list->head==-1) {
-    return;
-  }
-  
-  start_meas(&eNB->schedule_dlsch);
-  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH,VCD_FUNCTION_IN);
 
-  //weight = get_ue_weight(module_idP,UE_id);
-  aggregation = 2; // set to the maximum aggregation level
+// number of active slices for past and current time
+int n_active_slices = 1;
+int n_active_slices_current = 1;
 
-  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
-    min_rb_unit[CC_id] = get_min_rb_unit(mod_id, CC_id);
-    // get number of PRBs less those used by common channels
-    total_nb_available_rb[CC_id] = flexran_get_N_RB_DL(mod_id, CC_id);
-    for (i=0;i < flexran_get_N_RB_DL(mod_id, CC_id); i++)
-      if (eNB->common_channels[CC_id].vrb_map[i] != 0)
-	total_nb_available_rb[CC_id]--;
-    
-    N_RBG[CC_id] = flexran_get_N_RBG(mod_id, CC_id);
+// ue to slice mapping
+int slicing_strategy = UEID_TO_SLICEID;
+int slicing_strategy_current = UEID_TO_SLICEID;
 
-    // store the global enb stats:
-    eNB->eNB_stats[CC_id].num_dlactive_UEs =  UE_list->num_UEs;
-    eNB->eNB_stats[CC_id].available_prbs =  total_nb_available_rb[CC_id];
-    eNB->eNB_stats[CC_id].total_available_prbs +=  total_nb_available_rb[CC_id];
-    eNB->eNB_stats[CC_id].dlsch_bytes_tx=0;
-    eNB->eNB_stats[CC_id].dlsch_pdus_tx=0;
-  }
+// RB share for each slice for past and current time
+float slice_percentage[MAX_NUM_SLICES] = {1.0, 0.0, 0.0, 0.0};
+float slice_percentage_current[MAX_NUM_SLICES] = {1.0, 0.0, 0.0, 0.0};
+float total_slice_percentage = 0;
 
-   VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR,VCD_FUNCTION_IN);
+// MAX MCS for each slice for past and current time
+int slice_maxmcs[MAX_NUM_SLICES] = {28, 28, 28, 28};
+int slice_maxmcs_current[MAX_NUM_SLICES] = {28, 28, 28, 28};
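+/* The *_current copies presumably hold the configuration applied in the
+ * ongoing scheduling round, while the non-current variables receive
+ * controller updates; the scheduler is then assumed to copy them over at a
+ * safe point so a reconfiguration never changes parameters mid-round. */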
 
-   start_meas(&eNB->schedule_dlsch_preprocessor);
-   _dlsch_scheduler_pre_processor(mod_id,
-				 frame,
-				 subframe,
-				 N_RBG,
-				 mbsfn_flag);
-   stop_meas(&eNB->schedule_dlsch_preprocessor);
-   VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR,VCD_FUNCTION_OUT);
+int update_dl_scheduler[MAX_NUM_SLICES] = {1, 0, 0, 0};
+int update_dl_scheduler_current[MAX_NUM_SLICES] = {1, 0, 0, 0};
 
-   for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
-    LOG_D(MAC, "doing schedule_ue_spec for CC_id %d\n",CC_id);
+// names of the available slice-specific schedulers
+char *dl_scheduler_type[MAX_NUM_SLICES] = {"flexran_schedule_ue_spec_embb",
+					   "flexran_schedule_ue_spec_urllc",
+					   "flexran_schedule_ue_spec_mmtc",
+					   "flexran_schedule_ue_spec_be"      // best effort 
+};
 
-    if (mbsfn_flag[CC_id]>0)
-      continue;
+// pointers to the slice-specific schedulers
+slice_scheduler slice_sched[MAX_NUM_SLICES] = {0};
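+/* Given the dlfcn.h include added above, the per-slice scheduler pointers are
+ * presumably resolved by name at runtime from dl_scheduler_type[]; a minimal
+ * sketch (assuming the symbols are exported by the running binary) would be:
+ *
+ *   for (int i = 0; i < n_active_slices; i++)
+ *     slice_sched[i] = (slice_scheduler) dlsym(RTLD_DEFAULT, dl_scheduler_type[i]);
+ */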
 
-    for (UE_id=UE_list->head; UE_id>=0; UE_id=UE_list->next[UE_id]) {
-      rnti = flexran_get_ue_crnti(mod_id, UE_id);
-      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
 
-      if (rnti==NOT_A_RNTI) {
-        LOG_D(MAC,"Cannot find rnti for UE_id %d (num_UEs %d)\n", UE_id,UE_list->num_UEs);
-        // mac_xface->macphy_exit("Cannot find rnti for UE_id");
-        continue;
-      }
+/**
+ * preprocessor functions for scheduling
+ *
+ */
 
-      if (flexran_get_ue_crnti(mod_id, UE_id) == NOT_A_RNTI) {
-        LOG_D(MAC,"[eNB] Cannot find UE\n");
-        //  mac_xface->macphy_exit("[MAC][eNB] Cannot find eNB_UE_stats\n");
-        continue;
-      }
 
-      if ((ue_sched_ctl->pre_nb_available_rbs[CC_id] == 0) ||  // no RBs allocated 
-	  CCE_allocation_infeasible(mod_id, CC_id, 0, subframe, aggregation, rnti)) {
-        LOG_D(MAC,"[eNB %d] Frame %d : no RB allocated for UE %d on CC_id %d: continue \n",
-              mod_id, frame, UE_id, CC_id);
-        //if(mac_xface->get_transmission_mode(module_idP,rnti)==5)
-        continue; //to next user (there might be rbs availiable for other UEs in TM5
-        // else
-        //  break;
-      }
+// This function stores the downlink buffer for all the logical channels
+void _store_dlsch_buffer (module_id_t Mod_id,
+			  int         slice_id,
+			  frame_t     frameP,
+			  sub_frame_t subframeP)
+{
 
-      if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD)  {
-        set_ue_dai (subframe,
-                    flexran_get_subframe_assignment(mod_id, CC_id),
-                    UE_id,
-                    CC_id,
-                    UE_list);
-        //TODO: update UL DAI after DLSCH scheduling
-        //set_ul_DAI(mod_id, UE_id, CC_id, frame, subframe,frame_parms);
-      }
+  int                   UE_id,i;
+  rnti_t                rnti;
+  mac_rlc_status_resp_t rlc_status;
+  UE_list_t             *UE_list = &eNB_mac_inst[Mod_id].UE_list;
+  UE_TEMPLATE           *UE_template;
 
-      channels_added = 0;
+  for (UE_id=UE_list->head; UE_id>=0; UE_id=UE_list->next[UE_id]) {
+ 
+    if (flexran_slice_member(UE_id, slice_id) == 0)
+      continue;
+    
+    UE_template = &UE_list->UE_template[UE_PCCID(Mod_id,UE_id)][UE_id];
 
-      // After this point all the UEs will be scheduled
-      dl_data[num_ues_added] = (Protocol__FlexDlData *) malloc(sizeof(Protocol__FlexDlData));
-      protocol__flex_dl_data__init(dl_data[num_ues_added]);
-      dl_data[num_ues_added]->has_rnti = 1;
-      dl_data[num_ues_added]->rnti = rnti;
-      dl_data[num_ues_added]->n_rlc_pdu = 0;
-      dl_data[num_ues_added]->has_serv_cell_index = 1;
-      dl_data[num_ues_added]->serv_cell_index = CC_id;
-      
-      nb_available_rb = ue_sched_ctl->pre_nb_available_rbs[CC_id];
-      flexran_get_harq(mod_id, CC_id, UE_id, frame, subframe, &harq_pid, &round);
-      sdu_length_total=0;
-      mcs = cqi_to_mcs[flexran_get_ue_wcqi(mod_id, UE_id)];
+    // clear logical channel interface variables
+    UE_template->dl_buffer_total = 0;
+    UE_template->dl_pdus_total = 0;
 
-#ifdef EXMIMO
+    for(i=0; i< MAX_NUM_LCID; i++) {
+      UE_template->dl_buffer_info[i]=0;
+      UE_template->dl_pdus_in_buffer[i]=0;
+      UE_template->dl_buffer_head_sdu_creation_time[i]=0;
+      UE_template->dl_buffer_head_sdu_remaining_size_to_send[i]=0;
+    }
 
-       if (mac_xface->get_transmission_mode(mod_id, CC_id, rnti) == 5) {
-	  mcs = cqi_to_mcs[flexran_get_ue_wcqi(mod_id, UE_id)];
-	  mcs =  cmin(mcs,16);
-       }
+    rnti = UE_RNTI(Mod_id,UE_id);
 
-#endif
+    for(i=0; i< MAX_NUM_LCID; i++) { // loop over all the logical channels
 
-      // initializing the rb allocation indicator for each UE
-       for(j = 0; j < flexran_get_N_RBG(mod_id, CC_id); j++) {
-        UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = 0;
-	rballoc_sub[j] = 0;
-      }
+      rlc_status = mac_rlc_status_ind(Mod_id,rnti, Mod_id,frameP,ENB_FLAG_YES,MBMS_FLAG_NO,i,0 );
+      UE_template->dl_buffer_info[i] = rlc_status.bytes_in_buffer; //storing the dlsch buffer for each logical channel
+      UE_template->dl_pdus_in_buffer[i] = rlc_status.pdus_in_buffer;
+      UE_template->dl_buffer_head_sdu_creation_time[i] = rlc_status.head_sdu_creation_time ;
+      UE_template->dl_buffer_head_sdu_creation_time_max = cmax(UE_template->dl_buffer_head_sdu_creation_time_max,
+          rlc_status.head_sdu_creation_time );
+      UE_template->dl_buffer_head_sdu_remaining_size_to_send[i] = rlc_status.head_sdu_remaining_size_to_send;
+      UE_template->dl_buffer_head_sdu_is_segmented[i] = rlc_status.head_sdu_is_segmented;
+      UE_template->dl_buffer_total += UE_template->dl_buffer_info[i];//storing the total dlsch buffer
+      UE_template->dl_pdus_total   += UE_template->dl_pdus_in_buffer[i];
 
-      /* LOG_D(MAC,"[eNB %d] Frame %d: Scheduling UE %d on CC_id %d (rnti %x, harq_pid %d, round %d, rb %d, cqi %d, mcs %d, rrc %d)\n", */
-      /*       mod_id, frame, UE_id, CC_id, rnti, harq_pid, round, nb_available_rb, */
-      /*       eNB_UE_stats->DL_cqi[0], mcs, */
-      /*       UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status); */
+#ifdef DEBUG_eNB_SCHEDULER
 
-      dl_dci = (Protocol__FlexDlDci*) malloc(sizeof(Protocol__FlexDlDci));
-      protocol__flex_dl_dci__init(dl_dci);
-      dl_data[num_ues_added]->dl_dci = dl_dci;
+      /* note for dl_buffer_head_sdu_remaining_size_to_send[i] :
+       * 0 if head SDU has not been segmented (yet), else remaining size not already segmented and sent
+       */
+      if (UE_template->dl_buffer_info[i]>0)
+        LOG_D(MAC,
+              "[eNB %d][SLICE %d] Frame %d Subframe %d : RLC status for UE %d in LCID%d: total of %d pdus and size %d, head sdu queuing time %d, remaining size %d, is segmented %d \n",
+              Mod_id, slice_id,frameP, subframeP, UE_id,
+              i, UE_template->dl_pdus_in_buffer[i],UE_template->dl_buffer_info[i],
+              UE_template->dl_buffer_head_sdu_creation_time[i],
+              UE_template->dl_buffer_head_sdu_remaining_size_to_send[i],
+              UE_template->dl_buffer_head_sdu_is_segmented[i]
+             );
 
-      
-      dl_dci->has_rnti = 1;
-      dl_dci->rnti = rnti;
-      dl_dci->has_harq_process = 1;
-      dl_dci->harq_process = harq_pid;
-      
-      /* process retransmission  */
+#endif
 
-      if (round > 0) {
+    }
 
-	if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
-	  UE_list->UE_template[CC_id][UE_id].DAI++;
-	  update_ul_dci(mod_id, CC_id, rnti, UE_list->UE_template[CC_id][UE_id].DAI);
-	  LOG_D(MAC,"DAI update: CC_id %d subframeP %d: UE %d, DAI %d\n",
-		CC_id, subframe,UE_id,UE_list->UE_template[CC_id][UE_id].DAI);
-	}
+    //#ifdef DEBUG_eNB_SCHEDULER
+    if ( UE_template->dl_buffer_total>0)
+      LOG_D(MAC,"[eNB %d] Frame %d Subframe %d : RLC status for UE %d : total DL buffer size %d and total number of pdu %d \n",
+            Mod_id, frameP, subframeP, UE_id,
+            UE_template->dl_buffer_total,
+            UE_template->dl_pdus_total
+           );
 
-	mcs = UE_list->UE_template[CC_id][UE_id].mcs[harq_pid];
+    //#endif
+  }
+}
 
-	  // get freq_allocation
-	nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
-	  
-	/*TODO: Must add this to FlexRAN agent API */
-	dci_tbs = mac_xface->get_TBS_DL(mcs, nb_rb);
 
-	if (nb_rb <= nb_available_rb) {
-	  
-	  if(nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) {
-	    for(j = 0; j < flexran_get_N_RBG(mod_id, CC_id); j++) { // for indicating the rballoc for each sub-band
-	      UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
-            }
-	  } else {
-	    nb_rb_temp = nb_rb;
-	    j = 0;
+// This function returns the estimated number of RBs required by each UE for downlink scheduling
+void _assign_rbs_required (module_id_t Mod_id,
+			   int         slice_id,
+			   frame_t     frameP,
+			   sub_frame_t subframe,
+			   uint16_t    nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+			   uint16_t    nb_rbs_allowed_slice[MAX_NUM_CCs][MAX_NUM_SLICES],
+			   int         min_rb_unit[MAX_NUM_CCs])
+{
 
-	    while((nb_rb_temp > 0) && (j < flexran_get_N_RBG(mod_id, CC_id))) {
-	      if(ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) {
-		UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
-		
-		if((j == flexran_get_N_RBG(mod_id, CC_id) - 1) &&
-		   ((flexran_get_N_RB_DL(mod_id, CC_id) == 25)||
-		    (flexran_get_N_RB_DL(mod_id, CC_id) == 50))) {
-		  nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id]+1;
-		} else {
-		  nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id];
-		}
-	      }
-	      j = j + 1;
-	    }
-	  }
 
-	  nb_available_rb -= nb_rb;
-	  aggregation = process_ue_cqi(mod_id, UE_id);
-	  
-	  PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = nb_rb;
-	  PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].dl_pow_off = ue_sched_ctl->dl_pow_off[CC_id];
-	  
-	  for(j=0; j < flexran_get_N_RBG(mod_id, CC_id); j++) {
-	    PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
-	    rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
-	  }
+  rnti_t           rnti;
+  uint16_t         TBS = 0;
+  LTE_eNB_UE_stats *eNB_UE_stats[MAX_NUM_CCs];
+  int              UE_id,n,i,j,CC_id,pCCid,tmp;
+  UE_list_t        *UE_list = &eNB_mac_inst[Mod_id].UE_list;
+  //  UE_TEMPLATE           *UE_template;
 
-	  // Keep the old NDI, do not toggle
-	  ndi = UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
-	  tpc = UE_list->UE_template[CC_id][UE_id].oldTPC[harq_pid];
-	  UE_list->UE_template[CC_id][UE_id].mcs[harq_pid] = mcs;
+  // clear rb allocations across all CC_ids
+  for (UE_id=UE_list->head; UE_id>=0; UE_id=UE_list->next[UE_id]) {
+    
+    if (flexran_slice_member(UE_id, slice_id) == 0)
+      continue;
+    
+    pCCid = UE_PCCID(Mod_id,UE_id);
+    rnti = UE_list->UE_template[pCCid][UE_id].rnti;
 
-	  ue_has_transmission = 1;
-	  num_ues_added++;
-	} else {
-	  LOG_D(MAC,"[eNB %d] Frame %d CC_id %d : don't schedule UE %d, its retransmission takes more resources than we have\n",
-                mod_id, frame, CC_id, UE_id);
-	  ue_has_transmission = 0;
-	}
-	//End of retransmission
-      } else { /* This is a potentially new SDU opportunity */
-	rlc_status.bytes_in_buffer = 0;
-        // Now check RLC information to compute number of required RBs
-        // get maximum TBS size for RLC request
-        //TBS = mac_xface->get_TBS(eNB_UE_stats->DL_cqi[0]<<1,nb_available_rb);
-        TBS = mac_xface->get_TBS_DL(mcs, nb_available_rb);
-	dci_tbs = TBS;
+    //update CQI information across component carriers
+    for (n=0; n<UE_list->numactiveCCs[UE_id]; n++) {
+      CC_id = UE_list->ordered_CCids[n][UE_id];
+      eNB_UE_stats[CC_id] = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti);
+      eNB_UE_stats[CC_id]->dlsch_mcs1=cqi_to_mcs[flexran_get_ue_wcqi(Mod_id, UE_id)];
+    }
 
-        // check first for RLC data on DCCH
-        // add the length for  all the control elements (timing adv, drx, etc) : header + payload
+    // provide the list of CCs sorted according to MCS
+    for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
+      for (j=i+1; j<UE_list->numactiveCCs[UE_id]; j++) {
+        DevAssert( j < MAX_NUM_CCs );
 
-	ta_len = (ue_sched_ctl->ta_update!=0) ? 2 : 0;
-	
-	dl_data[num_ues_added]->n_ce_bitmap = 2;
-	dl_data[num_ues_added]->ce_bitmap = (uint32_t *) malloc(sizeof(uint32_t) * 2);
-	
-	if (ta_len > 0) {
-	  ce_flags |= PROTOCOL__FLEX_CE_TYPE__FLPCET_TA;
-	}
+        if (eNB_UE_stats[UE_list->ordered_CCids[i][UE_id]]->dlsch_mcs1 >
+            eNB_UE_stats[UE_list->ordered_CCids[j][UE_id]]->dlsch_mcs1) {
+          tmp = UE_list->ordered_CCids[i][UE_id];
+          UE_list->ordered_CCids[i][UE_id] = UE_list->ordered_CCids[j][UE_id];
+          UE_list->ordered_CCids[j][UE_id] = tmp;
+        }
+      }
+    }
 
-	/*TODO: Add other flags if DRX and other CE are required*/
-	
-	// Add the control element flags to the flexran message
-	dl_data[num_ues_added]->ce_bitmap[0] = ce_flags;
-	dl_data[num_ues_added]->ce_bitmap[1] = ce_flags;
+    /* NN --> RK
+     * check the index of UE_template
+     */
+    if (UE_list->UE_template[pCCid][UE_id].dl_buffer_total> 0) {
+      LOG_D(MAC,"[preprocessor] assign RB for UE %d\n",UE_id);
 
-	// TODO : Need to prioritize DRBs
-	// Loop through the UE logical channels (DCCH, DCCH1, DTCH for now)
-	for (j = 1; j < NB_RB_MAX; j++) {
-	  header_len+=3;
+      for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
+        CC_id = UE_list->ordered_CCids[i][UE_id];
+	eNB_UE_stats[CC_id] = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti);
 
-	  // Need to see if we have space for data from this channel
-	  if (dci_tbs - ta_len - header_len - sdu_length_total > 0) {
-	     LOG_D(MAC, "[TEST]Requested %d bytes from RLC buffer on channel %d during first call\n", dci_tbs-ta_len-header_len);
-	     //If we have space, we need to see how much data we can request at most (if any available)
-	     rlc_status = mac_rlc_status_ind(mod_id,
-					     rnti,
-					     mod_id,
-					     frame,
-					     ENB_FLAG_YES,
-					     MBMS_FLAG_NO,
-					     j,
-					     (dci_tbs-ta_len-header_len)); // transport block set size
+        if (eNB_UE_stats[CC_id]->dlsch_mcs1==0) {
+          nb_rbs_required[CC_id][UE_id] = 4;  // don't let the TBS get too small
+        } else {
+          nb_rbs_required[CC_id][UE_id] = min_rb_unit[CC_id];
+        }
 
-	     //If data are available in channel j
-	     if (rlc_status.bytes_in_buffer > 0) {
-	       LOG_D(MAC, "[TEST]Have %d bytes in DCCH buffer during first call\n", rlc_status.bytes_in_buffer);
-	       //Fill in as much as possible
-	       data_to_request = cmin(dci_tbs-ta_len-header_len, rlc_status.bytes_in_buffer);
-	       if (data_to_request < 128) { //The header will be one byte less
-		 header_len--;
-	       }
-	       /* if (j == 1 || j == 2) { */
-	       /*  data_to_request+=0; 
-	       /* } */
-	       LOG_D(MAC, "[TEST]Will request %d from channel %d\n", data_to_request, j);
-	       rlc_pdus[channels_added] = (Protocol__FlexRlcPdu *) malloc(sizeof(Protocol__FlexRlcPdu));
-	       protocol__flex_rlc_pdu__init(rlc_pdus[channels_added]);
-	       rlc_pdus[channels_added]->n_rlc_pdu_tb = 2;
-	       rlc_pdus[channels_added]->rlc_pdu_tb = (Protocol__FlexRlcPduTb **) malloc(sizeof(Protocol__FlexRlcPduTb *) * 2);
-	       rlc_pdus[channels_added]->rlc_pdu_tb[0] = (Protocol__FlexRlcPduTb *) malloc(sizeof(Protocol__FlexRlcPduTb));
-	       protocol__flex_rlc_pdu_tb__init(rlc_pdus[channels_added]->rlc_pdu_tb[0]);
-	       rlc_pdus[channels_added]->rlc_pdu_tb[0]->has_logical_channel_id = 1;
-	       rlc_pdus[channels_added]->rlc_pdu_tb[0]->logical_channel_id = j;
-	       rlc_pdus[channels_added]->rlc_pdu_tb[0]->has_size = 1;
-	       rlc_pdus[channels_added]->rlc_pdu_tb[0]->size = data_to_request;
-	       rlc_pdus[channels_added]->rlc_pdu_tb[1] = (Protocol__FlexRlcPduTb *) malloc(sizeof(Protocol__FlexRlcPduTb));
-	       protocol__flex_rlc_pdu_tb__init(rlc_pdus[channels_added]->rlc_pdu_tb[1]);
-	       rlc_pdus[channels_added]->rlc_pdu_tb[1]->has_logical_channel_id = 1;
-	       rlc_pdus[channels_added]->rlc_pdu_tb[1]->logical_channel_id = j;
-	       rlc_pdus[channels_added]->rlc_pdu_tb[1]->has_size = 1;
-	       rlc_pdus[channels_added]->rlc_pdu_tb[1]->size = data_to_request;
-	       dl_data[num_ues_added]->n_rlc_pdu++;
-	       channels_added++;
-	       //Set this to the max value that we might request
-	       sdu_length_total += data_to_request;
-	     } else {
-	       //Take back the assumption of a header for this channel
-	       header_len -= 3;
-	     } //End rlc_status.bytes_in_buffer <= 0
-	  } //end of if dci_tbs - ta_len - header_len > 0
-	} // End of iterating the logical channels
-	
-	// Add rlc_pdus to the dl_data message
-	dl_data[num_ues_added]->rlc_pdu = (Protocol__FlexRlcPdu **) malloc(sizeof(Protocol__FlexRlcPdu *) *
-									  dl_data[num_ues_added]->n_rlc_pdu);
-	for (i = 0; i < dl_data[num_ues_added]->n_rlc_pdu; i++) {
-	  dl_data[num_ues_added]->rlc_pdu[i] = rlc_pdus[i];
-	}
-	
-	// there is a payload
-        if (( dl_data[num_ues_added]->n_rlc_pdu > 0)) {
-	  // Now compute number of required RBs for total sdu length
-          // Assume RAH format 2
-          // adjust  header lengths
-	  header_len_tmp = header_len;
+        TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);
+	nb_rbs_allowed_slice[CC_id][slice_id] = flexran_nb_rbs_allowed_slice(slice_percentage[slice_id],
+									     flexran_get_N_RB_DL(Mod_id, CC_id));
+        LOG_D(MAC,"[preprocessor] start RB assignement for UE %d CC_id %d dl buffer %d (RB unit %d, MCS %d, TBS %d) \n",
+              UE_id, CC_id, UE_list->UE_template[pCCid][UE_id].dl_buffer_total,
+              nb_rbs_required[CC_id][UE_id],eNB_UE_stats[CC_id]->dlsch_mcs1,TBS);
 
-	  if (header_len == 2 || header_len == 3) { //Only one SDU, remove length field
-	    header_len = 1;
-	  } else { //Remove length field from the last SDU
-	    header_len--;
-	  }
+        /* calculating required number of RBs for each UE */
+        while (TBS < UE_list->UE_template[pCCid][UE_id].dl_buffer_total)  {
+          nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id];
 
-	  mcs_tmp = mcs;
-	  if (mcs_tmp == 0) {
-            nb_rb = 4;  // don't let the TBS get too small
-          } else {
-            nb_rb=min_rb_unit[CC_id];
+          if (nb_rbs_required[CC_id][UE_id] > nb_rbs_allowed_slice[CC_id][slice_id]) {
+            TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1, nb_rbs_allowed_slice[CC_id][slice_id]);
+            nb_rbs_required[CC_id][UE_id] = nb_rbs_allowed_slice[CC_id][slice_id];
+            break;
           }
 
-	  LOG_D(MAC,"[TEST]The initial number of resource blocks was %d\n", nb_rb);
-	  LOG_D(MAC,"[TEST] The initial mcs was %d\n", mcs_tmp);
+          TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);
+        } // end of while
 
-	  TBS = mac_xface->get_TBS_DL(mcs_tmp, nb_rb);
-	  LOG_D(MAC,"[TEST]The TBS during rate matching was %d\n", TBS);
+        LOG_D(MAC,"[eNB %d][SLICE %d] Frame %d: UE %d on CC %d: RB unit %d,  nb_required RB %d (TBS %d, mcs %d)\n",
+              Mod_id, slice_id,frameP,UE_id, CC_id,  min_rb_unit[CC_id], nb_rbs_required[CC_id][UE_id], TBS, eNB_UE_stats[CC_id]->dlsch_mcs1);
+      }
+    }
+  }
+}
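+/* In short: starting from min_rb_unit (or 4 RBs when the MCS is 0), the loop
+ * above grows each UE's allocation in min_rb_unit steps until the resulting
+ * TBS covers dl_buffer_total, capping the request at the slice's RB budget
+ * nb_rbs_allowed_slice[CC_id][slice_id]. */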
 
-	  while (TBS < (sdu_length_total + header_len + ta_len))  {
-            nb_rb += min_rb_unit[CC_id];  //
-	    LOG_D(MAC, "[TEST]Had to increase the number of RBs\n");
-            if (nb_rb > nb_available_rb) { // if we've gone beyond the maximum number of RBs
-              // (can happen if N_RB_DL is odd)
-              TBS = mac_xface->get_TBS_DL(mcs_tmp, nb_available_rb);
-              nb_rb = nb_available_rb;
-              break;
-            }
+// This function scans all CC_ids for a particular UE to find the maximum round index of its HARQ processes
+int _maxround(module_id_t Mod_id,uint16_t rnti,int frame,sub_frame_t subframe,uint8_t ul_flag )
+{
 
-            TBS = mac_xface->get_TBS_DL(mcs_tmp, nb_rb);
-          }
+  uint8_t round,round_max=0,UE_id;
+  int CC_id;
+  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
 
-	  if(nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) {
-	    LOG_D(MAC, "[TEST]We had the exact number of rbs. Time to fill the rballoc subband\n");
-            for(j = 0; j < flexran_get_N_RBG(mod_id, CC_id); j++) { // for indicating the rballoc for each sub-band
-              UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
-            }
-          } else {
-	    nb_rb_temp = nb_rb;
-            j = 0;
-	    LOG_D(MAC, "[TEST]Will only partially fill the bitmap\n");
-	    while((nb_rb_temp > 0) && (j < flexran_get_N_RBG(mod_id, CC_id))) {
-              if(ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) {
-                UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
-                if ((j == flexran_get_N_RBG(mod_id, CC_id) - 1) &&
-                    ((flexran_get_N_RB_DL(mod_id, CC_id) == 25)||
-                     (flexran_get_N_RB_DL(mod_id, CC_id) == 50))) {
-                  nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id] + 1;
-                } else {
-                  nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id];
-                }
-              }
-              j = j+1;
-            }
-	  }
-	  
-	  PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = nb_rb;
-          PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].dl_pow_off = ue_sched_ctl->dl_pow_off[CC_id];
+  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
 
-	  for(j = 0; j < flexran_get_N_RBG(mod_id, CC_id); j++) {
-            PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
-          }
+    UE_id = find_UE_id(Mod_id,rnti);
+    round    = UE_list->UE_sched_ctrl[UE_id].round[CC_id];
+    if (round > round_max) {
+      round_max = round;
+    }
+  }
 
-	  // decrease mcs until TBS falls below required length
-          while ((TBS > (sdu_length_total + header_len + ta_len)) && (mcs_tmp > 0)) {
-            mcs_tmp--;
-            TBS = mac_xface->get_TBS_DL(mcs_tmp, nb_rb);
-          }
+  return round_max;
+}
 
-	  // if we have decreased too much or we don't have enough RBs, increase MCS
-          while ((TBS < (sdu_length_total + header_len + ta_len)) &&
-		 ((( ue_sched_ctl->dl_pow_off[CC_id] > 0) && (mcs_tmp < 28))											     || ( (ue_sched_ctl->dl_pow_off[CC_id]==0) && (mcs_tmp <= 15)))) {
-            mcs_tmp++;
-            TBS = mac_xface->get_TBS_DL(mcs_tmp, nb_rb);
-          }
+// This function scans all CC_ids for a particular UE to find the maximum DL CQI
+int _maxcqi(module_id_t Mod_id,int32_t UE_id)
+{
 
-	  dci_tbs = TBS;
-	  mcs = mcs_tmp;
+  LTE_eNB_UE_stats *eNB_UE_stats = NULL;
+  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
+  int CC_id,n;
+  int CQI = 0;
 
-	  aggregation = process_ue_cqi(mod_id,UE_id);
-	  dl_dci->has_aggr_level = 1;
-	  dl_dci->aggr_level = aggregation;
-	  
-          UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid] = nb_rb;
+  for (n=0; n<UE_list->numactiveCCs[UE_id]; n++) {
+    CC_id = UE_list->ordered_CCids[n][UE_id];
+    eNB_UE_stats = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,UE_RNTI(Mod_id,UE_id));
 
-	  if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
-            UE_list->UE_template[CC_id][UE_id].DAI++;
-            //  printf("DAI update: subframeP %d: UE %d, DAI %d\n",subframeP,UE_id,UE_list->UE_template[CC_id][UE_id].DAI);
-	    //#warning only for 5MHz channel
-            update_ul_dci(mod_id, CC_id, rnti, UE_list->UE_template[CC_id][UE_id].DAI);
-          }
+    if (eNB_UE_stats==NULL) {
+      mac_xface->macphy_exit("maxcqi: could not get eNB_UE_stats\n");
+      return 0; // not reached
+    }
 
-	  // do PUCCH power control
-          // this is the normalized RX power
-	  normalized_rx_power = flexran_get_p0_pucch_dbm(mod_id,UE_id, CC_id); //eNB_UE_stats->Po_PUCCH_dBm; 
-	  target_rx_power = flexran_get_p0_nominal_pucch(mod_id, CC_id) + 10; //mac_xface->get_target_pucch_rx_power(mod_id, CC_id) + 10;
+    if (eNB_UE_stats->DL_cqi[0] > CQI) {
+      CQI = eNB_UE_stats->DL_cqi[0];
+    }
+  }
 
-	  // this assumes accumulated tpc
-	  // make sure that we are only sending a tpc update once a frame, otherwise the control loop will freak out
-	  int32_t framex10psubframe = UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame*10+UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe;
+  return(CQI);
+}
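+/* _maxround and _maxcqi provide the per-UE HARQ round and wideband CQI that
+ * are used, together with the DL buffer occupancy, as sorting keys by
+ * _sort_UEs below. */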
 
-	  if (((framex10psubframe+10)<=(frame*10+subframe)) || //normal case
-	      ((framex10psubframe>(frame*10+subframe)) && (((10240-framex10psubframe+frame*10+subframe)>=10)))) //frame wrap-around
-	    if (flexran_get_p0_pucch_status(mod_id, UE_id, CC_id) == 1) {
-	      flexran_update_p0_pucch(mod_id, UE_id, CC_id);
-	      
-	      UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame = frame;
-	      UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe = subframe;
-	      if (normalized_rx_power>(target_rx_power+1)) {
-		tpc = 0; //-1
-		tpc_accumulated--;
-	      } else if (normalized_rx_power<(target_rx_power-1)) {
-		tpc = 2; //+1
-		tpc_accumulated++;
-	      } else {
-		tpc = 1; //0
-	      }
-	      LOG_D(MAC,"[eNB %d] DLSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, accumulated %d, normalized/target rx power %d/%d\n",
-		    mod_id, frame, subframe, harq_pid, tpc,
-		    tpc_accumulated, normalized_rx_power, target_rx_power);
-	    } // Po_PUCCH has been updated 
-	    else {
-	      tpc = 1; //0
-	    } // time to do TPC update 
-	  else {
-	    tpc = 1; //0
-	  }
 
-	  for(i=0; i<PHY_vars_eNB_g[mod_id][CC_id]->frame_parms.N_RBG; i++) {
-	    rballoc_sub[i] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][i];
-          }	
+// This function sorts the UEs in order of their dlsch buffer and CQI
+void _sort_UEs (module_id_t Mod_idP,
+               int         frameP,
+               sub_frame_t subframeP)
+{
 
-	   // Toggle NDI
-          LOG_D(MAC,"CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n",
-                CC_id, frame, subframe, UE_id,
-                UE_list->UE_template[CC_id][UE_id].rnti,harq_pid, UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]);
-          UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]= 1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
-	  ndi =  UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
-	  
-	  UE_list->UE_template[CC_id][UE_id].mcs[harq_pid] = mcs;
-	  UE_list->UE_template[CC_id][UE_id].oldTPC[harq_pid] = tpc;
 
-	  // Increase the pointer for the number of scheduled UEs
-	  num_ues_added++;
-	  ue_has_transmission = 1;
-	}  else { // There is no data from RLC or MAC header, so don't schedule
-	  ue_has_transmission = 0;
-	}
-      } // End of new scheduling
-      
-      // If we has transmission or retransmission
-      if (ue_has_transmission) {
-	switch (mac_xface->get_transmission_mode(mod_id, CC_id, rnti)) {
-	case 1:
-	case 2:
-	default:
-	  dl_dci->has_res_alloc = 1;
-	  dl_dci->res_alloc = 0;
-	  dl_dci->has_vrb_format = 1;
-	  dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
-	  dl_dci->has_format = 1;
-	  dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_1;
-	  dl_dci->has_rb_bitmap = 1;
-	  dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
-	  dl_dci->has_rb_shift = 1;
-	  dl_dci->rb_shift = 0;
-	  dl_dci->n_ndi = 1;
-	  dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
-	  dl_dci->ndi[0] = ndi;
-	  dl_dci->n_rv = 1;
-	  dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
-	  dl_dci->rv[0] = round & 3;
-	  dl_dci->has_tpc = 1;
-	  dl_dci->tpc = tpc;
-	  dl_dci->n_mcs = 1;
-	  dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
-	  dl_dci->mcs[0] = mcs;
-	  dl_dci->n_tbs_size = 1;
-	  dl_dci->tbs_size = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_tbs_size);
-	  dl_dci->tbs_size[0] = dci_tbs;
-	  if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
-	    dl_dci->has_dai = 1;
-	    dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
-	  }
-	  break;
-	case 3:
-	  dl_dci->has_res_alloc = 1;
-	  dl_dci->res_alloc = 0;
-	  dl_dci->has_vrb_format = 1;
-	  dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
-	  dl_dci->has_format = 1;
-	  dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_2A;
-	  dl_dci->has_rb_bitmap = 1;
-	  dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
-	  dl_dci->has_rb_shift = 1;
-	  dl_dci->rb_shift = 0;
-	  dl_dci->n_ndi = 2;
-	  dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
-	  dl_dci->ndi[0] = ndi;
-	  dl_dci->ndi[1] = ndi;
-	  dl_dci->n_rv = 2;
-	  dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
-	  dl_dci->rv[0] = round & 3;
-	  dl_dci->rv[1] = round & 3;
-	  dl_dci->has_tpc = 1;
-	  dl_dci->tpc = tpc;
-	  dl_dci->n_mcs = 2;
-	  dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
-	  dl_dci->mcs[0] = mcs;
-	  dl_dci->mcs[1] = mcs;
-	  dl_dci->n_tbs_size = 2;
-	  dl_dci->tbs_size = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_tbs_size);
-	  dl_dci->tbs_size[0] = dci_tbs;
-	  dl_dci->tbs_size[1] = dci_tbs;
-	  if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
-	    dl_dci->has_dai = 1;
-	    dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
-	  }
-	  break;
-	case 4:
-	  dl_dci->has_res_alloc = 1;
-	  dl_dci->res_alloc = 0;
-	  dl_dci->has_vrb_format = 1;
-	  dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
-	  dl_dci->has_format = 1;
-	  dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_2A;
-	  dl_dci->has_rb_bitmap = 1;
-	  dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
-	  dl_dci->has_rb_shift = 1;
-	  dl_dci->rb_shift = 0;
-	  dl_dci->n_ndi = 2;
-	  dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
-	  dl_dci->ndi[0] = ndi;
-	  dl_dci->ndi[1] = ndi;
-	  dl_dci->n_rv = 2;
-	  dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
-	  dl_dci->rv[0] = round & 3;
-	  dl_dci->rv[1] = round & 3;
-	  dl_dci->has_tpc = 1;
-	  dl_dci->tpc = tpc;
-	  dl_dci->n_mcs = 2;
-	  dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
-	  dl_dci->mcs[0] = mcs;
-	  dl_dci->mcs[1] = mcs;
-	  dl_dci->n_tbs_size = 2;
-	  dl_dci->tbs_size = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_tbs_size);
-	  dl_dci->tbs_size[0] = dci_tbs;
-	  dl_dci->tbs_size[1] = dci_tbs;
-	  if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
-	    dl_dci->has_dai = 1;
-	    dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
-	  }
-	  break;
-	case 5:
-	  dl_dci->has_res_alloc = 1;
-	  dl_dci->res_alloc = 0;
-	  dl_dci->has_vrb_format = 1;
-	  dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
-	  dl_dci->has_format = 1;
-	  dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_1D;
-	  dl_dci->has_rb_bitmap = 1;
-	  dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
-	  dl_dci->has_rb_shift = 1;
-	  dl_dci->rb_shift = 0;
-	  dl_dci->n_ndi = 1;
-	  dl_dci->ndi = 1;
-	  dl_dci->ndi[0] = ndi;
-	  dl_dci->n_rv = 1;
-	  dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
-	  dl_dci->rv[0] = round & 3;
-	  dl_dci->has_tpc = 1;
-	  dl_dci->tpc = tpc;
-	  dl_dci->n_mcs = 1;
-	  dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
-	  dl_dci->mcs[0] = mcs;
-	  dl_dci->n_tbs_size = 1;
-	  dl_dci->tbs_size = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_tbs_size);
-	  dl_dci->tbs_size[0] = dci_tbs;
-	  if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
-	    dl_dci->has_dai = 1;
-	    dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
-	  }
-	  
-	  if(ue_sched_ctl->dl_pow_off[CC_id] == 2) {
-	    ue_sched_ctl->dl_pow_off[CC_id] = 1;
-	  }
-	  
-	  dl_dci->has_dl_power_offset = 1;
-	  dl_dci->dl_power_offset = ue_sched_ctl->dl_pow_off[CC_id];
-	  dl_dci->has_precoding_info = 1;
-	  dl_dci->precoding_info = 5; // Is this right??
-	  
-	  break;
-	case 6:
-	  dl_dci->has_res_alloc = 1;
-	  dl_dci->res_alloc = 0;
-	  dl_dci->has_vrb_format = 1;
-	  dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
-	  dl_dci->has_format = 1;
-	  dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_1D;
-	  dl_dci->has_rb_bitmap = 1;
-	  dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
-	  dl_dci->has_rb_shift = 1;
-	  dl_dci->rb_shift = 0;
-	  dl_dci->n_ndi = 1;
-	  dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
-	  dl_dci->ndi[0] = ndi;
-	  dl_dci->n_rv = 1;
-	  dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
-	  dl_dci->rv[0] = round & 3;
-	  dl_dci->has_tpc = 1;
-	  dl_dci->tpc = tpc;
-	  dl_dci->n_mcs = 1;
-	  dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
-	  dl_dci->mcs[0] = mcs;
-	  if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
-	    dl_dci->has_dai = 1;
-	    dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
-	  }
-
-	  dl_dci->has_dl_power_offset = 1;
-	  dl_dci->dl_power_offset = ue_sched_ctl->dl_pow_off[CC_id];
-	  dl_dci->has_precoding_info = 1;
-	  dl_dci->precoding_info = 5; // Is this right??
-	  break;
-	}
-      }
-      
-      if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
-        
-	/* TODO */
-	//set_ul_DAI(mod_id, UE_id, CC_id, frame, subframe, frame_parms);
-      }
-    } // UE_id loop
-   } // CC_id loop
-
-   // Add all the dl_data elements to the flexran message
-   (*dl_info)->dl_mac_config_msg->n_dl_ue_data = num_ues_added;
-   (*dl_info)->dl_mac_config_msg->dl_ue_data = (Protocol__FlexDlData **) malloc(sizeof(Protocol__FlexDlData *) * num_ues_added);
-   for (i = 0; i < num_ues_added; i++) {
-     (*dl_info)->dl_mac_config_msg->dl_ue_data[i] = dl_data[i];
-   }
-   
-   stop_meas(&eNB->schedule_dlsch);
-   VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH,VCD_FUNCTION_OUT);
-}
-
-// This function stores the downlink buffer for all the logical channels
-void _store_dlsch_buffer (module_id_t Mod_id,
-			  frame_t     frameP,
-			  sub_frame_t subframeP)
-{
-
-  int                   UE_id,i;
-  rnti_t                rnti;
-  mac_rlc_status_resp_t rlc_status;
-  UE_list_t             *UE_list = &eNB_mac_inst[Mod_id].UE_list;
-  UE_TEMPLATE           *UE_template;
-
-  for (UE_id=UE_list->head; UE_id>=0; UE_id=UE_list->next[UE_id]) {
-
-    UE_template = &UE_list->UE_template[UE_PCCID(Mod_id,UE_id)][UE_id];
-
-    // clear logical channel interface variables
-    UE_template->dl_buffer_total = 0;
-    UE_template->dl_pdus_total = 0;
-
-    for(i=0; i< MAX_NUM_LCID; i++) {
-      UE_template->dl_buffer_info[i]=0;
-      UE_template->dl_pdus_in_buffer[i]=0;
-      UE_template->dl_buffer_head_sdu_creation_time[i]=0;
-      UE_template->dl_buffer_head_sdu_remaining_size_to_send[i]=0;
-    }
-
-    rnti = UE_RNTI(Mod_id,UE_id);
-
-    for(i=0; i< MAX_NUM_LCID; i++) { // loop over all the logical channels
-
-      rlc_status = mac_rlc_status_ind(Mod_id,rnti, Mod_id,frameP,ENB_FLAG_YES,MBMS_FLAG_NO,i,0 );
-      UE_template->dl_buffer_info[i] = rlc_status.bytes_in_buffer; //storing the dlsch buffer for each logical channel
-      UE_template->dl_pdus_in_buffer[i] = rlc_status.pdus_in_buffer;
-      UE_template->dl_buffer_head_sdu_creation_time[i] = rlc_status.head_sdu_creation_time ;
-      UE_template->dl_buffer_head_sdu_creation_time_max = cmax(UE_template->dl_buffer_head_sdu_creation_time_max,
-          rlc_status.head_sdu_creation_time );
-      UE_template->dl_buffer_head_sdu_remaining_size_to_send[i] = rlc_status.head_sdu_remaining_size_to_send;
-      UE_template->dl_buffer_head_sdu_is_segmented[i] = rlc_status.head_sdu_is_segmented;
-      UE_template->dl_buffer_total += UE_template->dl_buffer_info[i];//storing the total dlsch buffer
-      UE_template->dl_pdus_total   += UE_template->dl_pdus_in_buffer[i];
-
-#ifdef DEBUG_eNB_SCHEDULER
-
-      /* note for dl_buffer_head_sdu_remaining_size_to_send[i] :
-       * 0 if head SDU has not been segmented (yet), else remaining size not already segmented and sent
-       */
-      if (UE_template->dl_buffer_info[i]>0)
-        LOG_D(MAC,
-              "[eNB %d] Frame %d Subframe %d : RLC status for UE %d in LCID%d: total of %d pdus and size %d, head sdu queuing time %d, remaining size %d, is segmeneted %d \n",
-              Mod_id, frameP, subframeP, UE_id,
-              i, UE_template->dl_pdus_in_buffer[i],UE_template->dl_buffer_info[i],
-              UE_template->dl_buffer_head_sdu_creation_time[i],
-              UE_template->dl_buffer_head_sdu_remaining_size_to_send[i],
-              UE_template->dl_buffer_head_sdu_is_segmented[i]
-             );
-
-#endif
-
-    }
-
-    //#ifdef DEBUG_eNB_SCHEDULER
-    if ( UE_template->dl_buffer_total>0)
-      LOG_D(MAC,"[eNB %d] Frame %d Subframe %d : RLC status for UE %d : total DL buffer size %d and total number of pdu %d \n",
-            Mod_id, frameP, subframeP, UE_id,
-            UE_template->dl_buffer_total,
-            UE_template->dl_pdus_total
-           );
-
-    //#endif
-  }
-}
-
-
-// This function returns the estimated number of RBs required by each UE for downlink scheduling
-void _assign_rbs_required (module_id_t Mod_id,
-                          frame_t     frameP,
-                          sub_frame_t subframe,
-                          uint16_t    nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
-                          int         min_rb_unit[MAX_NUM_CCs])
-{
-
-
-  rnti_t           rnti;
-  uint16_t         TBS = 0;
-  LTE_eNB_UE_stats *eNB_UE_stats[MAX_NUM_CCs];
-  int              UE_id,n,i,j,CC_id,pCCid,tmp;
-  UE_list_t        *UE_list = &eNB_mac_inst[Mod_id].UE_list;
-  //  UE_TEMPLATE           *UE_template;
-
-  // clear rb allocations across all CC_ids
-  for (UE_id=UE_list->head; UE_id>=0; UE_id=UE_list->next[UE_id]) {
-    pCCid = UE_PCCID(Mod_id,UE_id);
-    rnti = UE_list->UE_template[pCCid][UE_id].rnti;
-
-    //update CQI information across component carriers
-    for (n=0; n<UE_list->numactiveCCs[UE_id]; n++) {
-      CC_id = UE_list->ordered_CCids[n][UE_id];
-      eNB_UE_stats[CC_id] = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti);
-      eNB_UE_stats[CC_id]->dlsch_mcs1=cqi_to_mcs[flexran_get_ue_wcqi(Mod_id, UE_id)];
-    }
-
-    // provide the list of CCs sorted according to MCS
-    for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
-      for (j=i+1; j<UE_list->numactiveCCs[UE_id]; j++) {
-        DevAssert( j < MAX_NUM_CCs );
-
-        if (eNB_UE_stats[UE_list->ordered_CCids[i][UE_id]]->dlsch_mcs1 >
-            eNB_UE_stats[UE_list->ordered_CCids[j][UE_id]]->dlsch_mcs1) {
-          tmp = UE_list->ordered_CCids[i][UE_id];
-          UE_list->ordered_CCids[i][UE_id] = UE_list->ordered_CCids[j][UE_id];
-          UE_list->ordered_CCids[j][UE_id] = tmp;
-        }
-      }
-    }
-
-    /* NN --> RK
-     * check the index of UE_template"
-     */
-    if (UE_list->UE_template[pCCid][UE_id].dl_buffer_total> 0) {
-      LOG_D(MAC,"[preprocessor] assign RB for UE %d\n",UE_id);
-
-      for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
-        CC_id = UE_list->ordered_CCids[i][UE_id];
-	eNB_UE_stats[CC_id] = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti);
-
-        if (eNB_UE_stats[CC_id]->dlsch_mcs1==0) {
-          nb_rbs_required[CC_id][UE_id] = 4;  // don't let the TBS get too small
-        } else {
-          nb_rbs_required[CC_id][UE_id] = min_rb_unit[CC_id];
-        }
-
-        TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);
-
-        LOG_D(MAC,"[preprocessor] start RB assignement for UE %d CC_id %d dl buffer %d (RB unit %d, MCS %d, TBS %d) \n",
-              UE_id, CC_id, UE_list->UE_template[pCCid][UE_id].dl_buffer_total,
-              nb_rbs_required[CC_id][UE_id],eNB_UE_stats[CC_id]->dlsch_mcs1,TBS);
-
-        /* calculating required number of RBs for each UE */
-        while (TBS < UE_list->UE_template[pCCid][UE_id].dl_buffer_total)  {
-          nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id];
-
-          if (nb_rbs_required[CC_id][UE_id] > flexran_get_N_RB_DL(Mod_id, CC_id)) {
-            TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1, flexran_get_N_RB_DL(Mod_id, CC_id));
-            nb_rbs_required[CC_id][UE_id] = flexran_get_N_RB_DL(Mod_id, CC_id);
-            break;
-          }
-
-          TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);
-        } // end of while
-
-        LOG_D(MAC,"[eNB %d] Frame %d: UE %d on CC %d: RB unit %d,  nb_required RB %d (TBS %d, mcs %d)\n",
-              Mod_id, frameP,UE_id, CC_id,  min_rb_unit[CC_id], nb_rbs_required[CC_id][UE_id], TBS, eNB_UE_stats[CC_id]->dlsch_mcs1);
-      }
-    }
-  }
-}
-
-// This function scans all CC_ids for a particular UE to find the maximum round index of its HARQ processes
-int _maxround(module_id_t Mod_id,uint16_t rnti,int frame,sub_frame_t subframe,uint8_t ul_flag )
-{
-
-  uint8_t round,round_max=0,UE_id;
-  int CC_id;
-  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
-
-  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
-
-    UE_id = find_UE_id(Mod_id,rnti);
-    round    = UE_list->UE_sched_ctrl[UE_id].round[CC_id];
-    if (round > round_max) {
-      round_max = round;
-    }
-  }
-
-  return round_max;
-}
-
-// This function scans all CC_ids for a particular UE to find the maximum DL CQI
-int _maxcqi(module_id_t Mod_id,int32_t UE_id)
-{
-
-  LTE_eNB_UE_stats *eNB_UE_stats = NULL;
-  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
-  int CC_id,n;
-  int CQI = 0;
-
-  for (n=0; n<UE_list->numactiveCCs[UE_id]; n++) {
-    CC_id = UE_list->ordered_CCids[n][UE_id];
-    eNB_UE_stats = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,UE_RNTI(Mod_id,UE_id));
-
-    if (eNB_UE_stats==NULL) {
-      mac_xface->macphy_exit("maxcqi: could not get eNB_UE_stats\n");
-      return 0; // not reached
-    }
-
-    if (eNB_UE_stats->DL_cqi[0] > CQI) {
-      CQI = eNB_UE_stats->DL_cqi[0];
-    }
-  }
-
-  return(CQI);
-}
-
-
-// This fuction sorts the UE in order their dlsch buffer and CQI
-void _sort_UEs (module_id_t Mod_idP,
-               int         frameP,
-               sub_frame_t subframeP)
-{
-
-
-  int               UE_id1,UE_id2;
-  int               pCC_id1,pCC_id2;
-  int               cqi1,cqi2,round1,round2;
-  int               i=0,ii=0;//,j=0;
-  rnti_t            rnti1,rnti2;
+  int               UE_id1,UE_id2;
+  int               pCC_id1,pCC_id2;
+  int               cqi1,cqi2,round1,round2;
+  int               i=0,ii=0;//,j=0;
+  rnti_t            rnti1,rnti2;
 
   UE_list_t *UE_list = &eNB_mac_inst[Mod_idP].UE_list;
 
@@ -1024,35 +402,170 @@ void _sort_UEs (module_id_t Mod_idP,
   }
 }
 
-// This function assigns pre-available RBS to each UE in specified sub-bands before scheduling is done
-void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
-                                    frame_t       frameP,
-                                    sub_frame_t   subframeP,
-                                    int           N_RBG[MAX_NUM_CCs],
-                                    int           *mbsfn_flag)
-{
-
-  unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX], harq_pid=0, total_ue_count;
-  unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX];
-  int                     UE_id, i;
-  unsigned char round = 0;
-  uint16_t                ii,j;
-  uint16_t                nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
-  uint16_t                nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
-  uint16_t                nb_rbs_required_remaining_1[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
-  uint16_t                average_rbs_per_user[MAX_NUM_CCs] = {0};
-  rnti_t             rnti;
-  int                min_rb_unit[MAX_NUM_CCs];
-  uint16_t r1=0;
-  uint8_t CC_id;
-  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
-  LTE_DL_FRAME_PARMS   *frame_parms[MAX_NUM_CCs] = {0};
+void _dlsch_scheduler_pre_processor_allocate (module_id_t   Mod_id,
+					      int           UE_id,
+					      uint8_t       CC_id,
+					      int           N_RBG,
+					      int           transmission_mode,
+					      int           min_rb_unit,
+					      uint8_t       N_RB_DL,
+					      uint16_t      nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+					      uint16_t      nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+					      unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
+					      unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX]) {
+  int i;
+  UE_list_t *UE_list=&eNB_mac_inst[Mod_id].UE_list;
+  UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
 
-  int transmission_mode = 0;
-  UE_sched_ctrl *ue_sched_ctl;
+  for(i=0; i<N_RBG; i++) {
 
-  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
-    
+    if((rballoc_sub[CC_id][i] == 0)           &&
+        (ue_sched_ctl->rballoc_sub_UE[CC_id][i] == 0) &&
+        (nb_rbs_required_remaining[CC_id][UE_id]>0)   &&
+        (ue_sched_ctl->pre_nb_available_rbs[CC_id] < nb_rbs_required[CC_id][UE_id])) {
+
+      // if this UE is not scheduled for TM5
+      if (ue_sched_ctl->dl_pow_off[CC_id] != 0 )  {
+
+	if ((i == N_RBG-1) && ((N_RB_DL == 25) || (N_RB_DL == 50))) {
+	  rballoc_sub[CC_id][i] = 1;
+	  ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 1;
+	  MIMO_mode_indicator[CC_id][i] = 1;
+	  if (transmission_mode == 5 ) {
+	    ue_sched_ctl->dl_pow_off[CC_id] = 1;
+	  }   
+	  nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - min_rb_unit+1;
+          ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit - 1;
+        } else {
+	  if (nb_rbs_required_remaining[CC_id][UE_id] >=  min_rb_unit){
+	    rballoc_sub[CC_id][i] = 1;
+	    ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 1;
+	    MIMO_mode_indicator[CC_id][i] = 1;
+	    if (transmission_mode == 5 ) {
+	      ue_sched_ctl->dl_pow_off[CC_id] = 1;
+	    }
+	    nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - min_rb_unit;
+	    ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit;
+	  }
+	}
+      } // dl_pow_off[CC_id][UE_id] ! = 0
+    }
+  }
+}
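+
+/* Allocation sketch: the loop above hands out one RBG per pass; each grant
+ * consumes min_rb_unit PRBs, except the last RBG of a 25- or 50-PRB carrier,
+ * which is one PRB smaller (hence the min_rb_unit-1/+1 bookkeeping). */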
+
+void _dlsch_scheduler_pre_processor_reset (int module_idP,
+					   int UE_id,
+					   uint8_t  CC_id,
+					   int frameP,
+					   int subframeP,					  
+					   int N_RBG,
+					   uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+					   uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+					   uint16_t nb_rbs_allowed_slice[MAX_NUM_CCs][MAX_NUM_SLICES],
+					   unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
+					   unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX]) {
+  int i,j;
+  UE_list_t *UE_list=&eNB_mac_inst[module_idP].UE_list;
+  UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
+  uint8_t *vrb_map = eNB_mac_inst[module_idP].common_channels[CC_id].vrb_map;
+  int RBGsize = PHY_vars_eNB_g[module_idP][CC_id]->frame_parms.N_RB_DL/N_RBG;
+#ifdef SF05_LIMIT
+  //int subframe05_limit=0;
+  int sf05_upper=-1,sf05_lower=-1;
+#endif
+  //  LTE_eNB_UE_stats *eNB_UE_stats = mac_xface->get_eNB_UE_stats(module_idP,CC_id,rnti);
+  
+  flexran_update_TA(module_idP, UE_id, CC_id);
+
+  if (UE_id==0) {
+    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_TIMING_ADVANCE,ue_sched_ctl->ta_update);
+  }
+  nb_rbs_required[CC_id][UE_id]=0;
+  ue_sched_ctl->pre_nb_available_rbs[CC_id] = 0;
+  ue_sched_ctl->dl_pow_off[CC_id] = 2;
+  nb_rbs_required_remaining[CC_id][UE_id] = 0;
+  for (i=0; i<n_active_slices;i++)
+    nb_rbs_allowed_slice[CC_id][i] = 0;
+#ifdef SF05_LIMIT  
+  switch (N_RBG) {
+  case 6:
+    sf05_lower=0;
+    sf05_upper=5;
+    break;
+  case 8:
+    sf05_lower=2;
+    sf05_upper=5;
+    break;
+  case 13:
+    sf05_lower=4;
+    sf05_upper=7;
+    break;
+  case 17:
+    sf05_lower=7;
+    sf05_upper=9;
+    break;
+  case 25:
+    sf05_lower=11;
+    sf05_upper=13;
+    break;
+  }
+#endif
+  // Initialize Subbands according to VRB map
+  for (i=0; i<N_RBG; i++) {
+    ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 0;
+    rballoc_sub[CC_id][i] = 0;
+#ifdef SF05_LIMIT
+    // for avoiding 6+ PRBs around DC in subframe 0-5 (avoid excessive errors)
+
+    if ((subframeP==0 || subframeP==5) && 
+	(i>=sf05_lower && i<=sf05_upper))
+      rballoc_sub[CC_id][i]=1;
+#endif
+    // for SI-RNTI,RA-RNTI and P-RNTI allocations
+    for (j=0;j<RBGsize;j++) {
+      if (vrb_map[j+(i*RBGsize)]!=0)  {
+	rballoc_sub[CC_id][i] = 1;
+	LOG_D(MAC,"Frame %d, subframe %d : vrb %d allocated\n",frameP,subframeP,j+(i*RBGsize));
+	break;
+      }
+    }
+    LOG_D(MAC,"Frame %d Subframe %d CC_id %d RBG %i : rb_alloc %d\n",frameP,subframeP,CC_id,i,rballoc_sub[CC_id][i]);
+    MIMO_mode_indicator[CC_id][i] = 2;
+  }
+}
+
+// This function assigns pre-available RBS to each UE in specified sub-bands before scheduling is done
+void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
+				     int      slice_id,
+				     frame_t       frameP,
+				     sub_frame_t   subframeP,
+				     int           N_RBG[MAX_NUM_CCs],
+				     int           *mbsfn_flag)
+{
+
+  unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX], harq_pid=0, total_ue_count;
+  unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX];
+  int                     UE_id, i;
+  unsigned char round = 0;
+  uint16_t                ii,j;
+  uint16_t                nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
+  uint16_t                nb_rbs_allowed_slice[MAX_NUM_CCs][MAX_NUM_SLICES];
+  uint16_t                nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
+  uint16_t                nb_rbs_required_remaining_1[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
+  uint16_t                average_rbs_per_user[MAX_NUM_CCs] = {0};
+  rnti_t             rnti;
+  int                min_rb_unit[MAX_NUM_CCs];
+  uint16_t r1=0;
+  uint8_t CC_id;
+  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
+  LTE_DL_FRAME_PARMS   *frame_parms[MAX_NUM_CCs] = {0};
+
+  int transmission_mode = 0;
+  UE_sched_ctrl *ue_sched_ctl;
+  
+  
+  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
+    
     if (mbsfn_flag[CC_id]>0)  // If this CC is allocated for MBSFN skip it here
       continue;
     
@@ -1065,7 +578,8 @@ void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
       UE_id = i;
       // Initialize scheduling information for all active UEs
       
-      
+      //if (flexran_slice_member(UE_id, slice_id) == 0)
+      //continue;
       _dlsch_scheduler_pre_processor_reset(Mod_id,
 					   UE_id,
 					   CC_id,
@@ -1074,6 +588,7 @@ void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
 					   N_RBG[CC_id],
 					   nb_rbs_required,
 					   nb_rbs_required_remaining,
+					   nb_rbs_allowed_slice, 
 					   rballoc_sub,
 					   MIMO_mode_indicator);
 
@@ -1081,10 +596,10 @@ void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
   }
   
   // Store the DLSCH buffer for each logical channel
-  _store_dlsch_buffer (Mod_id,frameP,subframeP);
+  _store_dlsch_buffer (Mod_id,slice_id,frameP,subframeP);
 
   // Calculate the number of RBs required by each UE on the basis of logical channel's buffer
-  _assign_rbs_required (Mod_id,frameP,subframeP,nb_rbs_required,min_rb_unit);
+  _assign_rbs_required (Mod_id,slice_id, frameP,subframeP,nb_rbs_required,nb_rbs_allowed_slice,min_rb_unit);
 
   // Sorts the user on the basis of dlsch logical channel buffer and CQI
   _sort_UEs (Mod_id,frameP,subframeP);
@@ -1099,7 +614,10 @@ void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
     if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
       continue;
     UE_id = i;
-
+    
+    if (flexran_slice_member(UE_id, slice_id) == 0)
+      continue;
+    
     // if there is no available harq_process, skip the UE
     if (UE_list->UE_sched_ctrl[UE_id].harq_pid[CC_id]<0)
       continue;
@@ -1107,6 +625,7 @@ void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
     for (ii=0; ii < UE_num_active_CC(UE_list,UE_id); ii++) {
       CC_id = UE_list->ordered_CCids[ii][UE_id];
       ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
+      ue_sched_ctl->max_allowed_rbs[CC_id]=nb_rbs_allowed_slice[CC_id][slice_id];
       flexran_get_harq(Mod_id, CC_id, UE_id, frameP, subframeP, &harq_pid, &round);
 
       average_rbs_per_user[CC_id]=0;
@@ -1137,8 +656,8 @@ void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
 
       if (total_ue_count == 0) {
         average_rbs_per_user[CC_id] = 0;
-      } else if( (min_rb_unit[CC_id] * total_ue_count) <= (frame_parms[CC_id]->N_RB_DL) ) {
-        average_rbs_per_user[CC_id] = (uint16_t) floor(frame_parms[CC_id]->N_RB_DL/total_ue_count);
+      } else if( (min_rb_unit[CC_id] * total_ue_count) <= nb_rbs_allowed_slice[CC_id][slice_id] ) {
+        average_rbs_per_user[CC_id] = (uint16_t) floor(nb_rbs_allowed_slice[CC_id][slice_id]/total_ue_count);
       } else {
         average_rbs_per_user[CC_id] = min_rb_unit[CC_id]; // consider the total number of use that can be scheduled UE
       }
@@ -1149,7 +668,10 @@ void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
   // extend nb_rbs_required to capture per LCID RB required
   for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
     rnti = UE_RNTI(Mod_id,i);
-
+   
+    if (flexran_slice_member(i, slice_id) == 0)
+      continue;
+    
     for (ii=0; ii<UE_num_active_CC(UE_list,i); ii++) {
       CC_id = UE_list->ordered_CCids[ii][i];
 
@@ -1169,6 +691,10 @@ void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
   for(r1=0; r1<2; r1++) {
 
     for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
+      
+      if (flexran_slice_member(i, slice_id) == 0)
+	continue;
+      
       for (ii=0; ii<UE_num_active_CC(UE_list,i); ii++) {
         CC_id = UE_list->ordered_CCids[ii][i];
 
@@ -1194,8 +720,11 @@ void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
     if (total_ue_count > 0 ) {
       for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
         UE_id = i;
-
-        for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
+	
+	if (flexran_slice_member(UE_id, slice_id) == 0)
+	  continue;
+        
+	for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
           CC_id = UE_list->ordered_CCids[ii][UE_id];
 	  ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
 	  flexran_get_harq(Mod_id, CC_id, UE_id, frameP, subframeP, &harq_pid, &round);	  
@@ -1234,6 +763,9 @@ void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
   for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
     UE_id = i;
     ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
+ 
+    if (flexran_slice_member(UE_id, slice_id) == 0)
+      continue;
 
     for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
       CC_id = UE_list->ordered_CCids[ii][UE_id];
@@ -1250,7 +782,8 @@ void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
         }
 
         //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = pre_nb_available_rbs[CC_id][UE_id];
-        LOG_D(MAC,"Total RBs allocated for UE%d = %d\n",UE_id,ue_sched_ctl->pre_nb_available_rbs[CC_id]);
+        LOG_D(MAC,"[eNB %d][SLICE %d] Total RBs allocated for UE%d = %d\n",
+	      Mod_id, slice_id, UE_id,ue_sched_ctl->pre_nb_available_rbs[CC_id]);
       }
     }
   }
@@ -1258,134 +791,921 @@ void _dlsch_scheduler_pre_processor (module_id_t   Mod_id,
 
 #define SF05_LIMIT 1
 
-void _dlsch_scheduler_pre_processor_reset (int module_idP,
-					   int UE_id,
-					   uint8_t  CC_id,
-					   int frameP,
-					   int subframeP,					  
-					   int N_RBG,
-					   uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
-					   uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
-					   unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
-					   unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX]) {
-  int i,j;
-  UE_list_t *UE_list=&eNB_mac_inst[module_idP].UE_list;
-  UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
-  rnti_t rnti = UE_RNTI(module_idP,UE_id);
-  uint8_t *vrb_map = eNB_mac_inst[module_idP].common_channels[CC_id].vrb_map;
-  int RBGsize = PHY_vars_eNB_g[module_idP][CC_id]->frame_parms.N_RB_DL/N_RBG;
-#ifdef SF05_LIMIT
-  //int subframe05_limit=0;
-  int sf05_upper=-1,sf05_lower=-1;
-#endif
-  LTE_eNB_UE_stats *eNB_UE_stats = mac_xface->get_eNB_UE_stats(module_idP,CC_id,rnti);
-  
-  flexran_update_TA(module_idP, UE_id, CC_id);
+/*
+ * Main scheduling functions to support slicing
+ *
+ */
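+
+/*
+ * Rough call flow (sketch): flexran_schedule_ue_spec_default() runs once per
+ * subframe, walks the active slices, reloads any updated per-slice scheduler
+ * via dlsym() and then invokes slice_sched[i](mod_id, i, frame, subframe, ...).
+ * The per-slice entry points below (embb/urllc/mmtc/be) currently all delegate
+ * to flexran_schedule_ue_spec_common(), which filters UEs through
+ * flexran_slice_member() and caps the MCS with flexran_slice_maxmcs().
+ */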
 
-  if (UE_id==0) {
-    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_TIMING_ADVANCE,ue_sched_ctl->ta_update);
-  }
-  nb_rbs_required[CC_id][UE_id]=0;
-  ue_sched_ctl->pre_nb_available_rbs[CC_id] = 0;
-  ue_sched_ctl->dl_pow_off[CC_id] = 2;
-  nb_rbs_required_remaining[CC_id][UE_id] = 0;
+void
+flexran_schedule_ue_spec_default(mid_t   mod_id,
+				 uint32_t      frame,
+				 uint32_t      subframe,
+				 int           *mbsfn_flag,
+				 Protocol__FlexranMessage **dl_info)
+//------------------------------------------------------------------------------
+{
+  int i=0;
+  
+  flexran_agent_mac_create_empty_dl_config(mod_id, dl_info);
+   
+  for (i = 0; i < n_active_slices; i++) {
+    
+    // Load any updated functions
+    if (update_dl_scheduler[i] > 0 ) {
+      slice_sched[i] = dlsym(NULL, dl_scheduler_type[i]); 
+      update_dl_scheduler[i] = 0;
+      update_dl_scheduler_current[i] = 0;
+      slice_percentage_current[i]= slice_percentage[i];
+      total_slice_percentage+=slice_percentage[i];
+      LOG_N(MAC,"update dl scheduler slice %d\n", i);
+    }
+ 
+    // check if the number of slices has changed, and log 
+    if (n_active_slices_current != n_active_slices ){
+      if ((n_active_slices > 0) && (n_active_slices <= MAX_NUM_SLICES)) {
+	LOG_N(MAC,"[eNB %d]frame %d subframe %d: number of active slices has changed: %d-->%d\n",
+	      mod_id, frame, subframe, n_active_slices_current, n_active_slices);
+	n_active_slices_current = n_active_slices;
+      } else {
+	LOG_W(MAC,"invalid number of slices %d, revert to the previous value %d\n",n_active_slices, n_active_slices_current);
+	n_active_slices = n_active_slices_current;
+      }
+    }
+    
+    // check if the slice RB share has changed, and log to the console
+    if (slice_percentage_current[i] != slice_percentage[i]){
+      if ((slice_percentage[i] >= 0.0) && (slice_percentage[i] <= 1.0)){
+	if ((total_slice_percentage - slice_percentage_current[i]  + slice_percentage[i]) <= 1.0) {
+	  total_slice_percentage=total_slice_percentage - slice_percentage_current[i]  + slice_percentage[i];
+	  LOG_N(MAC,"[eNB %d][SLICE %d] frame %d subframe %d: total percentage %f, slice RB percentage has changed: %f-->%f\n",
+		mod_id, i, frame, subframe, total_slice_percentage, slice_percentage_current[i], slice_percentage[i]);
+	  slice_percentage_current[i] = slice_percentage[i];
+	} else {
+	  LOG_W(MAC,"[eNB %d][SLICE %d] invalid total RB share (%f->%f), revert to the previous value (%f->%f)\n",
+		mod_id,i,  
+		total_slice_percentage,
+		total_slice_percentage - slice_percentage_current[i]  + slice_percentage[i],
+		slice_percentage[i],slice_percentage_current[i]);
+	  slice_percentage[i]= slice_percentage_current[i];
 
-#ifdef SF05_LIMIT  
-  switch (N_RBG) {
-  case 6:
-    sf05_lower=0;
-    sf05_upper=5;
-    break;
-  case 8:
-    sf05_lower=2;
-    sf05_upper=5;
-    break;
-  case 13:
-    sf05_lower=4;
-    sf05_upper=7;
-    break;
-  case 17:
-    sf05_lower=7;
-    sf05_upper=9;
-    break;
-  case 25:
-    sf05_lower=11;
-    sf05_upper=13;
-    break;
-  }
-#endif
-  // Initialize Subbands according to VRB map
-  for (i=0; i<N_RBG; i++) {
-    ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 0;
-    rballoc_sub[CC_id][i] = 0;
-#ifdef SF05_LIMIT
-    // for avoiding 6+ PRBs around DC in subframe 0-5 (avoid excessive errors)
+	}
+      } else {
+	LOG_W(MAC,"[eNB %d][SLICE %d] invalid slice RB share, revert to the previous value (%f->%f)\n", mod_id, i, slice_percentage[i], slice_percentage_current[i]);
+	slice_percentage[i]= slice_percentage_current[i];
 
-    if ((subframeP==0 || subframeP==5) && 
-	(i>=sf05_lower && i<=sf05_upper))
-      rballoc_sub[CC_id][i]=1;
-#endif
-    // for SI-RNTI,RA-RNTI and P-RNTI allocations
-    for (j=0;j<RBGsize;j++) {
-      if (vrb_map[j+(i*RBGsize)]!=0)  {
-	rballoc_sub[CC_id][i] = 1;
-	LOG_D(MAC,"Frame %d, subframe %d : vrb %d allocated\n",frameP,subframeP,j+(i*RBGsize));
-	break;
       }
     }
-    LOG_D(MAC,"Frame %d Subframe %d CC_id %d RBG %i : rb_alloc %d\n",frameP,subframeP,CC_id,i,rballoc_sub[CC_id][i]);
-    MIMO_mode_indicator[CC_id][i] = 2;
+  
+    // check if the slice max MCS has changed, and log to the console
+    if (slice_maxmcs_current[i] != slice_maxmcs[i]){
+      if ((slice_maxmcs[i] >= 0) && (slice_maxmcs[i] < 29)){
+	LOG_N(MAC,"[eNB %d][SLICE %d] frame %d subframe %d: slice MAX MCS has changed: %d-->%d\n",
+	      mod_id, i, frame, subframe, slice_maxmcs_current[i], slice_maxmcs[i]);
+	slice_maxmcs_current[i] = slice_maxmcs[i];
+      } else {
+	LOG_W(MAC,"[eNB %d][SLICE %d] invalid slice max mcs %d, revert to the previous value %d\n", mod_id, i, slice_maxmcs[i], slice_maxmcs_current[i]);
+	slice_maxmcs[i]= slice_maxmcs_current[i];
+      }
+    }
+    
+    // check if a new scheduler has been loaded, and log to the console
+    if (update_dl_scheduler_current[i] != update_dl_scheduler[i]){
+      LOG_N(MAC,"[eNB %d][SLICE %d] frame %d subframe %d: DL scheduler for this slice is updated: %s \n",
+	    mod_id, i, frame, subframe, dl_scheduler_type[i]);
+      update_dl_scheduler_current[i] = update_dl_scheduler[i];
+    }
+
+    // Run each enabled slice-specific schedulers one by one
+    //LOG_N(MAC,"[eNB %d]frame %d subframe %d slice %d: calling the scheduler\n", mod_id, frame, subframe,i);
+    slice_sched[i](mod_id, i, frame, subframe, mbsfn_flag,dl_info);
+
   }
+  
 }
 
+uint16_t flexran_nb_rbs_allowed_slice(float rb_percentage, int total_rbs){
+  return  (uint16_t) floor(rb_percentage * total_rbs); 
+}
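+
+/* Example: a slice entitled to 30% of a 50-PRB carrier gets
+ * floor(0.3 * 50) = 15 PRBs per subframe from this helper. */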
 
-void _dlsch_scheduler_pre_processor_allocate (module_id_t   Mod_id,
-					      int           UE_id,
-					      uint8_t       CC_id,
-					      int           N_RBG,
-					      int           transmission_mode,
-					      int           min_rb_unit,
-					      uint8_t       N_RB_DL,
-					      uint16_t      nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
-					      uint16_t      nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
-					      unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
-					      unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX]) {
+int flexran_slice_maxmcs(int slice_id) {
+  return slice_maxmcs[slice_id];
+}
+
+int flexran_slice_member(int UE_id, int slice_id){
+  // group membership definition
+  int slice_member = 0 ;
+  
+  if ((slice_id < 0) || (slice_id >= n_active_slices))
+    LOG_W(MAC,"out of range slice id %d\n",slice_id);
+
+  switch (slicing_strategy) {
+  case SLICE_MASK:
+    switch (slice_id){
+    case 0:
+      if (SLICE0_MASK&UE_id){
+	slice_member=1;
+      }
+      break;
+    case 1:
+      if (SLICE1_MASK&UE_id){
+	slice_member=1;
+      }
+      break;
+    case 2:
+      if (SLICE2_MASK&UE_id){
+	slice_member=1;
+      }
+       break;
+    case 3:
+      if (SLICE3_MASK&UE_id){
+	slice_member=1;
+      }
+      break;
+    default :
+      LOG_W(MAC,"unknown slice_id %d\n", slice_id);
+      break;
+      
+    }
+    break;
+  case UEID_TO_SLICEID:
+  default:
+    if ((UE_id % n_active_slices) == slice_id){
+      slice_member= 1; // this ue is a member of this slice
+    }
+    break;
+  }
+  
+  return slice_member;
+}
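+
+/* Membership sketch: with the default UEID_TO_SLICEID policy and two active
+ * slices, even UE ids (0,2,4,...) fall into slice 0 and odd ids into slice 1;
+ * with SLICE_MASK, membership is read from the per-slice SLICEx_MASK bitmaps. */
+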
+/* More aggressive RB and MCS allocation, medium priority, driven by the traffic QCI */
+void
+flexran_schedule_ue_spec_embb(mid_t         mod_id,
+			      int           slice_id, 
+			      uint32_t      frame,
+			      uint32_t      subframe,
+			      int           *mbsfn_flag,
+			      Protocol__FlexranMessage **dl_info)
+
+{
+  flexran_schedule_ue_spec_common(mod_id,
+				  slice_id,
+				  frame,
+				  subframe,
+				  mbsfn_flag,
+				  dl_info);
+  
+}
+/* More conservative MCS allocation, high priority, driven by the traffic QCI */
+void
+flexran_schedule_ue_spec_urllc(mid_t         mod_id,
+			       int           slice_id, 
+			       uint32_t      frame,
+			       uint32_t      subframe,
+			       int           *mbsfn_flag,
+			       Protocol__FlexranMessage **dl_info)
+
+{
+  flexran_schedule_ue_spec_common(mod_id,
+				  slice_id,
+				  frame,
+				  subframe,
+				  mbsfn_flag,
+				  dl_info);
+  
+}
+/* Constant RB allocation with low MCS, low priority, subject to the UE capabilities */
+void
+flexran_schedule_ue_spec_mmtc(mid_t         mod_id,
+			      int           slice_id, 
+			      uint32_t      frame,
+			      uint32_t      subframe,
+			      int           *mbsfn_flag,
+			      Protocol__FlexranMessage **dl_info)
+  
+{
+  
+  flexran_schedule_ue_spec_common(mod_id,
+				  slice_id,
+				  frame,
+				  subframe,
+				  mbsfn_flag,
+				  dl_info);
+  
+}
+/* Regular RB and MCS allocation with low priority */
+void
+flexran_schedule_ue_spec_be(mid_t         mod_id,
+			    int           slice_id, 
+			    uint32_t      frame,
+			    uint32_t      subframe,
+			    int           *mbsfn_flag,
+			    Protocol__FlexranMessage **dl_info)
+  
+{
+  
+  flexran_schedule_ue_spec_common(mod_id,
+				  slice_id,
+				  frame,
+				  subframe,
+				  mbsfn_flag,
+				  dl_info);
+  
+}
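+
+/* The four per-slice entry points above differ only in the intended policy
+ * described in their comments; for now each simply forwards its slice_id to
+ * flexran_schedule_ue_spec_common(). */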
+
+//------------------------------------------------------------------------------
+void
+flexran_schedule_ue_spec_common(mid_t   mod_id,
+				int           slice_id, 
+				uint32_t      frame,
+				uint32_t      subframe,
+				int           *mbsfn_flag,
+				Protocol__FlexranMessage **dl_info)
+//------------------------------------------------------------------------------
+{
+  uint8_t               CC_id;
+  int                   UE_id;
+  int                   N_RBG[MAX_NUM_CCs];
+  unsigned char         aggregation;
+  mac_rlc_status_resp_t rlc_status;
+  unsigned char         header_len = 0, ta_len = 0;
+  uint16_t              nb_rb, nb_rb_temp, total_nb_available_rb[MAX_NUM_CCs], nb_available_rb;
+  uint16_t              TBS, j, rnti;
+  unsigned char         round            = 0;
+  unsigned char         harq_pid         = 0;
+  uint16_t              sdu_length_total = 0;
+  int                   mcs, mcs_tmp;
+  uint16_t              min_rb_unit[MAX_NUM_CCs];
+  eNB_MAC_INST         *eNB      = &eNB_mac_inst[mod_id];
+  /* TODO: Must move the helper structs to scheduler implementation */
+  UE_list_t            *UE_list  = &eNB->UE_list;
+  int32_t                 normalized_rx_power, target_rx_power;
+  int32_t                 tpc = 1;
+  static int32_t          tpc_accumulated=0;
+  UE_sched_ctrl           *ue_sched_ctl;
+
+  Protocol__FlexDlData *dl_data[NUM_MAX_UE];
+  int num_ues_added = 0;
+  int channels_added = 0;
+
+  Protocol__FlexDlDci *dl_dci;
+  Protocol__FlexRlcPdu *rlc_pdus[11];
+  uint32_t ce_flags = 0;
+
+  uint8_t            rballoc_sub[25];
   int i;
-  UE_list_t *UE_list=&eNB_mac_inst[Mod_id].UE_list;
-  UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
+  uint32_t data_to_request;
+  uint32_t dci_tbs;
+  uint8_t ue_has_transmission = 0;
+  uint32_t ndi;
+  
+  flexran_agent_mac_create_empty_dl_config(mod_id, dl_info);
+  
+  if (UE_list->head==-1) {
+    return;
+  }
+  
+  start_meas(&eNB->schedule_dlsch);
+  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH,VCD_FUNCTION_IN);
 
-  for(i=0; i<N_RBG; i++) {
+  //weight = get_ue_weight(module_idP,UE_id);
+  aggregation = 2; // set to the maximum aggregation level
+
+  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
+    min_rb_unit[CC_id] = get_min_rb_unit(mod_id, CC_id);
+    // get number of PRBs less those used by common channels
+    total_nb_available_rb[CC_id] = flexran_get_N_RB_DL(mod_id, CC_id);
+    for (i=0;i < flexran_get_N_RB_DL(mod_id, CC_id); i++)
+      if (eNB->common_channels[CC_id].vrb_map[i] != 0)
+	total_nb_available_rb[CC_id]--;
+    
+    N_RBG[CC_id] = flexran_get_N_RBG(mod_id, CC_id);
+
+    // store the global enb stats:
+    eNB->eNB_stats[CC_id].num_dlactive_UEs =  UE_list->num_UEs;
+    eNB->eNB_stats[CC_id].available_prbs =  total_nb_available_rb[CC_id];
+    eNB->eNB_stats[CC_id].total_available_prbs +=  total_nb_available_rb[CC_id];
+    eNB->eNB_stats[CC_id].dlsch_bytes_tx=0;
+    eNB->eNB_stats[CC_id].dlsch_pdus_tx=0;
+  }
+
+   VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR,VCD_FUNCTION_IN);
+
+   start_meas(&eNB->schedule_dlsch_preprocessor);
+   _dlsch_scheduler_pre_processor(mod_id,
+				  slice_id,
+				  frame,
+				  subframe,
+				  N_RBG,
+				  mbsfn_flag);
+   stop_meas(&eNB->schedule_dlsch_preprocessor);
+   VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR,VCD_FUNCTION_OUT);
+
+   for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
+    LOG_D(MAC, "doing schedule_ue_spec for CC_id %d\n",CC_id);
+
+    if (mbsfn_flag[CC_id]>0)
+      continue;
+
+    for (UE_id=UE_list->head; UE_id>=0; UE_id=UE_list->next[UE_id]) {
+      rnti = flexran_get_ue_crnti(mod_id, UE_id);
+      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
+
+      if (flexran_slice_member(UE_id, slice_id) == 0)
+      	continue;
+      
+      if (rnti==NOT_A_RNTI) {
+        LOG_D(MAC,"Cannot find rnti for UE_id %d (num_UEs %d)\n", UE_id,UE_list->num_UEs);
+        // mac_xface->macphy_exit("Cannot find rnti for UE_id");
+        continue;
+      }
+
+      if (flexran_get_ue_crnti(mod_id, UE_id) == NOT_A_RNTI) {
+        LOG_D(MAC,"[eNB] Cannot find UE\n");
+        //  mac_xface->macphy_exit("[MAC][eNB] Cannot find eNB_UE_stats\n");
+        continue;
+      }
+
+      if ((ue_sched_ctl->pre_nb_available_rbs[CC_id] == 0) ||  // no RBs allocated 
+	  CCE_allocation_infeasible(mod_id, CC_id, 0, subframe, aggregation, rnti)) {
+        LOG_D(MAC,"[eNB %d] Frame %d : no RB allocated for UE %d on CC_id %d: continue \n",
+              mod_id, frame, UE_id, CC_id);
+        //if(mac_xface->get_transmission_mode(module_idP,rnti)==5)
+        continue; // to next user (there might be RBs available for other UEs in TM5)
+        // else
+        //  break;
+      }
+
+      if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD)  {
+        set_ue_dai (subframe,
+                    flexran_get_subframe_assignment(mod_id, CC_id),
+                    UE_id,
+                    CC_id,
+                    UE_list);
+        //TODO: update UL DAI after DLSCH scheduling
+        //set_ul_DAI(mod_id, UE_id, CC_id, frame, subframe,frame_parms);
+      }
+
+      channels_added = 0;
+
+      // Past this point the UE will be scheduled, so create its DL data entry
+      dl_data[num_ues_added] = (Protocol__FlexDlData *) malloc(sizeof(Protocol__FlexDlData));
+      protocol__flex_dl_data__init(dl_data[num_ues_added]);
+      dl_data[num_ues_added]->has_rnti = 1;
+      dl_data[num_ues_added]->rnti = rnti;
+      dl_data[num_ues_added]->n_rlc_pdu = 0;
+      dl_data[num_ues_added]->has_serv_cell_index = 1;
+      dl_data[num_ues_added]->serv_cell_index = CC_id;
+      
+      nb_available_rb = ue_sched_ctl->pre_nb_available_rbs[CC_id];
+      flexran_get_harq(mod_id, CC_id, UE_id, frame, subframe, &harq_pid, &round);
+      sdu_length_total=0;
+      mcs = cqi_to_mcs[flexran_get_ue_wcqi(mod_id, UE_id)];
+      mcs = cmin(mcs,flexran_slice_maxmcs(slice_id));
+#ifdef EXMIMO
+
+       if (mac_xface->get_transmission_mode(mod_id, CC_id, rnti) == 5) {
+	  mcs = cqi_to_mcs[flexran_get_ue_wcqi(mod_id, UE_id)];
+	  mcs =  cmin(mcs,16);
+       }
+
+#endif
+
+      // initializing the rb allocation indicator for each UE
+       for(j = 0; j < flexran_get_N_RBG(mod_id, CC_id); j++) {
+        UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = 0;
+	rballoc_sub[j] = 0;
+      }
+
+      /* LOG_D(MAC,"[eNB %d] Frame %d: Scheduling UE %d on CC_id %d (rnti %x, harq_pid %d, round %d, rb %d, cqi %d, mcs %d, rrc %d)\n", */
+      /*       mod_id, frame, UE_id, CC_id, rnti, harq_pid, round, nb_available_rb, */
+      /*       eNB_UE_stats->DL_cqi[0], mcs, */
+      /*       UE_list->eNB_UE_stats[CC_id][UE_id].rrc_status); */
+
+      dl_dci = (Protocol__FlexDlDci*) malloc(sizeof(Protocol__FlexDlDci));
+      protocol__flex_dl_dci__init(dl_dci);
+      dl_data[num_ues_added]->dl_dci = dl_dci;
+
+      
+      dl_dci->has_rnti = 1;
+      dl_dci->rnti = rnti;
+      dl_dci->has_harq_process = 1;
+      dl_dci->harq_process = harq_pid;
+      
+      /* process retransmission  */
+
+      if (round > 0) {
+
+	if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
+	  UE_list->UE_template[CC_id][UE_id].DAI++;
+	  update_ul_dci(mod_id, CC_id, rnti, UE_list->UE_template[CC_id][UE_id].DAI);
+	  LOG_D(MAC,"DAI update: CC_id %d subframeP %d: UE %d, DAI %d\n",
+		CC_id, subframe,UE_id,UE_list->UE_template[CC_id][UE_id].DAI);
+	}
+
+	mcs = UE_list->UE_template[CC_id][UE_id].mcs[harq_pid];
+
+	  // get freq_allocation
+	nb_rb = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
+	  
+	/*TODO: Must add this to FlexRAN agent API */
+	dci_tbs = mac_xface->get_TBS_DL(mcs, nb_rb);
+
+	if (nb_rb <= nb_available_rb) {
+	  
+	  if(nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) {
+	    for(j = 0; j < flexran_get_N_RBG(mod_id, CC_id); j++) { // for indicating the rballoc for each sub-band
+	      UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
+            }
+	  } else {
+	    nb_rb_temp = nb_rb;
+	    j = 0;
+
+	    while((nb_rb_temp > 0) && (j < flexran_get_N_RBG(mod_id, CC_id))) {
+	      if(ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) {
+		UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
+		
+		if((j == flexran_get_N_RBG(mod_id, CC_id) - 1) &&
+		   ((flexran_get_N_RB_DL(mod_id, CC_id) == 25)||
+		    (flexran_get_N_RB_DL(mod_id, CC_id) == 50))) {
+		  nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id]+1;
+		} else {
+		  nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id];
+		}
+	      }
+	      j = j + 1;
+	    }
+	  }
+
+	  nb_available_rb -= nb_rb;
+	  aggregation = process_ue_cqi(mod_id, UE_id);
+	  
+	  PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = nb_rb;
+	  PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].dl_pow_off = ue_sched_ctl->dl_pow_off[CC_id];
+	  
+	  for(j=0; j < flexran_get_N_RBG(mod_id, CC_id); j++) {
+	    PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
+	    rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
+	  }
+
+	  // Keep the old NDI, do not toggle
+	  ndi = UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
+	  tpc = UE_list->UE_template[CC_id][UE_id].oldTPC[harq_pid];
+	  UE_list->UE_template[CC_id][UE_id].mcs[harq_pid] = mcs;
+
+	  ue_has_transmission = 1;
+	  num_ues_added++;
+	} else {
+	  LOG_D(MAC,"[eNB %d] Frame %d CC_id %d : don't schedule UE %d, its retransmission takes more resources than we have\n",
+                mod_id, frame, CC_id, UE_id);
+	  ue_has_transmission = 0;
+	}
+	//End of retransmission
+      } else { /* This is a potentially new SDU opportunity */
+	rlc_status.bytes_in_buffer = 0;
+        // Now check RLC information to compute number of required RBs
+        // get maximum TBS size for RLC request
+        //TBS = mac_xface->get_TBS(eNB_UE_stats->DL_cqi[0]<<1,nb_available_rb);
+        TBS = mac_xface->get_TBS_DL(mcs, nb_available_rb);
+	dci_tbs = TBS;
+
+        // check first for RLC data on DCCH
+        // add the length for  all the control elements (timing adv, drx, etc) : header + payload
+
+	ta_len = (ue_sched_ctl->ta_update!=0) ? 2 : 0;
+	
+	dl_data[num_ues_added]->n_ce_bitmap = 2;
+	dl_data[num_ues_added]->ce_bitmap = (uint32_t *) malloc(sizeof(uint32_t) * 2);
+	
+	if (ta_len > 0) {
+	  ce_flags |= PROTOCOL__FLEX_CE_TYPE__FLPCET_TA;
+	}
+
+	/*TODO: Add other flags if DRX and other CE are required*/
+	
+	// Add the control element flags to the flexran message
+	dl_data[num_ues_added]->ce_bitmap[0] = ce_flags;
+	dl_data[num_ues_added]->ce_bitmap[1] = ce_flags;
+
+	// TODO : Need to prioritize DRBs
+	// Loop through the UE logical channels (DCCH, DCCH1, DTCH for now)
+	for (j = 1; j < NB_RB_MAX; j++) {
+	  header_len+=3;
+
+	  // Need to see if we have space for data from this channel
+	  if (dci_tbs - ta_len - header_len - sdu_length_total > 0) {
+	     LOG_D(MAC, "[TEST]Requested %d bytes from RLC buffer on channel %d during first call\n", dci_tbs-ta_len-header_len, j);
+	     //If we have space, we need to see how much data we can request at most (if any available)
+	     rlc_status = mac_rlc_status_ind(mod_id,
+					     rnti,
+					     mod_id,
+					     frame,
+					     ENB_FLAG_YES,
+					     MBMS_FLAG_NO,
+					     j,
+					     (dci_tbs-ta_len-header_len)); // transport block set size
+
+	     //If data are available in channel j
+	     if (rlc_status.bytes_in_buffer > 0) {
+	       LOG_D(MAC, "[TEST]Have %d bytes in DCCH buffer during first call\n", rlc_status.bytes_in_buffer);
+	       //Fill in as much as possible
+	       data_to_request = cmin(dci_tbs-ta_len-header_len, rlc_status.bytes_in_buffer);
+	       if (data_to_request < 128) { //The header will be one byte less
+		 header_len--;
+	       }
+	       /* if (j == 1 || j == 2) {
+		  data_to_request+=0; 
+		  } */
+	       LOG_D(MAC, "[TEST]Will request %d from channel %d\n", data_to_request, j);
+	       rlc_pdus[channels_added] = (Protocol__FlexRlcPdu *) malloc(sizeof(Protocol__FlexRlcPdu));
+	       protocol__flex_rlc_pdu__init(rlc_pdus[channels_added]);
+	       rlc_pdus[channels_added]->n_rlc_pdu_tb = 2;
+	       rlc_pdus[channels_added]->rlc_pdu_tb = (Protocol__FlexRlcPduTb **) malloc(sizeof(Protocol__FlexRlcPduTb *) * 2);
+	       rlc_pdus[channels_added]->rlc_pdu_tb[0] = (Protocol__FlexRlcPduTb *) malloc(sizeof(Protocol__FlexRlcPduTb));
+	       protocol__flex_rlc_pdu_tb__init(rlc_pdus[channels_added]->rlc_pdu_tb[0]);
+	       rlc_pdus[channels_added]->rlc_pdu_tb[0]->has_logical_channel_id = 1;
+	       rlc_pdus[channels_added]->rlc_pdu_tb[0]->logical_channel_id = j;
+	       rlc_pdus[channels_added]->rlc_pdu_tb[0]->has_size = 1;
+	       rlc_pdus[channels_added]->rlc_pdu_tb[0]->size = data_to_request;
+	       rlc_pdus[channels_added]->rlc_pdu_tb[1] = (Protocol__FlexRlcPduTb *) malloc(sizeof(Protocol__FlexRlcPduTb));
+	       protocol__flex_rlc_pdu_tb__init(rlc_pdus[channels_added]->rlc_pdu_tb[1]);
+	       rlc_pdus[channels_added]->rlc_pdu_tb[1]->has_logical_channel_id = 1;
+	       rlc_pdus[channels_added]->rlc_pdu_tb[1]->logical_channel_id = j;
+	       rlc_pdus[channels_added]->rlc_pdu_tb[1]->has_size = 1;
+	       rlc_pdus[channels_added]->rlc_pdu_tb[1]->size = data_to_request;
+	       dl_data[num_ues_added]->n_rlc_pdu++;
+	       channels_added++;
+	       //Set this to the max value that we might request
+	       sdu_length_total += data_to_request;
+	     } else {
+	       //Take back the assumption of a header for this channel
+	       header_len -= 3;
+	     } //End rlc_status.bytes_in_buffer <= 0
+	  } //end of if dci_tbs - ta_len - header_len > 0
+	} // End of iterating the logical channels
+	
+	// Add rlc_pdus to the dl_data message
+	dl_data[num_ues_added]->rlc_pdu = (Protocol__FlexRlcPdu **) malloc(sizeof(Protocol__FlexRlcPdu *) *
+									  dl_data[num_ues_added]->n_rlc_pdu);
+	for (i = 0; i < dl_data[num_ues_added]->n_rlc_pdu; i++) {
+	  dl_data[num_ues_added]->rlc_pdu[i] = rlc_pdus[i];
+	}
+	
+	// there is a payload
+        if (( dl_data[num_ues_added]->n_rlc_pdu > 0)) {
+	  // Now compute number of required RBs for total sdu length
+          // Assume RAH format 2
+          // adjust  header lengths
+
+	  if (header_len == 2 || header_len == 3) { //Only one SDU, remove length field
+	    header_len = 1;
+	  } else { //Remove length field from the last SDU
+	    header_len--;
+	  }
+
+	  mcs_tmp = mcs;
+	  if (mcs_tmp == 0) {
+            nb_rb = 4;  // don't let the TBS get too small
+          } else {
+            nb_rb=min_rb_unit[CC_id];
+          }
+
+	  LOG_D(MAC,"[TEST]The initial number of resource blocks was %d\n", nb_rb);
+	  LOG_D(MAC,"[TEST] The initial mcs was %d\n", mcs_tmp);
+
+	  TBS = mac_xface->get_TBS_DL(mcs_tmp, nb_rb);
+	  LOG_D(MAC,"[TEST]The TBS during rate matching was %d\n", TBS);
+
+	  while (TBS < (sdu_length_total + header_len + ta_len))  {
+            nb_rb += min_rb_unit[CC_id];  //
+	    LOG_D(MAC, "[TEST]Had to increase the number of RBs\n");
+            if (nb_rb > nb_available_rb) { // if we've gone beyond the maximum number of RBs
+              // (can happen if N_RB_DL is odd)
+              TBS = mac_xface->get_TBS_DL(mcs_tmp, nb_available_rb);
+              nb_rb = nb_available_rb;
+              break;
+            }
+
+            TBS = mac_xface->get_TBS_DL(mcs_tmp, nb_rb);
+          }
+
+	  if(nb_rb == ue_sched_ctl->pre_nb_available_rbs[CC_id]) {
+	    LOG_D(MAC, "[TEST]We had the exact number of rbs. Time to fill the rballoc subband\n");
+            for(j = 0; j < flexran_get_N_RBG(mod_id, CC_id); j++) { // for indicating the rballoc for each sub-band
+              UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
+            }
+          } else {
+	    nb_rb_temp = nb_rb;
+            j = 0;
+	    LOG_D(MAC, "[TEST]Will only partially fill the bitmap\n");
+	    while((nb_rb_temp > 0) && (j < flexran_get_N_RBG(mod_id, CC_id))) {
+              if(ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 1) {
+                UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j] = ue_sched_ctl->rballoc_sub_UE[CC_id][j];
+                if ((j == flexran_get_N_RBG(mod_id, CC_id) - 1) &&
+                    ((flexran_get_N_RB_DL(mod_id, CC_id) == 25)||
+                     (flexran_get_N_RB_DL(mod_id, CC_id) == 50))) {
+                  nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id] + 1;
+                } else {
+                  nb_rb_temp = nb_rb_temp - min_rb_unit[CC_id];
+                }
+              }
+              j = j+1;
+            }
+	  }
+	  
+	  PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = nb_rb;
+          PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].dl_pow_off = ue_sched_ctl->dl_pow_off[CC_id];
+
+	  for(j = 0; j < flexran_get_N_RBG(mod_id, CC_id); j++) {
+            PHY_vars_eNB_g[mod_id][CC_id]->mu_mimo_mode[UE_id].rballoc_sub[j] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][j];
+          }
+
+	  // decrease mcs until TBS falls below required length
+          while ((TBS > (sdu_length_total + header_len + ta_len)) && (mcs_tmp > 0)) {
+            mcs_tmp--;
+            TBS = mac_xface->get_TBS_DL(mcs_tmp, nb_rb);
+          }
+
+	  // if we have decreased too much or we don't have enough RBs, increase MCS
+          while ((TBS < (sdu_length_total + header_len + ta_len)) &&
+		 (((ue_sched_ctl->dl_pow_off[CC_id] > 0) && (mcs_tmp < 28)) || ((ue_sched_ctl->dl_pow_off[CC_id] == 0) && (mcs_tmp <= 15)))) {
+            mcs_tmp++;
+            TBS = mac_xface->get_TBS_DL(mcs_tmp, nb_rb);
+          }
+
+	  dci_tbs = TBS;
+	  mcs = mcs_tmp;
+
+	  aggregation = process_ue_cqi(mod_id,UE_id);
+	  dl_dci->has_aggr_level = 1;
+	  dl_dci->aggr_level = aggregation;
+	  
+          UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid] = nb_rb;
+
+	  if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
+            UE_list->UE_template[CC_id][UE_id].DAI++;
+            //  printf("DAI update: subframeP %d: UE %d, DAI %d\n",subframeP,UE_id,UE_list->UE_template[CC_id][UE_id].DAI);
+	    //#warning only for 5MHz channel
+            update_ul_dci(mod_id, CC_id, rnti, UE_list->UE_template[CC_id][UE_id].DAI);
+          }
+
+	  // do PUCCH power control
+          // this is the normalized RX power
+	  normalized_rx_power = flexran_get_p0_pucch_dbm(mod_id,UE_id, CC_id); //eNB_UE_stats->Po_PUCCH_dBm; 
+	  target_rx_power = flexran_get_p0_nominal_pucch(mod_id, CC_id) + 10; //mac_xface->get_target_pucch_rx_power(mod_id, CC_id) + 10;
+
+	  // this assumes accumulated tpc
+	  // make sure that we are only sending a tpc update once a frame, otherwise the control loop will freak out
+	  int32_t framex10psubframe = UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame*10+UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe;
+
+	  if (((framex10psubframe+10)<=(frame*10+subframe)) || //normal case
+	      ((framex10psubframe>(frame*10+subframe)) && (((10240-framex10psubframe+frame*10+subframe)>=10)))) //frame wrap-around
+	    if (flexran_get_p0_pucch_status(mod_id, UE_id, CC_id) == 1) {
+	      flexran_update_p0_pucch(mod_id, UE_id, CC_id);
+	      
+	      UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_frame = frame;
+	      UE_list->UE_template[CC_id][UE_id].pucch_tpc_tx_subframe = subframe;
+	      if (normalized_rx_power>(target_rx_power+1)) {
+		tpc = 0; //-1
+		tpc_accumulated--;
+	      } else if (normalized_rx_power<(target_rx_power-1)) {
+		tpc = 2; //+1
+		tpc_accumulated++;
+	      } else {
+		tpc = 1; //0
+	      }
+	      LOG_D(MAC,"[eNB %d] DLSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, accumulated %d, normalized/target rx power %d/%d\n",
+		    mod_id, frame, subframe, harq_pid, tpc,
+		    tpc_accumulated, normalized_rx_power, target_rx_power);
+	    } // Po_PUCCH has been updated 
+	    else {
+	      tpc = 1; //0
+	    } // time to do TPC update 
+	  else {
+	    tpc = 1; //0
+	  }
 
-    if((rballoc_sub[CC_id][i] == 0)           &&
-        (ue_sched_ctl->rballoc_sub_UE[CC_id][i] == 0) &&
-        (nb_rbs_required_remaining[CC_id][UE_id]>0)   &&
-        (ue_sched_ctl->pre_nb_available_rbs[CC_id] < nb_rbs_required[CC_id][UE_id])) {
+	  for(i=0; i<PHY_vars_eNB_g[mod_id][CC_id]->frame_parms.N_RBG; i++) {
+	    rballoc_sub[i] = UE_list->UE_template[CC_id][UE_id].rballoc_subband[harq_pid][i];
+          }	
 
-      // if this UE is not scheduled for TM5
-      if (ue_sched_ctl->dl_pow_off[CC_id] != 0 )  {
+	   // Toggle NDI
+          LOG_D(MAC,"CC_id %d Frame %d, subframeP %d: Toggling Format1 NDI for UE %d (rnti %x/%d) oldNDI %d\n",
+                CC_id, frame, subframe, UE_id,
+                UE_list->UE_template[CC_id][UE_id].rnti,harq_pid, UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]);
+          UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid]= 1 - UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
+	  ndi =  UE_list->UE_template[CC_id][UE_id].oldNDI[harq_pid];
+	  
+	  UE_list->UE_template[CC_id][UE_id].mcs[harq_pid] = mcs;
+	  UE_list->UE_template[CC_id][UE_id].oldTPC[harq_pid] = tpc;
 
-	if ((i == N_RBG-1) && ((N_RB_DL == 25) || (N_RB_DL == 50))) {
-	  rballoc_sub[CC_id][i] = 1;
-	  ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 1;
-	  MIMO_mode_indicator[CC_id][i] = 1;
-	  if (transmission_mode == 5 ) {
+	  // Increase the pointer for the number of scheduled UEs
+	  num_ues_added++;
+	  ue_has_transmission = 1;
+	}  else { // There is no data from RLC or MAC header, so don't schedule
+	  ue_has_transmission = 0;
+	}
+      } // End of new scheduling
+      
+      // If there is a transmission or a retransmission
+      if (ue_has_transmission) {
+	switch (mac_xface->get_transmission_mode(mod_id, CC_id, rnti)) {
+	case 1:
+	case 2:
+	default:
+	  dl_dci->has_res_alloc = 1;
+	  dl_dci->res_alloc = 0;
+	  dl_dci->has_vrb_format = 1;
+	  dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
+	  dl_dci->has_format = 1;
+	  dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_1;
+	  dl_dci->has_rb_bitmap = 1;
+	  dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
+	  dl_dci->has_rb_shift = 1;
+	  dl_dci->rb_shift = 0;
+	  dl_dci->n_ndi = 1;
+	  dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
+	  dl_dci->ndi[0] = ndi;
+	  dl_dci->n_rv = 1;
+	  dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
+	  dl_dci->rv[0] = round & 3;
+	  dl_dci->has_tpc = 1;
+	  dl_dci->tpc = tpc;
+	  dl_dci->n_mcs = 1;
+	  dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
+	  dl_dci->mcs[0] = mcs;
+	  dl_dci->n_tbs_size = 1;
+	  dl_dci->tbs_size = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_tbs_size);
+	  dl_dci->tbs_size[0] = dci_tbs;
+	  if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
+	    dl_dci->has_dai = 1;
+	    dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
+	  }
+	  break;
+	case 3:
+	  dl_dci->has_res_alloc = 1;
+	  dl_dci->res_alloc = 0;
+	  dl_dci->has_vrb_format = 1;
+	  dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
+	  dl_dci->has_format = 1;
+	  dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_2A;
+	  dl_dci->has_rb_bitmap = 1;
+	  dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
+	  dl_dci->has_rb_shift = 1;
+	  dl_dci->rb_shift = 0;
+	  dl_dci->n_ndi = 2;
+	  dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
+	  dl_dci->ndi[0] = ndi;
+	  dl_dci->ndi[1] = ndi;
+	  dl_dci->n_rv = 2;
+	  dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
+	  dl_dci->rv[0] = round & 3;
+	  dl_dci->rv[1] = round & 3;
+	  dl_dci->has_tpc = 1;
+	  dl_dci->tpc = tpc;
+	  dl_dci->n_mcs = 2;
+	  dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
+	  dl_dci->mcs[0] = mcs;
+	  dl_dci->mcs[1] = mcs;
+	  dl_dci->n_tbs_size = 2;
+	  dl_dci->tbs_size = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_tbs_size);
+	  dl_dci->tbs_size[0] = dci_tbs;
+	  dl_dci->tbs_size[1] = dci_tbs;
+	  if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
+	    dl_dci->has_dai = 1;
+	    dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
+	  }
+	  break;
+	case 4:
+	  dl_dci->has_res_alloc = 1;
+	  dl_dci->res_alloc = 0;
+	  dl_dci->has_vrb_format = 1;
+	  dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
+	  dl_dci->has_format = 1;
+	  dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_2A;
+	  dl_dci->has_rb_bitmap = 1;
+	  dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
+	  dl_dci->has_rb_shift = 1;
+	  dl_dci->rb_shift = 0;
+	  dl_dci->n_ndi = 2;
+	  dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
+	  dl_dci->ndi[0] = ndi;
+	  dl_dci->ndi[1] = ndi;
+	  dl_dci->n_rv = 2;
+	  dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
+	  dl_dci->rv[0] = round & 3;
+	  dl_dci->rv[1] = round & 3;
+	  dl_dci->has_tpc = 1;
+	  dl_dci->tpc = tpc;
+	  dl_dci->n_mcs = 2;
+	  dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
+	  dl_dci->mcs[0] = mcs;
+	  dl_dci->mcs[1] = mcs;
+	  dl_dci->n_tbs_size = 2;
+	  dl_dci->tbs_size = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_tbs_size);
+	  dl_dci->tbs_size[0] = dci_tbs;
+	  dl_dci->tbs_size[1] = dci_tbs;
+	  if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
+	    dl_dci->has_dai = 1;
+	    dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
+	  }
+	  break;
+	case 5:
+	  dl_dci->has_res_alloc = 1;
+	  dl_dci->res_alloc = 0;
+	  dl_dci->has_vrb_format = 1;
+	  dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
+	  dl_dci->has_format = 1;
+	  dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_1D;
+	  dl_dci->has_rb_bitmap = 1;
+	  dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
+	  dl_dci->has_rb_shift = 1;
+	  dl_dci->rb_shift = 0;
+	  dl_dci->n_ndi = 1;
+	  dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
+	  dl_dci->ndi[0] = ndi;
+	  dl_dci->n_rv = 1;
+	  dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
+	  dl_dci->rv[0] = round & 3;
+	  dl_dci->has_tpc = 1;
+	  dl_dci->tpc = tpc;
+	  dl_dci->n_mcs = 1;
+	  dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
+	  dl_dci->mcs[0] = mcs;
+	  dl_dci->n_tbs_size = 1;
+	  dl_dci->tbs_size = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_tbs_size);
+	  dl_dci->tbs_size[0] = dci_tbs;
+	  if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
+	    dl_dci->has_dai = 1;
+	    dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
+	  }
+	  
+	  if(ue_sched_ctl->dl_pow_off[CC_id] == 2) {
 	    ue_sched_ctl->dl_pow_off[CC_id] = 1;
-	  }   
-	  nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - min_rb_unit+1;
-          ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit - 1;
-        } else {
-	  if (nb_rbs_required_remaining[CC_id][UE_id] >=  min_rb_unit){
-	    rballoc_sub[CC_id][i] = 1;
-	    ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 1;
-	    MIMO_mode_indicator[CC_id][i] = 1;
-	    if (transmission_mode == 5 ) {
-	      ue_sched_ctl->dl_pow_off[CC_id] = 1;
-	    }
-	    nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - min_rb_unit;
-	    ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit;
 	  }
+	  
+	  dl_dci->has_dl_power_offset = 1;
+	  dl_dci->dl_power_offset = ue_sched_ctl->dl_pow_off[CC_id];
+	  dl_dci->has_precoding_info = 1;
+	  dl_dci->precoding_info = 5; // Is this right??
+	  
+	  break;
+	case 6:
+	  dl_dci->has_res_alloc = 1;
+	  dl_dci->res_alloc = 0;
+	  dl_dci->has_vrb_format = 1;
+	  dl_dci->vrb_format = PROTOCOL__FLEX_VRB_FORMAT__FLVRBF_LOCALIZED;
+	  dl_dci->has_format = 1;
+	  dl_dci->format = PROTOCOL__FLEX_DCI_FORMAT__FLDCIF_1D;
+	  dl_dci->has_rb_bitmap = 1;
+	  dl_dci->rb_bitmap = allocate_prbs_sub(nb_rb, rballoc_sub);
+	  dl_dci->has_rb_shift = 1;
+	  dl_dci->rb_shift = 0;
+	  dl_dci->n_ndi = 1;
+	  dl_dci->ndi = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_ndi);
+	  dl_dci->ndi[0] = ndi;
+	  dl_dci->n_rv = 1;
+	  dl_dci->rv = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_rv);
+	  dl_dci->rv[0] = round & 3;
+	  dl_dci->has_tpc = 1;
+	  dl_dci->tpc = tpc;
+	  dl_dci->n_mcs = 1;
+	  dl_dci->mcs = (uint32_t *) malloc(sizeof(uint32_t) * dl_dci->n_mcs);
+	  dl_dci->mcs[0] = mcs;
+	  if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
+	    dl_dci->has_dai = 1;
+	    dl_dci->dai = (UE_list->UE_template[CC_id][UE_id].DAI-1)&3;
+	  }
+
+	  dl_dci->has_dl_power_offset = 1;
+	  dl_dci->dl_power_offset = ue_sched_ctl->dl_pow_off[CC_id];
+	  dl_dci->has_precoding_info = 1;
+	  dl_dci->precoding_info = 5; // Is this right??
+	  break;
 	}
-      } // dl_pow_off[CC_id][UE_id] ! = 0
-    }
-  }
+      }
+      
+      if (flexran_get_duplex_mode(mod_id, CC_id) == PROTOCOL__FLEX_DUPLEX_MODE__FLDM_TDD) {
+        
+	/* TODO */
+	//set_ul_DAI(mod_id, UE_id, CC_id, frame, subframe, frame_parms);
+      }
+    } // UE_id loop
+   } // CC_id loop
+
+   // Add all the dl_data elements to the flexran message
+   int offset = (*dl_info)->dl_mac_config_msg->n_dl_ue_data;
+   (*dl_info)->dl_mac_config_msg->n_dl_ue_data += num_ues_added;
+   if ( num_ues_added > 0 ){
+     (*dl_info)->dl_mac_config_msg->dl_ue_data = (Protocol__FlexDlData **) realloc( (*dl_info)->dl_mac_config_msg->dl_ue_data,
+										    sizeof(Protocol__FlexDlData *) * ((*dl_info)->dl_mac_config_msg->n_dl_ue_data));
+     if ((*dl_info)->dl_mac_config_msg->dl_ue_data == NULL ){
+       LOG_E(MAC, "Request for memory reallocation failed\n");
+       return;
+     }
+     for (i = 0; i < num_ues_added; i++) {
+       (*dl_info)->dl_mac_config_msg->dl_ue_data[offset+i] = dl_data[i];
+     }
+   }
+      
+   stop_meas(&eNB->schedule_dlsch);
+   VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_SCHEDULE_DLSCH,VCD_FUNCTION_OUT);
 }
diff --git a/openair2/UTIL/ASYNC_IF/link_manager.c b/openair2/UTIL/ASYNC_IF/link_manager.c
index 1c9fb72bfe3f79d9636fd2808799b09af85e51ab..235acef2471eb52c7c2bd6c30f25f79f8b1d93ff 100644
--- a/openair2/UTIL/ASYNC_IF/link_manager.c
+++ b/openair2/UTIL/ASYNC_IF/link_manager.c
@@ -61,9 +61,9 @@ static void *link_manager_sender_thread(void *_manager)
 
   return NULL;
 
-error:
-  LOG_E(MAC, "%s: error\n", __FUNCTION__);
-  return NULL;
+  //error:
+  //LOG_E(MAC, "%s: error\n", __FUNCTION__);
+  //return NULL;
 }
 
 /* that thread receives messages from the link and puts them in the queue */
@@ -118,7 +118,6 @@ link_manager_t *create_link_manager(
   // Make the async interface threads real-time
   //#ifndef LOWLATENCY
   struct sched_param sched_param_recv_thread;
-  struct sched_param sched_param_send_thread;
 
   sched_param_recv_thread.sched_priority = sched_get_priority_max(SCHED_RR) - 1;
   pthread_attr_setschedparam(&attr, &sched_param_recv_thread);
diff --git a/openair2/UTIL/LFDS/liblfds7.0.0/liblfds700/inc/liblfds700.h b/openair2/UTIL/LFDS/liblfds7.0.0/liblfds700/inc/liblfds700.h
index fdda50cefba67824e61c23fd2229129637d324ff..4f83d8d9a0b6ee440276a66f2adf712306221c42 100644
--- a/openair2/UTIL/LFDS/liblfds7.0.0/liblfds700/inc/liblfds700.h
+++ b/openair2/UTIL/LFDS/liblfds7.0.0/liblfds700/inc/liblfds700.h
@@ -4,9 +4,9 @@
   #define LIBLFDS700_H
 
   /***** pragmas on *****/
-  #pragma warning( disable : 4324 )                                          // TRD : 4324 disables MSVC warnings for structure alignment padding due to alignment specifiers
+//  #pragma warning( disable : 4324 )                                          // TRD : 4324 disables MSVC warnings for structure alignment padding due to alignment specifiers
 
-  #pragma prefast( disable : 28113 28182 28183, "blah" )
+//  #pragma prefast( disable : 28113 28182 28183, "blah" )
 
   /***** includes *****/
   #include "liblfds700/lfds700_porting_abstraction_layer_compiler.h"
@@ -25,7 +25,7 @@
   #include "liblfds700/lfds700_stack.h"
 
   /***** pragmas off *****/
-  #pragma warning( default : 4324 )
+//  #pragma warning( default : 4324 )
 
 #endif
 
diff --git a/openair2/UTIL/LFDS/liblfds7.0.0/liblfds700/inc/liblfds700/lfds700_misc.h b/openair2/UTIL/LFDS/liblfds7.0.0/liblfds700/inc/liblfds700/lfds700_misc.h
index 014f72dac533b8cdcf902ef69bae8adbca39a425..79eb3e7e5a7b0c2270acef279cd718fbfd567284 100644
--- a/openair2/UTIL/LFDS/liblfds7.0.0/liblfds700/inc/liblfds700/lfds700_misc.h
+++ b/openair2/UTIL/LFDS/liblfds7.0.0/liblfds700/inc/liblfds700/lfds700_misc.h
@@ -175,7 +175,7 @@ void lfds700_misc_prng_init( struct lfds700_misc_prng_state *ps );
 void lfds700_misc_query( enum lfds700_misc_query query_type, void *query_input, void *query_output );
 
 /***** public in-line functions *****/
-#pragma prefast( disable : 28112, "blah" )
+// #pragma prefast( disable : 28112, "blah" )
 
 static LFDS700_PAL_INLINE void lfds700_misc_force_store()
 {
diff --git a/openair2/UTIL/LFDS/liblfds7.0.0/liblfds700/src/lfds700_ringbuffer/lfds700_ringbuffer_cleanup.c b/openair2/UTIL/LFDS/liblfds7.0.0/liblfds700/src/lfds700_ringbuffer/lfds700_ringbuffer_cleanup.c
index 447c93041e36e8b626d2da09610b94c919efd61d..8ab44e46b5d52040062926c2a5ea93a72b3e2387 100644
--- a/openair2/UTIL/LFDS/liblfds7.0.0/liblfds700/src/lfds700_ringbuffer/lfds700_ringbuffer_cleanup.c
+++ b/openair2/UTIL/LFDS/liblfds7.0.0/liblfds700/src/lfds700_ringbuffer/lfds700_ringbuffer_cleanup.c
@@ -31,7 +31,7 @@ void lfds700_ringbuffer_cleanup( struct lfds700_ringbuffer_state *rs,
 
 
 /****************************************************************************/
-#pragma warning( disable : 4100 )
+//#pragma warning( disable : 4100 )
 
 static void lfds700_ringbuffer_internal_queue_element_cleanup_callback( struct lfds700_queue_state *qs, struct lfds700_queue_element *qe, enum lfds700_misc_flag dummy_element_flag )
 {
@@ -54,14 +54,14 @@ static void lfds700_ringbuffer_internal_queue_element_cleanup_callback( struct l
   return;
 }
 
-#pragma warning( default : 4100 )
+//#pragma warning( default : 4100 )
 
 
 
 
 
 /****************************************************************************/
-#pragma warning( disable : 4100 )
+//#pragma warning( disable : 4100 )
 
 static void lfds700_ringbuffer_internal_freelist_element_cleanup_callback( struct lfds700_freelist_state *fs, struct lfds700_freelist_element *fe )
 {
@@ -82,5 +82,5 @@ static void lfds700_ringbuffer_internal_freelist_element_cleanup_callback( struc
   return;
 }
 
-#pragma warning( default : 4100 )
+//#pragma warning( default : 4100 )