diff --git a/openair2/COMMON/platform_types.h b/openair2/COMMON/platform_types.h
index 3997d579bd309dde38f9bc7801011604c5dccc12..e5379d532d65b422321d7aeb5c44bbb2f381d8eb 100644
--- a/openair2/COMMON/platform_types.h
+++ b/openair2/COMMON/platform_types.h
@@ -69,6 +69,7 @@ typedef uint32_t              frame_t;
 typedef int32_t               sframe_t;
 typedef uint32_t              sub_frame_t;
 typedef uint8_t               module_id_t;
+typedef uint8_t               slice_id_t;
 typedef uint8_t               eNB_index_t;
 typedef uint16_t              ue_id_t;
 typedef int16_t               smodule_id_t;
diff --git a/openair2/LAYER2/MAC/defs.h b/openair2/LAYER2/MAC/defs.h
index 43d8167fe604315290e416dbd8190fbfcffae392..e27ef3648af6f02fa7a8cf189bf718db383d175b 100644
--- a/openair2/LAYER2/MAC/defs.h
+++ b/openair2/LAYER2/MAC/defs.h
@@ -800,7 +800,7 @@ typedef struct {
     ///Contention resolution timer used during random access
     uint8_t mac_ContentionResolutionTimer;
 
-    uint16_t max_allowed_rbs[MAX_NUM_LCID];
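+    /// Maximum number of RBs a UE may receive in each slice; set by the DL pre-processor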
+    uint16_t max_rbs_allowed_slice[MAX_NUM_LCID][MAX_NUM_SLICES];
 
     uint8_t max_mcs[MAX_NUM_LCID];
 
diff --git a/openair2/LAYER2/MAC/eNB_scheduler.c b/openair2/LAYER2/MAC/eNB_scheduler.c
index 14715aff5a13a8f87eef758b37bd4dcc07a5a51c..432915eb8cd3588db81e318a676f9697cd2d2510 100644
--- a/openair2/LAYER2/MAC/eNB_scheduler.c
+++ b/openair2/LAYER2/MAC/eNB_scheduler.c
@@ -744,40 +744,14 @@ eNB_dlsch_ulsch_scheduler(module_id_t module_idP, frame_t frameP,
     copy_ulreq(module_idP, frameP, subframeP);
     // This schedules SRS in subframeP
     schedule_SRS(module_idP, frameP, subframeP);
-#if defined(FLEXRAN_AGENT_SB_IF)
-    if (mac_agent_registered[module_idP]){
-      agent_mac_xface[module_idP]->flexran_agent_schedule_ul_spec(module_idP,frameP,cooperation_flag,0,4, &msg);   
-    }
-    flexran_agent_mac_destroy_ul_config(msg);
-#else
     // This schedules ULSCH in subframeP (dci0)
     schedule_ulsch(module_idP, frameP, subframeP);
-#endif 
     // This schedules UCI_SR in subframeP
     schedule_SR(module_idP, frameP, subframeP);
     // This schedules UCI_CSI in subframeP
     schedule_CSI(module_idP, frameP, subframeP);
-
-#if defined(FLEXRAN_AGENT_SB_IF)
-     if (mac_agent_registered[module_idP]) {                                  
-	  agent_mac_xface[module_idP]->flexran_agent_schedule_ue_spec(
-								      module_idP,
-								      frameP,                  
-								      subframeP,
-								      mbsfn_status,
-								      &msg);
-	  
-	  flexran_apply_scheduling_decisions(module_idP,
-						frameP,
-						subframeP,
-						mbsfn_status,
-						msg);
-	  flexran_agent_mac_destroy_dl_config(msg);
-	}
-#else
     // This schedules DLSCH in subframeP
-    schedule_ue_spec(module_idP, frameP, subframeP, mbsfn_status);
-#endif
+    schedule_dlsch(module_idP, frameP, subframeP, mbsfn_status);
     
     // Allocate CCEs for good after scheduling is done
     for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++)
diff --git a/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c b/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c
index 9dbbb39d0346c563e9acbf208ca114ad3bb49570..0d9f129fa640b8169b623bb0f3a985ce4bc2644b 100644
--- a/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c
+++ b/openair2/LAYER2/MAC/eNB_scheduler_dlsch.c
@@ -57,6 +57,15 @@
 #include "intertask_interface.h"
 #endif
 
+#if defined FLEXRAN_AGENT_SB_IF
+#include "ENB_APP/flexran_agent_defs.h"
+#include "flexran_agent_ran_api.h"
+#include "header.pb-c.h"
+#include "flexran.pb-c.h"
+#include "flexran_agent_mac.h"
+#include <dlfcn.h>
+#endif
+
 #include "T.h"
 
 #define ENABLE_MAC_PAYLOAD_DEBUG
@@ -64,6 +73,36 @@
 
 extern RAN_CONTEXT_t RC;
 
+
+// number of active slices for past and current time
+int n_active_slices = 1;
+int n_active_slices_current = 1;
+
+// RB share for each slice for past and current time
+float avg_slice_percentage=0.25;
+float slice_percentage[MAX_NUM_SLICES] = {1.0, 0.0, 0.0, 0.0};
+float slice_percentage_current[MAX_NUM_SLICES] = {1.0, 0.0, 0.0, 0.0};
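+// sum of the shares of all active slices (must not exceed 1.0), past and current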
+float total_slice_percentage = 0;
+float total_slice_percentage_current = 0;
+
+// MAX MCS for each slice for past and current time
+int slice_maxmcs[MAX_NUM_SLICES] = { 28, 28, 28, 28 };
+int slice_maxmcs_current[MAX_NUM_SLICES] = { 28, 28, 28, 28 };
+
+int update_dl_scheduler[MAX_NUM_SLICES] = { 1, 1, 1, 1 };
+int update_dl_scheduler_current[MAX_NUM_SLICES] = { 1, 1, 1, 1 };
+
+// name of available scheduler
+char *dl_scheduler_type[MAX_NUM_SLICES] =
+  { "schedule_ue_spec",
+    "schedule_ue_spec",
+    "schedule_ue_spec",
+    "schedule_ue_spec"
+  };
+
+// pointer to the slice specific scheduler
+slice_scheduler_dl slice_sched_dl[MAX_NUM_SLICES] = {0};
+
 //------------------------------------------------------------------------------
 void
 add_ue_dlsch_info(module_id_t module_idP,
@@ -408,12 +447,117 @@ set_ul_DAI(int module_idP, int UE_idP, int CC_idP, int frameP,
     }
 }
 
+//------------------------------------------------------------------------------
+void
+schedule_dlsch(module_id_t module_idP,
+	       frame_t frameP, sub_frame_t subframeP, int *mbsfn_flag)
+//------------------------------------------------------------------------------
+{
+
+  int i=0;
+
+  total_slice_percentage=0;
+  avg_slice_percentage=1.0/n_active_slices;
+
+  // reset the slice percentage for inactive slices
+  for (i = n_active_slices; i< MAX_NUM_SLICES; i++) {
+    slice_percentage[i]=0;
+  }
+  for (i = 0; i < n_active_slices; i++) {
+    if (slice_percentage[i] < 0 ){
+      LOG_W(MAC, "[eNB %d] frame %d subframe %d: invalid slice %d percentage %f, resetting to zero\n",
+	    module_idP, frameP, subframeP, i, slice_percentage[i]);
+      slice_percentage[i]=0;
+    }
+    total_slice_percentage+=slice_percentage[i];
+  }
+
+  for (i = 0; i < n_active_slices; i++) {
+
+    // Load any updated functions
+    if (update_dl_scheduler[i] > 0 ) {
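+      // resolve the slice's DL scheduler by name; with a NULL handle dlsym()
+      // performs a global-scope lookup, so the named function (by default the
+      // built-in schedule_ue_spec) must be a visible, non-static symbol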
+      slice_sched_dl[i] = dlsym(NULL, dl_scheduler_type[i]);
+      update_dl_scheduler[i] = 0;
+      update_dl_scheduler_current[i] = 0;
+      LOG_N(MAC,"update dl scheduler slice %d\n", i);
+    }
+
+    if (total_slice_percentage <= 1.0){ // the new total RB share is within the range
+
+      // check if the number of slices has changed, and log
+      if (n_active_slices_current != n_active_slices ){
+	if ((n_active_slices > 0) && (n_active_slices <= MAX_NUM_SLICES)) {
+	  LOG_N(MAC,"[eNB %d] frame %d subframe %d: number of active DL slices has changed: %d-->%d\n",
+		module_idP, frameP, subframeP, n_active_slices_current, n_active_slices);
+
+	  n_active_slices_current = n_active_slices;
+
+	} else {
+	  LOG_W(MAC,"invalid number of DL slices %d, revert to the previous value %d\n",n_active_slices, n_active_slices_current);
+	  n_active_slices = n_active_slices_current;
+	}
+      }
+
+      // check if the slice RB share has changed, and log to the console
+      if (slice_percentage_current[i] != slice_percentage[i]){ // new slice percentage
+	LOG_N(MAC,"[eNB %d][SLICE %d][DL] frame %d subframe %d: total percentage %f-->%f, slice RB percentage has changed: %f-->%f\n",
+	      module_idP, i, frameP, subframeP, total_slice_percentage_current, total_slice_percentage, slice_percentage_current[i], slice_percentage[i]);
+	total_slice_percentage_current= total_slice_percentage;
+	slice_percentage_current[i] = slice_percentage[i];
+
+      }
+
+      // check if the slice max MCS has changed, and log to the console
+      if (slice_maxmcs_current[i] != slice_maxmcs[i]){
+	if ((slice_maxmcs[i] >= 0) && (slice_maxmcs[i] < 29)){
+	  LOG_N(MAC,"[eNB %d][SLICE %d][DL] frame %d subframe %d: slice MAX MCS has changed: %d-->%d\n",
+		module_idP, i, frameP, subframeP, slice_maxmcs_current[i], slice_maxmcs[i]);
+	  slice_maxmcs_current[i] = slice_maxmcs[i];
+	} else {
+	  LOG_W(MAC,"[eNB %d][SLICE %d][DL] invalid slice max MCS %d, revert to the previous value %d\n", module_idP, i, slice_maxmcs[i], slice_maxmcs_current[i]);
+	  slice_maxmcs[i]= slice_maxmcs_current[i];
+	}
+      }
+
+      // check if a new scheduler has been set, and log to the console
+      if (update_dl_scheduler_current[i] != update_dl_scheduler[i]){
+	LOG_N(MAC,"[eNB %d][SLICE %d][DL] frame %d subframe %d: DL scheduler for this slice is updated: %s\n",
+	      module_idP, i, frameP, subframeP, dl_scheduler_type[i]);
+	update_dl_scheduler_current[i] = update_dl_scheduler[i];
+      }
+
+    } else {
+      // here we can correct the values, e.g. reduce proportionally
+
+      if (n_active_slices == n_active_slices_current){
+	LOG_W(MAC,"[eNB %d][SLICE %d][DL] invalid total RB share (%f->%f), reduce the RB share proportionally by 0.1\n",
+	      module_idP, i,
+	      total_slice_percentage_current, total_slice_percentage);
+	if (slice_percentage[i] >= avg_slice_percentage){
+	  slice_percentage[i]-=0.1;
+	  total_slice_percentage-=0.1;
+	}
+      } else {
+	LOG_W(MAC,"[eNB %d][SLICE %d][DL] invalid total RB share (%f->%f), revert the number of slices to its previous value (%d->%d)\n",
+	      module_idP, i,
+	      total_slice_percentage_current, total_slice_percentage,
+	      n_active_slices, n_active_slices_current );
+	n_active_slices = n_active_slices_current;
+	slice_percentage[i] = slice_percentage_current[i];
+      }
+    }
+
+    // Run each enabled slice-specific scheduler, one by one
+    slice_sched_dl[i](module_idP, i, frameP, subframeP, mbsfn_flag);
+  }
+
+}
 
 // changes to pre-processor for eMTC
 
 //------------------------------------------------------------------------------
 void
-schedule_ue_spec(module_id_t module_idP,
+schedule_ue_spec(module_id_t module_idP, slice_id_t slice_idP,
 		 frame_t frameP, sub_frame_t subframeP, int *mbsfn_flag)
 //------------------------------------------------------------------------------
 {
@@ -539,7 +683,11 @@ schedule_ue_spec(module_id_t module_idP,
 	(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR, VCD_FUNCTION_IN);
     start_meas(&eNB->schedule_dlsch_preprocessor);
     dlsch_scheduler_pre_processor(module_idP,
-				  frameP, subframeP, N_RBG, mbsfn_flag);
+				  slice_idP,
+				  frameP,
+				  subframeP,
+				  N_RBG,
+				  mbsfn_flag);
     stop_meas(&eNB->schedule_dlsch_preprocessor);
     VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME
 	(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_PREPROCESSOR, VCD_FUNCTION_OUT);
@@ -570,6 +718,8 @@ schedule_ue_spec(module_id_t module_idP,
 		LOG_D(MAC, "[eNB] Cannot find eNB_UE_stats\n");
 		continue_flag = 1;
 	    }
+	    if (flexran_slice_member(UE_id, slice_idP) == 0)
+		continue;
 
 	    if (continue_flag != 1) {
 		switch (get_tmode(module_idP, CC_id, UE_id)) {
@@ -656,7 +806,7 @@ schedule_ue_spec(module_id_t module_idP,
 	     */
 	    eNB_UE_stats->dlsch_mcs1 =
 		cqi_to_mcs[ue_sched_ctl->dl_cqi[CC_id]];
-	    eNB_UE_stats->dlsch_mcs1 = eNB_UE_stats->dlsch_mcs1;	//cmin(eNB_UE_stats->dlsch_mcs1, openair_daq_vars.target_ue_dl_mcs);
+	    eNB_UE_stats->dlsch_mcs1 = cmin(eNB_UE_stats->dlsch_mcs1, flexran_slice_maxmcs(slice_idP));	// cap the MCS to the slice maximum
 
 
 	    // store stats
diff --git a/openair2/LAYER2/MAC/eNB_scheduler_primitives.c b/openair2/LAYER2/MAC/eNB_scheduler_primitives.c
index e5f608d74cbfb49da30c49a1febede3b8e1e0c76..4a316cf92c14d8d130dc22055e4dddb798a0834b 100644
--- a/openair2/LAYER2/MAC/eNB_scheduler_primitives.c
+++ b/openair2/LAYER2/MAC/eNB_scheduler_primitives.c
@@ -4472,3 +4472,28 @@ harq_indication(module_id_t mod_idP, int CC_idP, frame_t frameP,
 	sched_ctl->pucch1_cqi_update[CC_idP] = 1;
     }
 }
+
+// FlexRAN slicing helper functions
+
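+// RB budget of a slice: floor of the slice's configured share of the total RBs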
+uint16_t flexran_nb_rbs_allowed_slice(float rb_percentage, int total_rbs)
+{
+    return (uint16_t) floor(rb_percentage * total_rbs);
+}
+
+int flexran_slice_maxmcs(int slice_id)
+{
+    return slice_maxmcs[slice_id];
+}
+
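+// default slice membership: UEs are mapped to slices round-robin on their UE_id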
+int flexran_slice_member(int UE_id, int slice_id)
+{
+  if ((slice_id < 0) || (slice_id >= n_active_slices))
+    LOG_W(MAC, "out of range slice id %d\n", slice_id);
+
+  if ((UE_id % n_active_slices) == slice_id) {
+    return 1;	// this ue is a member of this slice
+  }
+  return 0;
+}
diff --git a/openair2/LAYER2/MAC/eNB_scheduler_ulsch.c b/openair2/LAYER2/MAC/eNB_scheduler_ulsch.c
index 035e4c5fa307c316721f8f1c307ac74c9ad65dd3..b5256820bb9757df5e9f2ad185262c6a6c3d5079 100644
--- a/openair2/LAYER2/MAC/eNB_scheduler_ulsch.c
+++ b/openair2/LAYER2/MAC/eNB_scheduler_ulsch.c
@@ -57,6 +57,15 @@
 #include "intertask_interface.h"
 #endif
 
+#if defined FLEXRAN_AGENT_SB_IF
+#include "ENB_APP/flexran_agent_defs.h"
+#include "flexran_agent_ran_api.h"
+#include "header.pb-c.h"
+#include "flexran.pb-c.h"
+#include "flexran_agent_mac.h"
+#include <dlfcn.h>
+#endif
+
 #include "T.h"
 
 #define ENABLE_MAC_PAYLOAD_DEBUG
@@ -68,6 +77,37 @@ uint8_t rb_table[34] =
     36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100
 };
 
+/* number of active slices for past and current time */
+int n_active_slices_uplink = 1;
+int n_active_slices_current_uplink = 1;
+
+/* RB share for each slice for past and current time*/
+float avg_slice_percentage_uplink=0.25;
+float slice_percentage_uplink[MAX_NUM_SLICES] = {1.0, 0.0, 0.0, 0.0};
+float slice_percentage_current_uplink[MAX_NUM_SLICES] = {1.0, 0.0, 0.0, 0.0};
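+/* sum of the shares of all active UL slices (must not exceed 1.0), past and current */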
+float total_slice_percentage_uplink = 0;
+float total_slice_percentage_current_uplink = 0;
+
+// MAX MCS for each slice for past and current time
+int slice_maxmcs_uplink[MAX_NUM_SLICES] = {16, 16, 16, 16};
+int slice_maxmcs_current_uplink[MAX_NUM_SLICES] = {16,16,16,16};
+
+/* resource blocks allowed per CC and slice */
+uint16_t         nb_rbs_allowed_slice_uplink[MAX_NUM_CCs][MAX_NUM_SLICES];
+/*Slice Update */
+int update_ul_scheduler[MAX_NUM_SLICES] = {1, 1, 1, 1};
+int update_ul_scheduler_current[MAX_NUM_SLICES] = {1, 1, 1, 1};
+
+/* name of available scheduler*/
+char *ul_scheduler_type[MAX_NUM_SLICES] = {"schedule_ulsch_rnti",
+					   "schedule_ulsch_rnti",
+					   "schedule_ulsch_rnti",
+					   "schedule_ulsch_rnti"
+};
+
+/* Slice Function Pointer */
+slice_scheduler_ul slice_sched_ul[MAX_NUM_SLICES] = {0};
+
 void
 rx_sdu(const module_id_t enb_mod_idP,
        const int CC_idP,
@@ -921,7 +961,6 @@ set_msg3_subframe(module_id_t Mod_id,
     }
 }
 
-
 void
 schedule_ulsch(module_id_t module_idP, frame_t frameP,
 	       sub_frame_t subframeP)
@@ -1035,14 +1074,117 @@ schedule_ulsch(module_id_t module_idP, frame_t frameP,
 	}
     }
 
-    schedule_ulsch_rnti(module_idP, frameP, subframeP, sched_subframe,
-			first_rb);
+	// perform slice-specific operations
+
+	total_slice_percentage_uplink=0;
+	avg_slice_percentage_uplink=1.0/n_active_slices_uplink;
+
+	// reset the slice percentage for inactive slices
+	for (i = n_active_slices_uplink; i< MAX_NUM_SLICES; i++) {
+		slice_percentage_uplink[i]=0;
+	}
+	for (i = 0; i < n_active_slices_uplink; i++) {
+		if (slice_percentage_uplink[i] < 0 ){
+			LOG_W(MAC, "[eNB %d] frame %d subframe %d: invalid slice %d percentage %f, resetting to zero\n",
+				  module_idP, frameP, subframeP, i, slice_percentage_uplink[i]);
+			slice_percentage_uplink[i]=0;
+		}
+		total_slice_percentage_uplink+=slice_percentage_uplink[i];
+	}
+
+	for (i = 0; i < n_active_slices_uplink; i++) {
+
+		// Load any updated functions
+		if (update_ul_scheduler[i] > 0 ) {
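+			// resolve the slice's UL scheduler by name via a global-scope
+			// dlsym() lookup, as in the DL case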
+			slice_sched_ul[i] = dlsym(NULL, ul_scheduler_type[i]);
+			update_ul_scheduler[i] = 0;
+			update_ul_scheduler_current[i] = 0;
+			//slice_percentage_current_uplink[i]= slice_percentage_uplink[i];
+			//total_slice_percentage_current_uplink+=slice_percentage_uplink[i];
+			//if (total_slice_percentage_current_uplink> 1)
+			//	total_slice_percentage_current_uplink=1;
+			LOG_N(MAC,"update ul scheduler slice %d\n", i);
+		}
+		// the new total RB share is within the range
+		if (total_slice_percentage_uplink <= 1.0){
+
+			// check if the number of slices has changed, and log
+			if (n_active_slices_current_uplink != n_active_slices_uplink ){
+				if ((n_active_slices_uplink > 0) && (n_active_slices_uplink <= MAX_NUM_SLICES)) {
+					LOG_N(MAC,"[eNB %d] frame %d subframe %d: number of active UL slices has changed: %d-->%d\n",
+						  module_idP, frameP, subframeP, n_active_slices_current_uplink, n_active_slices_uplink);
+
+					n_active_slices_current_uplink = n_active_slices_uplink;
+
+				} else {
+					LOG_W(MAC,"invalid number of UL slices %d, revert to the previous value %d\n",n_active_slices_uplink, n_active_slices_current_uplink);
+					n_active_slices_uplink = n_active_slices_current_uplink;
+				}
+			}
+
+			// check if the slice RB share has changed, and log to the console
+			if (slice_percentage_current_uplink[i] != slice_percentage_uplink[i]){
+				LOG_N(MAC,"[eNB %d][SLICE %d][UL] frame %d subframe %d: total percentage %f-->%f, slice RB percentage has changed: %f-->%f\n",
+					  module_idP, i, frameP, subframeP, total_slice_percentage_current_uplink, total_slice_percentage_uplink, slice_percentage_current_uplink[i], slice_percentage_uplink[i]);
+				total_slice_percentage_current_uplink= total_slice_percentage_uplink;
+				slice_percentage_current_uplink[i] = slice_percentage_uplink[i];
+
+			}
+
+			// check if the slice max MCS has changed, and log to the console
+			if (slice_maxmcs_current_uplink[i] != slice_maxmcs_uplink[i]){
+				if ((slice_maxmcs_uplink[i] >= 0) && (slice_maxmcs_uplink[i] <= 16)){
+					LOG_N(MAC,"[eNB %d][SLICE %d][UL] frame %d subframe %d: slice MAX MCS has changed: %d-->%d\n",
+						  module_idP, i, frameP, subframeP, slice_maxmcs_current_uplink[i], slice_maxmcs_uplink[i]);
+					slice_maxmcs_current_uplink[i] = slice_maxmcs_uplink[i];
+				} else {
+					LOG_W(MAC,"[eNB %d][SLICE %d][UL] invalid slice max MCS %d, revert to the previous value %d\n", module_idP, i, slice_maxmcs_uplink[i], slice_maxmcs_current_uplink[i]);
+					slice_maxmcs_uplink[i]= slice_maxmcs_current_uplink[i];
+
+				}
+			}
+
+			// check if a new scheduler has been set, and log to the console
+			if (update_ul_scheduler_current[i] != update_ul_scheduler[i]){
+				LOG_N(MAC,"[eNB %d][SLICE %d][UL] frame %d subframe %d: UL scheduler for this slice is updated: %s\n",
+					  module_idP, i, frameP, subframeP, ul_scheduler_type[i]);
+
+				update_ul_scheduler_current[i] = update_ul_scheduler[i];
+			}
+		}
+		else {
+
+			if (n_active_slices_uplink == n_active_slices_current_uplink){
+				LOG_W(MAC,"[eNB %d][SLICE %d][UL] invalid total RB share (%f->%f), reduce the RB share proportionally by 0.1\n",
+					  module_idP, i,
+					  total_slice_percentage_current_uplink, total_slice_percentage_uplink);
+				if (slice_percentage_uplink[i] > avg_slice_percentage_uplink){
+					slice_percentage_uplink[i]-=0.1;
+					total_slice_percentage_uplink-=0.1;
+				}
+			} else {
+				// revert to the previous number of slices and RB shares
+				LOG_W(MAC,"[eNB %d][SLICE %d][UL] invalid total RB share (%f->%f), revert the number of slices to its previous value (%d->%d)\n",
+					  module_idP, i,
+					  total_slice_percentage_current_uplink, total_slice_percentage_uplink,
+					  n_active_slices_uplink, n_active_slices_current_uplink);
+				n_active_slices_uplink = n_active_slices_current_uplink;
+				slice_percentage_uplink[i] = slice_percentage_current_uplink[i];
+			}
+		}
+
+		// Run each enabled slice-specific scheduler, one by one
+		slice_sched_ul[i](module_idP, i, frameP, subframeP, sched_subframe,
+						  first_rb);
+	}
+
 
     stop_meas(&mac->schedule_ulsch);
 }
 
 void
 schedule_ulsch_rnti(module_id_t module_idP,
+					slice_id_t slice_id,
 		    frame_t frameP,
 		    sub_frame_t subframeP,
 		    unsigned char sched_subframeP, uint16_t * first_rb)
@@ -1081,7 +1223,7 @@ schedule_ulsch_rnti(module_id_t module_idP,
 	&mac->UL_req_tmp[CC_id][sched_subframeP].ul_config_request_body;
 
     LOG_D(MAC, "entering ulsch preprocesor\n");
-    ulsch_scheduler_pre_processor(module_idP, frameP, subframeP, first_rb);
+    ulsch_scheduler_pre_processor(module_idP, slice_id, frameP, subframeP, first_rb);
 
     LOG_D(MAC, "exiting ulsch preprocesor\n");
 
@@ -1115,7 +1257,6 @@ schedule_ulsch_rnti(module_id_t module_idP,
 	   for (n=0; n<UE_list->numactiveULCCs[UE_id]; n++) {
 	   CC_id = UE_list->ordered_ULCCids[n][UE_id];
 
-<<<<<<< HEAD
 	   if (mac_xface->get_eNB_UE_stats(module_idP,CC_id,rnti) == NULL) {
 	   LOG_W(MAC,"[eNB %d] frame %d subframe %d, UE %d/%x CC %d: no PHY context\n", module_idP,frameP,subframeP,UE_id,rnti,CC_id);
 	   drop_ue = 1;
@@ -1142,111 +1283,6 @@ schedule_ulsch_rnti(module_id_t module_idP,
 				       subframeP, rnti);
 		UE_list->UE_sched_ctrl[UE_id].ul_failure_timer = 0;
 		UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync = 1;
-=======
-    // loop over all active UL CC_ids for this UE
-    for (n=0; n<UE_list->numactiveULCCs[UE_id]; n++) {
-      // This is the actual CC_id in the list
-      CC_id = UE_list->ordered_ULCCids[n][UE_id];
-      frame_parms = mac_xface->get_lte_frame_parms(module_idP,CC_id);
-      eNB_UE_stats = mac_xface->get_eNB_UE_stats(module_idP,CC_id,rnti);
-
-      aggregation=get_aggregation(get_bw_index(module_idP,CC_id), 
-				  eNB_UE_stats->DL_cqi[0],
-				  format0);
-      
-      if (CCE_allocation_infeasible(module_idP,CC_id,0,subframeP,aggregation,rnti)) {
-        LOG_W(MAC,"[eNB %d] frame %d subframe %d, UE %d/%x CC %d: not enough nCCE\n", module_idP,frameP,subframeP,UE_id,rnti,CC_id);
-        continue; // break;
-      } else{
-	LOG_D(MAC,"[eNB %d] frame %d subframe %d, UE %d/%x CC %d mode %s: aggregation level %d\n", 
-	      module_idP,frameP,subframeP,UE_id,rnti,CC_id, mode_string[eNB_UE_stats->mode], 1<<aggregation);
-      }
-
-
-      if (eNB_UE_stats->mode == PUSCH) { // ue has a ulsch channel
-
-        DCI_pdu = &eNB->common_channels[CC_id].DCI_pdu;
-        UE_template   = &UE_list->UE_template[CC_id][UE_id];
-        UE_sched_ctrl = &UE_list->UE_sched_ctrl[UE_id];
-
-        if (mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,frameP,subframeP,&harq_pid,&round,openair_harq_UL) == -1 ) {
-          LOG_W(MAC,"[eNB %d] Scheduler Frame %d, subframeP %d: candidate harq_pid from PHY for UE %d CC %d RNTI %x\n",
-                module_idP,frameP,subframeP, UE_id, CC_id, rnti);
-          continue;
-        } else
-          LOG_T(MAC,"[eNB %d] Frame %d, subframeP %d, UE %d CC %d : got harq pid %d  round %d (rnti %x,mode %s)\n",
-                module_idP,frameP,subframeP,UE_id,CC_id, harq_pid, round,rnti,mode_string[eNB_UE_stats->mode]);
-
-	PHY_vars_eNB_g[module_idP][CC_id]->pusch_stats_BO[UE_id][(frameP*10)+subframeP] = UE_template->ul_total_buffer;
-	VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_BO,PHY_vars_eNB_g[module_idP][CC_id]->pusch_stats_BO[UE_id][(frameP*10)+subframeP]);	
-        if (((UE_is_to_be_scheduled(module_idP,CC_id,UE_id)>0)) || (round>0))// || ((frameP%10)==0))
-          // if there is information on bsr of DCCH, DTCH or if there is UL_SR, or if there is a packet to retransmit, or we want to schedule a periodic feedback every 10 frames
-        {
-	  LOG_D(MAC,"[eNB %d][PUSCH] Frame %d subframe %d Scheduling UE %d/%x in round %d(SR %d,UL_inactivity timer %d,UL_failure timer %d)\n",
-		module_idP,frameP,subframeP,UE_id,rnti,round,UE_template->ul_SR,
-		UE_sched_ctrl->ul_inactivity_timer,
-		UE_sched_ctrl->ul_failure_timer);
-          // reset the scheduling request
-          UE_template->ul_SR = 0;
-          status = mac_eNB_get_rrc_status(module_idP,rnti);
-	  if (status < RRC_CONNECTED)
-	    cqi_req = 0;
-	  else if (UE_sched_ctrl->cqi_req_timer>30) {
-	    cqi_req = 1;
-	    UE_sched_ctrl->cqi_req_timer=0;
-	  }
-	  else
-	    cqi_req = 0;
-
-          //power control
-          //compute the expected ULSCH RX power (for the stats)
-
-          // this is the normalized RX power and this should be constant (regardless of mcs
-          normalized_rx_power = eNB_UE_stats->UL_rssi[0];
-          target_rx_power = mac_xface->get_target_pusch_rx_power(module_idP,CC_id);
-
-          // this assumes accumulated tpc
-	  // make sure that we are only sending a tpc update once a frame, otherwise the control loop will freak out
-	  int32_t framex10psubframe = UE_template->pusch_tpc_tx_frame*10+UE_template->pusch_tpc_tx_subframe;
-          if (((framex10psubframe+10)<=(frameP*10+subframeP)) || //normal case
-	      ((framex10psubframe>(frameP*10+subframeP)) && (((10240-framex10psubframe+frameP*10+subframeP)>=10)))) //frame wrap-around
-	    {
-	    UE_template->pusch_tpc_tx_frame=frameP;
-	    UE_template->pusch_tpc_tx_subframe=subframeP;
-            if (normalized_rx_power>(target_rx_power+1)) {
-              tpc = 0; //-1
-              tpc_accumulated--;
-            } else if (normalized_rx_power<(target_rx_power-1)) {
-              tpc = 2; //+1
-              tpc_accumulated++;
-            } else {
-              tpc = 1; //0
-            }
-          } else {
-            tpc = 1; //0
-          }
-
-	  if (tpc!=1) {
-	    LOG_D(MAC,"[eNB %d] ULSCH scheduler: frame %d, subframe %d, harq_pid %d, tpc %d, accumulated %d, normalized/target rx power %d/%d\n",
-		  module_idP,frameP,subframeP,harq_pid,tpc,
-		  tpc_accumulated,normalized_rx_power,target_rx_power);
-	  }
-
-          // new transmission
-          if (round==0) {
-
-            ndi = 1-UE_template->oldNDI_UL[harq_pid];
-            UE_template->oldNDI_UL[harq_pid]=ndi;
-	    UE_list->eNB_UE_stats[CC_id][UE_id].normalized_rx_power=normalized_rx_power;
-	    UE_list->eNB_UE_stats[CC_id][UE_id].target_rx_power=target_rx_power;
-	    UE_list->eNB_UE_stats[CC_id][UE_id].ulsch_mcs1=UE_template->pre_assigned_mcs_ul;
-            mcs = UE_template->pre_assigned_mcs_ul;//cmin (UE_template->pre_assigned_mcs_ul, openair_daq_vars.target_ue_ul_mcs); // adjust, based on user-defined MCS
-            if (UE_template->pre_allocated_rb_table_index_ul >=0) {
-              rb_table_index=UE_template->pre_allocated_rb_table_index_ul;
-            } else {
-	      mcs=10;//cmin (10, openair_daq_vars.target_ue_ul_mcs);
-              rb_table_index=13; // for PHR
->>>>>>> feature-68-enb-agent
 	    }
 	    continue;
 	}
diff --git a/openair2/LAYER2/MAC/flexran_agent_scheduler_dlsch_ue.c b/openair2/LAYER2/MAC/flexran_agent_scheduler_dlsch_ue.c
index a988862542da31db73f8c600330aa36dffa0ff50..6445fa08456215f3015dc41213b4cd30904c861a 100644
--- a/openair2/LAYER2/MAC/flexran_agent_scheduler_dlsch_ue.c
+++ b/openair2/LAYER2/MAC/flexran_agent_scheduler_dlsch_ue.c
@@ -578,7 +578,7 @@ _dlsch_scheduler_pre_processor(module_id_t Mod_id,
       if (!phy_stats_exist(Mod_id, rnti))
 	continue;
       
-      for (ii=0; ii < UE_num_active_CC(UE_list,UE_id); ii++) {
+    for (ii=0; ii < UE_num_active_CC(UE_list,UE_id); ii++) {
 	CC_id = UE_list->ordered_CCids[ii][UE_id];
 	ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
-	ue_sched_ctl->max_allowed_rbs[CC_id]=nb_rbs_allowed_slice[CC_id][slice_id];
+	ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id]=nb_rbs_allowed_slice[CC_id][slice_id];
diff --git a/openair2/LAYER2/MAC/pre_processor.c b/openair2/LAYER2/MAC/pre_processor.c
index 23d3f3bd603770684c0b2c4364d5f59e609e9668..a0cc0dd412fd77fbe3d024aace690e2cca3d54c2 100644
--- a/openair2/LAYER2/MAC/pre_processor.c
+++ b/openair2/LAYER2/MAC/pre_processor.c
@@ -96,7 +96,7 @@ int phy_stats_exist(module_id_t Mod_id, int rnti)
 
 // This function stores the downlink buffer for all the logical channels
 void
-store_dlsch_buffer(module_id_t Mod_id, frame_t frameP,
+store_dlsch_buffer(module_id_t Mod_id, slice_id_t slice_id, frame_t frameP,
 		   sub_frame_t subframeP)
 {
 
@@ -110,6 +110,9 @@ store_dlsch_buffer(module_id_t Mod_id, frame_t frameP,
 	if (UE_list->active[UE_id] != TRUE)
 	    continue;
 
+	if (flexran_slice_member(UE_id, slice_id) == 0)
+	    continue;
+
 	UE_template =
 	    &UE_list->UE_template[UE_PCCID(Mod_id, UE_id)][UE_id];
 
@@ -153,8 +156,8 @@ store_dlsch_buffer(module_id_t Mod_id, frame_t frameP,
 	     */
 	    if (UE_template->dl_buffer_info[i] > 0)
 		LOG_D(MAC,
-		      "[eNB %d] Frame %d Subframe %d : RLC status for UE %d in LCID%d: total of %d pdus and size %d, head sdu queuing time %d, remaining size %d, is segmeneted %d \n",
-		      Mod_id, frameP, subframeP, UE_id,
+		      "[eNB %d][SLICE %d] Frame %d Subframe %d : RLC status for UE %d in LCID %d: total of %d pdus and size %d, head sdu queuing time %d, remaining size %d, is segmented %d \n",
+		      Mod_id, slice_id, frameP, subframeP, UE_id,
 		      i, UE_template->dl_pdus_in_buffer[i],
 		      UE_template->dl_buffer_info[i],
 		      UE_template->dl_buffer_head_sdu_creation_time[i],
@@ -182,6 +185,7 @@ store_dlsch_buffer(module_id_t Mod_id, frame_t frameP,
 // This function returns the estimated number of RBs required by each UE for downlink scheduling
 void
 assign_rbs_required(module_id_t Mod_id,
+		    slice_id_t slice_id,
 		    frame_t frameP,
 		    sub_frame_t subframe,
 		    uint16_t
@@ -200,7 +204,8 @@ assign_rbs_required(module_id_t Mod_id,
     for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
 	if (UE_list->active[UE_id] != TRUE)
 	    continue;
-
+	if (flexran_slice_member(UE_id, slice_id) == 0)
+	    continue;
 	pCCid = UE_PCCID(Mod_id, UE_id);
 
 	//update CQI information across component carriers
@@ -262,16 +267,18 @@ assign_rbs_required(module_id_t Mod_id,
 		    to_prb(RC.mac[Mod_id]->common_channels[CC_id].
 			   mib->message.dl_Bandwidth);
 
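+		// cap the slice's RB budget on this CC at its configured share of the DL bandwidth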
+		UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_id] = flexran_nb_rbs_allowed_slice(slice_percentage[slice_id], N_RB_DL);
+
 		/* calculating required number of RBs for each UE */
 		while (TBS <
 		       UE_list->UE_template[pCCid][UE_id].
 		       dl_buffer_total) {
 		    nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id];
 
-		    if (nb_rbs_required[CC_id][UE_id] > N_RB_DL) {
+		    if (nb_rbs_required[CC_id][UE_id] > UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_id]) {
 			TBS =
-			    get_TBS_DL(eNB_UE_stats->dlsch_mcs1, N_RB_DL);
-			nb_rbs_required[CC_id][UE_id] = N_RB_DL;
+			    get_TBS_DL(eNB_UE_stats->dlsch_mcs1, UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_id]);
+			nb_rbs_required[CC_id][UE_id] = UE_list->UE_sched_ctrl[UE_id].max_rbs_allowed_slice[CC_id][slice_id];
 			break;
 		    }
 
@@ -556,13 +563,14 @@ void sort_UEs(module_id_t Mod_idP, int frameP, sub_frame_t subframeP)
 // This function assigns pre-available RBS to each UE in specified sub-bands before scheduling is done
 void
 dlsch_scheduler_pre_processor(module_id_t Mod_id,
+			      slice_id_t slice_id,
 			      frame_t frameP,
 			      sub_frame_t subframeP,
 			      int N_RBG[MAX_NUM_CCs], int *mbsfn_flag)
 {
 
     unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX], harq_pid =
-	0, round = 0, total_ue_count;
+	0, round = 0, total_ue_count[MAX_NUM_CCs], total_rbs_used[MAX_NUM_CCs];
     unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX];
     int UE_id, i;
     uint16_t ii, j;
@@ -619,6 +627,8 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
 						N_RBG[CC_id],
 						nb_rbs_required,
 						nb_rbs_required_remaining,
+						total_ue_count,
+						total_rbs_used,
 						rballoc_sub,
 						MIMO_mode_indicator);
 
@@ -627,12 +637,12 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
 
 
     // Store the DLSCH buffer for each logical channel
-    store_dlsch_buffer(Mod_id, frameP, subframeP);
+    store_dlsch_buffer(Mod_id, slice_id, frameP, subframeP);
 
 
 
     // Calculate the number of RBs required by each UE on the basis of logical channel's buffer
-    assign_rbs_required(Mod_id, frameP, subframeP, nb_rbs_required,
+    assign_rbs_required(Mod_id, slice_id, frameP, subframeP, nb_rbs_required,
 			min_rb_unit);
 
 
@@ -641,64 +651,88 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
     sort_UEs(Mod_id, frameP, subframeP);
 
 
-
-    total_ue_count = 0;
-
     // loop over all active UEs
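+    // first pass: per CC, count the UEs with pending data and the RBs already
+    // committed to HARQ retransmissions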
     for (i = UE_list->head; i >= 0; i = UE_list->next[i]) {
-	rnti = UE_RNTI(Mod_id, i);
+		rnti = UE_RNTI(Mod_id, i);
 
-	if (rnti == NOT_A_RNTI)
-	    continue;
-	if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
-	    continue;
-	UE_id = i;
+		if (rnti == NOT_A_RNTI)
+			continue;
+		if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
+			continue;
+		UE_id = i;
+		if (flexran_slice_member(UE_id, slice_id) == 0)
+			continue;
 
-	for (ii = 0; ii < UE_num_active_CC(UE_list, UE_id); ii++) {
-	    CC_id = UE_list->ordered_CCids[ii][UE_id];
-	    ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
-	    cc = &RC.mac[Mod_id]->common_channels[ii];
-	    if (cc->tdd_Config)
-		harq_pid = ((frameP * 10) + subframeP) % 10;
-	    else
-		harq_pid = ((frameP * 10) + subframeP) & 7;
-	    round = ue_sched_ctl->round[CC_id][harq_pid];
+		for (ii = 0; ii < UE_num_active_CC(UE_list, UE_id); ii++) {
+			CC_id = UE_list->ordered_CCids[ii][UE_id];
+			ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
+			cc = &RC.mac[Mod_id]->common_channels[ii];
+			if (cc->tdd_Config)
+				harq_pid = ((frameP * 10) + subframeP) % 10;
+			else
+				harq_pid = ((frameP * 10) + subframeP) & 7;
+			round = ue_sched_ctl->round[CC_id][harq_pid];
+
+			average_rbs_per_user[CC_id] = 0;
+
+
+			if (round != 8) {
+				nb_rbs_required[CC_id][UE_id] =
+					UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
+				total_rbs_used[CC_id]+=nb_rbs_required[CC_id][UE_id];
+			}
 
-	    average_rbs_per_user[CC_id] = 0;
+			//nb_rbs_required_remaining[UE_id] = nb_rbs_required[UE_id];
+			if (nb_rbs_required[CC_id][UE_id] > 0) {
+				total_ue_count[CC_id] = total_ue_count[CC_id] + 1;
+			}
 
+		}
+    }
 
-	    if (round != 8) {
-		nb_rbs_required[CC_id][UE_id] =
-		    UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
-	    }
-	    //nb_rbs_required_remaining[UE_id] = nb_rbs_required[UE_id];
-	    if (nb_rbs_required[CC_id][UE_id] > 0) {
-		total_ue_count = total_ue_count + 1;
-	    }
-	    // hypothetical assignment
-	    /*
-	     * If schedule is enabled and if the priority of the UEs is modified
-	     * The average rbs per logical channel per user will depend on the level of
-	     * priority. Concerning the hypothetical assignement, we should assign more
-	     * rbs to prioritized users. Maybe, we can do a mapping between the
-	     * average rbs per user and the level of priority or multiply the average rbs
-	     * per user by a coefficient which represents the degree of priority.
-	     */
+    // second pass: calculate the average RBs per user per CC from the RBs left after retransmissions
+    for (i = UE_list->head; i >= 0; i = UE_list->next[i]) {
+		rnti = UE_RNTI(Mod_id, i);
 
-	    N_RB_DL =
-		to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->
-		       message.dl_Bandwidth);
+		if (rnti == NOT_A_RNTI)
+			continue;
+		if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
+			continue;
+		UE_id = i;
+		if (flexran_slice_member(UE_id, slice_id) == 0)
+			continue;
 
-	    if (total_ue_count == 0) {
-		average_rbs_per_user[CC_id] = 0;
-	    } else if ((min_rb_unit[CC_id] * total_ue_count) <= (N_RB_DL)) {
-		average_rbs_per_user[CC_id] =
-		    (uint16_t) floor(N_RB_DL / total_ue_count);
-	    } else {
-		average_rbs_per_user[CC_id] = min_rb_unit[CC_id];	// consider the total number of use that can be scheduled UE
-	    }
+		for (ii = 0; ii < UE_num_active_CC(UE_list, UE_id); ii++) {
+			CC_id = UE_list->ordered_CCids[ii][UE_id];
+			ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
+
+			// hypothetical assignment
+			/*
+			 * If schedule is enabled and if the priority of the UEs is modified
+			 * The average rbs per logical channel per user will depend on the level of
+			 * priority. Concerning the hypothetical assignement, we should assign more
+			 * rbs to prioritized users. Maybe, we can do a mapping between the
+			 * average rbs per user and the level of priority or multiply the average rbs
+			 * per user by a coefficient which represents the degree of priority.
+			 */
+
+
+			N_RB_DL =
+				to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->
+					   message.dl_Bandwidth) - total_rbs_used[CC_id];
+
+			// recalculate the slice's RB budget based on what is left after retransmissions
+			ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id] = flexran_nb_rbs_allowed_slice(slice_percentage[slice_id], N_RB_DL);
+
+			if (total_ue_count[CC_id] == 0) {
+				average_rbs_per_user[CC_id] = 0;
+			} else if ((min_rb_unit[CC_id] * total_ue_count[CC_id]) <= (ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id])) {
+				average_rbs_per_user[CC_id] =
+					(uint16_t) floor(ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id] / total_ue_count[CC_id]);
+			} else {
+				average_rbs_per_user[CC_id] = min_rb_unit[CC_id];	// consider the total number of UEs that can be scheduled
+			}
+		}
 	}
-    }
 
     // note: nb_rbs_required is assigned according to total_buffer_dl
     // extend nb_rbs_required to capture per LCID RB required
@@ -709,6 +743,8 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
 	    continue;
 	if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
 	    continue;
+	if (flexran_slice_member(i, slice_id) == 0)
+		continue;
 
 	for (ii = 0; ii < UE_num_active_CC(UE_list, i); ii++) {
 	    CC_id = UE_list->ordered_CCids[ii][i];
@@ -765,11 +801,14 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
 	    }
 	}
 
-	if (total_ue_count > 0) {
-	    for (i = UE_list->head; i >= 0; i = UE_list->next[i]) {
+
+	for (i = UE_list->head; i >= 0; i = UE_list->next[i]) {
 		UE_id = i;
 
 		for (ii = 0; ii < UE_num_active_CC(UE_list, UE_id); ii++) {
+			// if there are UEs with pending traffic on this CC
+			CC_id = UE_list->ordered_CCids[ii][UE_id];
+			if (total_ue_count[CC_id] > 0) {
 		    CC_id = UE_list->ordered_CCids[ii][UE_id];
 		    ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
 		    round = ue_sched_ctl->round[CC_id][harq_pid];
@@ -969,9 +1008,9 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
 			}
 		    }
 #endif
-		}
+		}	// total_ue_count
 	    }
-	}			// total_ue_count
+	}
     }				// end of for for r1 and r2
 
 #ifdef TM5
@@ -1044,7 +1083,7 @@ dlsch_scheduler_pre_processor(module_id_t Mod_id,
 		}
 
 		//PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = pre_nb_available_rbs[CC_id][UE_id];
-		LOG_D(MAC, "Total RBs allocated for UE%d = %d\n", UE_id,
+		LOG_D(MAC, "[eNB %d][SLICE %d] Total RBs allocated for UE%d = %d\n", Mod_id, slice_id, UE_id,
 		      ue_sched_ctl->pre_nb_available_rbs[CC_id]);
 	    }
 	}
@@ -1065,9 +1104,12 @@ dlsch_scheduler_pre_processor_reset(int module_idP,
 				    uint16_t
 				    nb_rbs_required_remaining
 				    [MAX_NUM_CCs][NUMBER_OF_UE_MAX],
+				    unsigned char total_ue_count[MAX_NUM_CCs],
+				    unsigned char total_rbs_used[MAX_NUM_CCs],
 				    unsigned char
 				    rballoc_sub[MAX_NUM_CCs]
-				    [N_RBG_MAX], unsigned char
+				    [N_RBG_MAX],
+				    unsigned char
 				    MIMO_mode_indicator[MAX_NUM_CCs]
 				    [N_RBG_MAX])
 {
@@ -1154,7 +1196,8 @@ dlsch_scheduler_pre_processor_reset(int module_idP,
     ue_sched_ctl->pre_nb_available_rbs[CC_id] = 0;
     ue_sched_ctl->dl_pow_off[CC_id] = 2;
     nb_rbs_required_remaining[CC_id][UE_id] = 0;
-
+    total_ue_count[CC_id] = 0;
+    total_rbs_used[CC_id] = 0;
     switch (N_RB_DL) {
     case 6:
 	RBGsize = 1;
@@ -1322,7 +1365,7 @@ dlsch_scheduler_pre_processor_allocate(module_id_t Mod_id,
 
 void
 ulsch_scheduler_pre_processor(module_id_t module_idP,
-			      int frameP,
+			      slice_id_t slice_id, int frameP,
 			      sub_frame_t subframeP, uint16_t * first_rb)
 {
 
@@ -1538,7 +1581,7 @@ ulsch_scheduler_pre_processor(module_id_t module_idP,
 
 
 void
-assign_max_mcs_min_rb(module_id_t module_idP, int frameP,
+assign_max_mcs_min_rb(module_id_t module_idP, int slice_id, int frameP,
 		      sub_frame_t subframeP, uint16_t * first_rb)
 {
 
diff --git a/openair2/LAYER2/MAC/proto.h b/openair2/LAYER2/MAC/proto.h
index 9f3af1b053c2bb1a24feee5306e33edc8bf813cd..a83aabef270b05bbb45910559fbfe1e989c0d5be 100644
--- a/openair2/LAYER2/MAC/proto.h
+++ b/openair2/LAYER2/MAC/proto.h
@@ -102,11 +102,12 @@ void schedule_ulsch(module_id_t module_idP, frame_t frameP,
 
 /** \brief ULSCH Scheduling per RNTI
 @param Mod_id Instance ID of eNB
+@param slice_id ID of the slice within this eNB instance
 @param frame Frame index
 @param subframe Subframe number on which to act
 @param sched_subframe Subframe number where PUSCH is transmitted (for DAI lookup)
 */
-void schedule_ulsch_rnti(module_id_t module_idP, frame_t frameP,
+void schedule_ulsch_rnti(module_id_t module_idP, slice_id_t slice_idP, frame_t frameP,
 			 sub_frame_t subframe,
 			 unsigned char sched_subframe,
 			 uint16_t * first_rb);
@@ -127,9 +128,12 @@ void fill_DLSCH_dci(module_id_t module_idP, frame_t frameP,
 
 @param mbsfn_flag  Indicates that MCH/MCCH is in this subframe
 */
-void schedule_ue_spec(module_id_t module_idP, frame_t frameP,
+void schedule_dlsch(module_id_t module_idP, frame_t frameP,
 		      sub_frame_t subframe, int *mbsfn_flag);
 
+void schedule_ue_spec(module_id_t module_idP, slice_id_t slice_idP,
+		      frame_t frameP, sub_frame_t subframe, int *mbsfn_flag);
+
 
 /** \brief Function for UE/PHY to compute PUSCH transmit power in power-control procedure.
     @param Mod_id Module id of UE
@@ -209,6 +213,7 @@ void dlsch_scheduler_pre_processor_reset(int module_idP, int UE_id,
 
 
 void dlsch_scheduler_pre_processor(module_id_t module_idP,
+				   slice_id_t slice_idP,
 				   frame_t frameP,
 				   sub_frame_t subframe,
 				   int N_RBG[MAX_NUM_CCs],
@@ -633,7 +638,7 @@ int UE_PCCID(module_id_t mod_idP, int ue_idP);
 rnti_t UE_RNTI(module_id_t mod_idP, int ue_idP);
 
 
-void ulsch_scheduler_pre_processor(module_id_t module_idP, int frameP,
+void ulsch_scheduler_pre_processor(module_id_t module_idP, slice_id_t slice_id, int frameP,
 				   sub_frame_t subframeP,
 				   uint16_t * first_rb);
 void store_ulsch_buffer(module_id_t module_idP, int frameP,
@@ -1158,5 +1163,12 @@ int32_t get_uldl_offset(int eutra_bandP);
 int l2_init_ue(int eMBMS_active, char *uecap_xer, uint8_t cba_group_active,
 	       uint8_t HO_active);
 
+/* Slice-related helper functions */
+uint16_t flexran_nb_rbs_allowed_slice(float rb_percentage, int total_rbs);
+
+int flexran_slice_member(int UE_id, int slice_id);
+
+int flexran_slice_maxmcs(int slice_id);
+
 #endif
 /** @}*/