From 462d923b48d27d8be33c98b4f66f7a4c9820e809 Mon Sep 17 00:00:00 2001
From: David Grove <groved@us.ibm.com>
Date: Thu, 7 Dec 2023 13:04:11 -0500
Subject: [PATCH 1/4] remove obsolete utility function; mcad v2 doesn't manage
 namespaces

---
 test/e2e/util.go | 28 ----------------------------
 1 file changed, 28 deletions(-)

diff --git a/test/e2e/util.go b/test/e2e/util.go
index ec51234..9ffad7c 100644
--- a/test/e2e/util.go
+++ b/test/e2e/util.go
@@ -268,34 +268,6 @@ func awPodPhase(ctx context.Context, aw *arbv1.AppWrapper, phase []v1.PodPhase,
 	}
 }
 
-func awNamespacePhase(ctx context.Context, aw *arbv1.AppWrapper, phase []v1.NamespacePhase) wait.ConditionFunc {
-	return func() (bool, error) {
-		awIgnored := &arbv1.AppWrapper{}
-		err := getClient(ctx).Get(ctx, client.ObjectKey{Namespace: aw.Namespace, Name: aw.Name}, awIgnored) // TODO: Do we actually need to do this Get?
-		Expect(err).NotTo(HaveOccurred())
-
-		namespaces := &v1.NamespaceList{}
-		err = getClient(ctx).List(ctx, namespaces)
-		Expect(err).NotTo(HaveOccurred())
-
-		readyTaskNum := 0
-		for _, namespace := range namespaces.Items {
-			if awns, found := namespace.Labels["appwrapper.mcad.ibm.com"]; !found || awns != aw.Name {
-				continue
-			}
-
-			for _, p := range phase {
-				if namespace.Status.Phase == p {
-					readyTaskNum++
-					break
-				}
-			}
-		}
-
-		return 0 < readyTaskNum, nil
-	}
-}
-
 func waitAWPodsReady(ctx context.Context, aw *arbv1.AppWrapper) error {
 	return waitAWPodsReadyEx(ctx, aw, ninetySeconds, int(aw.Spec.Scheduling.MinAvailable), false)
 }

From 4d0b961b521426081f51363d9c537dec6f1ee0a1 Mon Sep 17 00:00:00 2001
From: David Grove <groved@us.ibm.com>
Date: Thu, 7 Dec 2023 13:11:55 -0500
Subject: [PATCH 2/4] group related functions together

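Mostly code motion, with two small fixes picked up in the move: the
relocated cleanupTestObjectsVerbose now treats a successful client Get as
"pod still present" (the check was inverted) and clears the wait error
when the re-check finds no surviving pods, and cleanupTestObjectsPtrVerbose
now forwards its verbose argument.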
---
 test/e2e/util.go | 131 +++++++++++++++++++++++------------------------
 1 file changed, 65 insertions(+), 66 deletions(-)

diff --git a/test/e2e/util.go b/test/e2e/util.go
index 9ffad7c..e282eb0 100644
--- a/test/e2e/util.go
+++ b/test/e2e/util.go
@@ -73,6 +73,71 @@ func ensureNamespaceExists(ctx context.Context) {
 	Expect(client.IgnoreAlreadyExists(err)).NotTo(HaveOccurred())
 }
 
+func cleanupTestObjectsPtr(ctx context.Context, appwrappersPtr *[]*arbv1.AppWrapper) {
+	cleanupTestObjectsPtrVerbose(ctx, appwrappersPtr, true)
+}
+
+func cleanupTestObjectsPtrVerbose(ctx context.Context, appwrappersPtr *[]*arbv1.AppWrapper, verbose bool) {
+	if appwrappersPtr == nil {
+		fmt.Fprintf(GinkgoWriter, "[cleanupTestObjectsPtr] No  AppWrappers to cleanup.\n")
+	} else {
+		cleanupTestObjectsVerbose(ctx, *appwrappersPtr, verbose)
+	}
+}
+
+func cleanupTestObjects(ctx context.Context, appwrappers []*arbv1.AppWrapper) {
+	cleanupTestObjectsVerbose(ctx, appwrappers, true)
+}
+
+func cleanupTestObjectsVerbose(ctx context.Context, appwrappers []*arbv1.AppWrapper, verbose bool) {
+	if appwrappers == nil {
+		fmt.Fprintf(GinkgoWriter, "[cleanupTestObjects] No AppWrappers to cleanup.\n")
+		return
+	}
+
+	for _, aw := range appwrappers {
+		pods := getPodsOfAppWrapper(ctx, aw)
+		awNamespace := aw.Namespace
+		awName := aw.Name
+		fmt.Fprintf(GinkgoWriter, "[cleanupTestObjects] Deleting AW %s.\n", aw.Name)
+		err := deleteAppWrapper(ctx, aw.Name, aw.Namespace)
+		Expect(err).NotTo(HaveOccurred())
+
+		// Wait for the pods of the deleted AppWrapper to be destroyed
+		for _, pod := range pods {
+			fmt.Fprintf(GinkgoWriter, "[cleanupTestObjects] Awaiting pod %s/%s to be deleted for AW %s.\n",
+				pod.Namespace, pod.Name, aw.Name)
+		}
+		err = waitAWPodsDeleted(ctx, awNamespace, awName, pods)
+
+		// The wait may time out; check which pods actually remain and wait on just those
+		if err != nil {
+			var podsStillExisting []*v1.Pod
+			for _, pod := range pods {
+				podExist := &v1.Pod{}
+				if getErr := getClient(ctx).Get(ctx, client.ObjectKey{Namespace: pod.Namespace, Name: pod.Name}, podExist); getErr == nil {
+					fmt.Fprintf(GinkgoWriter, "[cleanupTestObjects] Found pod %s/%s %s, not completely deleted for AW %s.\n", podExist.Namespace, podExist.Name, podExist.Status.Phase, aw.Name)
+					podsStillExisting = append(podsStillExisting, podExist)
+				}
+			}
+			err = nil
+			if len(podsStillExisting) > 0 {
+				err = waitAWPodsDeleted(ctx, awNamespace, awName, podsStillExisting)
+			}
+		}
+		Expect(err).NotTo(HaveOccurred())
+	}
+}
+
+func deleteAppWrapper(ctx context.Context, name string, namespace string) error {
+	foreground := metav1.DeletePropagationForeground
+	aw := &arbv1.AppWrapper{ObjectMeta: metav1.ObjectMeta{
+		Name:      name,
+		Namespace: namespace,
+	}}
+	return getClient(ctx).Delete(ctx, aw, &client.DeleteOptions{PropagationPolicy: &foreground})
+}
+
 func anyPodsExist(ctx context.Context, awNamespace string, awName string) wait.ConditionFunc {
 	return func() (bool, error) {
 		podList := &v1.PodList{}
@@ -149,62 +214,6 @@ func podPhase(ctx context.Context, awNamespace string, awName string, pods []*v1
 	}
 }
 
-func cleanupTestObjectsPtr(ctx context.Context, appwrappersPtr *[]*arbv1.AppWrapper) {
-	cleanupTestObjectsPtrVerbose(ctx, appwrappersPtr, true)
-}
-
-func cleanupTestObjectsPtrVerbose(ctx context.Context, appwrappersPtr *[]*arbv1.AppWrapper, verbose bool) {
-	if appwrappersPtr == nil {
-		fmt.Fprintf(GinkgoWriter, "[cleanupTestObjectsPtr] No  AppWrappers to cleanup.\n")
-	} else {
-		cleanupTestObjects(ctx, *appwrappersPtr)
-	}
-}
-
-func cleanupTestObjects(ctx context.Context, appwrappers []*arbv1.AppWrapper) {
-	cleanupTestObjectsVerbose(ctx, appwrappers, true)
-}
-
-func cleanupTestObjectsVerbose(ctx context.Context, appwrappers []*arbv1.AppWrapper, verbose bool) {
-	if appwrappers == nil {
-		fmt.Fprintf(GinkgoWriter, "[cleanupTestObjects] No AppWrappers to cleanup.\n")
-		return
-	}
-
-	for _, aw := range appwrappers {
-		pods := getPodsOfAppWrapper(ctx, aw)
-		awNamespace := aw.Namespace
-		awName := aw.Name
-		fmt.Fprintf(GinkgoWriter, "[cleanupTestObjects] Deleting AW %s.\n", aw.Name)
-		err := deleteAppWrapper(ctx, aw.Name, aw.Namespace)
-		Expect(err).NotTo(HaveOccurred())
-
-		// Wait for the pods of the deleted the appwrapper to be destroyed
-		for _, pod := range pods {
-			fmt.Fprintf(GinkgoWriter, "[cleanupTestObjects] Awaiting pod %s/%s to be deleted for AW %s.\n",
-				pod.Namespace, pod.Name, aw.Name)
-		}
-		err = waitAWPodsDeleted(ctx, awNamespace, awName, pods)
-
-		// Final check to see if pod exists
-		if err != nil {
-			var podsStillExisting []*v1.Pod
-			for _, pod := range pods {
-				podExist := &v1.Pod{}
-				err = getClient(ctx).Get(ctx, client.ObjectKey{Namespace: pod.Namespace, Name: pod.Name}, podExist)
-				if err != nil {
-					fmt.Fprintf(GinkgoWriter, "[cleanupTestObjects] Found pod %s/%s %s, not completedly deleted for AW %s.\n", podExist.Namespace, podExist.Name, podExist.Status.Phase, aw.Name)
-					podsStillExisting = append(podsStillExisting, podExist)
-				}
-			}
-			if len(podsStillExisting) > 0 {
-				err = waitAWPodsDeleted(ctx, awNamespace, awName, podsStillExisting)
-			}
-		}
-		Expect(err).NotTo(HaveOccurred())
-	}
-}
-
 func awPodPhase(ctx context.Context, aw *arbv1.AppWrapper, phase []v1.PodPhase, taskNum int, quite bool) wait.ConditionFunc {
 	return func() (bool, error) {
 		defer GinkgoRecover()
@@ -342,16 +351,6 @@ func waitAWPodsTerminatedExVerbose(ctx context.Context, namespace string, name s
 		[]v1.PodPhase{v1.PodRunning, v1.PodSucceeded, v1.PodUnknown, v1.PodFailed, v1.PodPending}, taskNum))
 }
 
-func deleteAppWrapper(ctx context.Context, name string, namespace string) error {
-	foreground := metav1.DeletePropagationForeground
-	aw := &arbv1.AppWrapper{ObjectMeta: metav1.ObjectMeta{
-		Name:      name,
-		Namespace: namespace,
-	}}
-	return getClient(ctx).Delete(ctx, aw, &client.DeleteOptions{PropagationPolicy: &foreground})
-
-}
-
 func getPodsOfAppWrapper(ctx context.Context, aw *arbv1.AppWrapper) []*v1.Pod {
 	awIgnored := &arbv1.AppWrapper{}
 	err := getClient(ctx).Get(ctx, client.ObjectKeyFromObject(aw), awIgnored) // TODO: Do we actually need to do this Get?

From 65549ae14e59fb599277a578b9994e26d738adf7 Mon Sep 17 00:00:00 2001
From: David Grove <groved@us.ibm.com>
Date: Thu, 7 Dec 2023 14:11:28 -0500
Subject: [PATCH 3/4] compute and log cluster capacity at start of test suite

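Take a one-time snapshot of available capacity in BeforeSuite so that
tests can size their resource requests as a fraction of whatever cluster
they happen to be running on. The computation mirrors the controller's
own accounting:

    capacity = sum of Status.Allocatable over schedulable, untainted nodes
             - sum of requests of all pods not in phase Succeeded/Failed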
---
 test/e2e/queue.go |  8 +++++---
 test/e2e/util.go  | 42 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+), 3 deletions(-)

diff --git a/test/e2e/queue.go b/test/e2e/queue.go
index edf7b87..62dc867 100644
--- a/test/e2e/queue.go
+++ b/test/e2e/queue.go
@@ -33,18 +33,20 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
 )
 
+var ctx context.Context
+
 var _ = BeforeSuite(func() {
 	log.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+	ctx = extendContextWithClient(context.Background())
+	ensureNamespaceExists(ctx)
+	updateClusterCapacity(ctx)
 })
 
 var _ = Describe("AppWrapper E2E Tests", func() {
-	var ctx context.Context
 	var appwrappers []*arbv1.AppWrapper
 
 	BeforeEach(func() {
 		appwrappers = []*arbv1.AppWrapper{}
-		ctx = extendContextWithClient(context.Background())
-		ensureNamespaceExists(ctx)
 	})
 
 	AfterEach(func() {
diff --git a/test/e2e/util.go b/test/e2e/util.go
index e282eb0..ee00637 100644
--- a/test/e2e/util.go
+++ b/test/e2e/util.go
@@ -18,6 +18,7 @@ package e2e
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"math/rand"
 	"strings"
@@ -37,6 +38,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 
 	arbv1 "github.com/project-codeflare/mcad/api/v1beta1"
+	arcont "github.com/project-codeflare/mcad/internal/controller"
 )
 
 const testNamespace = "test"
@@ -45,6 +47,7 @@ var ninetySeconds = 90 * time.Second
 var threeMinutes = 180 * time.Second
 var tenMinutes = 600 * time.Second
 var threeHundredSeconds = 300 * time.Second
+var clusterCapacity arcont.Weights = arcont.Weights{}
 
 type myKey struct {
 	key string
@@ -73,6 +76,45 @@ func ensureNamespaceExists(ctx context.Context) {
 	Expect(client.IgnoreAlreadyExists(err)).NotTo(HaveOccurred())
 }
 
+// Compute available cluster capacity
+func updateClusterCapacity(ctx context.Context) {
+	kc := getClient(ctx)
+	capacity := arcont.Weights{}
+	// add allocatable capacity for each schedulable node
+	nodes := &v1.NodeList{}
+	err := kc.List(ctx, nodes)
+	Expect(err).NotTo(HaveOccurred())
+
+LOOP:
+	for _, node := range nodes.Items {
+		// skip unschedulable nodes
+		if node.Spec.Unschedulable {
+			continue
+		}
+		for _, taint := range node.Spec.Taints {
+			if taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute {
+				continue LOOP
+			}
+		}
+		// add allocatable capacity on the node
+		capacity.Add(arcont.NewWeights(node.Status.Allocatable))
+	}
+	// subtract requests from non-terminated pods
+	pods := &v1.PodList{}
+	err = kc.List(ctx, pods)
+	Expect(err).NotTo(HaveOccurred())
+	for _, pod := range pods.Items {
+		if pod.Status.Phase != v1.PodFailed && pod.Status.Phase != v1.PodSucceeded {
+			capacity.Sub(arcont.NewWeightsForPod(&pod))
+		}
+	}
+
+	clusterCapacity = capacity
+
+	t, _ := json.Marshal(clusterCapacity)
+	fmt.Fprintf(GinkgoWriter, "Computed cluster capacity: %v\n", string(t))
+}
+
 func cleanupTestObjectsPtr(ctx context.Context, appwrappersPtr *[]*arbv1.AppWrapper) {
 	cleanupTestObjectsPtrVerbose(ctx, appwrappersPtr, true)
 }

From 38684b2aa8d92b20686a35cd1a0cb5655f8824d0 Mon Sep 17 00:00:00 2001
From: David Grove <groved@us.ibm.com>
Date: Thu, 7 Dec 2023 16:54:16 -0500
Subject: [PATCH 4/4] scale CPU accounting tests to measured cluster capacity

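Replace the fixed 350m/425m/426m/550m deployment fixtures with the
generic helper, which now takes its CPU demand as a resource.Quantity
derived from the capacity snapshot computed in BeforeSuite. A sketch of
the new pattern, using the helpers introduced in this series:

	// two replicas, each requesting 27.5% of cluster CPU, 55% in total
	aw := createGenericDeploymentWithCPUAW(ctx, appendRandomString("aw-deployment-55-percent-cpu"), cpuDemand(0.275), 2)
	err := waitAWPodsReady(ctx, aw)

This also revives the previously disabled preemption scenario as the
"MCAD CPU Queueing Test", expressed in percentages of cluster CPU.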
---
 test/e2e/aw_fixtures.go | 305 +---------------------------------------
 test/e2e/queue.go       | 110 +++++----------
 test/e2e/util.go        |  15 +-
 3 files changed, 56 insertions(+), 374 deletions(-)

diff --git a/test/e2e/aw_fixtures.go b/test/e2e/aw_fixtures.go
index b52b3bd..196d857 100644
--- a/test/e2e/aw_fixtures.go
+++ b/test/e2e/aw_fixtures.go
@@ -261,298 +261,6 @@ func createDeploymentAW(ctx context.Context, name string) *arbv1.AppWrapper {
 	return aw
 }
 
-func createDeploymentAWwith550CPU(ctx context.Context, name string) *arbv1.AppWrapper {
-	rb := []byte(`{"apiVersion": "apps/v1",
-		"kind": "Deployment",
-	"metadata": {
-		"name": "` + name + `",
-		"namespace": "test",
-		"labels": {
-			"app": "` + name + `"
-		}
-	},
-	"spec": {
-		"replicas": 2,
-		"selector": {
-			"matchLabels": {
-				"app": "` + name + `"
-			}
-		},
-		"template": {
-			"metadata": {
-				"labels": {
-					"app": "` + name + `"
-				}
-			},
-			"spec": {
-				"containers": [
-					{
-						"name": "` + name + `",
-						"image": "quay.io/project-codeflare/echo-server:1.0",
-						"resources": {
-							"requests": {
-								"cpu": "550m"
-							}
-						},
-						"ports": [
-							{
-								"containerPort": 80
-							}
-						]
-					}
-				]
-			}
-		}
-	}} `)
-	var schedSpecMin int32 = 2
-
-	aw := &arbv1.AppWrapper{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: testNamespace,
-		},
-		Spec: arbv1.AppWrapperSpec{
-			Scheduling: arbv1.SchedulingSpec{
-				MinAvailable: schedSpecMin,
-			},
-			Resources: arbv1.AppWrapperResources{
-				GenericItems: []arbv1.GenericItem{
-					{
-						DoNotUseReplicas: 1,
-						GenericTemplate: runtime.RawExtension{
-							Raw: rb,
-						},
-					},
-				},
-			},
-		},
-	}
-
-	err := getClient(ctx).Create(ctx, aw)
-	Expect(err).NotTo(HaveOccurred())
-
-	return aw
-}
-
-func createDeploymentAWwith350CPU(ctx context.Context, name string) *arbv1.AppWrapper {
-	rb := []byte(`{"apiVersion": "apps/v1",
-		"kind": "Deployment",
-	"metadata": {
-		"name": "aw-deployment-2-350cpu",
-		"namespace": "test",
-		"labels": {
-			"app": "aw-deployment-2-350cpu"
-		}
-	},
-	"spec": {
-		"replicas": 2,
-		"selector": {
-			"matchLabels": {
-				"app": "aw-deployment-2-350cpu"
-			}
-		},
-		"template": {
-			"metadata": {
-				"labels": {
-					"app": "aw-deployment-2-350cpu"
-				}
-			},
-			"spec": {
-				"containers": [
-					{
-						"name": "aw-deployment-2-350cpu",
-						"image": "quay.io/project-codeflare/echo-server:1.0",
-						"resources": {
-							"requests": {
-								"cpu": "350m"
-							}
-						},
-						"ports": [
-							{
-								"containerPort": 80
-							}
-						]
-					}
-				]
-			}
-		}
-	}} `)
-	var schedSpecMin int32 = 2
-
-	aw := &arbv1.AppWrapper{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: testNamespace,
-		},
-		Spec: arbv1.AppWrapperSpec{
-			Scheduling: arbv1.SchedulingSpec{
-				MinAvailable: schedSpecMin,
-			},
-			Resources: arbv1.AppWrapperResources{
-				GenericItems: []arbv1.GenericItem{
-					{
-						DoNotUseReplicas: 1,
-						GenericTemplate: runtime.RawExtension{
-							Raw: rb,
-						},
-					},
-				},
-			},
-		},
-	}
-
-	err := getClient(ctx).Create(ctx, aw)
-	Expect(err).NotTo(HaveOccurred())
-
-	return aw
-}
-
-func createDeploymentAWwith426CPU(ctx context.Context, name string) *arbv1.AppWrapper {
-	rb := []byte(`{"apiVersion": "apps/v1",
-	"kind": "Deployment",
-	"metadata": {
-		"name": "` + name + `",
-		"namespace": "test",
-		"labels": {
-			"app": "` + name + `"
-		}
-	},
-	"spec": {
-		"replicas": 2,
-		"selector": {
-			"matchLabels": {
-				"app": "` + name + `"
-			}
-		},
-		"template": {
-			"metadata": {
-				"labels": {
-					"app": "` + name + `"
-				}
-			},
-			"spec": {
-				"containers": [
-					{
-						"name": "` + name + `",
-						"image": "quay.io/project-codeflare/echo-server:1.0",
-						"resources": {
-							"requests": {
-								"cpu": "427m"
-							}
-						},
-						"ports": [
-							{
-								"containerPort": 80
-							}
-						]
-					}
-				]
-			}
-		}
-	}} `)
-	var schedSpecMin int32 = 2
-
-	aw := &arbv1.AppWrapper{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: testNamespace,
-		},
-		Spec: arbv1.AppWrapperSpec{
-			Scheduling: arbv1.SchedulingSpec{
-				MinAvailable: schedSpecMin,
-			},
-			Resources: arbv1.AppWrapperResources{
-				GenericItems: []arbv1.GenericItem{
-					{
-						DoNotUseReplicas: 1,
-						GenericTemplate: runtime.RawExtension{
-							Raw: rb,
-						},
-					},
-				},
-			},
-		},
-	}
-
-	err := getClient(ctx).Create(ctx, aw)
-	Expect(err).NotTo(HaveOccurred())
-
-	return aw
-}
-
-func createDeploymentAWwith425CPU(ctx context.Context, name string) *arbv1.AppWrapper {
-	rb := []byte(`{"apiVersion": "apps/v1",
-	"kind": "Deployment",
-	"metadata": {
-		"name": "aw-deployment-2-425cpu",
-		"namespace": "test",
-		"labels": {
-			"app": "aw-deployment-2-425cpu"
-		}
-	},
-	"spec": {
-		"replicas": 2,
-		"selector": {
-			"matchLabels": {
-				"app": "aw-deployment-2-425cpu"
-			}
-		},
-		"template": {
-			"metadata": {
-				"labels": {
-					"app": "aw-deployment-2-425cpu"
-				}
-			},
-			"spec": {
-				"containers": [
-					{
-						"name": "aw-deployment-2-425cpu",
-						"image": "quay.io/project-codeflare/echo-server:1.0",
-						"resources": {
-							"requests": {
-								"cpu": "425m"
-							}
-						},
-						"ports": [
-							{
-								"containerPort": 80
-							}
-						]
-					}
-				]
-			}
-		}
-	}} `)
-	var schedSpecMin int32 = 2
-
-	aw := &arbv1.AppWrapper{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: testNamespace,
-		},
-		Spec: arbv1.AppWrapperSpec{
-			Scheduling: arbv1.SchedulingSpec{
-				MinAvailable: schedSpecMin,
-			},
-			Resources: arbv1.AppWrapperResources{
-				GenericItems: []arbv1.GenericItem{
-					{
-						DoNotUseReplicas: 1,
-						GenericTemplate: runtime.RawExtension{
-							Raw: rb,
-						},
-					},
-				},
-			},
-		},
-	}
-
-	err := getClient(ctx).Create(ctx, aw)
-	Expect(err).NotTo(HaveOccurred())
-
-	return aw
-}
-
 func createGenericDeploymentAW(ctx context.Context, name string) *arbv1.AppWrapper {
 	rb := []byte(`{"apiVersion": "apps/v1",
 		"kind": "Deployment",
@@ -1132,7 +840,7 @@ func createGenericDeploymentAWWithMultipleItems(ctx context.Context, name string
 	return aw
 }
 
-func createGenericDeploymentWithCPUAW(ctx context.Context, name string, cpuDemand string, replicas int) *arbv1.AppWrapper {
+func createGenericDeploymentWithCPUAW(ctx context.Context, name string, cpuDemand *resource.Quantity, replicas int) *arbv1.AppWrapper {
 	rb := []byte(fmt.Sprintf(`{
 	"apiVersion": "apps/v1",
 	"kind": "Deployment",
@@ -1177,8 +885,6 @@ func createGenericDeploymentWithCPUAW(ctx context.Context, name string, cpuDeman
 		}
 	}} `, name, name, replicas, name, name, name, cpuDemand))
 
-	var schedSpecMin int32 = int32(replicas)
-
 	aw := &arbv1.AppWrapper{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
@@ -1186,12 +892,15 @@ func createGenericDeploymentWithCPUAW(ctx context.Context, name string, cpuDeman
 		},
 		Spec: arbv1.AppWrapperSpec{
 			Scheduling: arbv1.SchedulingSpec{
-				MinAvailable: schedSpecMin,
+				MinAvailable: int32(replicas),
 			},
 			Resources: arbv1.AppWrapperResources{
 				GenericItems: []arbv1.GenericItem{
-					{
-						DoNotUseReplicas: 1,
+					{
+						CustomPodResources: []arbv1.CustomPodResource{{
+							Replicas: int32(replicas),
+							Requests: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: *cpuDemand},
+						}},
 						GenericTemplate: runtime.RawExtension{
 							Raw: rb,
 						},
diff --git a/test/e2e/queue.go b/test/e2e/queue.go
index 62dc867..1a5ddea 100644
--- a/test/e2e/queue.go
+++ b/test/e2e/queue.go
@@ -57,51 +57,50 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 
 		It("MCAD CPU Accounting Test", func() {
 			fmt.Fprintf(os.Stdout, "[e2e] MCAD CPU Accounting Test - Started.\n")
-			// This should fill up the worker node and most of the master node
-			aw := createDeploymentAWwith550CPU(ctx, appendRandomString("aw-deployment-2-550cpu"))
-			appwrappers = append(appwrappers, aw)
 
+			By("Request 55% of cluster CPU")
+			aw := createGenericDeploymentWithCPUAW(ctx, appendRandomString("aw-deployment-55-percent-cpu"), cpuDemand(0.275), 2)
+			appwrappers = append(appwrappers, aw)
 			err := waitAWPodsReady(ctx, aw)
-			Expect(err).NotTo(HaveOccurred(), "Ready pods are expected for app wrapper: aw-deployment-2-550cpu")
+			Expect(err).NotTo(HaveOccurred(), "Ready pods are expected for app wrapper: aw-deployment-55-percent-cpu")
 
-			// This should fill up the master node
-			aw2 := createDeploymentAWwith350CPU(ctx, appendRandomString("aw-deployment-2-350cpu"))
+			By("Request 30% of cluster CPU")
+			aw2 := createGenericDeploymentWithCPUAW(ctx, appendRandomString("aw-deployment-30-percent-cpu"), cpuDemand(0.15), 2)
 			appwrappers = append(appwrappers, aw2)
-
-			// Using quiet mode due to creating of pods in earlier step.
 			err = waitAWReadyQuiet(ctx, aw2)
-			Expect(err).NotTo(HaveOccurred(), "Ready pods are expected for app wrapper: aw-deployment-2-350cpu")
-			fmt.Fprintf(os.Stdout, "[e2e] MCAD CPU Accounting Test - Completed. Awaiting app wrapper cleanup...\n")
+			Expect(err).NotTo(HaveOccurred(), "Ready pods are expected for app wrapper:aw-deployment-30-percent-cpu")
 		})
 
-		/*
-			TODO: DAVE -- Test disabled because it is too fragile -- redo with % of capacity calculation
-			It("MCAD CPU Preemption Test", func() {
-				fmt.Fprintf(os.Stdout, "[e2e] MCAD CPU Preemption Test - Started.\n")
+		It("MCAD CPU Queueing Test", func() {
+			fmt.Fprintf(os.Stdout, "[e2e] MCAD CPU Queueing Test - Started.\n")
 
-				// This should fill up the worker node and most of the master node
-				aw := createDeploymentAWwith550CPU(ctx, appendRandomString("aw-deployment-2-550cpu"))
-				appwrappers = append(appwrappers, aw)
-				err := waitAWPodsReady(ctx, aw)
-				Expect(err).NotTo(HaveOccurred())
-
-				// This should not fit on cluster
-				aw2 := createDeploymentAWwith426CPU(ctx, appendRandomString("aw-deployment-2-426cpu"))
-				appwrappers = append(appwrappers, aw2)
-				err = waitAWAnyPodsExists(ctx, aw2)
-				// With improved accounting, no pods will be spawned
-				Expect(err).To(HaveOccurred())
+			By("Request 55% of cluster CPU")
+			aw := createGenericDeploymentWithCPUAW(ctx, appendRandomString("aw-deployment-55-percent-cpu"), cpuDemand(0.275), 2)
+			appwrappers = append(appwrappers, aw)
+			err := waitAWPodsReady(ctx, aw)
+			Expect(err).NotTo(HaveOccurred(), "Ready pods are expected for app wrapper: aw-deployment-55-percent-cpu")
+
+			By("Request 50% of cluster CPU (will not fit; should not be dispatched)")
+			aw2 := createGenericDeploymentWithCPUAW(ctx, appendRandomString("aw-deployment-50-percent-cpu"), cpuDemand(0.25), 2)
+			appwrappers = append(appwrappers, aw2)
+			err = waitAWAnyPodsExists(ctx, aw2)
+			Expect(err).To(HaveOccurred(), "Ready pods were not expected for app wrapper: aw-deployment-50-percent-cpu")
 
-				// This should fit on cluster, initially queued because of aw2 above but should eventually
-				// run after prevention of aw2 above.
-				aw3 := createDeploymentAWwith425CPU(ctx, "aw-deployment-2-425cpu")
-				appwrappers = append(appwrappers, aw3)
+			By("Request 30% of cluster CPU")
+			aw3 := createGenericDeploymentWithCPUAW(ctx, appendRandomString("aw-deployment-30-percent-cpu"), cpuDemand(0.15), 2)
+			appwrappers = append(appwrappers, aw3)
+			err = waitAWReadyQuiet(ctx, aw3)
+			Expect(err).NotTo(HaveOccurred(), "Ready pods are expected for app wrapper:aw-deployment-30-percent-cpu")
 
-				// Since preemption takes some time, increasing timeout wait time to 2 minutes
-				err = waitAWPodsExists(ctx, aw3, 2*time.Minute)
-				Expect(err).NotTo(HaveOccurred(), "Expecting pods for app wrapper : aw-deployment-2-425cpu")
-			})
-		*/
+			By("Free resources by deleting 55% of cluster AppWrapper")
+			err = deleteAppWrapper(ctx, aw.Name, aw.Namespace)
+			appwrappers = []*arbv1.AppWrapper{aw2, aw3}
+			Expect(err).NotTo(HaveOccurred(), "Should have been able to delete an the initial AppWrapper")
+
+			By("Wait for queued AppWrapper to finally be dispatched")
+			err = waitAWReadyQuiet(ctx, aw2)
+			Expect(err).NotTo(HaveOccurred(), "Ready pods are expected for app wrapper: aw-deployment-50-percent-cpu")
+		})
 
 		/*
 			 * TODO: Dave DISABLED BECASUSE V2 doesn't support exponential backoff of requeuing time
@@ -139,7 +138,6 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 
 			aw := createStatefulSetAW(ctx, "aw-statefulset-2")
 			appwrappers = append(appwrappers, aw)
-
 			err := waitAWPodsReady(ctx, aw)
 			Expect(err).NotTo(HaveOccurred())
 		})
@@ -149,7 +147,6 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 
 			aw := createGenericStatefulSetAW(ctx, "aw-generic-statefulset-2")
 			appwrappers = append(appwrappers, aw)
-
 			err := waitAWPodsReady(ctx, aw)
 			Expect(err).NotTo(HaveOccurred())
 		})
@@ -159,8 +156,6 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 
 			aw := createDeploymentAW(ctx, "aw-deployment-3")
 			appwrappers = append(appwrappers, aw)
-
-			fmt.Fprintf(GinkgoWriter, "[e2e] Awaiting %d pods running for AW %s.\n", aw.Spec.Scheduling.MinAvailable, aw.Name)
 			err := waitAWPodsReady(ctx, aw)
 			Expect(err).NotTo(HaveOccurred())
 		})
@@ -170,7 +165,6 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 
 			aw := createGenericDeploymentAW(ctx, "aw-generic-deployment-3")
 			appwrappers = append(appwrappers, aw)
-
 			err := waitAWPodsReady(ctx, aw)
 			Expect(err).NotTo(HaveOccurred())
 		})
@@ -180,7 +174,6 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 
 			aw := createPodTemplateAW(ctx, "aw-podtemplate-2")
 			appwrappers = append(appwrappers, aw)
-
 			err := waitAWPodsReady(ctx, aw)
 			Expect(err).NotTo(HaveOccurred())
 		})
@@ -190,7 +183,6 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 
 			aw := createGenericPodAW(ctx, "aw-generic-pod-1")
 			appwrappers = append(appwrappers, aw)
-
 			err := waitAWPodsReady(ctx, aw)
 			Expect(err).NotTo(HaveOccurred())
 		})
@@ -218,7 +210,6 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 
 			aw := createBadPodTemplateAW(ctx, "aw-bad-podtemplate-2")
 			appwrappers = append(appwrappers, aw)
-
 			err := waitAWPodsExists(ctx, aw, 30*time.Second)
 			Expect(err).To(HaveOccurred())
 		})
@@ -238,7 +229,6 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 
 			aw := createBadGenericPodAW(ctx, "aw-bad-generic-pod-1")
 			appwrappers = append(appwrappers, aw)
-
 			err := waitAWPodsCompleted(ctx, aw, 10*time.Second)
 			Expect(err).To(HaveOccurred())
 
@@ -249,7 +239,6 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 
 			aw := createBadGenericItemAW(ctx, "aw-bad-generic-item-1")
 			appwrappers = append(appwrappers, aw)
-
 			err := waitAWPodsCompleted(ctx, aw, 10*time.Second)
 			Expect(err).To(HaveOccurred())
 
@@ -479,14 +468,13 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 		appwrappers = append(appwrappers, aw)
 		fmt.Fprintf(GinkgoWriter, "The error is: %v", err1)
 		Expect(err1).To(HaveOccurred(), "Expecting for pods not to be ready for app wrapper: aw-test-job-with-comp-44")
-		fmt.Fprintf(os.Stdout, "[e2e] MCAD GenericItem Without Status Test - Completed.\n")
-
 	})
 
 	It("MCAD Job Completion No-requeue Test", Label("slow"), func() {
 		fmt.Fprintf(os.Stdout, "[e2e] MCAD Job Completion No-requeue Test - Started.\n")
 
 		aw := createGenericJobAWWithScheduleSpec(ctx, "aw-test-job-with-scheduling-spec")
+		appwrappers = append(appwrappers, aw)
 		err1 := waitAWPodsReady(ctx, aw)
 		Expect(err1).NotTo(HaveOccurred(), "Waiting for pods to be ready")
 		err2 := waitAWPodsCompleted(ctx, aw, 90*time.Second)
@@ -496,10 +484,6 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 		// which SHOULD NOT happen because the job is done
 		err3 := waitAWPodsNotCompleted(ctx, aw)
 		Expect(err3).To(HaveOccurred(), "Waiting for pods not to be completed")
-
-		appwrappers = append(appwrappers, aw)
-		fmt.Fprintf(os.Stdout, "[e2e] MCAD Job Completion No-requeue Test - Completed.\n")
-
 	})
 
 	/*  TODO: DAVE: Status.State
@@ -515,27 +499,6 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 	})
 	*/
 
-	/*
-				TODO: DAVE -- Test disabled because it is too fragile -- redo with % of capacity calculation
-		It("MCAD CPU Accounting Queuing Test", func() {
-			fmt.Fprintf(os.Stdout, "[e2e] MCAD CPU Accounting Queuing Test - Started.\n")
-
-			// This should fill up the worker node and most of the master node
-			aw := createDeploymentAWwith550CPU(ctx, appendRandomString("aw-deployment-2-550cpu"))
-			appwrappers = append(appwrappers, aw)
-			err := waitAWPodsReady(ctx, aw)
-			Expect(err).NotTo(HaveOccurred(), "Waiting for pods to be ready for app wrapper: aw-deployment-2-550cpu")
-
-			// This should not fit on cluster
-			// there may be a false positive dispatch which will cause MCAD to requeue AW
-			aw2 := createDeploymentAWwith426CPU(ctx, appendRandomString("aw-deployment-2-426cpu"))
-			appwrappers = append(appwrappers, aw2)
-
-			err = waitAWPodsReady(ctx, aw2)
-			Expect(err).To(HaveOccurred(), "No pods for app wrapper `aw-deployment-2-426cpu` are expected.")
-		})
-	*/
-
 	/*  TODO: DAVE: Status.State
 	It("MCAD Deployment RunningHoldCompletion Test", func() {
 		fmt.Fprintf(os.Stdout, "[e2e] MCAD Deployment RunningHoldCompletion Test - Started.\n")
@@ -570,9 +533,10 @@ var _ = Describe("AppWrapper E2E Tests", func() {
 			const (
 				awCount           = 50
 				reportingInterval = 10
-				cpuDemand         = "5m"
+				cpuPercent        = 0.005
 			)
 
+			cpuDemand := cpuDemand(cpuPercent)
 			replicas := 2
 			modDivisor := int(awCount / reportingInterval)
 			for i := 0; i < awCount; i++ {
diff --git a/test/e2e/util.go b/test/e2e/util.go
index ee00637..d2442dc 100644
--- a/test/e2e/util.go
+++ b/test/e2e/util.go
@@ -28,6 +28,7 @@ import (
 	"github.com/onsi/gomega"
 	. "github.com/onsi/gomega"
 
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -47,7 +48,7 @@ var ninetySeconds = 90 * time.Second
 var threeMinutes = 180 * time.Second
 var tenMinutes = 600 * time.Second
 var threeHundredSeconds = 300 * time.Second
-var clusterCapacity arcont.Weights = arcont.Weights{}
+var clusterCapacity v1.ResourceList = v1.ResourceList{}
 
 type myKey struct {
 	key string
@@ -76,7 +77,9 @@ func ensureNamespaceExists(ctx context.Context) {
 	Expect(client.IgnoreAlreadyExists(err)).NotTo(HaveOccurred())
 }
 
-// Compute available cluster capacity
+// Compute available cluster capacity to allow tests to scale their resources appropriately.
+// The code is a simplification of AppWrapperReconciler.computeCapacity and is intended to be run
+// in BeforeSuite methods (no AppWrapper-owned pods exist yet, so none need to be filtered out).
 func updateClusterCapacity(ctx context.Context) {
 	kc := getClient(ctx)
 	capacity := arcont.Weights{}
@@ -109,12 +112,18 @@ LOOP:
 		}
 	}
 
-	clusterCapacity = capacity
+	clusterCapacity = capacity.AsResources()
 
 	t, _ := json.Marshal(clusterCapacity)
 	fmt.Fprintf(GinkgoWriter, "Computed cluster capacity: %v\n", string(t))
 }
 
+func cpuDemand(fractionOfCluster float64) *resource.Quantity {
+	clusterCPU := clusterCapacity[v1.ResourceCPU]
+	milliDemand := int64(float64(clusterCPU.MilliValue()) * fractionOfCluster)
+	return resource.NewMilliQuantity(milliDemand, resource.DecimalSI)
+}
+
 func cleanupTestObjectsPtr(ctx context.Context, appwrappersPtr *[]*arbv1.AppWrapper) {
 	cleanupTestObjectsPtrVerbose(ctx, appwrappersPtr, true)
 }