package benchmark
import "k8s.io/kubernetes/test/integration/scheduler_perf"
Index ¶
- Constants
- Variables
- func InitTests() error
- func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeTemplate NodeTemplate) testutils.TestNodePreparer
- func RunBenchmarkPerfScheduling(b *testing.B, configFile string, topicName string, outOfTreePluginRegistry frameworkruntime.Registry)
- func RunIntegrationPerfScheduling(t *testing.T, configFile string)
- type DataItem
- type DataItems
- type FeatureGateFlag
- type IntegrationTestNodePreparer
- func (p *IntegrationTestNodePreparer) CleanupNodes(ctx context.Context) error
- func (p *IntegrationTestNodePreparer) PrepareNodes(ctx context.Context, nextNodeIndex int) error
- type NodeTemplate
- type SchedulingStage
- type WorkloadExecutor
Constants ¶
const (
    // Create continuously creates API objects without deleting them.
    Create = "create"
    // Recreate creates a number of API objects, then deletes them, and repeats the iteration.
    Recreate = "recreate"
)
const DefaultLoggingVerbosity = 2
Run with -v=2; this is the default log level in production.
In a PR this can be bumped up temporarily to run pull-kubernetes-scheduler-perf with more log output.
Variables ¶
var LoggingConfig *logsapi.LoggingConfiguration
var PerfSchedulingLabelFilter string
var (
    // PluginNames is the names of the plugins that scheduler_perf collects metrics for.
    // We export this variable so that people outside k/k can add their custom plugins.
    PluginNames = []string{
        names.PrioritySort,
        names.DefaultBinder,
        names.DefaultPreemption,
        names.DynamicResources,
        names.ImageLocality,
        names.InterPodAffinity,
        names.NodeAffinity,
        names.NodeName,
        names.NodePorts,
        names.NodeResourcesBalancedAllocation,
        names.NodeResourcesFit,
        names.NodeUnschedulable,
        names.NodeVolumeLimits,
        names.PodTopologySpread,
        names.SchedulingGates,
        names.TaintToleration,
        names.VolumeBinding,
        names.VolumeRestrictions,
        names.VolumeZone,
    }
)
var TestSchedulingLabelFilter string
var UseTestingLog bool
Functions ¶
func InitTests ¶
func InitTests() error
InitTests should be called in a TestMain in each config subdirectory.
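A minimal sketch of such a TestMain (the package alias, config subdirectory, and error handling below are illustrative, not taken from this page):

package config_test // hypothetical config subdirectory

import (
    "log"
    "testing"

    benchmark "k8s.io/kubernetes/test/integration/scheduler_perf"
)

func TestMain(m *testing.M) {
    // Initialize scheduler_perf before any benchmark or test in this directory runs.
    if err := benchmark.InitTests(); err != nil {
        log.Fatal(err)
    }
    m.Run()
}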
func NewIntegrationTestNodePreparer ¶
func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeTemplate NodeTemplate) testutils.TestNodePreparer
NewIntegrationTestNodePreparer creates an IntegrationTestNodePreparer with a given nodeTemplate.
func RunBenchmarkPerfScheduling ¶
func RunBenchmarkPerfScheduling(b *testing.B, configFile string, topicName string, outOfTreePluginRegistry frameworkruntime.Registry)
RunBenchmarkPerfScheduling runs the scheduler performance benchmark tests.
You can pass your own scheduler plugins via outOfTreePluginRegistry. Also, you may want to add your plugins to the PluginNames variable in this package so that metrics are collected for them.
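A sketch of that usage (the plugin package, plugin name, config path, and topic below are placeholders, not part of this package's API):

import (
    "testing"

    frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
    benchmark "k8s.io/kubernetes/test/integration/scheduler_perf"

    "example.com/mycustomplugin" // hypothetical out-of-tree plugin package
)

func BenchmarkPerfScheduling(b *testing.B) {
    // Also collect metrics for the custom plugin (name is a placeholder).
    benchmark.PluginNames = append(benchmark.PluginNames, mycustomplugin.Name)

    registry := frameworkruntime.Registry{
        // The factory must match the scheduler framework's PluginFactory signature.
        mycustomplugin.Name: mycustomplugin.New,
    }
    benchmark.RunBenchmarkPerfScheduling(b, "config/performance-config.yaml", "benchmark", registry)
}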
func RunIntegrationPerfScheduling ¶
func RunIntegrationPerfScheduling(t *testing.T, configFile string)
RunIntegrationPerfScheduling runs the scheduler performance integration tests.
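For example (the config path is a placeholder):

import (
    "testing"

    benchmark "k8s.io/kubernetes/test/integration/scheduler_perf"
)

func TestScheduling(t *testing.T) {
    benchmark.RunIntegrationPerfScheduling(t, "config/performance-config.yaml")
}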
Types ¶
type DataItem ¶
type DataItem struct {
    // Data is a map from bucket to real data point (e.g. "Perc90" -> 23.5). Notice
    // that all data items with the same label combination should have the same buckets.
    Data map[string]float64 `json:"data"`
    // Unit is the data unit. Notice that all data items with the same label combination
    // should have the same unit.
    Unit string `json:"unit"`
    // Labels is the labels of the data item.
    Labels map[string]string `json:"labels,omitempty"`
    // contains filtered or unexported fields
}
DataItem is the data point.
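For illustration only (the metric values, unit, and labels below are made up), a DataItem could be populated like this:

import benchmark "k8s.io/kubernetes/test/integration/scheduler_perf"

func exampleDataItem() benchmark.DataItem {
    return benchmark.DataItem{
        Data:   map[string]float64{"Perc50": 11.2, "Perc90": 23.5, "Perc99": 40.1},
        Unit:   "ms",
        Labels: map[string]string{"Metric": "scheduling_latency", "Name": "example-workload"},
    }
}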
type DataItems ¶
DataItems is the data point set. It is the struct that the perf dashboard expects.
type FeatureGateFlag ¶
type FeatureGateFlag interface {
    featuregate.FeatureGate
    flag.Value
}
var LoggingFeatureGate FeatureGateFlag
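Because FeatureGateFlag embeds flag.Value, LoggingFeatureGate can also be configured programmatically with the usual "Gate=true" syntax; a sketch (the gate name is only an illustration, not necessarily one registered here):

import benchmark "k8s.io/kubernetes/test/integration/scheduler_perf"

func enableGate() error {
    // The gate name below is a placeholder; use one that is actually registered.
    return benchmark.LoggingFeatureGate.Set("ContextualLogging=true")
}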
type IntegrationTestNodePreparer ¶
type IntegrationTestNodePreparer struct {
// contains filtered or unexported fields
}
IntegrationTestNodePreparer holds configuration information for the test node preparer.
func (*IntegrationTestNodePreparer) CleanupNodes ¶
func (p *IntegrationTestNodePreparer) CleanupNodes(ctx context.Context) error
CleanupNodes deletes existing test nodes.
func (*IntegrationTestNodePreparer) PrepareNodes ¶
func (p *IntegrationTestNodePreparer) PrepareNodes(ctx context.Context, nextNodeIndex int) error
PrepareNodes prepares test nodes according to countToStrategy.
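A sketch of preparing and cleaning up nodes (the node count, the testutils.TrivialNodePrepareStrategy strategy, and the empty node template are assumptions for illustration):

import (
    "context"

    v1 "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    benchmark "k8s.io/kubernetes/test/integration/scheduler_perf"
    testutils "k8s.io/kubernetes/test/utils"
)

func prepareTestNodes(ctx context.Context, client clientset.Interface) error {
    preparer := benchmark.NewIntegrationTestNodePreparer(
        client,
        []testutils.CountToStrategy{{Count: 100, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
        benchmark.StaticNodeTemplate(&v1.Node{}),
    )
    // Start creating nodes at index 0.
    if err := preparer.PrepareNodes(ctx, 0); err != nil {
        return err
    }
    // ... run the workload against the prepared nodes ...
    return preparer.CleanupNodes(ctx)
}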
type NodeTemplate ¶
type NodeTemplate interface {
    // GetNodeTemplate returns a node template for one out of many different nodes.
    // It gets called multiple times with an increasing index and a fixed count parameter.
    // This number can, but doesn't have to, be used to modify parts of the node spec,
    // for example a named reference to some other object.
    GetNodeTemplate(index, count int) (*v1.Node, error)
}
NodeTemplate is responsible for creating a v1.Node instance that is ready to be sent to the API server.
func StaticNodeTemplate ¶
func StaticNodeTemplate(node *v1.Node) NodeTemplate
StaticNodeTemplate returns an implementation of NodeTemplate for a fixed node that is the same regardless of the index.
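When the nodes need to differ, a custom implementation can derive per-node values from the index; a hypothetical sketch:

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// indexedNodeTemplate gives every node a unique name derived from its index.
type indexedNodeTemplate struct{}

func (indexedNodeTemplate) GetNodeTemplate(index, count int) (*v1.Node, error) {
    node := &v1.Node{}
    node.Name = fmt.Sprintf("perf-node-%d-of-%d", index+1, count)
    return node, nil
}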
type SchedulingStage ¶
type SchedulingStage string
const (
    Scheduled SchedulingStage = "Scheduled"
    Attempted SchedulingStage = "Attempted"
)
type WorkloadExecutor ¶
type WorkloadExecutor struct {
// contains filtered or unexported fields
}
Source Files ¶
create.go dra.go label_selector.go node_util.go scheduler_perf.go update.go util.go