package benchmark
import "k8s.io/kubernetes/test/integration/scheduler_perf"
Index ¶
- Constants
- Variables
- func InitTests() error
- func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeTemplate NodeTemplate) testutils.TestNodePreparer
- func RunBenchmarkPerfScheduling(b *testing.B, configFile string, topicName string, outOfTreePluginRegistry frameworkruntime.Registry, options ...SchedulerPerfOption)
- func RunIntegrationPerfScheduling(t *testing.T, configFile string, options ...SchedulerPerfOption)
- type DataItem
- type DataItems
- type FeatureGateFlag
- type HookFn
- type IntegrationTestNodePreparer
- func (p *IntegrationTestNodePreparer) CleanupNodes(ctx context.Context) error
- func (p *IntegrationTestNodePreparer) PrepareNodes(ctx context.Context, nextNodeIndex int) error
- type NodeTemplate
- type NodeUpdateFn
- type PreRunFn
- type SchedulerPerfOption
- func WithNodeUpdateFn(fn NodeUpdateFn) SchedulerPerfOption
- func WithPreRunFn(preRunFn PreRunFn) SchedulerPerfOption
- func WithPrepareFn(prepareFn HookFn) SchedulerPerfOption
- type SchedulingStage
- type Workload
- type WorkloadExecutor
Constants ¶
const ( // Create continuously create API objects without deleting them. Create = "create" // Recreate creates a number of API objects and then delete them, and repeat the iteration. Recreate = "recreate" )
const DefaultLoggingVerbosity = 2
Run with -v=2, this is the default log level in production.
In a PR this can be bumped up temporarily to run pull-kubernetes-scheduler-perf with more log output.
Variables ¶
var LoggingConfig *logsapi.LoggingConfiguration
var PerfSchedulingLabelFilter string
var ( // PluginNames is the names of the plugins that scheduler_perf collects metrics for. // We export this variable because people outside k/k may want to put their custom plugins. PluginNames = []string{ names.PrioritySort, names.DefaultBinder, names.DefaultPreemption, names.DynamicResources, names.ImageLocality, names.InterPodAffinity, names.NodeAffinity, names.NodeName, names.NodePorts, names.NodeResourcesBalancedAllocation, names.NodeResourcesFit, names.NodeUnschedulable, names.NodeVolumeLimits, names.PodTopologySpread, names.SchedulingGates, names.TaintToleration, names.VolumeBinding, names.VolumeRestrictions, names.VolumeZone, } )
var TestSchedulingLabelFilter string
var UseTestingLog bool
Functions ¶
func InitTests ¶
func InitTests() error
InitTests should be called in a TestMain in each config subdirectory.
func NewIntegrationTestNodePreparer ¶
func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeTemplate NodeTemplate) testutils.TestNodePreparer
NewIntegrationTestNodePreparer creates an IntegrationTestNodePreparer with a given nodeTemplate.
func RunBenchmarkPerfScheduling ¶
func RunBenchmarkPerfScheduling(b *testing.B, configFile string, topicName string, outOfTreePluginRegistry frameworkruntime.Registry, options ...SchedulerPerfOption)
RunBenchmarkPerfScheduling runs the scheduler performance benchmark tests.
You can pass your own scheduler plugins via outOfTreePluginRegistry. Also, you may want to add your plugins to the PluginNames variable in this package to collect metrics for them.
func RunIntegrationPerfScheduling ¶
func RunIntegrationPerfScheduling(t *testing.T, configFile string, options ...SchedulerPerfOption)
RunIntegrationPerfScheduling runs the scheduler performance integration tests.
Types ¶
type DataItem ¶
type DataItem struct {
// Data is a map from bucket to real data point (e.g. "Perc90" -> 23.5). Notice
// that all data items with the same label combination should have the same buckets.
Data map[string]float64 `json:"data"`
// Unit is the data unit. Notice that all data items with the same label combination
// should have the same unit.
Unit string `json:"unit"`
// Labels is the labels of the data item.
Labels map[string]string `json:"labels,omitempty"`
// contains filtered or unexported fields
}
DataItem is the data point.
type DataItems ¶
DataItems is the data point set. It is the struct that perf dashboard expects.
type FeatureGateFlag ¶
type FeatureGateFlag interface {
featuregate.FeatureGate
flag.Value
}
var LoggingFeatureGate FeatureGateFlag
type HookFn ¶
HookFn is a function that is called while going through the test execution. The function may record test errors or abort the current test run through tCtx. Alternatively, it may also return a non-nil error.
type IntegrationTestNodePreparer ¶
type IntegrationTestNodePreparer struct {
// contains filtered or unexported fields
}
IntegrationTestNodePreparer holds configuration information for the test node preparer.
func (*IntegrationTestNodePreparer) CleanupNodes ¶
func (p *IntegrationTestNodePreparer) CleanupNodes(ctx context.Context) error
CleanupNodes deletes existing test nodes.
func (*IntegrationTestNodePreparer) PrepareNodes ¶
func (p *IntegrationTestNodePreparer) PrepareNodes(ctx context.Context, nextNodeIndex int) error
PrepareNodes prepares countToStrategy test nodes.
type NodeTemplate ¶
type NodeTemplate interface {
// GetNodeTemplate returns a node template for one out of many different nodes.
// It gets called multiple times with an increasing index and a fixed count parameter.
// This number can, but doesn't have to be, used to modify parts of the node spec like
// for example a named reference to some other object.
GetNodeTemplate(index, count int) (*v1.Node, error)
}
NodeTemplate is responsible for creating a v1.Node instance that is ready to be sent to the API server.
func StaticNodeTemplate ¶
func StaticNodeTemplate(node *v1.Node) NodeTemplate
StaticNodeTemplate returns an implementation of NodeTemplate for a fixed node that is the same regardless of the index.
type NodeUpdateFn ¶
type NodeUpdateFn func(tCtx ktesting.TContext, scheduler *scheduler.Scheduler, w *Workload, nodes *v1.NodeList) error
NodeUpdateFn is a function called after nodes are created in a workload using createNodesOp.
type PreRunFn ¶
PreRunFn is a hook function that is called for each workload after feature gates are set, but before the scheduler is started. It returns an optional cleanup function and an error.
type SchedulerPerfOption ¶
type SchedulerPerfOption func(options *schedulerPerfOptions)
func WithNodeUpdateFn ¶
func WithNodeUpdateFn(fn NodeUpdateFn) SchedulerPerfOption
WithNodeUpdateFn is the option to set a function that is called after nodes are created by createNodesOp within a workload execution.
func WithPreRunFn ¶
func WithPreRunFn(preRunFn PreRunFn) SchedulerPerfOption
WithPreRunFn is the option to set a function that is called after configuring the process (logging, feature gates) and before running any code (etcd, scheduler).
func WithPrepareFn ¶
func WithPrepareFn(prepareFn HookFn) SchedulerPerfOption
WithPrepareFn is the option to set a function that is called before each workload is run. (e.g. applying CRDs for custom plugins) Scheduler and etcd are started at that point.
type SchedulingStage ¶
type SchedulingStage string
const ( Scheduled SchedulingStage = "Scheduled" Attempted SchedulingStage = "Attempted" )
type Workload ¶
type Workload struct {
// Name of the workload.
Name string
// Values of parameters used in the workloadTemplate.
Params params
// Labels can be used to enable or disable a workload.
Labels []string
// Threshold is compared to average value of metric specified using thresholdMetricSelector.
// The comparison is performed for op with CollectMetrics set to true.
// If the measured value is below the threshold, the workload's test case will fail.
// If set to zero, the threshold check is disabled.
//
// May contain a single value or map of topic name to value.
// The single value is used if there is no entry in the map for the topic name.
// Topic names are passed to RunBenchmarkPerfScheduling. This approach
// makes it possible to reuse the same test cases in different configurations.
//
// Optional
Threshold thresholds
// ThresholdMetricSelector defines to what metric the Threshold should be compared.
// If nil, the metric is set to DefaultThresholdMetricSelector of the testCase.
// If DefaultThresholdMetricSelector is nil, the metric is set to "SchedulingThroughput".
// Optional
ThresholdMetricSelector *thresholdMetricSelector
// Feature gates to set before running the workload.
// Explicitly setting a feature in this map overrides the test case settings.
// Optional
FeatureGates map[featuregate.Feature]bool
}
Workload is a subtest under a testCase that tests the scheduler performance for a certain ordering of ops. The set of nodes created and pods scheduled in a Workload may be heterogeneous.
func (*Workload) GetParam ¶
GetParam retrieves a parameter from the Workload's parameters as an integer.
type WorkloadExecutor ¶
type WorkloadExecutor struct {
// contains filtered or unexported fields
}
Source Files ¶
dra.go executor.go label_selector.go node_util.go operations.go options.go scheduler_perf.go update.go util.go
Directories ¶
- Version
- v1.36.0 (latest)
- Published
- Apr 22, 2026
- Platform
- linux/amd64
- Imports
- 87 packages
- Last checked
- 1 hour ago –
Tools for package owners.