
Traefik 安装与配置

本文介绍如何通过 Helm 在 Kubernetes 集群中安装 Traefik,并对默认配置文件进行自定义修改(启用 Dashboard、配置 TLS 等)。

安装

# Add the official Traefik Helm repository and refresh the local chart index.
helm repo add traefik https://helm.traefik.io/traefik
helm repo update
# Install the Traefik chart (or upgrade it if already installed) into a
# dedicated "traefik" namespace, creating the namespace if it does not exist.
helm upgrade traefik traefik/traefik \
--install --create-namespace \
--namespace=traefik

# Export the chart's default values file so it can be customized locally
helm show values traefik/traefik > traefik-values.yaml

修改配置

traefik-values.yaml

# Default values for Traefik

# This is a YAML-formatted file.

# Declare variables to be passed into templates



image:  # @schema additionalProperties: false

  # -- Traefik image host registry

  registry: docker.io

  # -- Traefik image repository

  repository: traefik

  # -- defaults to appVersion

  tag:  # @schema type:[string, null]

  # -- Traefik image pull policy

  pullPolicy: IfNotPresent



# -- Add additional label to all resources

commonLabels: {}



deployment:

  # -- Enable deployment

  enabled: true

  # -- Deployment or DaemonSet

  kind: Deployment

  # -- Number of pods of the deployment (only applies when kind == Deployment)

  replicas: 1

  # -- Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10)

  revisionHistoryLimit:  # @schema type:[integer, null];minimum:0

  # -- Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down

  terminationGracePeriodSeconds: 60

  # -- The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available

  minReadySeconds: 0

  ## -- Override the liveness/readiness port. This is useful to integrate traefik

  ## with an external Load Balancer that performs healthchecks.

  ## Default: ports.traefik.port

  healthchecksPort:  # @schema type:[integer, null];minimum:0

  ## -- Override the liveness/readiness host. Useful for getting ping to respond on non-default entryPoint.

  ## Default: ports.traefik.hostIP if set, otherwise Pod IP

  healthchecksHost: ""

  ## -- Override the liveness/readiness scheme. Useful for getting ping to

  ## respond on websecure entryPoint.

  healthchecksScheme:   # @schema enum:[HTTP, HTTPS, null]; type:[string, null]; default: HTTP

  ## -- Override the readiness path.

  ## Default: /ping

  readinessPath: ""

  # -- Override the liveness path.

  # Default: /ping

  livenessPath: ""

  # -- Additional deployment annotations (e.g. for jaeger-operator sidecar injection)

  annotations: {}

  # -- Additional deployment labels (e.g. for filtering deployment by custom labels)

  labels: {}

  # -- Additional pod annotations (e.g. for mesh injection or prometheus scraping)

  # It supports templating. One can set it with values like traefik/name: '{{ template "traefik.name" . }}'

  podAnnotations: {}

  # -- Additional Pod labels (e.g. for filtering Pod by custom labels)

  podLabels: {}

  # -- Additional containers (e.g. for metric offloading sidecars)

  additionalContainers: []

  # https://docs.datadoghq.com/developers/dogstatsd/unix_socket/?tab=host

  # - name: socat-proxy

  #   image: alpine/socat:1.0.5

  #   args: ["-s", "-u", "udp-recv:8125", "unix-sendto:/socket/socket"]

  #   volumeMounts:

  #     - name: dsdsocket

  #       mountPath: /socket

  # -- Additional volumes available for use with initContainers and additionalContainers

  additionalVolumes: []

  # - name: dsdsocket

  #   hostPath:

  #     path: /var/run/statsd-exporter

  # -- Additional initContainers (e.g. for setting file permission as shown below)

  initContainers: []

  # The "volume-permissions" init container is required if you run into permission issues.

  # Related issue: https://github.com/traefik/traefik-helm-chart/issues/396

  # - name: volume-permissions

  #   image: busybox:latest

  #   command: ["sh", "-c", "touch /data/acme.json; chmod -v 600 /data/acme.json"]

  #   volumeMounts:

  #     - name: data

  #       mountPath: /data

  # -- Use process namespace sharing

  shareProcessNamespace: false

  # -- Custom pod DNS policy. Apply if `hostNetwork: true`

  dnsPolicy: ""

  # -- Custom pod [DNS config](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#poddnsconfig-v1-core)

  dnsConfig: {}

  # -- Custom [host aliases](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/)

  hostAliases: []

  # -- Pull secret for fetching traefik container image

  imagePullSecrets: []

  # -- Pod lifecycle actions

  lifecycle: {}

  # preStop:

  #   exec:

  #     command: ["/bin/sh", "-c", "sleep 40"]

  # postStart:

  #   httpGet:

  #     path: /ping

  #     port: 9000

  #     host: localhost

  #     scheme: HTTP

  # -- Set a runtimeClassName on pod

  runtimeClassName: ""



# -- [Pod Disruption Budget](https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1/)

podDisruptionBudget:  # @schema additionalProperties: false

  enabled: false

  maxUnavailable:  # @schema type:[string, integer, null];minimum:0

  minAvailable:    # @schema type:[string, integer, null];minimum:0



# -- Create a default IngressClass for Traefik

ingressClass:  # @schema additionalProperties: false

  enabled: true

  isDefaultClass: true

  name: ""



core:  # @schema additionalProperties: false

  # -- Can be used to use globally v2 router syntax

  # See https://doc.traefik.io/traefik/v3.0/migration/v2-to-v3/#new-v3-syntax-notable-changes

  defaultRuleSyntax: ""



# Traefik experimental features

experimental:

  # -- Enable traefik experimental plugins

  plugins: {}

  # demo:

  #   moduleName: github.com/traefik/plugindemo

  #   version: v0.2.1

  kubernetesGateway:

    # -- Enable traefik experimental GatewayClass CRD

    enabled: false



gateway:

  # -- When providers.kubernetesGateway.enabled, deploy a default gateway

  enabled: true

  # -- Set a custom name to gateway

  name: ""

  # -- By default, Gateway is created in the same `Namespace` than Traefik.

  namespace: ""

  # -- Additional gateway annotations (e.g. for cert-manager.io/issuer)

  annotations: {}

  # -- Define listeners

  listeners:

    web:

      # -- Port is the network port. Multiple listeners may use the same port, subject to the Listener compatibility rules.

      # The port must match a port declared in ports section.

      port: 8000

      # -- Optional hostname. See [Hostname](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.Hostname)

      hostname: ""

      # Specify expected protocol on this listener. See [ProtocolType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.ProtocolType)

      protocol: HTTP

      # -- Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.FromNamespaces

      namespacePolicy:  # @schema type:[string, null]

    # websecure listener is disabled by default because certificateRefs needs to be added,

    # or you may specify TLS protocol with Passthrough mode and add "--providers.kubernetesGateway.experimentalChannel=true" in additionalArguments section.

    # websecure:

    #   # -- Port is the network port. Multiple listeners may use the same port, subject to the Listener compatibility rules.

    #   # The port must match a port declared in ports section.

    #   port: 8443

    #   # -- Optional hostname. See [Hostname](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.Hostname)

    #   hostname:

    #   # Specify expected protocol on this listener See [ProtocolType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.ProtocolType)

    #   protocol: HTTPS

    #   # -- Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.FromNamespaces)

    #   namespacePolicy:

    #   # -- Add certificates for TLS or HTTPS protocols. See [GatewayTLSConfig](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1.GatewayTLSConfig)

    #   certificateRefs:

    #   # -- TLS behavior for the TLS session initiated by the client. See [TLSModeType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.TLSModeType).

    #   mode:



gatewayClass:  # @schema additionalProperties: false

  # -- When providers.kubernetesGateway.enabled and gateway.enabled, deploy a default gatewayClass

  enabled: true

  # -- Set a custom name to GatewayClass

  name: ""

  # -- Additional gatewayClass labels (e.g. for filtering gateway objects by custom labels)

  labels: {}



ingressRoute:

  dashboard:

    # -- Create an IngressRoute for the dashboard

    enabled: true # 修改此处,启用dashboard

    # -- Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class)

    annotations: # 修改此处,添加配置
      ingress.kubernetes.io/ssl-redirect: "true"
      ingress.kubernetes.io/proxy-body-size: "0"
      kubernetes.io/ingress.class: "traefik"
      traefik.ingress.kubernetes.io/router.tls: "true"
      traefik.ingress.kubernetes.io/router.entrypoints: websecure

    # -- Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels)

    labels: {}

    # -- The router match rule used for the dashboard ingressRoute

    matchRule: PathPrefix(`/dashboard`) || PathPrefix(`/api`)

    # -- The internal service used for the dashboard ingressRoute

    services:

      - name: api@internal

        kind: TraefikService

    # -- Specify the allowed entrypoints to use for the dashboard ingress route, (e.g. traefik, web, websecure).

    # By default, it's using traefik entrypoint, which is not exposed.

    # /!\ Do not expose your dashboard without any protection over the internet /!\

    entryPoints: ["traefik"]

    # -- Additional ingressRoute middlewares (e.g. for authentication)

    middlewares: []

    # -- TLS options (e.g. secret containing certificate)

    tls: # 修改此处,配置证书,需要cert-manager
      enabled: true
      certSource: secret
      secret:
        secretName: "traefik-tls-secret"
   

  healthcheck:

    # -- Create an IngressRoute for the healthcheck probe

    enabled: false

    # -- Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class)

    annotations: {}

    # -- Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels)

    labels: {}

    # -- The router match rule used for the healthcheck ingressRoute

    matchRule: PathPrefix(`/ping`)

    # -- The internal service used for the healthcheck ingressRoute

    services:

      - name: ping@internal

        kind: TraefikService

    # -- Specify the allowed entrypoints to use for the healthcheck ingress route, (e.g. traefik, web, websecure).

    # By default, it's using traefik entrypoint, which is not exposed.

    entryPoints: ["traefik"]

    # -- Additional ingressRoute middlewares (e.g. for authentication)

    middlewares: []

    # -- TLS options (e.g. secret containing certificate)

    tls: {}



updateStrategy:  # @schema additionalProperties: false

  # -- Customize updateStrategy of Deployment or DaemonSet

  type: RollingUpdate

  rollingUpdate:

    maxUnavailable: 0  # @schema type:[integer, string, null]

    maxSurge: 1        # @schema type:[integer, string, null]



readinessProbe:  # @schema additionalProperties: false

  # -- The number of consecutive failures allowed before considering the probe as failed.

  failureThreshold: 1

  # -- The number of seconds to wait before starting the first probe.

  initialDelaySeconds: 2

  # -- The number of seconds to wait between consecutive probes.

  periodSeconds: 10

  # -- The minimum consecutive successes required to consider the probe successful.

  successThreshold: 1

  # -- The number of seconds to wait for a probe response before considering it as failed.

  timeoutSeconds: 2

livenessProbe:  # @schema additionalProperties: false

  # -- The number of consecutive failures allowed before considering the probe as failed.

  failureThreshold: 3

  # -- The number of seconds to wait before starting the first probe.

  initialDelaySeconds: 2

  # -- The number of seconds to wait between consecutive probes.

  periodSeconds: 10

  # -- The minimum consecutive successes required to consider the probe successful.

  successThreshold: 1

  # -- The number of seconds to wait for a probe response before considering it as failed.

  timeoutSeconds: 2



# -- Define [Startup Probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes)

startupProbe: {}



providers:  # @schema additionalProperties: false

  kubernetesCRD:

    # -- Load Kubernetes IngressRoute provider

    enabled: true

    # -- Allows IngressRoute to reference resources in namespace other than theirs

    allowCrossNamespace: false

    # -- Allows to reference ExternalName services in IngressRoute

    allowExternalNameServices: false

    # -- Allows to return 503 when there is no endpoints available

    allowEmptyServices: true

    # -- When the parameter is set, only resources containing an annotation with the same value are processed. Otherwise, resources missing the annotation, having an empty value, or the value traefik are processed. It will also set required annotation on Dashboard and Healthcheck IngressRoute when enabled.

    ingressClass: ""

    # labelSelector: environment=production,method=traefik

    # -- Array of namespaces to watch. If left empty, Traefik watches all namespaces.

    namespaces: []

    # -- Defines whether to use Native Kubernetes load-balancing mode by default.

    nativeLBByDefault: false



  kubernetesIngress:

    # -- Load Kubernetes Ingress provider

    enabled: true

    # -- Allows to reference ExternalName services in Ingress

    allowExternalNameServices: false

    # -- Allows to return 503 when there is no endpoints available

    allowEmptyServices: true

    # -- When ingressClass is set, only Ingresses containing an annotation with the same value are processed. Otherwise, Ingresses missing the annotation, having an empty value, or the value traefik are processed.

    ingressClass:  # @schema type:[string, null]

    # labelSelector: environment=production,method=traefik

    # -- Array of namespaces to watch. If left empty, Traefik watches all namespaces.

    namespaces: []

    # IP used for Kubernetes Ingress endpoints

    publishedService:

      enabled: false

      # Published Kubernetes Service to copy status from. Format: namespace/servicename

      # By default this Traefik service

      # pathOverride: ""

    # -- Defines whether to use Native Kubernetes load-balancing mode by default.

    nativeLBByDefault: false



  kubernetesGateway:

    # -- Enable Traefik Gateway provider for Gateway API

    enabled: false

    # -- Toggles support for the Experimental Channel resources (Gateway API release channels documentation).

    # This option currently enables support for TCPRoute and TLSRoute.

    experimentalChannel: false

    # -- Array of namespaces to watch. If left empty, Traefik watches all namespaces.

    namespaces: []

    # -- A label selector can be defined to filter on specific GatewayClass objects only.

    labelselector: ""



  file:

    # -- Create a file provider

    enabled: false

    # -- Allows Traefik to automatically watch for file changes

    watch: true

    # -- File content (YAML format, go template supported) (see https://doc.traefik.io/traefik/providers/file/)

    content: ""



# -- Add volumes to the traefik pod. The volume name will be passed to tpl.

# This can be used to mount a cert pair or a configmap that holds a config.toml file.

# After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg:

# `additionalArguments:

# - "--providers.file.filename=/config/dynamic.toml"

# - "--ping"

# - "--ping.entrypoint=web"`

volumes: []

# - name: public-cert

#   mountPath: "/certs"

#   type: secret

# - name: '{{ printf "%s-configs" .Release.Name }}'

#   mountPath: "/config"

#   type: configMap



# -- Additional volumeMounts to add to the Traefik container

additionalVolumeMounts: []

# -- For instance when using a logshipper for access logs

# - name: traefik-logs

#   mountPath: /var/log/traefik



logs:

  general:

    # -- Set [logs format](https://doc.traefik.io/traefik/observability/logs/#format)

    format:  # @schema enum:["common", "json", null]; type:[string, null]; default: "common"

    # By default, the level is set to INFO.

    # -- Alternative logging levels are DEBUG, PANIC, FATAL, ERROR, WARN, and INFO.

    level: "INFO"  # @schema enum:[INFO,WARN,ERROR,FATAL,PANIC,DEBUG]; default: "INFO"

    # -- To write the logs into a log file, use the filePath option.

    filePath: ""

    # -- When set to true and format is common, it disables the colorized output.

    noColor: false

  access:

    # -- To enable access logs

    enabled: false

    # -- Set [access log format](https://doc.traefik.io/traefik/observability/access-logs/#format)

    format:  # @schema enum:["CLF", "json", null]; type:[string, null]; default: "CLF"

    # filePath: "/var/log/traefik/access.log

    # -- Set [bufferingSize](https://doc.traefik.io/traefik/observability/access-logs/#bufferingsize)

    bufferingSize:  # @schema type:[integer, null]

    # -- Set [filtering](https://docs.traefik.io/observability/access-logs/#filtering)

    filters: {}

    statuscodes: ""

    retryattempts: false

    minduration: ""

    # -- Enables accessLogs for internal resources. Default: false.

    addInternals: false

    fields:

      general:

        # -- Set default mode for fields.names

        defaultmode: keep  # @schema enum:[keep, drop, redact]; default: keep

        # -- Names of the fields to limit.

        names: {}

      # -- [Limit logged fields or headers](https://doc.traefik.io/traefik/observability/access-logs/#limiting-the-fieldsincluding-headers)

      headers:

        # -- Set default mode for fields.headers

        defaultmode: drop  # @schema enum:[keep, drop, redact]; default: drop

        names: {}



metrics:

  ## -- Enable metrics for internal resources. Default: false

  addInternals: false



  ## -- Prometheus is enabled by default.

  ## -- It can be disabled by setting "prometheus: null"

  prometheus:

    # -- Entry point used to expose metrics.

    entryPoint: metrics

    ## Enable metrics on entry points. Default: true

    addEntryPointsLabels:  # @schema type:[boolean, null]

    ## Enable metrics on routers. Default: false

    addRoutersLabels:  # @schema type:[boolean, null]

    ## Enable metrics on services. Default: true

    addServicesLabels:  # @schema type:[boolean, null]

    ## Buckets for latency metrics. Default="0.1,0.3,1.2,5.0"

    buckets: ""

    ## When manualRouting is true, it disables the default internal router in

    ## order to allow creating a custom router for prometheus@internal service.

    manualRouting: false

    service:

      # -- Create a dedicated metrics service to use with ServiceMonitor

      enabled: false

      labels: {}

      annotations: {}

    # -- When set to true, it won't check if Prometheus Operator CRDs are deployed

    disableAPICheck:  # @schema type:[boolean, null]

    serviceMonitor:

      # -- Enable optional CR for Prometheus Operator. See EXAMPLES.md for more details.

      enabled: false

      metricRelabelings: []

      relabelings: []

      jobLabel: ""

      interval: ""

      honorLabels: false

      scrapeTimeout: ""

      honorTimestamps: false

      enableHttp2: false

      followRedirects: false

      additionalLabels: {}

      namespace: ""

      namespaceSelector: {}

    prometheusRule:

      # -- Enable optional CR for Prometheus Operator. See EXAMPLES.md for more details.

      enabled: false

      additionalLabels: {}

      namespace: ""



  #  datadog:

  #    ## Address instructs exporter to send metrics to datadog-agent at this address.

  #    address: "127.0.0.1:8125"

  #    ## The interval used by the exporter to push metrics to datadog-agent. Default=10s

  #    # pushInterval: 30s

  #    ## The prefix to use for metrics collection. Default="traefik"

  #    # prefix: traefik

  #    ## Enable metrics on entry points. Default=true

  #    # addEntryPointsLabels: false

  #    ## Enable metrics on routers. Default=false

  #    # addRoutersLabels: true

  #    ## Enable metrics on services. Default=true

  #    # addServicesLabels: false

  #  influxdb2:

  #    ## Address instructs exporter to send metrics to influxdb v2 at this address.

  #    address: localhost:8086

  #    ## Token with which to connect to InfluxDB v2.

  #    token: xxx

  #    ## Organisation where metrics will be stored.

  #    org: ""

  #    ## Bucket where metrics will be stored.

  #    bucket: ""

  #    ## The interval used by the exporter to push metrics to influxdb. Default=10s

  #    # pushInterval: 30s

  #    ## Additional labels (influxdb tags) on all metrics.

  #    # additionalLabels:

  #    #   env: production

  #    #   foo: bar

  #    ## Enable metrics on entry points. Default=true

  #    # addEntryPointsLabels: false

  #    ## Enable metrics on routers. Default=false

  #    # addRoutersLabels: true

  #    ## Enable metrics on services. Default=true

  #    # addServicesLabels: false

  #  statsd:

  #    ## Address instructs exporter to send metrics to statsd at this address.

  #    address: localhost:8125

  #    ## The interval used by the exporter to push metrics to influxdb. Default=10s

  #    # pushInterval: 30s

  #    ## The prefix to use for metrics collection. Default="traefik"

  #    # prefix: traefik

  #    ## Enable metrics on entry points. Default=true

  #    # addEntryPointsLabels: false

  #    ## Enable metrics on routers. Default=false

  #    # addRoutersLabels: true

  #    ## Enable metrics on services. Default=true

  #    # addServicesLabels: false

  otlp:

    # -- Set to true in order to enable the OpenTelemetry metrics

    enabled: false

    # -- Enable metrics on entry points. Default: true

    addEntryPointsLabels:  # @schema type:[boolean, null]

    # -- Enable metrics on routers. Default: false

    addRoutersLabels:  # @schema type:[boolean, null]

    # -- Enable metrics on services. Default: true

    addServicesLabels:  # @schema type:[boolean, null]

    # -- Explicit boundaries for Histogram data points. Default: [.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10]

    explicitBoundaries: []

    # -- Interval at which metrics are sent to the OpenTelemetry Collector. Default: 10s

    pushInterval: ""

    http:

      # -- Set to true in order to send metrics to the OpenTelemetry Collector using HTTP.

      enabled: false

      # -- Format: <scheme>://<host>:<port><path>. Default: http://localhost:4318/v1/metrics

      endpoint: ""

      # -- Additional headers sent with metrics by the reporter to the OpenTelemetry Collector.

      headers: {}

      ## Defines the TLS configuration used by the reporter to send metrics to the OpenTelemetry Collector.

      tls:

        # -- The path to the certificate authority, it defaults to the system bundle.

        ca: ""

        # -- The path to the public certificate. When using this option, setting the key option is required.

        cert: ""

        # -- The path to the private key. When using this option, setting the cert option is required.

        key: ""

        # -- When set to true, the TLS connection accepts any certificate presented by the server regardless of the hostnames it covers.

        insecureSkipVerify:  # @schema type:[boolean, null]

    grpc:

      # -- Set to true in order to send metrics to the OpenTelemetry Collector using gRPC

      enabled: false

      # -- Format: <host>:<port>. Default: localhost:4317

      endpoint: ""

      # -- Allows reporter to send metrics to the OpenTelemetry Collector without using a secured protocol.

      insecure: false

      ## Defines the TLS configuration used by the reporter to send metrics to the OpenTelemetry Collector.

      tls:

        # -- The path to the certificate authority, it defaults to the system bundle.

        ca: ""

        # -- The path to the public certificate. When using this option, setting the key option is required.

        cert: ""

        # -- The path to the private key. When using this option, setting the cert option is required.

        key: ""

        # -- When set to true, the TLS connection accepts any certificate presented by the server regardless of the hostnames it covers.

        insecureSkipVerify: false



## Tracing

# -- https://doc.traefik.io/traefik/observability/tracing/overview/

tracing:  # @schema additionalProperties: false

  # -- Enables tracing for internal resources. Default: false.

  addInternals: false

  otlp:

    # -- See https://doc.traefik.io/traefik/v3.0/observability/tracing/opentelemetry/

    enabled: false

    http:

      # -- Set to true in order to send traces to the OpenTelemetry Collector using HTTP.

      enabled: false

      # -- Format: <scheme>://<host>:<port><path>. Default: http://localhost:4318/v1/traces

      endpoint: ""

      # -- Additional headers sent with traces by the reporter to the OpenTelemetry Collector.

      headers: {}

      ## Defines the TLS configuration used by the reporter to send traces to the OpenTelemetry Collector.

      tls:

        # -- The path to the certificate authority, it defaults to the system bundle.

        ca: ""

        # -- The path to the public certificate. When using this option, setting the key option is required.

        cert: ""

        # -- The path to the private key. When using this option, setting the cert option is required.

        key: ""

        # -- When set to true, the TLS connection accepts any certificate presented by the server regardless of the hostnames it covers.

        insecureSkipVerify: false

    grpc:

      # -- Set to true in order to send traces to the OpenTelemetry Collector using gRPC

      enabled: false

      # -- Format: <host>:<port>. Default: localhost:4317

      endpoint: ""

      # -- Allows reporter to send traces to the OpenTelemetry Collector without using a secured protocol.

      insecure: false

      ## Defines the TLS configuration used by the reporter to send traces to the OpenTelemetry Collector.

      tls:

        # -- The path to the certificate authority, it defaults to the system bundle.

        ca: ""

        # -- The path to the public certificate. When using this option, setting the key option is required.

        cert: ""

        # -- The path to the private key. When using this option, setting the cert option is required.

        key: ""

        # -- When set to true, the TLS connection accepts any certificate presented by the server regardless of the hostnames it covers.

        insecureSkipVerify: false



# -- Global command arguments to be passed to all traefik's pods

globalArguments:

- "--global.checknewversion"

- "--global.sendanonymoususage"



# -- Additional arguments to be passed at Traefik's binary

# See [CLI Reference](https://docs.traefik.io/reference/static-configuration/cli/)

# Use curly braces to pass values: `helm install --set="additionalArguments={--providers.kubernetesingress.ingressclass=traefik-internal,--log.level=DEBUG}"`

additionalArguments: []

#  - "--providers.kubernetesingress.ingressclass=traefik-internal"

#  - "--log.level=DEBUG"



# -- Environment variables to be passed to Traefik's binary

# @default -- See _values.yaml_

env:

- name: POD_NAME

  valueFrom:

    fieldRef:

      fieldPath: metadata.name

- name: POD_NAMESPACE

  valueFrom:

    fieldRef:

      fieldPath: metadata.namespace



# -- Environment variables to be passed to Traefik's binary from configMaps or secrets

envFrom: []



ports:
  # Added section: expose Redis externally through a dedicated TCP entrypoint.
  # NOTE: entries under `ports:` must be indented as children of the mapping,
  # and `expose` must be a map (`default: true`) in this chart version — the
  # plain boolean form fails the chart's values schema.
  redis:
    port: 6379
    expose:
      default: true
    exposedPort: 6379  # externally exposed port
    protocol: TCP
  # Added section: expose MySQL externally through a dedicated TCP entrypoint.
  mysql:
    port: 3306
    expose:
      default: true
    exposedPort: 3306  # externally exposed port
    protocol: TCP

  traefik:

    port: 9000

    # -- Use hostPort if set.

    hostPort:  # @schema type:[integer, null]; minimum:0

    # -- Use hostIP if set. If not set, Kubernetes will default to 0.0.0.0, which

    # means it's listening on all your interfaces and all your IPs. You may want

    # to set this value if you need traefik to listen on specific interface

    # only.

    hostIP:  # @schema type:[string, null]



    # Defines whether the port is exposed if service.type is LoadBalancer or

    # NodePort.

    #

    # -- You SHOULD NOT expose the traefik port on production deployments.

    # If you want to access it from outside your cluster,

    # use `kubectl port-forward` or create a secure ingress

    expose:

      default: false

    # -- The exposed port for this service

    exposedPort: 9000

    # -- The port protocol (TCP/UDP)

    protocol: TCP

  web:

    ## -- Enable this entrypoint as a default entrypoint. When a service doesn't explicitly set an entrypoint it will only use this entrypoint.

    # asDefault: true

    port: 8000

    # hostPort: 8000

    # containerPort: 8000

    expose:

      default: true

    exposedPort: 80

    ## -- Different target traefik port on the cluster, useful for IP type LB

    targetPort:  # @schema type:[integer, null]; minimum:0

    # The port protocol (TCP/UDP)

    protocol: TCP

    # -- See [upstream documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport)

    nodePort:  # @schema type:[integer, null]; minimum:0

    # Port Redirections

    # Added in 2.2, you can make permanent redirects via entrypoints.

    # https://docs.traefik.io/routing/entrypoints/#redirection

    redirectTo: {}

    forwardedHeaders:

      # -- Trust forwarded headers information (X-Forwarded-*).

      trustedIPs: []

      insecure: false

    proxyProtocol:

      # -- Enable the Proxy Protocol header parsing for the entry point

      trustedIPs: []

      insecure: false

    # -- Set transport settings for the entrypoint; see also

    # https://doc.traefik.io/traefik/routing/entrypoints/#transport

    transport:

      respondingTimeouts:

        readTimeout:   # @schema type:[string, integer, null]

        writeTimeout:  # @schema type:[string, integer, null]

        idleTimeout:   # @schema type:[string, integer, null]

      lifeCycle:

        requestAcceptGraceTimeout:  # @schema type:[string, integer, null]

        graceTimeOut:               # @schema type:[string, integer, null]

      keepAliveMaxRequests:         # @schema type:[integer, null]; minimum:0

      keepAliveMaxTime:             # @schema type:[string, integer, null]

  websecure:

    ## -- Enable this entrypoint as a default entrypoint. When a service doesn't explicitly set an entrypoint it will only use this entrypoint.

    # asDefault: true

    port: 8443

    hostPort:  # @schema type:[integer, null]; minimum:0

    containerPort:  # @schema type:[integer, null]; minimum:0

    expose:

      default: true

    exposedPort: 443

    ## -- Different target traefik port on the cluster, useful for IP type LB

    targetPort:  # @schema type:[integer, null]; minimum:0

    ## -- The port protocol (TCP/UDP)

    protocol: TCP

    # -- See [upstream documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport)

    nodePort:  # @schema type:[integer, null]; minimum:0

    # -- See [upstream documentation](https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol)

    appProtocol:  # @schema type:[string, null]

    # -- See [upstream documentation](https://doc.traefik.io/traefik/routing/entrypoints/#allowacmebypass)

    allowACMEByPass: false

    http3:

      ## -- Enable HTTP/3 on the entrypoint

      ## Enabling it will also enable http3 experimental feature

      ## https://doc.traefik.io/traefik/routing/entrypoints/#http3

      ## There are known limitations when trying to listen on same ports for

      ## TCP & UDP (Http3). There is a workaround in this chart using dual Service.

      ## https://github.com/kubernetes/kubernetes/issues/47249#issuecomment-587960741

      enabled: false

      advertisedPort:  # @schema type:[integer, null]; minimum:0

    forwardedHeaders:

        # -- Trust forwarded headers information (X-Forwarded-*).

      trustedIPs: []

      insecure: false

    proxyProtocol:

      # -- Enable the Proxy Protocol header parsing for the entry point

      trustedIPs: []

      insecure: false

    # -- See [upstream documentation](https://doc.traefik.io/traefik/routing/entrypoints/#transport)

    transport:

      respondingTimeouts:

        readTimeout:   # @schema type:[string, integer, null]

        writeTimeout:  # @schema type:[string, integer, null]

        idleTimeout:   # @schema type:[string, integer, null]

      lifeCycle:

        requestAcceptGraceTimeout:  # @schema type:[string, integer, null]

        graceTimeOut:               # @schema type:[string, integer, null]

      keepAliveMaxRequests:         # @schema type:[integer, null]; minimum:0

      keepAliveMaxTime:             # @schema type:[string, integer, null]

    # --  See [upstream documentation](https://doc.traefik.io/traefik/routing/entrypoints/#tls)

    tls:

      enabled: true

      options: ""

      certResolver: ""

      domains: []

    # -- One can apply Middlewares on an entrypoint

    # https://doc.traefik.io/traefik/middlewares/overview/

    # https://doc.traefik.io/traefik/routing/entrypoints/#middlewares

    # -- /!\ It introduces here a link between your static configuration and your dynamic configuration /!\

    # It follows the provider naming convention: https://doc.traefik.io/traefik/providers/overview/#provider-namespace

    #   - namespace-name1@kubernetescrd

    #   - namespace-name2@kubernetescrd

    middlewares: []

  metrics:

    # -- When using hostNetwork, use another port to avoid conflict with node exporter:

    # https://github.com/prometheus/prometheus/wiki/Default-port-allocations

    port: 9100

    # -- You may not want to expose the metrics port on production deployments.

    # If you want to access it from outside your cluster,

    # use `kubectl port-forward` or create a secure ingress

    expose:

      default: false

    # -- The exposed port for this service

    exposedPort: 9100

    # -- The port protocol (TCP/UDP)

    protocol: TCP



# -- TLS Options are created as [TLSOption CRDs](https://doc.traefik.io/traefik/https/tls/#tls-options)

# When using `labelSelector`, you'll need to set labels on tlsOption accordingly.

# See EXAMPLE.md for details.

tlsOptions: {}



# -- TLS Store are created as [TLSStore CRDs](https://doc.traefik.io/traefik/https/tls/#default-certificate). This is useful if you want to set a default certificate. See EXAMPLE.md for details.

tlsStore: {}



service:

  enabled: true

  ## -- Single service is using `MixedProtocolLBService` feature gate.

  ## -- When set to false, it will create two Service, one for TCP and one for UDP.

  single: true

  type: LoadBalancer

  # -- Additional annotations applied to both TCP and UDP services (e.g. for cloud provider specific config)

  annotations: {}

  # -- Additional annotations for TCP service only

  annotationsTCP: {}

  # -- Additional annotations for UDP service only

  annotationsUDP: {}

  # -- Additional service labels (e.g. for filtering Service by custom labels)

  labels: {}

  # -- Additional entries here will be added to the service spec.

  # -- Cannot contain type, selector or ports entries.

  spec: {}

  # externalTrafficPolicy: Cluster

  # loadBalancerIP: "1.2.3.4"

  # clusterIP: "2.3.4.5"

  loadBalancerSourceRanges: []

  # - 192.168.0.1/32

  # - 172.16.0.0/16

  ## -- Class of the load balancer implementation

  # loadBalancerClass: service.k8s.aws/nlb

  externalIPs: []

  # - 1.2.3.4

  ## One of SingleStack, PreferDualStack, or RequireDualStack.

  # ipFamilyPolicy: SingleStack

  ## List of IP families (e.g. IPv4 and/or IPv6).

  ## ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services

  # ipFamilies:

  #   - IPv4

  #   - IPv6

  ##

  additionalServices: {}

  ## -- An additional and optional internal Service.

  ## Same parameters as external Service

  # internal:

  #   type: ClusterIP

  #   # labels: {}

  #   # annotations: {}

  #   # spec: {}

  #   # loadBalancerSourceRanges: []

  #   # externalIPs: []

  #   # ipFamilies: [ "IPv4","IPv6" ]



autoscaling:

  # -- Create HorizontalPodAutoscaler object.

  # See EXAMPLES.md for more details.

  enabled: false



persistence:

  # -- Enable persistence using Persistent Volume Claims

  # ref: http://kubernetes.io/docs/user-guide/persistent-volumes/

  # It can be used to store TLS certificates, see `storage` in certResolvers

  enabled: false

  name: data

  existingClaim: ""

  accessMode: ReadWriteOnce

  size: 128Mi

  storageClass: ""

  volumeName: ""

  path: /data

  annotations: {}

  # -- Only mount a subpath of the Volume into the pod

  subPath: ""



# -- Certificates resolvers configuration.

# Ref: https://doc.traefik.io/traefik/https/acme/#certificate-resolvers

# See EXAMPLES.md for more details.

certResolvers: {}



# -- If hostNetwork is true, runs traefik in the host network namespace

# To prevent unschedulable pods due to port collisions, if hostNetwork=true

# and replicas>1, a pod anti-affinity is recommended and will be set if the

# affinity is left as default.

hostNetwork: false



# -- Whether Role Based Access Control objects like roles and rolebindings should be created

rbac:  # @schema additionalProperties: false

  enabled: true

  # When set to true:

  # 1. Use `Role` and `RoleBinding` instead of `ClusterRole` and `ClusterRoleBinding`.

  # 2. Set `disableIngressClassLookup` on Kubernetes Ingress providers with Traefik Proxy v3 until v3.1.1

  # 3. Set `disableClusterScopeResources` on Kubernetes Ingress and CRD providers with Traefik Proxy v3.1.2+

  # **NOTE**: `IngressClass`, `NodePortLB` and **Gateway** provider cannot be used with namespaced RBAC.

  # See [upstream documentation](https://doc.traefik.io/traefik/providers/kubernetes-ingress/#disableclusterscoperesources) for more details.

  namespaced: false

  # Enable user-facing roles

  # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles

  aggregateTo: []

  # List of Kubernetes secrets that are accessible for Traefik. If empty, then access is granted to every secret.

  secretResourceNames: []



# -- Enable to create a PodSecurityPolicy and assign it to the Service Account via RoleBinding or ClusterRoleBinding

podSecurityPolicy:

  enabled: false



# -- The service account the pods will use to interact with the Kubernetes API

serviceAccount:  # @schema additionalProperties: false

  # If set, an existing service account is used

  # If not set, a service account is created automatically using the fullname template

  name: ""



# -- Additional serviceAccount annotations (e.g. for oidc authentication)

serviceAccountAnnotations: {}



# -- [Resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for `traefik` container.

resources: {}



# -- This example pod anti-affinity forces the scheduler to put traefik pods

# -- on nodes where no other traefik pods are scheduled.

# It should be used when hostNetwork: true to prevent port conflicts

affinity: {}

#  podAntiAffinity:

#    requiredDuringSchedulingIgnoredDuringExecution:

#      - labelSelector:

#          matchLabels:

#            app.kubernetes.io/name: '{{ template "traefik.name" . }}'

#            app.kubernetes.io/instance: '{{ .Release.Name }}-{{ .Release.Namespace }}'

#        topologyKey: kubernetes.io/hostname



# -- nodeSelector is the simplest recommended form of node selection constraint.

nodeSelector: {}

# -- Tolerations allow the scheduler to schedule pods with matching taints.

tolerations: []

# -- You can use topology spread constraints to control

# how Pods are spread across your cluster among failure-domains.

topologySpreadConstraints: []

# This example topologySpreadConstraints forces the scheduler to put traefik pods

# on nodes where no other traefik pods are scheduled.

#  - labelSelector:

#      matchLabels:

#        app: '{{ template "traefik.name" . }}'

#    maxSkew: 1

#    topologyKey: kubernetes.io/hostname

#    whenUnsatisfiable: DoNotSchedule



# -- [Pod Priority and Preemption](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/)

priorityClassName: ""



# -- [SecurityContext](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1)

# @default -- See _values.yaml_

securityContext:

  allowPrivilegeEscalation: false

  capabilities:

    drop: [ALL]

  readOnlyRootFilesystem: true



# -- [Pod Security Context](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context)

# @default -- See _values.yaml_

podSecurityContext:

  runAsGroup: 65532

  runAsNonRoot: true

  runAsUser: 65532



#

# -- Extra objects to deploy (value evaluated as a template)

#

# In some cases, it can avoid the need for additional, extended or adhoc deployments.

# See #595 for more details and traefik/tests/values/extra.yaml for example.

extraObjects: []



# -- This field override the default Release Namespace for Helm.

# It will not affect optional CRDs such as `ServiceMonitor` and `PrometheusRules`

namespaceOverride: ""



## -- This field override the default app.kubernetes.io/instance label for all Objects.

instanceLabelOverride: ""



# Traefik Hub configuration. See https://doc.traefik.io/traefik-hub/

hub:

  # -- Name of `Secret` with key 'token' set to a valid license token.

  # It enables API Gateway.

  token: ""

  apimanagement:

    # -- Set to true in order to enable API Management. Requires a valid license token.

    enabled: false

    admission:

      # -- WebHook admission server listen address. Default: "0.0.0.0:9943".

      listenAddr: ""

      # -- Certificate of the WebHook admission server. Default: "hub-agent-cert".

      secretName: ""



  ratelimit:

    redis:

      # -- Enable Redis Cluster. Default: true.

      cluster:    # @schema type:[boolean, null]

      # -- Database used to store information. Default: "0".

      database:   # @schema type:[string, null]

      # -- Endpoints of the Redis instances to connect to. Default: "".

      endpoints: ""

      # -- The username to use when connecting to Redis endpoints. Default: "".

      username: ""

      # -- The password to use when connecting to Redis endpoints. Default: "".

      password: ""

      sentinel:

        # -- Name of the set of main nodes to use for main selection. Required when using Sentinel. Default: "".

        masterset: ""

        # -- Username to use for sentinel authentication (can be different from endpoint username). Default: "".

        username: ""

        # -- Password to use for sentinel authentication (can be different from endpoint password). Default: "".

        password: ""

      # -- Timeout applied on connection with redis. Default: "0s".

      timeout: ""

      tls:

        # -- Path to the certificate authority used for the secured connection.

        ca: ""

        # -- Path to the public certificate used for the secure connection.

        cert: ""

        # -- Path to the private key used for the secure connection.

        key: ""

        # -- When insecureSkipVerify is set to true, the TLS connection accepts any certificate presented by the server. Default: false.

        insecureSkipVerify: false

  # Enable export of errors logs to the platform. Default: true.

  sendlogs:  # @schema type:[boolean, null]

应用配置

1
2
3
helm upgrade traefik traefik/traefik \
--namespace traefik -f traefik-values.yaml

IngressRouteTCP示例

mysql-traefik-ingress.yaml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
  namespace: default
spec:
  ...
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-svc
  namespace: default
spec:
  selector:
    app: mysql
  ports:
    - port: 3306
      name: mysql-tcp
      protocol: TCP
  clusterIP: None # 定义Headless Service
---
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
  name: mysql-ingress
  namespace: default # 根据实际情况修改,或应用文件时指定
spec:
  entryPoints:
    - mysql
  routes:
    - match: HostSNI(`*`)
      services:
        - name: mysql-svc
          namespace: default

YOLOv8目标检测:使用ONNX模型进行推理_onnx模型推理-CSDN博客

Excerpt

文章浏览阅读8.2k次,点赞46次,收藏119次。本文详细介绍了如何在COCO数据集上使用YOLOv8目标检测模型进行推理,涉及环境配置、代码实现(包括图像、视频和摄像头检测),以及展示ONNX模型在不同大小版本(YOLOv8n,YOLOv8s,YOLOv8m,YOLOv8l,YOLOv8x)上的实验结果。


基于COCO数据集的YOLOv8目标检测onnx模型推理

在本博客中,我们将探讨如何使用YOLOv8目标检测模型进行推理,包括图片,视频文件,摄像头实时检测,特别是ONNX在不同大小(YOLOv8n, YOLOv8s, YOLOv8m, YOLOv8l, YOLOv8x)的模型上进行的实验。我们还将讨论所需的环境配置,代码实现,以及如何展示推理结果。

环境配置

在详细描述环境配置和安装步骤之前,请确保您的系统已经安装了Python和pip。下面是详细的环境配置步骤,适用于基于YOLOv8模型进行目标检测的项目。

1. 安装必要的Python库

1
pip install onnxruntime-gpu==1.13.1 opencv-python==4.7.0.68 numpy==1.24.1 Pillow==9.4.0 -i https://pypi.tuna.tsinghua.edu.cn/simple/

如果您没有GPU或者不打算使用GPU,可以安装onnxruntime而不是onnxruntime-gpu

1
pip install onnxruntime==1.13.1 opencv-python==4.7.0.68 numpy==1.24.1 Pillow==9.4.0 -i https://pypi.tuna.tsinghua.edu.cn/simple/

2. 验证安装

安装完成后,您可以通过运行Python并尝试导入安装的包来验证是否成功安装了所有必要的库:

1
import onnxruntime
import cv2
import numpy
import PIL

如果上述命令没有引发任何错误,那么恭喜您,您已成功配置了运行环境。

小贴士

  • 如果您在安装过程中遇到任何问题,可能需要更新pip到最新版本:pip install --upgrade pip
  • 对于使用NVIDIA GPU的用户,确保您的系统已安装CUDA和cuDNN。onnxruntime-gpu要求系统预装这些NVIDIA库以利用GPU加速。

按照这些步骤,您应该能够成功配置环境并运行基于YOLOv8的目标检测项目了。

权重下载

YOLOv8模型的权重可以通过以下百度网盘链接下载:

请确保下载适合您需求的模型版本。

代码实现

以下是进行目标检测的整体代码流程,包括模型加载、图像预处理、推理执行、后处理及结果展示的步骤。

1
import time

import cv2
import numpy as np
import onnxruntime as ort
from PIL import Image

# Minimum confidence for a detection to be kept.
confidence_thres = 0.35
# IoU threshold used by non-maximum suppression.
iou_thres = 0.5
# COCO class-id -> label mapping (80 classes).
classes = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane',
           5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light',
           10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', 13: 'bench',
           14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow',
           20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack',
           25: 'umbrella', 26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee',
           30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite',
           34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard',
           37: 'surfboard', 38: 'tennis racket', 39: 'bottle', 40: 'wine glass',
           41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl',
           46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli',
           51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake',
           56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed',
           60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse',
           65: 'remote', 66: 'keyboard', 67: 'cell phone', 68: 'microwave',
           69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book',
           74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear',
           78: 'hair drier', 79: 'toothbrush'}
# One random color per class for drawing boxes.
color_palette = np.random.uniform(100, 255, size=(len(classes), 3))
# Execution providers: try CUDA first, fall back to CPU.
providers = [
    ('CUDAExecutionProvider', {
        'device_id': 0,  # GPU device id (relevant when several GPUs are present)
    }),
    'CPUExecutionProvider',
]


def calculate_iou(box, other_boxes):
    """Compute IoU between one box and an array of other boxes.

    Parameters:
    - box: single box as [x1, y1, width, height].
    - other_boxes: sequence of boxes in the same format.

    Returns an array with one IoU value per entry of `other_boxes`.
    """
    others = np.array(other_boxes)
    # Intersection rectangle (top-left / bottom-right corners).
    x1 = np.maximum(box[0], others[:, 0])
    y1 = np.maximum(box[1], others[:, 1])
    x2 = np.minimum(box[0] + box[2], others[:, 0] + others[:, 2])
    y2 = np.minimum(box[1] + box[3], others[:, 1] + others[:, 3])
    intersection_area = np.maximum(0, x2 - x1) * np.maximum(0, y2 - y1)
    box_area = box[2] * box[3]
    other_boxes_area = others[:, 2] * others[:, 3]
    return intersection_area / (box_area + other_boxes_area - intersection_area)


def custom_NMSBoxes(boxes, scores, confidence_threshold, iou_threshold):
    """Plain-NumPy non-maximum suppression.

    Returns indices of the kept boxes. The confidence filter below uses `>=`
    so the filtered set is identical to what `postprocess` already collected
    (which also uses `>=`); the returned indices are therefore valid into the
    caller's `boxes`/`scores` lists.
    """
    if len(boxes) == 0:
        return []
    scores = np.array(scores)
    boxes = np.array(boxes)
    # BUG FIX: was `scores > confidence_threshold`; with a strict comparison a
    # box scoring exactly the threshold passed the caller's `>=` pre-filter but
    # was dropped here, shifting every subsequent index.
    mask = scores >= confidence_threshold
    filtered_boxes = boxes[mask]
    filtered_scores = scores[mask]
    if len(filtered_boxes) == 0:
        return []
    # Process boxes in descending score order.
    sorted_indices = np.argsort(filtered_scores)[::-1]
    indices = []
    while len(sorted_indices) > 0:
        current_index = sorted_indices[0]
        indices.append(current_index)
        if len(sorted_indices) == 1:
            break
        current_box = filtered_boxes[current_index]
        other_boxes = filtered_boxes[sorted_indices[1:]]
        iou = calculate_iou(current_box, other_boxes)
        # Keep only candidates that do not overlap the accepted box too much.
        non_overlapping_indices = np.where(iou <= iou_threshold)[0]
        sorted_indices = sorted_indices[non_overlapping_indices + 1]
    return indices


def draw_detections(img, box, score, class_id):
    """Draw one detection (bounding box + class/score label) onto `img` in place."""
    x1, y1, w, h = box
    color = color_palette[class_id]
    cv2.rectangle(img, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)
    label = f'{classes[class_id]}: {score:.2f}'
    (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
    label_x = x1
    # Put the label above the box unless that would leave the image.
    label_y = y1 - 10 if y1 - 10 > label_height else y1 + 10
    # Filled rectangle as background for the label text.
    cv2.rectangle(img, (label_x, label_y - label_height),
                  (label_x + label_width, label_y + label_height), color, cv2.FILLED)
    cv2.putText(img, label, (label_x, label_y), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 0, 0), 1, cv2.LINE_AA)


def preprocess(img, input_width, input_height):
    """Convert a BGR frame to the model's NCHW float32 input tensor.

    Returns (image_data, original_height, original_width).
    """
    img_height, img_width = img.shape[:2]
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (input_width, input_height))
    image_data = np.array(img) / 255.0          # normalize to [0, 1]
    image_data = np.transpose(image_data, (2, 0, 1))  # HWC -> CHW
    image_data = np.expand_dims(image_data, axis=0).astype(np.float32)
    return image_data, img_height, img_width


def postprocess(input_image, output, input_width, input_height, img_width, img_height):
    """Decode raw model output, apply NMS, and draw results onto `input_image`.

    Parameters:
    - input_image: original image (modified in place, also returned).
    - output: list of model outputs; output[0] is the detection tensor.
    - input_width/input_height: model input size.
    - img_width/img_height: original image size.
    """
    outputs = np.transpose(np.squeeze(output[0]))
    rows = outputs.shape[0]
    boxes = []
    scores = []
    class_ids = []
    # Scale factors from model space back to the original image.
    x_factor = img_width / input_width
    y_factor = img_height / input_height
    for i in range(rows):
        classes_scores = outputs[i][4:]
        max_score = np.amax(classes_scores)
        if max_score >= confidence_thres:
            class_id = np.argmax(classes_scores)
            x, y, w, h = outputs[i][0], outputs[i][1], outputs[i][2], outputs[i][3]
            # Convert center/size to top-left/size in original-image coordinates.
            left = int((x - w / 2) * x_factor)
            top = int((y - h / 2) * y_factor)
            width = int(w * x_factor)
            height = int(h * y_factor)
            class_ids.append(class_id)
            scores.append(max_score)
            boxes.append([left, top, width, height])
    indices = custom_NMSBoxes(boxes, scores, confidence_thres, iou_thres)
    for i in indices:
        draw_detections(input_image, boxes[i], scores[i], class_ids[i])
    return input_image


def init_detect_model(model_path):
    """Create the ONNX Runtime session and read the model's input geometry."""
    session = ort.InferenceSession(model_path, providers=providers)
    model_inputs = session.get_inputs()
    input_shape = model_inputs[0].shape
    # NOTE(review): assumes an NCHW input whose dims 2 and 3 are equal
    # (e.g. 1x3x640x640 as in stock YOLOv8 exports); for a non-square model
    # width/height here would be swapped — confirm before reuse.
    input_width = input_shape[2]
    input_height = input_shape[3]
    return session, model_inputs, input_width, input_height


def detect_object(image, session, model_inputs, input_width, input_height):
    """Run one image/frame through the model and return it with boxes drawn."""
    # Accept both PIL images and NumPy arrays.
    if isinstance(image, Image.Image):
        result_image = np.array(image)
    else:
        result_image = image
    img_data, img_height, img_width = preprocess(result_image, input_width, input_height)
    outputs = session.run(None, {model_inputs[0].name: img_data})
    return postprocess(result_image, outputs, input_width, input_height, img_width, img_height)


if __name__ == '__main__':
    model_path = "yolov8n.onnx"
    session, model_inputs, input_width, input_height = init_detect_model(model_path)
    # mode 1: single image; mode 2: webcam with live FPS; mode 3: video file saved to disk.
    mode = 1
    if mode == 1:
        image_data = cv2.imread("street.jpg")
        result_image = detect_object(image_data, session, model_inputs, input_width, input_height)
        cv2.imwrite("output_image.jpg", result_image)
        cv2.imshow('Output', result_image)
        cv2.waitKey(0)
    elif mode == 2:
        # BUG FIX: the original called cv2.VideoCapture() with no argument,
        # which never opens a device; index 0 selects the default camera.
        cap = cv2.VideoCapture(0)
        if not cap.isOpened():
            print("Error: Could not open camera.")
            exit()
        frame_count = 0
        start_time = time.time()
        while True:
            ret, frame = cap.read()
            if not ret:
                print("Error: Could not read frame.")
                break
            output_image = detect_object(frame, session, model_inputs, input_width, input_height)
            # Running average FPS since capture start.
            frame_count += 1
            elapsed_time = time.time() - start_time
            fps = frame_count / elapsed_time
            print(f"FPS: {fps:.2f}")
            cv2.putText(output_image, f"FPS: {fps:.2f}", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
            cv2.imshow("Video", output_image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()
    elif mode == 3:
        input_video_path = 'kun.mp4'
        output_video_path = 'kun_det.mp4'
        cap = cv2.VideoCapture(input_video_path)
        if not cap.isOpened():
            print("Error: Could not open video.")
            exit()
        frame_width = int(cap.get(3))
        frame_height = int(cap.get(4))
        fps = cap.get(cv2.CAP_PROP_FPS)
        # 'mp4v' matches the .mp4 output container.
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))
        frame_count = 0
        start_time = time.time()
        while True:
            ret, frame = cap.read()
            if not ret:
                print("Info: End of video file.")
                break
            output_image = detect_object(frame, session, model_inputs, input_width, input_height)
            frame_count += 1
            elapsed_time = time.time() - start_time
            if elapsed_time > 0:
                print(f"FPS: {frame_count / elapsed_time:.2f}")
            out.write(output_image)
            # Optional live preview of the processed frames.
            cv2.imshow("Output Video", output_image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        out.release()
        cv2.destroyAllWindows()
    else:
        print("输入错误,请检查mode的赋值")

请根据您的需求调整置信度阈值、IOU阈值以及模型和mode的值(1为图片预测;2为摄像头检测; 3为视频检测)。

结果展示

推理完成后,您可以查看处理后的图像,如下所示:

  • 原始图片:

  • 检测后的图片:

请替换为您自己的图像路径来查看效果;或者其他两种模式(摄像头实时检测、视频文件检测)进行尝试。

总结

通过以上步骤,我们展示了如何使用YOLOv8进行目标检测的完整流程,从环境配置到代码实现和结果展示。此过程适用于YOLOv8目标检测任意模型进行检测任务。


希望这篇博客能够帮助您理解和实现基于YOLOv8的目标检测项目。如果有任何问题或需要进一步的帮助,请随时留言讨论。

C# 经典排序算法大全

Excerpt

文章浏览阅读84次。C# 经典排序算法大全选择排序using System;using System.Collections.Generic;using System.Linq;using System.Text;namespace sorter{ public class SelectionSorter { private int min; …_c# case复杂排序


C# 经典排序算法大全

选择排序

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace sorter
{
    /// <summary>
    /// Selection sort: each pass finds the smallest remaining element and
    /// swaps it to the front of the unsorted region. O(n^2) compares, O(n) swaps.
    /// </summary>
    public class SelectionSorter
    {
        // Index of the smallest element seen during the current scan.
        private int min;

        /// <summary>Sorts <paramref name="arr"/> in ascending order, in place.</summary>
        public void Sort(int[] arr)
        {
            for (int front = 0; front < arr.Length - 1; ++front)
            {
                min = front;
                // Scan the unsorted tail for the smallest element.
                for (int probe = front + 1; probe < arr.Length; ++probe)
                {
                    if (arr[probe] < arr[min])
                    {
                        min = probe;
                    }
                }
                // Move it to the front of the unsorted region.
                int held = arr[min];
                arr[min] = arr[front];
                arr[front] = held;
            }
        }
    }

    class Program
    {
        static void Main(string[] args)
        {
            int[] arrInt = new int[] { 4, 2, 7, 1, 8, 3, 9, 0, 5, 6 };
            SelectionSorter selSor = new SelectionSorter();
            selSor.Sort(arrInt);
            foreach (int value in arrInt)
            {
                Console.WriteLine(value);
            }
            Console.ReadKey();
        }
    }
}

冒泡排序

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace sorter
{
    /// <summary>
    /// Bubble sort with an early-exit optimization: if a full pass performs
    /// no swap, the array is already sorted and the loop stops.
    /// </summary>
    public class EbullitionSorter
    {
        /// <summary>Sorts <paramref name="arr"/> in ascending order, in place.</summary>
        public void Sort(int[] arr)
        {
            // `true` so the first pass always runs (when there are >= 2 elements).
            bool swappedLastPass = true;
            for (int pass = 1; pass < arr.Length && swappedLastPass; pass++)
            {
                swappedLastPass = false;
                // The largest `pass - 1` elements are already settled at the end.
                for (int idx = 0; idx < arr.Length - pass; idx++)
                {
                    if (arr[idx] > arr[idx + 1])
                    {
                        int held = arr[idx];
                        arr[idx] = arr[idx + 1];
                        arr[idx + 1] = held;
                        swappedLastPass = true;
                    }
                }
            }
        }
    }

    class Program
    {
        static void Main(string[] args)
        {
            int[] arrInt = new int[] { 4, 2, 7, 1, 8, 3, 9, 0, 5, 6 };
            EbullitionSorter selSor = new EbullitionSorter();
            selSor.Sort(arrInt);
            foreach (int value in arrInt)
            {
                Console.WriteLine(value);
            }
            Console.ReadKey();
        }
    }
}

高速排序

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace sorter
{
    /// <summary>
    /// Recursive quicksort using the middle element of the range as pivot
    /// (parked at list[low] during partitioning).
    /// </summary>
    public class QuickSorter
    {
        // Exchange two array slots passed by reference.
        private void swap(ref int l, ref int r)
        {
            int temp;
            temp = l;
            l = r;
            r = temp;
        }

        /// <summary>
        /// Sorts list[low..high] (both bounds inclusive) in ascending order.
        /// </summary>
        public void Sort(int[] list, int low, int high)
        {
            int pivot;
            int l, r;
            int mid;

            // Empty or single-element range: nothing to do.
            if (high <= low)
            {
                return;
            }
            // Two-element range: one compare-and-swap.
            else if (high == low + 1)
            {
                if (list[low] > list[high])
                {
                    swap(ref list[low], ref list[high]);
                }
                return;
            }

            // Pick the middle element as pivot and park it at list[low].
            mid = (low + high) >> 1;
            pivot = list[mid];
            swap(ref list[low], ref list[mid]);

            l = low + 1;
            r = high;
            do
            {
                while (l <= r && list[l] < pivot)
                {
                    l++;
                }
                // BUG FIX: the original scan `while (list[r] >= pivot) r--;`
                // had no lower bound. When the pivot was the minimum of the
                // range, r walked past `low` — and past index 0 when low == 0,
                // throwing IndexOutOfRangeException (e.g. Sort({2,1,3}, 0, 2)).
                // Since list[low] holds the pivot, stopping at r == low is safe.
                while (r > low && list[r] >= pivot)
                {
                    r--;
                }
                if (l < r)
                {
                    swap(ref list[l], ref list[r]);
                }
            } while (l < r);

            // Drop the pivot into its final position.
            list[low] = list[r];
            list[r] = pivot;

            // Recurse into the two partitions that still need sorting.
            if (low + 1 < r)
            {
                Sort(list, low, r - 1);
            }
            if (r + 1 < high)
            {
                Sort(list, r + 1, high);
            }
        }
    }

    class Program
    {
        static void Main(string[] args)
        {
            int[] arrInt = new int[] { 4, 2, 7, 1, 8, 3, 9, 0, 5, 6 };
            QuickSorter selSor = new QuickSorter();
            selSor.Sort(arrInt, 0, arrInt.Length - 1);
            foreach (int i in arrInt)
            {
                Console.WriteLine(i);
            }
            Console.ReadKey();
        }
    }
}

插入排序

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace sorter
{
    /// <summary>
    /// Insertion sort: grows a sorted prefix one element at a time by shifting
    /// larger elements right and dropping the new element into its slot.
    /// </summary>
    public class InsertionSorter
    {
        /// <summary>Sorts <paramref name="arr"/> in ascending order, in place.</summary>
        public void Sort(int[] arr)
        {
            for (int next = 1; next < arr.Length; next++)
            {
                int value = arr[next];
                int slot = next;
                // Shift the sorted prefix right until the insertion point appears.
                while (slot > 0 && arr[slot - 1] > value)
                {
                    arr[slot] = arr[slot - 1];
                    slot--;
                }
                arr[slot] = value;
            }
        }
    }

    class Program
    {
        static void Main(string[] args)
        {
            int[] arrInt = new int[] { 4, 2, 7, 1, 8, 3, 9, 0, 5, 6 };
            InsertionSorter selSor = new InsertionSorter();
            selSor.Sort(arrInt);
            foreach (int value in arrInt)
            {
                Console.WriteLine(value);
            }
            Console.ReadKey();
        }
    }
}

希尔排序

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace sorter
{
    /// <summary>
    /// Shell sort using the Knuth gap sequence 1, 4, 13, 40, ... (capped at
    /// length / 9), shrinking the gap by /3 each round.
    /// </summary>
    public class ShellSorter
    {
        /// <summary>Sorts <paramref name="arr"/> in ascending order, in place.</summary>
        public void Sort(int[] arr)
        {
            int inc;
            // Grow the starting gap: 1, 4, 13, 40, ... while <= length / 9.
            for (inc = 1; inc <= arr.Length / 9; inc = 3 * inc + 1) ;
            for (; inc > 0; inc /= 3)
            {
                // BUG FIX: the original advanced i by `inc`, so each gap pass
                // only gap-sorted the single chain starting at index 0; every
                // other chain was untouched and the whole workload fell on the
                // final inc == 1 pass (plain insertion sort). Advancing by 1
                // (per Sedgewick) gap-sorts all chains; the output is identical.
                for (int i = inc + 1; i <= arr.Length; i++)
                {
                    // Gapped insertion of arr[i - 1] into its chain.
                    int t = arr[i - 1];
                    int j = i;
                    while ((j > inc) && (arr[j - inc - 1] > t))
                    {
                        arr[j - 1] = arr[j - inc - 1];
                        j -= inc;
                    }
                    arr[j - 1] = t;
                }
            }
        }
    }

    class Program
    {
        static void Main(string[] args)
        {
            int[] arrInt = new int[] { 4, 2, 7, 1, 8, 3, 9, 0, 5, 6 };
            ShellSorter selSor = new ShellSorter();
            selSor.Sort(arrInt);
            foreach (int i in arrInt)
            {
                Console.WriteLine(i);
            }
            Console.ReadKey();
        }
    }
}

归并排序

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
using System;

using System.Collections.Generic;

using System.Linq;

using System.Text;

namespace Merge

{

public class Function

{

private int Groups;

private int CopyGroups;

private int mergerows;

private int[] Array27;

private static Random ran = new Random();

public Function(int length)

{

Array27 = new int[length];

for (int i = 0; i < length; i++)

Array27[i] = ran.Next(1, 100);

}

public void ToMergeSort()

{

MergeSort(Array27);

}

public void ToRecursiveMergeSort()

{

RecursiveMergeSort(Array27, 0, Array27.Length - 1);

}

public void ToNaturalMergeSort()

{

NaturalMergeSort(Array27);

}

public void RecursiveMergeSort(int[] Array, int left, int right)

{

int middle = (left + right) / 2;

if (left < right)

{

RecursiveMergeSort(Array, left, middle);

RecursiveMergeSort(Array, middle + 1, right);

MergeOne(Array, left, middle, right);

}

}

public void MergeOne(int[] Array, int left, int middle, int right)

{

int leftindex = left;

int rightindex = middle + 1;

int[] merge = new int[right + 1];

int index = 0;

while (leftindex <= middle && rightindex <= right)

merge[index++] = (Array[leftindex] - Array[rightindex]) >= 0 ? Array[rightindex++] : Array[leftindex++];

if (leftindex <= middle)

{

for (int i = leftindex; i <= middle; i++)

merge[index++] = Array[i];

}

if (rightindex <= right)

{

for (int i = rightindex; i <= right; i++)

merge[index++] = Array[i];

}

index = 0;

for (int i = left; i <= right; i++)

Array[i] = merge[index++];

}

public void MergeSort(int[] Array)

{

int[] merge = new int[Array.Length];

int P = 0;

while (true)

{

int index = 0;

int ENumb = (int)Math.Pow(2, P);

if (ENumb < Array.Length)

{

while (true)

{

int TorFAndrightindex = index;

if (TorFAndrightindex <= Array.Length - 1)

MergeTwo(Array, merge, index, ENumb);

else

break;

index += 2 * ENumb;

}

}

else

break;

P++;

}

}

public void MergeTwo(int[] Array, int[] merge, int index, int ENumb)

{

int left = index;

int middle = left + ENumb - 1;

if (middle >= Array.Length)

{

middle = index;

}

int mergeindex = index;

int right;

int middleTwo = (index + ENumb - 1) + 1;

right = index + ENumb + ENumb - 1;

if (right >= Array.Length - 1)

{

right = Array.Length - 1;

}

while (left <= middle && middleTwo <= right)

{

merge[mergeindex++] = Array[left] >= Array[middleTwo] ? Array[middleTwo++] : Array[left++];

}

if (left <= middle)

{

while (left <= middle && mergeindex < merge.Length)

merge[mergeindex++] = Array[left++];

}

if (middleTwo <= right)

{

while (middleTwo <= right)

merge[mergeindex++] = Array[middleTwo++];

}

if (right + 1 >= Array.Length)

Copy(Array, merge);

}

public void NaturalMergeSort(int[] Array)

{

int[,] PointsSymbol = LinearPoints(Array);

if (PointsSymbol[0, 1] == Array.Length - 1)

return;

else

NaturalMerge(Array, PointsSymbol);

}

public void NaturalMerge(int[] Array, int[,] PointsSymbol)

{

int left;

int right;

int leftend;

int rightend;

mergerows = GNumberTwo(Groups);

CopyGroups = Groups;

int[] TempArray = new int[Array.Length];

while (true)

{

int[,] TempPointsSymbol = new int[mergerows, 2];

int row = 0;

do

{

if (row != CopyGroups - 1)

{

left = PointsSymbol[row, 0];

leftend = PointsSymbol[row, 1];

right = PointsSymbol[row + 1, 0];

rightend = PointsSymbol[row + 1, 1];

MergeThree(Array, TempArray, left, leftend, right, rightend);

MergePointSymbol(PointsSymbol, TempPointsSymbol, row);

}

else

{

默认剩下的单独一个子数组已经虚拟合并。然后Copy进TempArray。

int TempRow = PointsSymbol[row, 0];

int TempCol = PointsSymbol[row, 1];

while (TempRow <= TempCol)

TempArray[TempRow] = Array[TempRow++];

TempPointsSymbol[row / 2, 0] = PointsSymbol[row, 0];

TempPointsSymbol[row / 2, 1] = PointsSymbol[row, 1];

break;

}

row += 2;

if (TempPointsSymbol[0, 1] == Array.Length - 1)

break;

}

while (row <= CopyGroups - 1);

Copy(Array, TempArray);

UpdatePointSymbol(PointsSymbol, TempPointsSymbol, row);

mergerows = GNumber(mergerows);

CopyGroups = GNumberTwo(CopyGroups);

if (PointsSymbol[0, 1] == Array.Length - 1)

break;

}

}

public int GNumber(int Value)

{

if (Value % 2 == 0)

Value /= 2;

else

Value -= 1;

return Value;

}

public int GNumberTwo(int Value)

{

if (Value % 2 == 0)

mergerows = Value / 2;

else

mergerows = Value / 2 + 1;

return mergerows;

}

public void MergeThree(int[] Array, int[] Temp, int left, int leftend, int right, int rightend)

{

int index = left;

while (left <= leftend && right <= rightend)

Temp[index++] = Array[left] >= Array[right] ? Array[right++] : Array[left++];

while (left <= leftend)

Temp[index++] = Array[left++];

while (right <= rightend)

Temp[index++] = Array[right++];

}

public void MergePointSymbol(int[,] PointsSymbol, int[,] TempPointsSymbol, int row)

{

int rowindex = row / 2;

TempPointsSymbol[rowindex, 0] = PointsSymbol[row, 0];

TempPointsSymbol[rowindex, 1] = PointsSymbol[row + 1, 1];

}

public void UpdatePointSymbol(int[,] PointsSymbol, int[,] TempPointsSymbol, int rows)

{

int row = 0;

for (; row < TempPointsSymbol.GetLength(0); row++)

{

for (int col = 0; col < 2; col++)

PointsSymbol[row, col] = TempPointsSymbol[row, col];

}

for (; row < PointsSymbol.GetLength(0); row++)

{

for (int col2 = 0; col2 < 2; col2++)

PointsSymbol[row, col2] = 0;

}

补剩下的index组,

// int row3 = TempPointsSymbol.GetLength(0); // PointsSymbol[row3, 0] = PointsSymbol[rows, 0]; // PointsSymbol[row3, 1] = PointsSymbol[rows, 1]; // //后面的清零 // for (int row4 = row3 + 1; row4 < PointsSymbol.GetLength(0); row4++) // { // for (int col4 = 0; col4 < 2; col4++) // PointsSymbol[row4, col4] = 0; // } //} } public int[,] LinearPoints(int[] Array) { Groups = 1; int StartPoint = 0; int row = 0; int col = 0; //最糟糕的情况就是有Array.Length行。 int[,] PointsSet = new int[Array.Length, 2]; //线性扫描Array,划分数组 //初始前index=0 PointsSet[row, col] = 0; do { //推断升序子数组终于的index开关 bool Judge = false; //从Array第二个数推断是否要结束或者是否是升序子数组. while (++StartPoint < Array.Length && Array[StartPoint] < Array[StartPoint - 1]) { //打开第一个升序子数组结束的index开关 Judge = true; //又一次開始第二个升序子数组的前index PointsSet[row, col + 1] = StartPoint - 1; //计算子数组个数 Groups++; //换行记录自然子数组的index row++; break; //–StartPoint; } //升序子数组结束index if (Judge) PointsSet[row, col] = StartPoint; //else // –StartPoint; } while (StartPoint < Array.Length); //终于index=StartPoint - 1,可是糟糕情况下还有剩余若干行为: 0,0 … PointsSet[row, col + 1] = StartPoint - 1; //调用展示方法 DisplaySubarray(Array, PointsSet, Groups); return PointsSet; } public void DisplaySubarray(int[] Array, int[,] PointsSet, int Groups) { Console.WriteLine(“Subarray is {0}:”, Groups); //展示子数组的前后index for (int r = 0; r < Groups; r++) { for (int c = 0; c < PointsSet.GetLength(1); c++) { Console.Write(PointsSet[r, c]); if (c < PointsSet.GetLength(1) - 1) Console.Write(“,”); } Console.Write(“\t\t”); } Console.WriteLine(); //展示分出的子数组 for (int v = 0; v < Groups; v++) { int i = 1; for (int r = PointsSet[v, 0]; r <= PointsSet[v, 1]; r++) { Console.Write(Array[r] + “ “); i++; } if (i <= 3) Console.Write(“\t\t”); else Console.Write(“\t”); if (PointsSet[v, 1] == Array.Length) break; } } public void Copy(int[] Array, int[] merge) { //一部分排好序的元素替换掉原来Array中的元素 for (int i = 0; i < Array.Length; i++) { Array[i] = merge[i]; } } //输出 public override string ToString() { string temporary = string.Empty; foreach (var 
element in Array27) temporary += element + “ “; temporary += “\n”; return temporary; } } class Program { static void Main(string[] args) { while (true) { Console.WriteLine(“请选择:”); Console.WriteLine(“1.归并排序(非递归)”); Console.WriteLine(“2.归并排序(递归)”); Console.WriteLine(“3.归并排序(自然合并)”); Console.WriteLine(“4.退出”); int Arraynum = Convert.ToInt32(Console.ReadLine()); switch (Arraynum) { case 4: Environment.Exit(0); break; case 1: Console.WriteLine(“Please Input Array Length”); int Leng271 = Convert.ToInt32(Console.ReadLine()); Function obj1 = new Function(Leng271); Console.WriteLine(“The original sequence:”); Console.WriteLine(obj1); Console.WriteLine(“‘MergeSort’ Finaly Sorting Result:”); obj1.ToMergeSort(); Console.WriteLine(obj1); break; case 2: Console.WriteLine(“Please Input Array Length”); int Leng272 = Convert.ToInt32(Console.ReadLine()); Function obj2 = new Function(Leng272); Console.WriteLine(“The original sequence:”); Console.WriteLine(obj2); Console.WriteLine(“‘RecursiveMergeSort’ Finaly Sorting Result:”); obj2.ToRecursiveMergeSort(); Console.WriteLine(obj2); break; case 3: Console.WriteLine(“Please Input Array Length”); int Leng273 = Convert.ToInt32(Console.ReadLine()); Function obj3 = new Function(Leng273); Console.WriteLine(“The original sequence:”); Console.WriteLine(obj3); obj3.ToNaturalMergeSort(); Console.WriteLine(); Console.WriteLine(); Console.WriteLine(“‘NaturalMergeSort’ Finaly Sorting Result:”); Console.WriteLine(obj3); break; } } } } }

基数排序

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace Merge
{
    /// <summary>
    /// LSD (least-significant-digit) radix sort over decimal digits, using a
    /// stable counting sort per digit position.
    /// </summary>
    public class RadixSorter
    {
        /// <summary>
        /// Sorts <paramref name="ArrayToSort"/> in place over
        /// <paramref name="digit"/> decimal digit positions and returns it.
        /// </summary>
        public int[] RadixSort(int[] ArrayToSort, int digit)
        {
            for (int k = 1; k <= digit; k++)
            {
                int[] staging = new int[ArrayToSort.Length];
                int[] counts = new int[10];

                // Histogram of the k-th digit.
                for (int i = 0; i < ArrayToSort.Length; i++)
                {
                    counts[DigitAt(ArrayToSort[i], k)] += 1;
                }
                // Prefix sums: counts[d] = number of elements with digit <= d.
                for (int d = 1; d < 10; d++)
                {
                    counts[d] += counts[d - 1];
                }
                // Walk backwards so equal digits keep their relative order (stability).
                for (int i = ArrayToSort.Length - 1; i >= 0; i--)
                {
                    int d = DigitAt(ArrayToSort[i], k);
                    staging[counts[d] - 1] = ArrayToSort[i];
                    counts[d] -= 1;
                }
                // Copy the pass result back for the next digit.
                for (int i = 0; i < ArrayToSort.Length; i++)
                {
                    ArrayToSort[i] = staging[i];
                }
            }
            return ArrayToSort;
        }

        // The k-th decimal digit of value (k = 1 is the least significant).
        private static int DigitAt(int value, int k)
        {
            return value / (int)Math.Pow(10, k - 1) - (value / (int)Math.Pow(10, k)) * 10;
        }
    }

    class Program
    {
        static void Main(string[] args)
        {
            int[] intArray = new int[] { 5, 3, 7, 4, 8, 2, 9, 1, 0, 6 };
            int[] newIntArray = intArray;
            RadixSorter rS = new RadixSorter();
            newIntArray = rS.RadixSort(intArray, intArray.Length);
            foreach (int i in intArray)
            {
                Console.Write(i + " ");
            }
            Console.ReadKey();
        }
    }
}

计数排序

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace Merge
{
    class Program
    {
        /// <summary>
        /// Counting sort. Elements of <paramref name="arrayToSort"/> must be
        /// non-negative and strictly less than <paramref name="k"/>. Negative
        /// inputs can be shifted into range first (e.g. for (-1,-8,10,11) add 8,
        /// sort, then subtract 8 from the result).
        /// </summary>
        /// <param name="arrayToSort">values to sort (unmodified)</param>
        /// <param name="k">exclusive upper bound of the values (max + 1)</param>
        /// <returns>a new array holding the sorted values</returns>
        public static int[] CountingSort(int[] arrayToSort, int k)
        {
            int[] sortedArray = new int[arrayToSort.Length];
            // countingArray[i] — number of occurrences of value i.
            int[] countingArray = new int[k];
            for (int i = 0; i < countingArray.Length; i++)
            {
                countingArray[i] = 0;
            }
            for (int i = 0; i < arrayToSort.Length; i++)
            {
                countingArray[arrayToSort[i]] = countingArray[arrayToSort[i]] + 1;
            }
            // Prefix sums: countingArray[i] — number of elements <= i.
            for (int i = 1; i < countingArray.Length; i++)
            {
                countingArray[i] += countingArray[i - 1];
            }
            // Place each input element at its final position.
            for (int i = 0; i < sortedArray.Length; i++)
            {
                int numIndex = countingArray[arrayToSort[i]] - 1;
                sortedArray[numIndex] = arrayToSort[i];
                countingArray[arrayToSort[i]] = countingArray[arrayToSort[i]] - 1;
            }
            return sortedArray;
        }

        static void Main(string[] args)
        {
            int[] intArray = new int[] { 5, 3, 7, 4, 8, 2, 9, 1, 0, 6 };
            int[] intNewArray = intArray;
            intNewArray = CountingSort(intArray, intArray.Length);
            foreach (int i in intNewArray)
            {
                // BUG FIX: the original used typographic quotes (“ “) around the
                // separator, which is not valid C#; use an ASCII space literal.
                Console.Write(i + " ");
            }
            Console.ReadKey();
        }
    }
}

堆排序

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace Merge
{
    class Program
    {
        /// <summary>
        /// In-place heapsort: build a max-heap, then repeatedly swap the root
        /// (maximum) to the end and re-heapify the shrinking prefix.
        /// </summary>
        private static void HeapSortFunction(int[] array)
        {
            try
            {
                BuildMaxHeap(array);
                for (int last = array.Length - 1; last > 0; last--)
                {
                    // Move the current maximum behind the heap boundary.
                    Swap(ref array[0], ref array[last]);
                    MaxHeapify(array, 0, last);
                }
            }
            catch (Exception ex)
            {
                Console.Write(ex.Message);
            }
        }

        /// <summary>Turns the whole array into a max-heap (bottom-up).</summary>
        private static void BuildMaxHeap(int[] array)
        {
            try
            {
                // Start at the last internal node; leaves are trivially heaps.
                for (int node = array.Length / 2 - 1; node >= 0; node--)
                {
                    MaxHeapify(array, node, array.Length);
                }
            }
            catch (Exception ex)
            {
                Console.Write(ex.Message);
            }
        }

        /// <summary>
        /// Restores the max-heap property for the subtree rooted at
        /// <paramref name="currentIndex"/>, within the first
        /// <paramref name="heapSize"/> elements.
        /// </summary>
        private static void MaxHeapify(int[] array, int currentIndex, int heapSize)
        {
            try
            {
                int left = 2 * currentIndex + 1;
                int right = 2 * currentIndex + 2;
                int largest = currentIndex;
                if (left < heapSize && array[left] > array[largest])
                {
                    largest = left;
                }
                if (right < heapSize && array[right] > array[largest])
                {
                    largest = right;
                }
                // If a child is bigger, promote it and sift down recursively.
                if (currentIndex != largest)
                {
                    Swap(ref array[currentIndex], ref array[largest]);
                    MaxHeapify(array, largest, heapSize);
                }
            }
            catch (Exception ex)
            {
                Console.Write(ex.Message);
            }
        }

        // Exchange two slots passed by reference.
        private static void Swap(ref int a, ref int b)
        {
            int temp = a;
            a = b;
            b = temp;
        }

        static void Main(string[] args)
        {
            int[] intArray = new int[] { 5, 3, 7, 4, 8, 2, 9, 1, 0, 6 };
            HeapSortFunction(intArray);
            foreach (int value in intArray)
            {
                Console.Write(value + " ");
            }
            Console.ReadKey();
        }
    }
}

排序的分类/稳定性/时间复杂度和空间复杂度总结

版权声明:本文博客原创文章。博客,未经同意,不得转载。

冒泡排序算法(C#实现) - Eric Sun - 博客园

Excerpt

简单的冒泡排序算法,代码如下://冒泡排序(从数组的起始位置开始遍历,以大数为基准:大的数向下沉一位)privatestaticvoid BubbleSortFunction(int[] array) { try { int length = array.Length; int temp; bool


简单的冒泡排序算法,代码如下:

1
<span>//</span><span>冒泡排序(从数组的起始位置开始遍历,以大数为基准:大的数向下沉一位)</span><span><br></span><span>private</span><span>static</span><span>void</span><span> BubbleSortFunction(</span><span>int</span><span>[] array)<br>        {<br>            </span><span>try</span><span><br>            {<br>                </span><span>int</span><span> length </span><span>=</span><span> array.Length;<br>                </span><span>int</span><span> temp;<br>                </span><span>bool</span><span> hasExchangeAction; </span><span>//</span><span>记录此次大循环中相邻的两个数是否发生过互换(如果没有互换,则数组已经是有序的)</span><span><br></span><span><br>                </span><span>for</span><span> (</span><span>int</span><span> i </span><span>=</span><span>0</span><span>; i </span><span>&lt;</span><span> length </span><span>-</span><span>1</span><span>; i</span><span>++</span><span>)    </span><span>//</span><span>数组有N个数,那么用N-1次大循环就可以排完</span><span><br></span><span>                {<br>                    hasExchangeAction </span><span>=</span><span>false</span><span>;  </span><span>//</span><span>每次大循环都假设数组有序</span><span><br></span><span><br>                    </span><span>for</span><span> (</span><span>int</span><span> j </span><span>=</span><span>0</span><span>; j </span><span>&lt;</span><span> length </span><span>-</span><span> i </span><span>-</span><span>1</span><span>; j</span><span>++</span><span>)    </span><span>//</span><span>从数组下标0处开始遍历,(length - i - 1 是刨除已经排好的大数)</span><span><br></span><span>                    {<br>                        </span><span>if</span><span> (array[j] </span><span>&gt;</span><span> array[j </span><span>+</span><span>1</span><span>])    </span><span>//</span><span>相邻两个数进行比较,如果前面的数大于后面的数,则将这相邻的两个数进行互换</span><span><br></span><span>                        {<br>                            temp </span><span>=</span><span> array[j];<br>                            array[j] </span><span>=</span><span> array[j </span><span>+</span><span>1</span><span>];<br>                            array[j 
</span><span>+</span><span>1</span><span>] </span><span>=</span><span> temp;<br>                            hasExchangeAction </span><span>=</span><span>true</span><span>;   </span><span>//</span><span>发生过互换</span><span><br></span><span>                        }<br>                    }<br><br>                    </span><span>if</span><span> (</span><span>!</span><span>hasExchangeAction) </span><span>//</span><span>如果没有发生过互换,则数组已经是有序的了,跳出循环</span><span><br></span><span>                    {<br>                        </span><span>break</span><span>;<br>                    }<br>                }<br>            }<br>            </span><span>catch</span><span> (Exception ex)<br>            { }<br>        }</span>

。。。。。

posted @ 2011-08-17 16:02  Eric Sun  阅读(7637)  评论()  编辑  收藏  举报

Qt配置onnx_runtime

首先,onnx_runtime官方也给编译好的release版本,下载即可。但是在qt中配置有一个坑。
在这里插入图片描述
在Qt Creator中正常添加外部库,但是你会发现构建时找不到onnxruntime.lib。这时如果你替换成全路径,即把注释的部分换成下面的lib路径,直接指明onnxruntime.lib,构建就能成功,可以include <onnxruntime_cxx_api.h>;但是在运行时你会遇到应用程序无法启动的问题。
在这里插入图片描述
根据百度把onnxruntime.dll复制到.exe目录下。

OK,启动成功。

opencv读取视频流

居中显示,随意拉伸。
在这里插入图片描述
在这里插入图片描述
实现居中的逻辑:

1
// 调整QImage的大小以匹配QLabel的大小 QPixmap scaledPixmap = QPixmap::fromImage(qimg).scaled(ui->Origin_Video->size(), Qt::KeepAspectRatio, Qt::FastTransformation);

而在界面当中需要对窗口随意拉伸,这时就需要界面允许缩放。修改QLabel的属性:
在这里插入图片描述
修改成minimum,并给定最小宽度和高度。(还不知道原因,等有空学习一下)

最后opencv读取视频流并拉取每一帧显示在QLabel中,这里采用的是用一个Qtimer,定时去获取视频帧。

1
// 创建定时器,每隔一定时间显示下一帧 timer = new QTimer(this); connect(timer, &QTimer::timeout, this, &MainWindow::showNextFrame); timer->start(33); // 设置帧率为30FPS,即每隔33毫秒显示一帧

完整代码如下:

1
// 在槽函数中处理视频的加载和显示 void MainWindow::on_actionvideo_triggered() { camera->stop(); viewfinder->close(); QString curPath = QDir::homePath(); QString dlgTitle = "选择视频文件"; QString filter = "视频文件(*.wmv *.mp4);;所有文件(*.*)"; QString aFile = QFileDialog::getOpenFileName(this, dlgTitle, curPath, filter); if (aFile.isEmpty()) { return; } ui->dir_Edit->setText(aFile); currentSource = File; // 更新当前视频源为视频文件 displayVideo(); // 显示视频 } // 根据当前视频源显示视频的函数 void MainWindow::displayVideo() { if (currentSource == File) { std::string video_path = ui->dir_Edit->text().toLocal8Bit().constData(); cap.open(video_path); if (!cap.isOpened()) { qDebug() << "Error: Unable to open the video file"; return; } // 创建定时器,每隔一定时间显示下一帧 timer = new QTimer(this); connect(timer, &QTimer::timeout, this, &MainWindow::showNextFrame); timer->start(33); // 设置帧率为30FPS,即每隔33毫秒显示一帧 } else if (currentSource == Camera) { // 创建定时器,每隔一定时间显示下一帧 timer = new QTimer(this); connect(timer, &QTimer::timeout, this, &MainWindow::viewfinderchange); timer->start(33); // 设置帧率为30FPS,即每隔33毫秒显示一帧 // cameras = QCameraInfo::availableCameras(); //获取所有相机的列表 // camera = new QCamera(cameras[0]); //camera指向指定的摄像头 camera->setCaptureMode(QCamera::CaptureStillImage); //设定捕获模式 camera->setViewfinder(viewfinder); //设置取景器 camera->start(); } } // 显示下一帧的槽函数 void MainWindow::showNextFrame() { cv::Mat frame; cap >> frame; // 从视频流中获取一帧 if (frame.empty()) { cap.set(cv::CAP_PROP_POS_FRAMES, 0); // 如果视频结束,重新开始播放 cap >> frame; } currentFrame = frame; // 保存当前帧 displayCurrentFrame(); // 显示当前帧 } void MainWindow::displayCurrentFrame() { // 将OpenCV帧转换为QImage QImage qimg(currentFrame.data, currentFrame.cols, currentFrame.rows, currentFrame.step, QImage::Format_RGB888); qimg = qimg.rgbSwapped(); // 将格式从BGR转换为RGB // 调整QImage的大小以匹配QLabel的大小 QPixmap scaledPixmap = QPixmap::fromImage(qimg).scaled(ui->Origin_Video->size(), Qt::KeepAspectRatio, Qt::FastTransformation); // 将调整大小后的图像居中显示在QLabel中 centerImageInLabel(ui->Origin_Video, scaledPixmap); }

QCamra

居中显示,随意拉伸
在这里插入图片描述

1
void MainWindow::on_actioncamera_triggered() { cameras = QCameraInfo::availableCameras(); //获取所有相机的列表 //qDebug() << "this is camera: "; if (cameras.count() > 0) { for(const QCameraInfo &cameraInfo:cameras) { qDebug() << cameraInfo.description(); } camera = new QCamera(cameras.at(0)); //初始化实例化一个相机对象 } //设置取景器 viewfinder = new QCameraViewfinder(ui->Origin_Video); camera->setViewfinder(viewfinder); centerCameraViewfinderInLabel(viewfinder, ui->Origin_Video); camera->start(); //开启相机 //设置默认摄像头参数 QCameraViewfinderSettings set; // set.setResolution(640, 480); //设置显示分辨率 set.setMaximumFrameRate(30); //设置帧率 camera->setViewfinderSettings(set); stopVideo(); ui->Origin_Video->setPixmap(QPixmap("")); currentSource = Camera; // 更新当前视频源为摄像头 viewfinder->show(); displayVideo(); // 显示视频 }

yolov8 onnx 推理

1
void MainWindow::on_actionTest_triggered() { // std::string projectBasePath = "./"; // Set your ultralytics base path QString qs = QCoreApplication::applicationDirPath(); std::string projectBasePath = qs.toLocal8Bit().constData(); bool runOnGPU = false; // Note that in this example the classes are hard-coded and 'classes.txt' is a place holder. Inference inf(projectBasePath + "/moust_best.onnx", cv::Size(640, 640), "mouse.txt", runOnGPU); std::string video_path = ui->dir_Edit->text().toLocal8Bit().constData(); // 读取视频文件 // cv::VideoCapture cap(projectBasePath + "/video/video.mp4"); cv::VideoCapture cap(video_path); if (!cap.isOpened()) { std::cout << "Error opening video file" << std::endl; return ; } cv::Mat frame; while (cap.read(frame)) { // 推断开始... std::vector<Detection> output = inf.runInference(frame); int detections = output.size(); std::cout << "Number of detections:" << detections << std::endl; for (int i = 0; i < detections; ++i) { Detection detection = output[i]; cv::Rect box = detection.box; cv::Scalar color = detection.color; // Detection box cv::rectangle(frame, box, color, 2); // Detection box text std::string classString = detection.className + ' ' + std::to_string(detection.confidence).substr(0, 4); cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0); cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20); cv::rectangle(frame, textBox, color, cv::FILLED); cv::putText(frame, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0); } // 推断结束... // 仅用于预览 float scale = 0.8; cv::resize(frame, frame, cv::Size(frame.cols*scale, frame.rows*scale)); cv::imshow("Inference", frame); if (cv::waitKey(1) == 27) { break; } } cap.release(); cv::destroyAllWindows(); }

多线程(onnx推理线程和界面主线程)

在这里插入图片描述
摄像头与onnx互不干扰,说明主界面线程与onnx推理是分开线程进行的,ok!

######################### 2024 05 09 更新 ##############################################