// See the License for the specific language governing permissions and
// limitations under the License.

- #include <chrono>
+ #include "benchmark_runner.hpp"

- #include <mongocxx/instance.hpp>
#include "bson/bson_encoding.hpp"
- #include "microbench.hpp"
#include "multi_doc/bulk_insert.hpp"
#include "multi_doc/find_many.hpp"
#include "multi_doc/gridfs_download.hpp"
#include "single_doc/run_command.hpp"

namespace benchmark {
- const std::chrono::duration<int, std::milli> mintime{60000};
- const std::chrono::duration<int, std::milli> maxtime{300000};

- bool finished_running(const std::chrono::duration<int, std::milli>& curr_time, std::uint32_t iter) {
-     return (curr_time > maxtime || (curr_time > mintime && iter > 100));
- }
+ // The task sizes and iteration numbers come from the Driver Performance Benchmarking Reference Doc.
+ benchmark_runner::benchmark_runner() {
+     using bsoncxx::stdx::make_unique;

- void run_microbench(microbench* bench,
-                     bsoncxx::stdx::string_view input_json_filename = bsoncxx::stdx::string_view()) {
-     bench->setup(input_json_filename);
+     // Bson microbenchmarks
+     _microbenches.push_back(make_unique<bson_encoding>(75.31, "extended_bson/flat_bson.json"));
+     _microbenches.push_back(make_unique<bson_encoding>(19.64, "extended_bson/deep_bson.json"));
+     _microbenches.push_back(make_unique<bson_encoding>(57.34, "extended_bson/full_bson.json"));
+     // TODO CXX-1241: Add bson_decoding equivalents.

-     for (std::uint32_t iteration = 0; !finished_running(bench->get_execution_time(), iteration);
-          iteration++) {
-         bench->before_task();
+     // Single doc microbenchmarks
+     _microbenches.push_back(make_unique<run_command>());
+     _microbenches.push_back(make_unique<find_one_by_id>("single_and_multi_document/tweet.json"));
+     _microbenches.push_back(
+         make_unique<insert_one>(2.75, 10000, "single_and_multi_document/small_doc.json"));
+     _microbenches.push_back(
+         make_unique<insert_one>(27.31, 10, "single_and_multi_document/large_doc.json"));

-         bench->do_task();
+     // Multi doc microbenchmarks
+     _microbenches.push_back(make_unique<find_many>("single_and_multi_document/tweet.json"));
+     _microbenches.push_back(
+         make_unique<bulk_insert>(2.75, 10000, "single_and_multi_document/small_doc.json"));
+     _microbenches.push_back(
+         make_unique<bulk_insert>(27.31, 10, "single_and_multi_document/large_doc.json"));
+     _microbenches.push_back(
+         make_unique<gridfs_upload>("single_and_multi_document/gridfs_large.bin"));
+     _microbenches.push_back(
+         make_unique<gridfs_download>("single_and_multi_document/gridfs_large.bin"));

-         bench->after_task();
-     }
-     bench->teardown();
+     // TODO CXX-1378: add parallel microbenchmarks
}

- // this would run the benchmarks and collect the data.
- int main() {
+ void benchmark_runner::run_microbenches(benchmark_type tag) {
    mongocxx::instance instance{};

-     bson_encoding flat_bson_encode;
-     run_microbench(&flat_bson_encode, "FLAT_BSON.json");
-     bson_encoding deep_bson_encode;
-     run_microbench(&deep_bson_encode, "DEEP_BSON.json");
-     bson_encoding full_bson_encode;
-     run_microbench(&full_bson_encode, "FULL_BSON.json");
-     // TODO CXX-1241: Add bson_decoding equivalents.
+     for (std::unique_ptr<microbench>& bench : _microbenches) {
+         if (tag == benchmark::benchmark_type::all_benchmarks || bench->has_tag(tag)) {
+             bench->run();
+         }
+     }
+ }
+
+ double benchmark_runner::calculate_average(benchmark_type tag) {
+     std::uint32_t count = 0;
+     double total = 0.0;
+     for (std::unique_ptr<microbench>& bench : _microbenches) {
+         if (bench->has_tag(tag)) {
+             count++;
+             total += bench->get_results().get_score();
+         }
+     }
+     return total / static_cast<double>(count);
+ }
+
+ double benchmark_runner::calculate_bson_bench_score() {
+     return calculate_average(benchmark_type::bson_bench);
+ }
+
+ double benchmark_runner::calculate_single_bench_score() {
+     return calculate_average(benchmark_type::single_bench);
+ }
+
+ double benchmark_runner::calculate_multi_bench_score() {
+     return calculate_average(benchmark_type::multi_bench);
+ }
+
+ double benchmark_runner::calculate_parallel_bench_score() {
+     return calculate_average(benchmark_type::parallel_bench);
+ }
+
+ double benchmark_runner::calculate_read_bench_score() {
+     return calculate_average(benchmark_type::read_bench);
+ }
+
+ double benchmark_runner::calculate_write_bench_score() {
+     return calculate_average(benchmark_type::write_bench);
+ }

-     run_command run_command_bench;
-     run_microbench(&run_command_bench);
-     find_one_by_id find_one_by_id_bench;
-     run_microbench(&find_one_by_id_bench, "TWEET.json");
-     insert_one small_doc_insert_one(10000);
-     run_microbench(&small_doc_insert_one, "SMALL_DOC.json");
-     insert_one large_doc_insert_one(10);
-     run_microbench(&large_doc_insert_one, "LARGE_DOC.json");
-
-     find_many find_many_bench;
-     run_microbench(&find_many_bench, "TWEET.json");
-     bulk_insert small_doc_bulk_insert{10000};
-     run_microbench(&small_doc_bulk_insert, "SMALL_DOC.json");
-     bulk_insert large_doc_bulk_insert{10};
-     run_microbench(&large_doc_bulk_insert, "LARGE_DOC.json");
-     gridfs_upload gridfs_upload_bench;
-     run_microbench(&gridfs_upload_bench, "GRIDFS_LARGE.bin");
-     gridfs_download gridfs_download_bench;
-     run_microbench(&gridfs_download_bench, "GRIDFS_LARGE.bin");
-
-     // get results from the microbenches...
+ double benchmark_runner::calculate_driver_bench_score() {
+     return (calculate_read_bench_score() + calculate_write_bench_score()) / 2.0;
}
}
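
For context, here is a minimal sketch (not part of the diff above) of how the new benchmark_runner might be driven from a main() that replaces the one removed here. The default constructor, run_microbenches(), benchmark_type::all_benchmarks, and the calculate_*_score() accessors all appear in this diff; printing the composite scores to stdout, and the label strings used below, are illustrative assumptions.

#include <iostream>

#include "benchmark_runner.hpp"

int main() {
    // Registers all microbenchmarks listed in the constructor above.
    benchmark::benchmark_runner runner;

    // Run every registered microbenchmark; passing a narrower tag such as
    // benchmark_type::bson_bench would run only that group.
    runner.run_microbenches(benchmark::benchmark_type::all_benchmarks);

    // Each composite score is the average of the scores of the benchmarks
    // carrying the corresponding tag.
    std::cout << "BSONBench: " << runner.calculate_bson_bench_score() << "\n"
              << "SingleBench: " << runner.calculate_single_bench_score() << "\n"
              << "MultiBench: " << runner.calculate_multi_bench_score() << "\n"
              << "ReadBench: " << runner.calculate_read_bench_score() << "\n"
              << "WriteBench: " << runner.calculate_write_bench_score() << "\n"
              << "DriverBench: " << runner.calculate_driver_bench_score() << "\n";
    return 0;
}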