@@ -636,9 +636,13 @@ async fn search_partial_hits_phase_with_scroll(
 /// metadata count.
 ///
 /// This is done by exclusion, so we will need to keep it up to date if fields are added.
-pub fn is_metadata_count_request(request: &SearchRequest) -> bool {
+pub fn is_metadata_count_request(request: &SearchRequest, split: &SplitMetadata) -> bool {
     let query_ast: QueryAst = serde_json::from_str(&request.query_ast).unwrap();
-    is_metadata_count_request_with_ast(&query_ast, request)
+
+    let start_time = split.time_range.as_ref().map(|x| x.start()).copied();
+    let end_time = split.time_range.as_ref().map(|x| x.end()).copied();
+
+    is_metadata_count_request_with_ast(&query_ast, request, start_time, end_time)
 }

 /// Check if the request is a count request without any filters, so we can just return the split
@@ -647,42 +651,53 @@ pub fn is_metadata_count_request(request: &SearchRequest) -> bool {
 /// This is done by exclusion, so we will need to keep it up to date if fields are added.
 ///
 /// The passed query_ast should match the serialized one in the request.
-pub fn is_metadata_count_request_with_ast(query_ast: &QueryAst, request: &SearchRequest) -> bool {
+pub fn is_metadata_count_request_with_ast(
+    query_ast: &QueryAst,
+    request: &SearchRequest,
+    split_start_timestamp: Option<i64>,
+    split_end_timestamp: Option<i64>,
+) -> bool {
     if query_ast != &QueryAst::MatchAll {
         return false;
     }
     if request.max_hits != 0 {
         return false;
     }

-    // If the start and end timestamp encompass the whole split, it is still a count query.
-    // We remove this currently on the leaf level, but not yet on the root level.
-    // There's a small advantage when we would do this on the root level, since we have the
-    // counts available on the split. On the leaf it is currently required to open the split
-    // to get the count.
-    if request.start_timestamp.is_some() || request.end_timestamp.is_some() {
-        return false;
+    if let Some(request_start_timestamp) = request.start_timestamp {
+        let Some(split_start_timestamp) = split_start_timestamp else {
+            return false;
+        };
+        if split_start_timestamp < request_start_timestamp {
+            return false;
+        }
     }
+    if let Some(request_end_timestamp) = request.end_timestamp {
+        let Some(split_end_timestamp) = split_end_timestamp else {
+            return false;
+        };
+        if split_end_timestamp >= request_end_timestamp {
+            return false;
+        }
+    }
+
     if request.aggregation_request.is_some() || !request.snippet_fields.is_empty() {
         return false;
     }
     true
 }

 /// Get a leaf search response that returns the num_docs of the split
-pub fn get_count_from_metadata(split_metadatas: &[SplitMetadata]) -> Vec<LeafSearchResponse> {
-    split_metadatas
-        .iter()
-        .map(|metadata| LeafSearchResponse {
-            num_hits: metadata.num_docs as u64,
-            partial_hits: Vec::new(),
-            failed_splits: Vec::new(),
-            num_attempted_splits: 1,
-            num_successful_splits: 1,
-            intermediate_aggregation_result: None,
-            resource_stats: None,
-        })
-        .collect()
+pub fn get_count_from_metadata(metadata: &SplitMetadata) -> LeafSearchResponse {
+    LeafSearchResponse {
+        num_hits: metadata.num_docs as u64,
+        partial_hits: Vec::new(),
+        failed_splits: Vec::new(),
+        num_attempted_splits: 1,
+        num_successful_splits: 1,
+        intermediate_aggregation_result: None,
+        resource_stats: None,
+    }
 }

 /// Returns true if the query is particularly memory intensive.
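
The timestamp checks added above encode a containment rule that is easy to invert by accident. The sketch below is not Quickwit code — `split_covered_by_request` and its `(i64, i64)` split range are hypothetical stand-ins — but it mirrors the comparisons in `is_metadata_count_request_with_ast`: the split's time range must sit entirely inside the request's optional window, with the request's end bound treated as exclusive (hence the `>=` rejection in the diff).

```rust
/// Hedged sketch, assuming the request start bound is inclusive and the end
/// bound exclusive, as the `<` / `>=` comparisons in the diff suggest.
fn split_covered_by_request(
    split_range: Option<(i64, i64)>, // (min, max) timestamp stored in the split metadata
    request_start: Option<i64>,      // inclusive lower bound of the request, if set
    request_end: Option<i64>,        // exclusive upper bound of the request, if set
) -> bool {
    match (split_range, request_start, request_end) {
        // No time filter at all: the split count can always come from metadata.
        (_, None, None) => true,
        // The request filters on time but the split has no recorded range:
        // coverage cannot be proven, so a real leaf search is needed.
        (None, _, _) => false,
        (Some((split_start, split_end)), start, end) => {
            start.map_or(true, |s| split_start >= s) && end.map_or(true, |e| split_end < e)
        }
    }
}

fn main() {
    // Split spans [100, 200], request asks for [50, 300): fully covered.
    assert!(split_covered_by_request(Some((100, 200)), Some(50), Some(300)));
    // Request window [150, 300) cuts into the split: must open the split.
    assert!(!split_covered_by_request(Some((100, 200)), Some(150), Some(300)));
    // End bound exactly at the split's max is rejected because the end is exclusive.
    assert!(!split_covered_by_request(Some((100, 200)), None, Some(200)));
}
```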
@@ -730,26 +745,31 @@ pub(crate) async fn search_partial_hits_phase(
     split_metadatas: &[SplitMetadata],
     cluster_client: &ClusterClient,
 ) -> crate::Result<LeafSearchResponse> {
-    let leaf_search_responses: Vec<LeafSearchResponse> =
-        if is_metadata_count_request(search_request) {
-            get_count_from_metadata(split_metadatas)
+    let mut leaf_search_responses: Vec<LeafSearchResponse> =
+        Vec::with_capacity(split_metadatas.len());
+    let mut leaf_search_jobs = Vec::new();
+    for split in split_metadatas {
+        if is_metadata_count_request(search_request, split) {
+            leaf_search_responses.push(get_count_from_metadata(split));
         } else {
-            let jobs: Vec<SearchJob> = split_metadatas.iter().map(SearchJob::from).collect();
-            let assigned_leaf_search_jobs = cluster_client
-                .search_job_placer
-                .assign_jobs(jobs, &HashSet::default())
-                .await?;
-            let mut leaf_request_tasks = Vec::new();
-            for (client, client_jobs) in assigned_leaf_search_jobs {
-                let leaf_request = jobs_to_leaf_request(
-                    search_request,
-                    indexes_metas_for_leaf_search,
-                    client_jobs,
-                )?;
-                leaf_request_tasks.push(cluster_client.leaf_search(leaf_request, client.clone()));
-            }
-            try_join_all(leaf_request_tasks).await?
-        };
+            leaf_search_jobs.push(SearchJob::from(split));
+        }
+    }
+
+    if !leaf_search_jobs.is_empty() {
+        let assigned_leaf_search_jobs = cluster_client
+            .search_job_placer
+            .assign_jobs(leaf_search_jobs, &HashSet::default())
+            .await?;
+        let mut leaf_request_tasks = Vec::new();
+        for (client, client_jobs) in assigned_leaf_search_jobs {
+            let leaf_request =
+                jobs_to_leaf_request(search_request, indexes_metas_for_leaf_search, client_jobs)?;
+            leaf_request_tasks.push(cluster_client.leaf_search(leaf_request, client.clone()));
+        }
+        let executed_leaf_search_responses = try_join_all(leaf_request_tasks).await?;
+        leaf_search_responses.extend(executed_leaf_search_responses);
+    }

     // Creates a collector which merges responses into one
     let merge_collector =
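
For a pure count query the fast path above is safe because the per-split responses it fabricates merge the same way as real leaf responses: the collector referenced on the last context line only needs to add the counters together. The toy fold below is not the actual merge collector — `CountOnlyResponse` is an invented stand-in for the handful of `LeafSearchResponse` fields a count touches — it just illustrates that mixing metadata-derived and leaf-derived responses does not change the total.

```rust
// Invented stand-in for the count-relevant fields of LeafSearchResponse;
// the real merge also handles partial hits, failed splits, and aggregations.
#[derive(Debug, Default, PartialEq)]
struct CountOnlyResponse {
    num_hits: u64,
    num_attempted_splits: u64,
    num_successful_splits: u64,
}

// Mirrors get_count_from_metadata: fabricate a response from the split's doc count.
fn count_from_metadata(num_docs: u64) -> CountOnlyResponse {
    CountOnlyResponse {
        num_hits: num_docs,
        num_attempted_splits: 1,
        num_successful_splits: 1,
    }
}

fn main() {
    // Two splits answered from metadata plus one that went through a leaf search.
    let responses = vec![
        count_from_metadata(1_000),
        count_from_metadata(250),
        CountOnlyResponse { num_hits: 42, num_attempted_splits: 1, num_successful_splits: 1 },
    ];
    // For a count, merging reduces to summation, so the origin of each
    // response (metadata shortcut or leaf search) does not matter.
    let merged = responses.into_iter().fold(CountOnlyResponse::default(), |mut acc, r| {
        acc.num_hits += r.num_hits;
        acc.num_attempted_splits += r.num_attempted_splits;
        acc.num_successful_splits += r.num_successful_splits;
        acc
    });
    assert_eq!(merged.num_hits, 1_292);
    assert_eq!(merged.num_attempted_splits, 3);
}
```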