Dataset schema:

| column | dtype | statistics |
|---|---|---|
| `idx` | int64 | 2 to 7.85k |
| `idx_lca` | int64 | 0 to 223 |
| `offset` | int64 | 165 to 54.4k |
| `repo` | string | 54 distinct values |
| `commit_hash` | string | 100 distinct values |
| `target_file` | string | 118 distinct values |
| `line_type_lca` | string | 6 distinct values |
| `ground_truth` | string | lengths 1 to 40 |
| `in_completions` | bool | 1 class |
| `completion_type` | string | 6 distinct values |
| `non_dunder_count_intellij` | int64 | 1 to 127 |
| `non_dunder_count_jedi` | int64 | 1 to 128 |
| `start_with_` | bool | 1 class |
| `first_occurrence` | bool | 2 classes |
| `intellij_completions` | list | lengths 1 to 149 |
| `jedi_completions` | list | lengths 3 to 148 |
| `prefix` | string | lengths 165 to 54.4k |
| `intellij_completions_filtered` | list | lengths 1 to 123 |
| `prefix_truncated` | string | lengths 165 to 1.92k |
| `__index_level_0__` | int64 | 1 to 5.53k |

Example rows follow. For each row, the scalar cells appear on one pipe-delimited line in the column order above, and the longer cells (completion lists and code prefixes) are printed below them.
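The sketch below is a minimal, hypothetical way to inspect records with this schema; it is not part of the dataset. It assumes the rows are available as JSON-lines records with exactly the columns above (the file name `completion_rows.jsonl` is a placeholder), and that `intellij_completions` and `intellij_completions_filtered` are flat lists of names while `jedi_completions` is a list of objects with `name` and `type` keys, as in the example rows below.

```python
# Minimal inspection sketch under the assumptions stated above.
import json


def check_row(row: dict) -> dict:
    """Summarize one record and re-derive whether the ground truth was offered."""
    # jedi_completions entries look like {"name": ..., "type": ...};
    # intellij_completions is a flat list of completion names.
    jedi_names = [c["name"] for c in row["jedi_completions"]]
    intellij_names = list(row["intellij_completions"])
    return {
        "repo": row["repo"],
        "target_file": row["target_file"],
        "offset": row["offset"],
        "ground_truth": row["ground_truth"],
        "completion_type": row["completion_type"],
        "in_intellij": row["ground_truth"] in intellij_names,
        "in_jedi": row["ground_truth"] in jedi_names,
        "prefix_chars": len(row["prefix"]),
    }


if __name__ == "__main__":
    # Hypothetical export of the rows shown below, one JSON object per line.
    with open("completion_rows.jsonl") as fh:
        for line in fh:
            print(check_row(json.loads(line)))
```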
| 2 | 0 | 1,333 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | inproject | MergedProjectProfile | true | class | 14 | 19 | false | true |
[
"FuzzerProfile",
"List",
"MergedProjectProfile",
"logger",
"None",
"add_func_to_reached_and_clone",
"FunctionProfile",
"load_all_profiles",
"read_fuzzer_data_file_to_profile",
"Any",
"Dict",
"Optional",
"Set",
"Tuple"
] |
[
{
"name": "add_func_to_reached_and_clone",
"type": "function"
},
{
"name": "Any",
"type": "class"
},
{
"name": "copy",
"type": "module"
},
{
"name": "Dict",
"type": "class"
},
{
"name": "FunctionProfile",
"type": "class"
},
{
"name": "fuzz_cfg_load",
"type": "module"
},
{
"name": "fuzz_cov_load",
"type": "module"
},
{
"name": "fuzz_utils",
"type": "module"
},
{
"name": "FuzzerProfile",
"type": "class"
},
{
"name": "List",
"type": "class"
},
{
"name": "load_all_profiles",
"type": "function"
},
{
"name": "logger",
"type": "statement"
},
{
"name": "logging",
"type": "module"
},
{
"name": "MergedProjectProfile",
"type": "class"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "os",
"type": "module"
},
{
"name": "read_fuzzer_data_file_to_profile",
"type": "function"
},
{
"name": "Set",
"type": "class"
},
{
"name": "Tuple",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.
|
[
"FuzzerProfile",
"List",
"MergedProjectProfile",
"logger",
"None",
"add_func_to_reached_and_clone",
"FunctionProfile",
"load_all_profiles",
"read_fuzzer_data_file_to_profile",
"Any",
"Dict",
"Optional",
"Set",
"Tuple"
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.
| 1 |

| 3 | 0 | 1,409 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | inproject | FuzzerProfile | true | class | 14 | 19 | false | true |
[
"FuzzerProfile",
"MergedProjectProfile",
"List",
"logger",
"None",
"add_func_to_reached_and_clone",
"FunctionProfile",
"load_all_profiles",
"read_fuzzer_data_file_to_profile",
"Any",
"Dict",
"Optional",
"Set",
"Tuple"
] |
[
{
"name": "add_func_to_reached_and_clone",
"type": "function"
},
{
"name": "Any",
"type": "class"
},
{
"name": "copy",
"type": "module"
},
{
"name": "Dict",
"type": "class"
},
{
"name": "FunctionProfile",
"type": "class"
},
{
"name": "fuzz_cfg_load",
"type": "module"
},
{
"name": "fuzz_cov_load",
"type": "module"
},
{
"name": "fuzz_utils",
"type": "module"
},
{
"name": "FuzzerProfile",
"type": "class"
},
{
"name": "List",
"type": "class"
},
{
"name": "load_all_profiles",
"type": "function"
},
{
"name": "logger",
"type": "statement"
},
{
"name": "logging",
"type": "module"
},
{
"name": "MergedProjectProfile",
"type": "class"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "os",
"type": "module"
},
{
"name": "read_fuzzer_data_file_to_profile",
"type": "function"
},
{
"name": "Set",
"type": "class"
},
{
"name": "Tuple",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.
|
[
"FuzzerProfile",
"MergedProjectProfile",
"List",
"logger",
"None",
"add_func_to_reached_and_clone",
"FunctionProfile",
"load_all_profiles",
"read_fuzzer_data_file_to_profile",
"Any",
"Dict",
"Optional",
"Set",
"Tuple"
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.
| 2 |

| 5 | 0 | 2,009 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | inproject | extract_all_callsites | true | function | 8 | 9 | false | true |
[
"extract_all_callsites",
"CalltreeCallsite",
"List",
"logger",
"Optional",
"data_file_read_calltree",
"extract_all_callsites_recursive",
"print_ctcs_tree"
] |
[
{
"name": "CalltreeCallsite",
"type": "class"
},
{
"name": "data_file_read_calltree",
"type": "function"
},
{
"name": "extract_all_callsites",
"type": "function"
},
{
"name": "extract_all_callsites_recursive",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "logger",
"type": "statement"
},
{
"name": "logging",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "print_ctcs_tree",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.
|
[
"extract_all_callsites",
"CalltreeCallsite",
"List",
"logger",
"Optional",
"data_file_read_calltree",
"extract_all_callsites_recursive",
"print_ctcs_tree"
] |
2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.
| 4 |

| 7 | 0 | 2,166 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | inproject | demangle_cpp_func | true | function | 13 | 18 | false | true |
[
"List",
"logger",
"Optional",
"demangle_cpp_func",
"scan_executables_for_fuzz_introspector_logs",
"data_file_read_yaml",
"get_all_files_in_tree_with_regex",
"get_target_coverage_url",
"longest_common_prefix",
"normalise_str",
"safe_decode",
"Any",
"Dict"
] |
[
{
"name": "Any",
"type": "class"
},
{
"name": "cxxfilt",
"type": "module"
},
{
"name": "data_file_read_yaml",
"type": "function"
},
{
"name": "demangle_cpp_func",
"type": "function"
},
{
"name": "Dict",
"type": "class"
},
{
"name": "get_all_files_in_tree_with_regex",
"type": "function"
},
{
"name": "get_target_coverage_url",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "logger",
"type": "statement"
},
{
"name": "logging",
"type": "module"
},
{
"name": "longest_common_prefix",
"type": "function"
},
{
"name": "normalise_str",
"type": "function"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "os",
"type": "module"
},
{
"name": "re",
"type": "module"
},
{
"name": "safe_decode",
"type": "function"
},
{
"name": "scan_executables_for_fuzz_introspector_logs",
"type": "function"
},
{
"name": "yaml",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.
|
[
"List",
"logger",
"Optional",
"demangle_cpp_func",
"scan_executables_for_fuzz_introspector_logs",
"data_file_read_yaml",
"get_all_files_in_tree_with_regex",
"get_target_coverage_url",
"longest_common_prefix",
"normalise_str",
"safe_decode",
"Any",
"Dict"
] |
.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.
| 5 |

| 8 | 0 | 2,189 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | inproject | dst_function_name | true | statement | 17 | 17 | false | true |
[
"depth",
"cov_ct_idx",
"cov_forward_reds",
"cov_largest_blocked_func",
"cov_link",
"children",
"cov_callsite_link",
"cov_color",
"cov_hitcount",
"cov_parent",
"dst_function_name",
"dst_function_source_file",
"hitcount",
"parent_calltree_callsite",
"src_function_name",
"src_function_source_file",
"src_linenumber",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "cov_callsite_link",
"type": "statement"
},
{
"name": "cov_color",
"type": "statement"
},
{
"name": "cov_ct_idx",
"type": "statement"
},
{
"name": "cov_forward_reds",
"type": "statement"
},
{
"name": "cov_hitcount",
"type": "statement"
},
{
"name": "cov_largest_blocked_func",
"type": "statement"
},
{
"name": "cov_link",
"type": "statement"
},
{
"name": "cov_parent",
"type": "statement"
},
{
"name": "depth",
"type": "statement"
},
{
"name": "dst_function_name",
"type": "statement"
},
{
"name": "dst_function_source_file",
"type": "statement"
},
{
"name": "hitcount",
"type": "statement"
},
{
"name": "parent_calltree_callsite",
"type": "statement"
},
{
"name": "src_function_name",
"type": "statement"
},
{
"name": "src_function_source_file",
"type": "statement"
},
{
"name": "src_linenumber",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.
|
[
"depth",
"cov_ct_idx",
"cov_forward_reds",
"cov_largest_blocked_func",
"cov_link",
"children",
"cov_callsite_link",
"cov_color",
"cov_hitcount",
"cov_parent",
"dst_function_name",
"dst_function_source_file",
"hitcount",
"parent_calltree_callsite",
"src_function_name",
"src_function_source_file",
"src_linenumber"
] |
.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.
| 6 |

| 10 | 0 | 2,668 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | random | cov_callsite_link | true | statement | 17 | 17 | false | true |
[
"cov_link",
"depth",
"cov_color",
"cov_ct_idx",
"cov_callsite_link",
"children",
"cov_forward_reds",
"cov_hitcount",
"cov_largest_blocked_func",
"cov_parent",
"dst_function_name",
"dst_function_source_file",
"hitcount",
"parent_calltree_callsite",
"src_function_name",
"src_function_source_file",
"src_linenumber",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "cov_callsite_link",
"type": "statement"
},
{
"name": "cov_color",
"type": "statement"
},
{
"name": "cov_ct_idx",
"type": "statement"
},
{
"name": "cov_forward_reds",
"type": "statement"
},
{
"name": "cov_hitcount",
"type": "statement"
},
{
"name": "cov_largest_blocked_func",
"type": "statement"
},
{
"name": "cov_link",
"type": "statement"
},
{
"name": "cov_parent",
"type": "statement"
},
{
"name": "depth",
"type": "statement"
},
{
"name": "dst_function_name",
"type": "statement"
},
{
"name": "dst_function_source_file",
"type": "statement"
},
{
"name": "hitcount",
"type": "statement"
},
{
"name": "parent_calltree_callsite",
"type": "statement"
},
{
"name": "src_function_name",
"type": "statement"
},
{
"name": "src_function_source_file",
"type": "statement"
},
{
"name": "src_linenumber",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.
|
[
"cov_link",
"depth",
"cov_color",
"cov_ct_idx",
"cov_callsite_link",
"children",
"cov_forward_reds",
"cov_hitcount",
"cov_largest_blocked_func",
"cov_parent",
"dst_function_name",
"dst_function_source_file",
"hitcount",
"parent_calltree_callsite",
"src_function_name",
"src_function_source_file",
"src_linenumber"
] |
z_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.
| 8 |

| 12 | 0 | 2,749 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | infile | create_str_node_ctx_idx | true | function | 7 | 7 | false | true |
[
"create_str_node_ctx_idx",
"get_fuzz_blockers",
"name",
"create_fuzz_blocker_table",
"html_create_dedicated_calltree_file",
"__init__",
"analysis_func",
"create_calltree",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "analysis_func",
"type": "function"
},
{
"name": "create_calltree",
"type": "function"
},
{
"name": "create_fuzz_blocker_table",
"type": "function"
},
{
"name": "create_str_node_ctx_idx",
"type": "function"
},
{
"name": "get_fuzz_blockers",
"type": "function"
},
{
"name": "html_create_dedicated_calltree_file",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.
|
[
"create_str_node_ctx_idx",
"get_fuzz_blockers",
"name",
"create_fuzz_blocker_table",
"html_create_dedicated_calltree_file",
"analysis_func",
"create_calltree"
] |
fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.
| 10 |

| 13 | 0 | 2,782 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | infile | cov_ct_idx | true | statement | 17 | 17 | false | true |
[
"cov_ct_idx",
"depth",
"dst_function_name",
"cov_forward_reds",
"cov_link",
"children",
"cov_callsite_link",
"cov_color",
"cov_hitcount",
"cov_largest_blocked_func",
"cov_parent",
"dst_function_source_file",
"hitcount",
"parent_calltree_callsite",
"src_function_name",
"src_function_source_file",
"src_linenumber",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "cov_callsite_link",
"type": "statement"
},
{
"name": "cov_color",
"type": "statement"
},
{
"name": "cov_ct_idx",
"type": "statement"
},
{
"name": "cov_forward_reds",
"type": "statement"
},
{
"name": "cov_hitcount",
"type": "statement"
},
{
"name": "cov_largest_blocked_func",
"type": "statement"
},
{
"name": "cov_link",
"type": "statement"
},
{
"name": "cov_parent",
"type": "statement"
},
{
"name": "depth",
"type": "statement"
},
{
"name": "dst_function_name",
"type": "statement"
},
{
"name": "dst_function_source_file",
"type": "statement"
},
{
"name": "hitcount",
"type": "statement"
},
{
"name": "parent_calltree_callsite",
"type": "statement"
},
{
"name": "src_function_name",
"type": "statement"
},
{
"name": "src_function_source_file",
"type": "statement"
},
{
"name": "src_linenumber",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.
|
[
"cov_ct_idx",
"depth",
"dst_function_name",
"cov_forward_reds",
"cov_link",
"children",
"cov_callsite_link",
"cov_color",
"cov_hitcount",
"cov_largest_blocked_func",
"cov_parent",
"dst_function_source_file",
"hitcount",
"parent_calltree_callsite",
"src_function_name",
"src_function_source_file",
"src_linenumber"
] |
rinting the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.
| 11 |

| 28 | 0 | 5,388 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | inproject | FuzzerProfile | true | class | 14 | 19 | false | false |
[
"FuzzerProfile",
"MergedProjectProfile",
"None",
"List",
"logger",
"add_func_to_reached_and_clone",
"FunctionProfile",
"load_all_profiles",
"read_fuzzer_data_file_to_profile",
"Any",
"Dict",
"Optional",
"Set",
"Tuple"
] |
[
{
"name": "add_func_to_reached_and_clone",
"type": "function"
},
{
"name": "Any",
"type": "class"
},
{
"name": "copy",
"type": "module"
},
{
"name": "Dict",
"type": "class"
},
{
"name": "FunctionProfile",
"type": "class"
},
{
"name": "fuzz_cfg_load",
"type": "module"
},
{
"name": "fuzz_cov_load",
"type": "module"
},
{
"name": "fuzz_utils",
"type": "module"
},
{
"name": "FuzzerProfile",
"type": "class"
},
{
"name": "List",
"type": "class"
},
{
"name": "load_all_profiles",
"type": "function"
},
{
"name": "logger",
"type": "statement"
},
{
"name": "logging",
"type": "module"
},
{
"name": "MergedProjectProfile",
"type": "class"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "os",
"type": "module"
},
{
"name": "read_fuzzer_data_file_to_profile",
"type": "function"
},
{
"name": "Set",
"type": "class"
},
{
"name": "Tuple",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display [function] link if we have, otherwhise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.
|
[
"FuzzerProfile",
"MergedProjectProfile",
"None",
"List",
"logger",
"add_func_to_reached_and_clone",
"FunctionProfile",
"load_all_profiles",
"read_fuzzer_data_file_to_profile",
"Any",
"Dict",
"Optional",
"Set",
"Tuple"
] |
" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.
| 19 |

| 29 | 0 | 5,761 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | inproject | html_get_header | true | function | 9 | 9 | false | true |
[
"html_create_table_head",
"html_table_add_row",
"html_get_header",
"html_get_navbar",
"List",
"html_add_header_with_link",
"html_get_table_of_contents",
"Any",
"Tuple"
] |
[
{
"name": "Any",
"type": "class"
},
{
"name": "html_add_header_with_link",
"type": "function"
},
{
"name": "html_create_table_head",
"type": "function"
},
{
"name": "html_get_header",
"type": "function"
},
{
"name": "html_get_navbar",
"type": "function"
},
{
"name": "html_get_table_of_contents",
"type": "function"
},
{
"name": "html_table_add_row",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "Tuple",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
[rest of the prefix field omitted: it is the same fuzz_calltree_analysis.py source up to the completion point, ending at `html_header = fuzz_html_helpers.`; the full shared prefix is kept once under idx 37 below]
intellij_completions_filtered: ["html_create_table_head", "html_table_add_row", "html_get_header", "html_get_navbar", "List", "html_add_header_with_link", "html_get_table_of_contents", "Any", "Tuple"]
prefix_truncated: tail of the same prefix (duplicate, omitted)
__index_level_0__: 20
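For orientation, here is a minimal sketch (not part of the dump itself) of what one record in this table contains, using the column names shown in these rows; the concrete Python types are assumptions made for readability, since the dump only shows rendered values.

# Illustrative only: field names mirror the columns of this dump; the types are assumed.
from dataclasses import dataclass
from typing import Dict, List


@dataclass
class CompletionRow:
    idx: int                                   # e.g. 32
    idx_lca: int                               # e.g. 0
    offset: int                                # cursor offset in the target file, e.g. 6201
    repo: str                                  # "ossf__fuzz-introspector"
    commit_hash: str                           # pinned commit of the target repo
    target_file: str                           # file the completion context is taken from
    line_type_lca: str                         # "infile", "inproject", ...
    ground_truth: str                          # token expected at the cursor
    in_completions: bool                       # apparently: ground truth appears among the candidates
    completion_type: str                       # "function", "statement", "class", ...
    non_dunder_count_intellij: int
    non_dunder_count_jedi: int
    start_with_: bool
    first_occurrence: bool
    intellij_completions: List[str]            # raw IntelliJ candidate names
    jedi_completions: List[Dict[str, str]]     # entries of the form {"name": ..., "type": ...}
    prefix: str                                # file content up to the cursor
    intellij_completions_filtered: List[str]   # apparently the candidates with dunder members removed
    prefix_truncated: str                      # tail of the prefix
    index_level_0: int                         # the trailing __index_level_0__ column (renamed here for clarity)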
idx: 32 | idx_lca: 0 | offset: 6,201 | repo: ossf__fuzz-introspector | commit_hash: 4867924b714a7789f94fbcde53713a29ceab7272 | target_file: post-processing/analyses/fuzz_calltree_analysis.py | line_type_lca: infile | ground_truth: create_fuzz_blocker_table | in_completions: true | completion_type: function | non_dunder_count_intellij: 7 | non_dunder_count_jedi: 7 | start_with_: false | first_occurrence: true
intellij_completions:
|
[
"get_fuzz_blockers",
"name",
"create_fuzz_blocker_table",
"create_str_node_ctx_idx",
"html_create_dedicated_calltree_file",
"__init__",
"analysis_func",
"create_calltree",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
]
jedi_completions:
[
{
"name": "analysis_func",
"type": "function"
},
{
"name": "create_calltree",
"type": "function"
},
{
"name": "create_fuzz_blocker_table",
"type": "function"
},
{
"name": "create_str_node_ctx_idx",
"type": "function"
},
{
"name": "get_fuzz_blockers",
"type": "function"
},
{
"name": "html_create_dedicated_calltree_file",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
]
prefix: the fuzz_calltree_analysis.py source up to the completion point, ending at `fuzz_blocker_table = self.` (duplicate of the shared prefix kept in full under idx 37 below)
intellij_completions_filtered: ["get_fuzz_blockers", "name", "create_fuzz_blocker_table", "create_str_node_ctx_idx", "html_create_dedicated_calltree_file", "analysis_func", "create_calltree"]
prefix_truncated: tail of the same prefix (duplicate, omitted)
__index_level_0__: 22
idx: 33 | idx_lca: 0 | offset: 6,814 | repo: ossf__fuzz-introspector | commit_hash: 4867924b714a7789f94fbcde53713a29ceab7272 | target_file: post-processing/analyses/fuzz_calltree_analysis.py | line_type_lca: infile | ground_truth: create_str_node_ctx_idx | in_completions: true | completion_type: function | non_dunder_count_intellij: 7 | non_dunder_count_jedi: 7 | start_with_: false | first_occurrence: false
intellij_completions:
|
[
"get_fuzz_blockers",
"name",
"create_fuzz_blocker_table",
"create_str_node_ctx_idx",
"create_calltree",
"__init__",
"analysis_func",
"html_create_dedicated_calltree_file",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
]
jedi_completions:
[
{
"name": "analysis_func",
"type": "function"
},
{
"name": "create_calltree",
"type": "function"
},
{
"name": "create_fuzz_blocker_table",
"type": "function"
},
{
"name": "create_str_node_ctx_idx",
"type": "function"
},
{
"name": "get_fuzz_blockers",
"type": "function"
},
{
"name": "html_create_dedicated_calltree_file",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
]
prefix: the fuzz_calltree_analysis.py source up to the completion point, ending at `blocker_idxs.append(self.` (duplicate of the shared prefix kept in full under idx 37 below)
intellij_completions_filtered: ["get_fuzz_blockers", "name", "create_fuzz_blocker_table", "create_str_node_ctx_idx", "create_calltree", "analysis_func", "html_create_dedicated_calltree_file"]
prefix_truncated: tail of the same prefix (duplicate, omitted)
__index_level_0__: 23
idx: 34 | idx_lca: 0 | offset: 6,847 | repo: ossf__fuzz-introspector | commit_hash: 4867924b714a7789f94fbcde53713a29ceab7272 | target_file: post-processing/analyses/fuzz_calltree_analysis.py | line_type_lca: infile | ground_truth: cov_ct_idx | in_completions: true | completion_type: statement | non_dunder_count_intellij: 17 | non_dunder_count_jedi: 17 | start_with_: false | first_occurrence: false
intellij_completions:
|
[
"cov_ct_idx",
"depth",
"cov_forward_reds",
"dst_function_name",
"dst_function_source_file",
"children",
"cov_callsite_link",
"cov_color",
"cov_hitcount",
"cov_largest_blocked_func",
"cov_link",
"cov_parent",
"hitcount",
"parent_calltree_callsite",
"src_function_name",
"src_function_source_file",
"src_linenumber",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
]
jedi_completions:
[
{
"name": "children",
"type": "statement"
},
{
"name": "cov_callsite_link",
"type": "statement"
},
{
"name": "cov_color",
"type": "statement"
},
{
"name": "cov_ct_idx",
"type": "statement"
},
{
"name": "cov_forward_reds",
"type": "statement"
},
{
"name": "cov_hitcount",
"type": "statement"
},
{
"name": "cov_largest_blocked_func",
"type": "statement"
},
{
"name": "cov_link",
"type": "statement"
},
{
"name": "cov_parent",
"type": "statement"
},
{
"name": "depth",
"type": "statement"
},
{
"name": "dst_function_name",
"type": "statement"
},
{
"name": "dst_function_source_file",
"type": "statement"
},
{
"name": "hitcount",
"type": "statement"
},
{
"name": "parent_calltree_callsite",
"type": "statement"
},
{
"name": "src_function_name",
"type": "statement"
},
{
"name": "src_function_source_file",
"type": "statement"
},
{
"name": "src_linenumber",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
]
prefix: the fuzz_calltree_analysis.py source up to the completion point, ending at `blocker_idxs.append(self.create_str_node_ctx_idx(str(node.` (duplicate of the shared prefix kept in full under idx 37 below)
intellij_completions_filtered: ["cov_ct_idx", "depth", "cov_forward_reds", "dst_function_name", "dst_function_source_file", "children", "cov_callsite_link", "cov_color", "cov_hitcount", "cov_largest_blocked_func", "cov_link", "cov_parent", "hitcount", "parent_calltree_callsite", "src_function_name", "src_function_source_file", "src_linenumber"]
prefix_truncated: tail of the same prefix (duplicate, omitted)
__index_level_0__: 24
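Across these rows the *_filtered list and the non_dunder_count_* columns look like the raw candidate lists with dunder members dropped; for the idx 34 record above, exactly its 17 non-dunder names survive. A small sketch of that filtering follows; it is an inference from the data shown here, not the dataset's own tooling.

def drop_dunders(names):
    """Remove __dunder__ entries, which is what the filtered lists appear to do."""
    return [n for n in names if not (n.startswith("__") and n.endswith("__"))]


# Abbreviated sample from the idx 34 row above (full candidate list shortened here).
raw = ["cov_ct_idx", "depth", "cov_forward_reds", "__init__", "__repr__", "__doc__"]
print(drop_dunders(raw))       # ['cov_ct_idx', 'depth', 'cov_forward_reds']
print(len(drop_dunders(raw)))  # counts computed this way line up with non_dunder_count_intellij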
idx: 35 | idx_lca: 0 | offset: 7,754 | repo: ossf__fuzz-introspector | commit_hash: 4867924b714a7789f94fbcde53713a29ceab7272 | target_file: post-processing/analyses/fuzz_calltree_analysis.py | line_type_lca: inproject | ground_truth: FuzzerProfile | in_completions: true | completion_type: class | non_dunder_count_intellij: 14 | non_dunder_count_jedi: 19 | start_with_: false | first_occurrence: false
intellij_completions:
|
[
"FuzzerProfile",
"MergedProjectProfile",
"List",
"None",
"logger",
"add_func_to_reached_and_clone",
"FunctionProfile",
"load_all_profiles",
"read_fuzzer_data_file_to_profile",
"Any",
"Dict",
"Optional",
"Set",
"Tuple"
]
jedi_completions:
[
{
"name": "add_func_to_reached_and_clone",
"type": "function"
},
{
"name": "Any",
"type": "class"
},
{
"name": "copy",
"type": "module"
},
{
"name": "Dict",
"type": "class"
},
{
"name": "FunctionProfile",
"type": "class"
},
{
"name": "fuzz_cfg_load",
"type": "module"
},
{
"name": "fuzz_cov_load",
"type": "module"
},
{
"name": "fuzz_utils",
"type": "module"
},
{
"name": "FuzzerProfile",
"type": "class"
},
{
"name": "List",
"type": "class"
},
{
"name": "load_all_profiles",
"type": "function"
},
{
"name": "logger",
"type": "statement"
},
{
"name": "logging",
"type": "module"
},
{
"name": "MergedProjectProfile",
"type": "class"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "os",
"type": "module"
},
{
"name": "read_fuzzer_data_file_to_profile",
"type": "function"
},
{
"name": "Set",
"type": "class"
},
{
"name": "Tuple",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
]
prefix: the fuzz_calltree_analysis.py source up to the completion point, ending at `profile: fuzz_data_loader.` (duplicate of the shared prefix kept in full under idx 37 below)
intellij_completions_filtered: ["FuzzerProfile", "MergedProjectProfile", "List", "None", "logger", "add_func_to_reached_and_clone", "FunctionProfile", "load_all_profiles", "read_fuzzer_data_file_to_profile", "Any", "Dict", "Optional", "Set", "Tuple"]
prefix_truncated: tail of the same prefix (duplicate, omitted)
__index_level_0__: 25
idx: 36 | idx_lca: 0 | offset: 7,895 | repo: ossf__fuzz-introspector | commit_hash: 4867924b714a7789f94fbcde53713a29ceab7272 | target_file: post-processing/analyses/fuzz_calltree_analysis.py | line_type_lca: inproject | ground_truth: CalltreeCallsite | in_completions: true | completion_type: class | non_dunder_count_intellij: 9 | non_dunder_count_jedi: 9 | start_with_: false | first_occurrence: true
intellij_completions:
|
[
"extract_all_callsites",
"List",
"None",
"logger",
"Optional",
"CalltreeCallsite",
"data_file_read_calltree",
"extract_all_callsites_recursive",
"print_ctcs_tree"
]
jedi_completions:
[
{
"name": "CalltreeCallsite",
"type": "class"
},
{
"name": "data_file_read_calltree",
"type": "function"
},
{
"name": "extract_all_callsites",
"type": "function"
},
{
"name": "extract_all_callsites_recursive",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "logger",
"type": "statement"
},
{
"name": "logging",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "print_ctcs_tree",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
]
prefix: the fuzz_calltree_analysis.py source up to the completion point, ending at `blocker_list: List[fuzz_cfg_load.` (duplicate of the shared prefix kept in full under idx 37 below)
intellij_completions_filtered: ["extract_all_callsites", "List", "None", "logger", "Optional", "CalltreeCallsite", "data_file_read_calltree", "extract_all_callsites_recursive", "print_ctcs_tree"]
prefix_truncated: tail of the same prefix (duplicate, omitted)
__index_level_0__: 26
idx: 37 | idx_lca: 0 | offset: 8,028 | repo: ossf__fuzz-introspector | commit_hash: 4867924b714a7789f94fbcde53713a29ceab7272 | target_file: post-processing/analyses/fuzz_calltree_analysis.py | line_type_lca: inproject | ground_truth: extract_all_callsites | in_completions: true | completion_type: function | non_dunder_count_intellij: 8 | non_dunder_count_jedi: 9 | start_with_: false | first_occurrence: false
intellij_completions:
|
[
"extract_all_callsites",
"CalltreeCallsite",
"List",
"logger",
"extract_all_callsites_recursive",
"data_file_read_calltree",
"print_ctcs_tree",
"Optional"
]
jedi_completions:
[
{
"name": "CalltreeCallsite",
"type": "class"
},
{
"name": "data_file_read_calltree",
"type": "function"
},
{
"name": "extract_all_callsites",
"type": "function"
},
{
"name": "extract_all_callsites_recursive",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "logger",
"type": "statement"
},
{
"name": "logging",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "print_ctcs_tree",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
]
prefix (kept in full below; this is the shared fuzz_calltree_analysis.py prefix that the collapsed rows above refer to, here ending at `all_callsites = fuzz_cfg_load.`):
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display [function] link if we have, otherwhise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed from fuzz-introspector
We use this only for wrapping calltrees at the moment, however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.
intellij_completions_filtered: ["extract_all_callsites", "CalltreeCallsite", "List", "logger", "extract_all_callsites_recursive", "data_file_read_calltree", "print_ctcs_tree", "Optional"]
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.
| 27 |
41 | 0 | 9,121 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | infile | get_fuzz_blockers | true | function | 7 | 7 | false | false |
[
"get_fuzz_blockers",
"create_fuzz_blocker_table",
"name",
"create_str_node_ctx_idx",
"html_create_dedicated_calltree_file",
"__init__",
"analysis_func",
"create_calltree",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "analysis_func",
"type": "function"
},
{
"name": "create_calltree",
"type": "function"
},
{
"name": "create_fuzz_blocker_table",
"type": "function"
},
{
"name": "create_str_node_ctx_idx",
"type": "function"
},
{
"name": "get_fuzz_blockers",
"type": "function"
},
{
"name": "html_create_dedicated_calltree_file",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
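# Nodes are assumed to come back in calltree (pre-order) order, so the nested
# calltree-line-wrapper divs opened and closed below mirror each node's depth.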
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one; otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed by fuzz-introspector.
We use this only for wrapping calltrees at the moment; however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
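# Left-pad the index to five characters so it matches the data-calltree-idx
# values and scrollToNode links used elsewhere in the generated HTML.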
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter to nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.
|
[
"get_fuzz_blockers",
"create_fuzz_blocker_table",
"name",
"create_str_node_ctx_idx",
"html_create_dedicated_calltree_file",
"analysis_func",
"create_calltree"
] |
ee.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter to nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.
| 30 |
42 | 0 | 9,537 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | inproject | html_create_table_head | true | function | 9 | 9 | false | true |
[
"html_table_add_row",
"html_get_header",
"html_create_table_head",
"html_get_table_of_contents",
"html_get_navbar",
"html_add_header_with_link",
"Any",
"List",
"Tuple"
] |
[
{
"name": "Any",
"type": "class"
},
{
"name": "html_add_header_with_link",
"type": "function"
},
{
"name": "html_create_table_head",
"type": "function"
},
{
"name": "html_get_header",
"type": "function"
},
{
"name": "html_get_navbar",
"type": "function"
},
{
"name": "html_get_table_of_contents",
"type": "function"
},
{
"name": "html_table_add_row",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "Tuple",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one; otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed by fuzz-introspector.
We use this only for wrapping calltrees at the moment; however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter to nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.
|
[
"html_table_add_row",
"html_get_header",
"html_create_table_head",
"html_get_table_of_contents",
"html_get_navbar",
"html_add_header_with_link",
"Any",
"List",
"Tuple"
] |
idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter to nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.
| 31 |
45 | 0 | 10,653 | ossf__fuzz-introspector | 4867924b714a7789f94fbcde53713a29ceab7272 | post-processing/analyses/fuzz_calltree_analysis.py | inproject | html_table_add_row | true | function | 9 | 9 | false | true |
[
"html_create_table_head",
"html_get_header",
"List",
"html_get_table_of_contents",
"html_table_add_row",
"html_add_header_with_link",
"html_get_navbar",
"Any",
"Tuple"
] |
[
{
"name": "Any",
"type": "class"
},
{
"name": "html_add_header_with_link",
"type": "function"
},
{
"name": "html_create_table_head",
"type": "function"
},
{
"name": "html_get_header",
"type": "function"
},
{
"name": "html_get_navbar",
"type": "function"
},
{
"name": "html_get_table_of_contents",
"type": "function"
},
{
"name": "html_table_add_row",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "Tuple",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic related to calltree analysis"""
import os
import logging
import json
from typing import (
List,
Tuple,
Optional,
Set,
)
import fuzz_analysis
import fuzz_data_loader
import fuzz_utils
import fuzz_cfg_load
import fuzz_html_helpers
# For pretty printing the html code:
from bs4 import BeautifulSoup as bs
logger = logging.getLogger(name=__name__)
class FuzzCalltreeAnalysis(fuzz_analysis.AnalysisInterface):
def __init__(self):
self.name = "FuzzCalltreeAnalysis"
logger.info("Creating FuzzCalltreeAnalysis")
def analysis_func(self,
toc_list: List[Tuple[str, str, int]],
tables: List[str],
project_profile: fuzz_data_loader.MergedProjectProfile,
profiles: List[fuzz_data_loader.FuzzerProfile],
basefolder: str,
coverage_url: str,
conclusions) -> str:
"""
Creates the HTML of the calltree. Returns the HTML as a string.
"""
logger.info("Not implemented")
return ""
def create_calltree(self, profile: fuzz_data_loader.FuzzerProfile) -> str:
logger.info("In calltree")
# Generate HTML for the calltree
calltree_html_string = "<div class='section-wrapper'>"
calltree_html_string += "<h1>Fuzzer calltree</h1>"
nodes = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
for i in range(len(nodes)):
node = nodes[i]
demangled_name = fuzz_utils.demangle_cpp_func(node.dst_function_name)
# We may not want to show certain functions at times, e.g. libc functions
# in case it bloats the calltree
# libc_funcs = { "free" }
libc_funcs: Set[str] = set()
avoid = len([fn for fn in libc_funcs if fn in demangled_name]) > 0
if avoid:
continue
# Prepare strings needed in the HTML
color_to_be = node.cov_color
callsite_link = node.cov_callsite_link
link = node.cov_link
ct_idx_str = self.create_str_node_ctx_idx(str(node.cov_ct_idx))
# Only display the [function] link if we have one; otherwise show no [function] text.
if node.dst_function_source_file.replace(" ", "") != "/":
func_href = f"""<a href="{link}">[function]</a>"""
else:
func_href = ""
if i > 0:
previous_node = nodes[i - 1]
if previous_node.depth == node.depth:
calltree_html_string += "</div>"
depth_diff = previous_node.depth - node.depth
if depth_diff >= 1:
closing_divs = "</div>" # To close "calltree-line-wrapper"
closing_divs = "</div>" * (int(depth_diff) + 1)
calltree_html_string += closing_divs
calltree_html_string += f"""
<div class="{color_to_be}-background coverage-line">
<span class="coverage-line-inner" data-calltree-idx="{ct_idx_str}">
{node.depth}
<code class="language-clike">
{demangled_name}
</code>
<span class="coverage-line-filename">
{func_href}
<a href="{callsite_link}">
[call site2]
</a>
<span class="calltree-idx">[calltree idx: {ct_idx_str}]</span>
</span>
</span>
"""
if i != len(nodes) - 1:
next_node = nodes[i + 1]
if next_node.depth > node.depth:
calltree_html_string += f"""<div
class="calltree-line-wrapper open level-{int(node.depth)}"
style="padding-left: 16px">"""
elif next_node.depth < node.depth:
depth_diff = int(node.depth - next_node.depth)
calltree_html_string += "</div>" * depth_diff
calltree_html_string += "</div>"
logger.info("Calltree created")
# Write the HTML to a file called calltree_view_XX.html where XX is a counter.
calltree_file_idx = 0
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
while os.path.isfile(calltree_html_file):
calltree_file_idx += 1
calltree_html_file = f"calltree_view_{calltree_file_idx}.html"
self.html_create_dedicated_calltree_file(
calltree_html_string,
calltree_html_file,
profile,
)
return calltree_html_file
def html_create_dedicated_calltree_file(
self,
calltree_html_string,
filename,
profile: fuzz_data_loader.FuzzerProfile):
"""
Write a wrapped HTML file with the tags needed by fuzz-introspector.
We use this only for wrapping calltrees at the moment; however, down
the line it makes sense to have an easy wrapper for other HTML pages too.
"""
complete_html_string = ""
# HTML start
html_header = fuzz_html_helpers.html_get_header(
calltree=True,
title=f"Fuzz introspector: { profile.get_key() }"
)
html_header += '<div class="content-section calltree-content-section">'
complete_html_string += html_header
# Display fuzz blocker at top of page
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
fuzz_blocker_table = self.create_fuzz_blocker_table(profile, [], "", fuzz_blockers)
if fuzz_blocker_table is not None:
complete_html_string += "<div class=\"report-box\">"
complete_html_string += "<h1>Fuzz blockers</h1>"
complete_html_string += fuzz_blocker_table
complete_html_string += "</div>"
# Display calltree
complete_html_string += calltree_html_string
complete_html_string += "</div></div></div></div>"
# HTML end
html_end = '</div>'
blocker_idxs = []
for node in fuzz_blockers:
blocker_idxs.append(self.create_str_node_ctx_idx(str(node.cov_ct_idx)))
if len(blocker_idxs) > 0:
html_end = "<script>"
html_end += f"var fuzz_blocker_idxs = {json.dumps(blocker_idxs)};"
html_end += "</script>"
html_end += "<script src=\"prism.js\"></script>"
html_end += "<script src=\"clike.js\"></script>"
html_end += "<script src=\"calltree.js\"></script>"
complete_html_string += html_end
complete_html_string += "</body></html>"
# Beautify and write HTML
soup = bs(complete_html_string, 'lxml')
pretty_html = soup.prettify()
with open(filename, "w+") as cf:
cf.write(pretty_html)
def create_str_node_ctx_idx(self, cov_ct_idx):
prefixed_zeros = "0" * (len("00000") - len(cov_ct_idx))
return f"{prefixed_zeros}{cov_ct_idx}"
def get_fuzz_blockers(
self,
profile: fuzz_data_loader.FuzzerProfile,
max_blockers_to_extract=999):
"""Gets a list of fuzz blockers"""
blocker_list: List[fuzz_cfg_load.CalltreeCallsite] = list()
# Extract all callsites in calltree and exit early if none
all_callsites = fuzz_cfg_load.extract_all_callsites(profile.function_call_depths)
if len(all_callsites) == 0:
return blocker_list
# Filter to nodes that have forward reds. Extract at most max_blockers_to_extract nodes.
nodes_sorted_by_red_ahead = sorted(all_callsites,
key=lambda x: x.cov_forward_reds,
reverse=True)
for node in nodes_sorted_by_red_ahead:
if node.cov_forward_reds == 0 or len(blocker_list) >= max_blockers_to_extract:
break
blocker_list.append(node)
return blocker_list
def create_fuzz_blocker_table(
self,
profile: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
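# Same five-character zero padding as create_str_node_ctx_idx, so the
# scrollToNode link targets the node's data-calltree-idx in the calltree page.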
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.
|
[
"html_create_table_head",
"html_get_header",
"List",
"html_get_table_of_contents",
"html_table_add_row",
"html_add_header_with_link",
"html_get_navbar",
"Any",
"Tuple"
] |
: fuzz_data_loader.FuzzerProfile,
tables: List[str],
calltree_file_name: str,
fuzz_blockers=None) -> Optional[str]:
"""
Creates HTML string for table showing fuzz blockers.
"""
logger.info("Creating fuzz blocker table")
# Get the fuzz blockers
if fuzz_blockers is None:
fuzz_blockers = self.get_fuzz_blockers(
profile,
max_blockers_to_extract=12
)
if len(fuzz_blockers) == 0:
return None
html_table_string = "<p class='no-top-margin'>The followings nodes " \
"represent call sites where fuzz blockers occur</p>"
tables.append(f"myTable{len(tables)}")
html_table_string += fuzz_html_helpers.html_create_table_head(
tables[-1],
[
("Amount of callsites blocked",
"Total amount of callsites blocked"),
("Calltree index",
"Index in call tree where the fuzz blocker is."),
("Parent function",
"Function in which the call site that blocks resides."),
("Callsite",
""),
("Largest blocked function",
"This is the function with highest cyclomatiic complexity amongst"
"all of the functions that are blocked. As such, it's a way of "
"highlighting a potentially important function being blocked")
],
sort_by_column=0,
sort_order="desc"
)
for node in fuzz_blockers:
link_prefix = "0" * (5 - len(str(node.cov_ct_idx)))
node_link = "%s?scrollToNode=%s%s" % (
calltree_file_name,
link_prefix,
node.cov_ct_idx
)
html_table_string += fuzz_html_helpers.
| 34 |
64 | 1 | 4,330 | vanheeringen-lab__seq2science | d5ff9782c8f6c4cd989f74684154c508b7c65127 | docs/scripts/clean_dags.py | infile | type | true | statement | 17 | 16 | false | true |
[
"edges",
"nodes",
"label2id",
"name",
"type",
"__init__",
"_get_node_id",
"_order_edges",
"color_node",
"edge_style",
"graph_style",
"hide_node",
"node_style",
"remove_edge",
"remove_node",
"transitive_reduction",
"write",
"for",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "color_node",
"type": "function"
},
{
"name": "edge_style",
"type": "statement"
},
{
"name": "edges",
"type": "statement"
},
{
"name": "graph_style",
"type": "statement"
},
{
"name": "hide_node",
"type": "function"
},
{
"name": "label2id",
"type": "statement"
},
{
"name": "name",
"type": "statement"
},
{
"name": "node_style",
"type": "statement"
},
{
"name": "nodes",
"type": "statement"
},
{
"name": "remove_edge",
"type": "function"
},
{
"name": "remove_node",
"type": "function"
},
{
"name": "transitive_reduction",
"type": "function"
},
{
"name": "type",
"type": "statement"
},
{
"name": "write",
"type": "function"
},
{
"name": "_get_node_id",
"type": "function"
},
{
"name": "_order_edges",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
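# The first line of the graphviz file is e.g. "digraph snakemake_dag {",
# which gives us the graph type and name.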
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
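# (edge lines look like e.g. "1 -> 0")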
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.
|
[
"edges",
"nodes",
"label2id",
"name",
"type",
"color_node",
"edge_style",
"graph_style",
"hide_node",
"node_style",
"remove_edge",
"remove_node",
"transitive_reduction",
"write",
"for"
] |
8 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.
| 45 |
65 | 1 | 4,341 | vanheeringen-lab__seq2science | d5ff9782c8f6c4cd989f74684154c508b7c65127 | docs/scripts/clean_dags.py | infile | name | true | statement | 17 | 16 | false | true |
[
"name",
"type",
"edges",
"nodes",
"label2id",
"__init__",
"_get_node_id",
"_order_edges",
"color_node",
"edge_style",
"graph_style",
"hide_node",
"node_style",
"remove_edge",
"remove_node",
"transitive_reduction",
"write",
"for",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "color_node",
"type": "function"
},
{
"name": "edge_style",
"type": "statement"
},
{
"name": "edges",
"type": "statement"
},
{
"name": "graph_style",
"type": "statement"
},
{
"name": "hide_node",
"type": "function"
},
{
"name": "label2id",
"type": "statement"
},
{
"name": "name",
"type": "statement"
},
{
"name": "node_style",
"type": "statement"
},
{
"name": "nodes",
"type": "statement"
},
{
"name": "remove_edge",
"type": "function"
},
{
"name": "remove_node",
"type": "function"
},
{
"name": "transitive_reduction",
"type": "function"
},
{
"name": "type",
"type": "statement"
},
{
"name": "write",
"type": "function"
},
{
"name": "_get_node_id",
"type": "function"
},
{
"name": "_order_edges",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.
|
[
"name",
"type",
"edges",
"nodes",
"label2id",
"color_node",
"edge_style",
"graph_style",
"hide_node",
"node_style",
"remove_edge",
"remove_node",
"transitive_reduction",
"write",
"for"
] |
, # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.
| 46 |
66 | 1 | 4,381 | vanheeringen-lab__seq2science | d5ff9782c8f6c4cd989f74684154c508b7c65127 | docs/scripts/clean_dags.py | infile | graph_style | true | statement | 16 | 16 | false | true |
[
"edges",
"nodes",
"label2id",
"edge_style",
"node_style",
"__init__",
"_get_node_id",
"_order_edges",
"color_node",
"graph_style",
"hide_node",
"name",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "color_node",
"type": "function"
},
{
"name": "edge_style",
"type": "statement"
},
{
"name": "edges",
"type": "statement"
},
{
"name": "graph_style",
"type": "statement"
},
{
"name": "hide_node",
"type": "function"
},
{
"name": "label2id",
"type": "statement"
},
{
"name": "name",
"type": "statement"
},
{
"name": "node_style",
"type": "statement"
},
{
"name": "nodes",
"type": "statement"
},
{
"name": "remove_edge",
"type": "function"
},
{
"name": "remove_node",
"type": "function"
},
{
"name": "transitive_reduction",
"type": "function"
},
{
"name": "type",
"type": "statement"
},
{
"name": "write",
"type": "function"
},
{
"name": "_get_node_id",
"type": "function"
},
{
"name": "_order_edges",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.
|
[
"edges",
"nodes",
"label2id",
"edge_style",
"node_style",
"color_node",
"graph_style",
"hide_node",
"name",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write"
] |
, # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.
| 47 |
67 | 1 | 4,419 | vanheeringen-lab__seq2science | d5ff9782c8f6c4cd989f74684154c508b7c65127 | docs/scripts/clean_dags.py | infile | node_style | true | statement | 16 | 16 | false | true |
[
"edges",
"nodes",
"label2id",
"edge_style",
"graph_style",
"__init__",
"_get_node_id",
"_order_edges",
"color_node",
"hide_node",
"name",
"node_style",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "color_node",
"type": "function"
},
{
"name": "edge_style",
"type": "statement"
},
{
"name": "edges",
"type": "statement"
},
{
"name": "graph_style",
"type": "statement"
},
{
"name": "hide_node",
"type": "function"
},
{
"name": "label2id",
"type": "statement"
},
{
"name": "name",
"type": "statement"
},
{
"name": "node_style",
"type": "statement"
},
{
"name": "nodes",
"type": "statement"
},
{
"name": "remove_edge",
"type": "function"
},
{
"name": "remove_node",
"type": "function"
},
{
"name": "transitive_reduction",
"type": "function"
},
{
"name": "type",
"type": "statement"
},
{
"name": "write",
"type": "function"
},
{
"name": "_get_node_id",
"type": "function"
},
{
"name": "_order_edges",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.
|
[
"edges",
"nodes",
"label2id",
"edge_style",
"graph_style",
"color_node",
"hide_node",
"name",
"node_style",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write"
] |
"0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.
| 48 | 68 | 1 | 4,456 | vanheeringen-lab__seq2science | d5ff9782c8f6c4cd989f74684154c508b7c65127 | docs/scripts/clean_dags.py | infile | edge_style | true | statement | 16 | 16 | false | true |
[
"edges",
"nodes",
"label2id",
"node_style",
"graph_style",
"__init__",
"_get_node_id",
"_order_edges",
"color_node",
"edge_style",
"hide_node",
"name",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "color_node",
"type": "function"
},
{
"name": "edge_style",
"type": "statement"
},
{
"name": "edges",
"type": "statement"
},
{
"name": "graph_style",
"type": "statement"
},
{
"name": "hide_node",
"type": "function"
},
{
"name": "label2id",
"type": "statement"
},
{
"name": "name",
"type": "statement"
},
{
"name": "node_style",
"type": "statement"
},
{
"name": "nodes",
"type": "statement"
},
{
"name": "remove_edge",
"type": "function"
},
{
"name": "remove_node",
"type": "function"
},
{
"name": "transitive_reduction",
"type": "function"
},
{
"name": "type",
"type": "statement"
},
{
"name": "write",
"type": "function"
},
{
"name": "_get_node_id",
"type": "function"
},
{
"name": "_order_edges",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.
|
[
"edges",
"nodes",
"label2id",
"node_style",
"graph_style",
"color_node",
"edge_style",
"hide_node",
"name",
"remove_edge",
"remove_node",
"transitive_reduction",
"type",
"write"
] |
igraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.
| 49 | 72 | 1 | 5,014 | vanheeringen-lab__seq2science | d5ff9782c8f6c4cd989f74684154c508b7c65127 | docs/scripts/clean_dags.py | common | label2id | true | statement | 16 | 16 | false | false |
[
"nodes",
"edges",
"node_style",
"name",
"type",
"__init__",
"_get_node_id",
"_order_edges",
"color_node",
"edge_style",
"graph_style",
"hide_node",
"label2id",
"remove_edge",
"remove_node",
"transitive_reduction",
"write",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "color_node",
"type": "function"
},
{
"name": "edge_style",
"type": "statement"
},
{
"name": "edges",
"type": "statement"
},
{
"name": "graph_style",
"type": "statement"
},
{
"name": "hide_node",
"type": "function"
},
{
"name": "label2id",
"type": "statement"
},
{
"name": "name",
"type": "statement"
},
{
"name": "node_style",
"type": "statement"
},
{
"name": "nodes",
"type": "statement"
},
{
"name": "remove_edge",
"type": "function"
},
{
"name": "remove_node",
"type": "function"
},
{
"name": "transitive_reduction",
"type": "function"
},
{
"name": "type",
"type": "statement"
},
{
"name": "write",
"type": "function"
},
{
"name": "_get_node_id",
"type": "function"
},
{
"name": "_order_edges",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.
|
[
"nodes",
"edges",
"node_style",
"name",
"type",
"color_node",
"edge_style",
"graph_style",
"hide_node",
"label2id",
"remove_edge",
"remove_node",
"transitive_reduction",
"write"
] |
ead edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.
| 53 | 87 | 1 | 6,374 | vanheeringen-lab__seq2science | d5ff9782c8f6c4cd989f74684154c508b7c65127 | docs/scripts/clean_dags.py | infile | remove_node | true | function | 16 | 16 | false | true |
[
"edges",
"nodes",
"type",
"label2id",
"node_style",
"__init__",
"_get_node_id",
"_order_edges",
"color_node",
"edge_style",
"graph_style",
"hide_node",
"name",
"remove_edge",
"remove_node",
"transitive_reduction",
"write",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "color_node",
"type": "function"
},
{
"name": "edge_style",
"type": "statement"
},
{
"name": "edges",
"type": "statement"
},
{
"name": "graph_style",
"type": "statement"
},
{
"name": "hide_node",
"type": "function"
},
{
"name": "label2id",
"type": "statement"
},
{
"name": "name",
"type": "statement"
},
{
"name": "node_style",
"type": "statement"
},
{
"name": "nodes",
"type": "statement"
},
{
"name": "remove_edge",
"type": "function"
},
{
"name": "remove_node",
"type": "function"
},
{
"name": "transitive_reduction",
"type": "function"
},
{
"name": "type",
"type": "statement"
},
{
"name": "write",
"type": "function"
},
{
"name": "_get_node_id",
"type": "function"
},
{
"name": "_order_edges",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import re
import networkx as nx
rules_to_keep = {
# "0.13 0.6 0.85", # yellow
# "0.09 0.6 0.85", # brown
# "0.28 0.6 0.85", # green
# "0.04 0.6 0.85", # red
# "0.00 0.6 0.85", # cherry
# "0.63 0.6 0.85", # purple
# "0.59 0.6 0.85", # dark blue
# "0.58 0.6 0.85", # blue
# "0.49 0.6 0.85", # teal
# input
"get_genome": "0.49 0.6 0.85", # teal
"ena2fastq_SE": "0.49 0.6 0.85", # teal
"ena2fastq_PE": "0.49 0.6 0.85", # teal
"sra2fastq_SE": "0.49 0.6 0.85", # teal
"sra2fastq_PE": "0.49 0.6 0.85", # teal
# fastq
"fastp_SE": "0.13 0.6 0.85", # yellow
"fastp_PE": "0.13 0.6 0.85", # yellow
"trimgalore_SE": "0.13 0.6 0.85", # yellow
"trimgalore_PE": "0.13 0.6 0.85", # yellow
"merge_replicates": "0.13 0.6 0.85", # yellow
# align
"bowtie2_align": "0.13 0.6 0.85", # yellow
"bwa_mem": "0.13 0.6 0.85", # yellow
"bwa_mem2": "0.13 0.6 0.85", # yellow
"hisat2_align": "0.13 0.6 0.85", # yellow
"minimap2_align": "0.13 0.6 0.85", # yellow
"star_align": "0.13 0.6 0.85", # yellow
"mark_duplicates": "0.13 0.6 0.85", # yellow
"sieve_bam": "0.13 0.6 0.85", # yellow
# peak counting
"macs2_callpeak": "0.13 0.6 0.85", # yellow
"call_peak_genrich": "0.13 0.6 0.85", # yellow
"hmmratac": "0.13 0.6 0.85", # yellow
"create_SNAP_object": "0.13 0.6 0.85", # yellow
# gene counting/quantification
"htseq_count": "0.13 0.6 0.85", # yellow
"featurecounts": "0.13 0.6 0.85", # yellow
"salmon_quant": "0.13 0.6 0.85", # yellow
# trackhub
"bam_bigwig": "0.00 0.6 0.85", # cherry
"peak_bigpeak": "0.00 0.6 0.85", # cherry
"bedgraph_bigwig": "0.00 0.6 0.85", # cherry
"trackhub": "0.00 0.6 0.85", # cherry
# multiqc
"multiqc": "0.63 0.6 0.85", # purple
# peak files
"coverage_table": "0.28 0.6 0.85", # green
"onehot_peaks": "0.28 0.6 0.85", # green
"create_bins_SNAP_object": "0.28 0.6 0.85", # green
# gene files
"gene_id2name": "0.28 0.6 0.85", # green
"tpm_matrix": "0.28 0.6 0.85", # green
"count_matrix": "0.28 0.6 0.85", # green
"txi_count_matrix": "0.28 0.6 0.85", # green
"pytxi_count_matrix": "0.28 0.6 0.85", # green
"citeseqcount": "0.28 0.6 0.85", # green
"kallistobus_count": "0.28 0.6 0.85", # green
# other
"gimme_maelstrom": "0.28 0.6 0.85", # green
"deseq2": "0.28 0.6 0.85", # green
"dexseq_count_matrix": "0.28 0.6 0.85", # green
}
class Digraph:
def __init__(self, infile):
with open(infile) as f:
lines = f.readlines()
self.type, self.name = lines[0].split()[0:2]
self.graph_style = lines[1]
self.node_style = lines[2]
self.edge_style = lines[3]
self.nodes = dict()
self.edges = set()
self.label2id = dict()
l = re.compile(r'label = "(.*?)"')
c = re.compile(r'color = "(.*?)"')
s = re.compile(r'style="(.*?)"')
for line in lines[4:]:
line = line.strip()
# read edges
edge = tuple(line.split(" -> "))
if len(edge) == 2:
self.edges.add(edge)
# read nodes
elif "[" in line[:5]:
node_id = line.split("[")[0]
label = l.search(line).groups()[0]
color = c.search(line).groups()[0]
style = s.search(line).groups()[0]
self.nodes[node_id] = {
"label": label,
"color": color,
"style": style,
}
self.label2id[label] = node_id
def _order_edges(self):
"""
edges are sorted by
1) ascending target nodes and
2) descending source nodes
"""
ordered = []
sources = sorted(set(int(e[0]) for e in list(self.edges)), reverse=True)
targets = sorted(set(int(e[1]) for e in list(self.edges)))
for target in targets:
for source in sources:
edge = (str(source), str(target))
if edge in self.edges:
ordered.append(edge)
return ordered
def write(self, fname):
with open(fname, "w") as f:
f.write(" ".join([self.type, self.name, "{\n"]))
f.write(self.graph_style)
f.write(self.node_style)
f.write(self.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.
|
[
"edges",
"nodes",
"type",
"label2id",
"node_style",
"color_node",
"edge_style",
"graph_style",
"hide_node",
"name",
"remove_edge",
"remove_node",
"transitive_reduction",
"write"
] |
f.edge_style)
for k, v in self.nodes.items():
l = v["label"]
c = v["color"]
s = v["style"]
line = f' {k}[label = "{l}", color = "{c}", style="{s}"];\n'
f.write(line)
for a, b in self._order_edges():
line = f" {a} -> {b}\n"
f.write(line)
f.write("}\n")
def _get_node_id(self, node):
node = str(node)
if node.isdigit() and node in self.nodes:
return node
return self.label2id.get(node)
def color_node(self, node, color):
node_id = self._get_node_id(node)
if node_id is None:
return
self.nodes[node_id]["color"] = color
def remove_node(self, node):
node_id = self._get_node_id(node)
if node_id is None:
return
# remove the node
label = self.nodes[node_id]["label"]
del self.nodes[node_id]
del self.label2id[label]
# remove all edges with this node
for edge in self.edges.copy():
if node_id in edge:
self.edges.remove(edge)
def remove_edge(self, source, target):
source_id = self._get_node_id(source)
target_id = self._get_node_id(target)
edge = (source_id, target_id)
if edge in self.edges:
self.edges.remove(edge)
def hide_node(self, node):
"""remove a node, and connect incoming edges and outgoing edges"""
node_id = self._get_node_id(node)
if node_id is None:
return
# identify parent and daughter nodes
parents = []
daughters = []
for edge in self.edges:
if node_id == edge[0]:
daughters.append(edge[1])
elif node_id == edge[1]:
parents.append(edge[0])
# remove the node
self.
| 68 | 99 | 4 | 2,249 | Significant-Gravitas__Auto-GPT | 0569d6652fbffa665de6e42403a48783fbede8ce | autogpt/commands.py | inproject | add | true | function | 12 | 12 | false | true |
[
"get",
"cfg",
"get_stats",
"data",
"index",
"add",
"clear",
"dimension",
"filename",
"get_relevant",
"redis",
"vec_num",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "add",
"type": "function"
},
{
"name": "cfg",
"type": "statement"
},
{
"name": "clear",
"type": "function"
},
{
"name": "data",
"type": "statement"
},
{
"name": "dimension",
"type": "statement"
},
{
"name": "filename",
"type": "statement"
},
{
"name": "get",
"type": "function"
},
{
"name": "get_relevant",
"type": "function"
},
{
"name": "get_stats",
"type": "function"
},
{
"name": "index",
"type": "statement"
},
{
"name": "redis",
"type": "statement"
},
{
"name": "vec_num",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__bool__",
"type": "instance"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__ge__",
"type": "instance"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__gt__",
"type": "instance"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__le__",
"type": "instance"
},
{
"name": "__lt__",
"type": "instance"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasshook__",
"type": "function"
}
] |
from autogpt import browse
import json
from autogpt.memory import get_memory
import datetime
import autogpt.agent_manager as agents
from autogpt import speak
from autogpt.config import Config
import autogpt.ai_functions as ai
from autogpt.file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
from autogpt.execute_code import execute_python_file, execute_shell
from autogpt.json_parser import fix_and_parse_json
from autogpt.image_gen import generate_image
from duckduckgo_search import ddg
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
cfg = Config()
def is_valid_int(value):
try:
int(value)
return True
except ValueError:
return False
def get_command(response):
"""Parse the response and return the command name and arguments"""
try:
response_json = fix_and_parse_json(response)
if "command" not in response_json:
return "Error:" , "Missing 'command' object in JSON"
command = response_json["command"]
if "name" not in command:
return "Error:", "Missing 'name' field in 'command' object"
command_name = command["name"]
# Use an empty dictionary if 'args' field is not present in 'command' object
arguments = command.get("args", {})
return command_name, arguments
except json.decoder.JSONDecodeError:
return "Error:", "Invalid JSON"
# All other errors, return "Error: + error message"
except Exception as e:
return "Error:", str(e)
def execute_command(command_name, arguments):
"""Execute the command and return the result"""
memory = get_memory(cfg)
try:
if command_name == "google":
# Check if the Google API key is set and use the official search method
# If the API key is not set or has only whitespaces, use the unofficial search method
if cfg.google_api_key and (cfg.google_api_key.strip() if cfg.google_api_key else None):
return google_official_search(arguments["input"])
else:
return google_search(arguments["input"])
elif command_name == "memory_add":
return memory.
|
[
"get",
"cfg",
"get_stats",
"data",
"index",
"add",
"clear",
"dimension",
"filename",
"get_relevant",
"redis",
"vec_num"
] |
from autogpt.execute_code import execute_python_file, execute_shell
from autogpt.json_parser import fix_and_parse_json
from autogpt.image_gen import generate_image
from duckduckgo_search import ddg
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
cfg = Config()
def is_valid_int(value):
try:
int(value)
return True
except ValueError:
return False
def get_command(response):
"""Parse the response and return the command name and arguments"""
try:
response_json = fix_and_parse_json(response)
if "command" not in response_json:
return "Error:" , "Missing 'command' object in JSON"
command = response_json["command"]
if "name" not in command:
return "Error:", "Missing 'name' field in 'command' object"
command_name = command["name"]
# Use an empty dictionary if 'args' field is not present in 'command' object
arguments = command.get("args", {})
return command_name, arguments
except json.decoder.JSONDecodeError:
return "Error:", "Invalid JSON"
# All other errors, return "Error: + error message"
except Exception as e:
return "Error:", str(e)
def execute_command(command_name, arguments):
"""Execute the command and return the result"""
memory = get_memory(cfg)
try:
if command_name == "google":
# Check if the Google API key is set and use the official search method
# If the API key is not set or has only whitespaces, use the unofficial search method
if cfg.google_api_key and (cfg.google_api_key.strip() if cfg.google_api_key else None):
return google_official_search(arguments["input"])
else:
return google_search(arguments["input"])
elif command_name == "memory_add":
return memory.
| 74 | 106 | 4 | 10,947 | Significant-Gravitas__Auto-GPT | 0569d6652fbffa665de6e42403a48783fbede8ce | autogpt/commands.py | random | speak_mode | true | statement | 63 | 63 | false | false |
[
"google_api_key",
"speak_mode",
"execute_local_commands",
"fast_llm_model",
"custom_search_engine_id",
"ai_settings_file",
"AZURE_CONFIG_FILE",
"azure_model_to_deployment_id_map",
"browse_chunk_max_length",
"browse_summary_max_token",
"continuous_limit",
"continuous_mode",
"debug_mode",
"elevenlabs_api_key",
"elevenlabs_voice_1_id",
"elevenlabs_voice_2_id",
"fast_token_limit",
"get_azure_deployment_id_for_model",
"github_api_key",
"github_username",
"huggingface_api_token",
"image_provider",
"load_azure_config",
"memory_backend",
"memory_index",
"milvus_addr",
"milvus_collection",
"openai_api_base",
"openai_api_key",
"openai_api_type",
"openai_api_version",
"pinecone_api_key",
"pinecone_region",
"redis_host",
"redis_password",
"redis_port",
"set_browse_chunk_max_length",
"set_browse_summary_max_token",
"set_continuous_limit",
"set_continuous_mode",
"set_custom_search_engine_id",
"set_debug_mode",
"set_elevenlabs_api_key",
"set_elevenlabs_voice_1_id",
"set_elevenlabs_voice_2_id",
"set_fast_llm_model",
"set_fast_token_limit",
"set_google_api_key",
"set_openai_api_key",
"set_pinecone_api_key",
"set_pinecone_region",
"set_smart_llm_model",
"set_smart_token_limit",
"set_speak_mode",
"skip_reprompt",
"smart_llm_model",
"smart_token_limit",
"temperature",
"use_azure",
"use_brian_tts",
"use_mac_os_tts",
"user_agent",
"wipe_redis_on_start",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "ai_settings_file",
"type": "statement"
},
{
"name": "AZURE_CONFIG_FILE",
"type": "statement"
},
{
"name": "azure_model_to_deployment_id_map",
"type": "statement"
},
{
"name": "browse_chunk_max_length",
"type": "statement"
},
{
"name": "browse_summary_max_token",
"type": "statement"
},
{
"name": "continuous_limit",
"type": "statement"
},
{
"name": "continuous_mode",
"type": "statement"
},
{
"name": "custom_search_engine_id",
"type": "statement"
},
{
"name": "debug_mode",
"type": "statement"
},
{
"name": "elevenlabs_api_key",
"type": "statement"
},
{
"name": "elevenlabs_voice_1_id",
"type": "statement"
},
{
"name": "elevenlabs_voice_2_id",
"type": "statement"
},
{
"name": "execute_local_commands",
"type": "statement"
},
{
"name": "fast_llm_model",
"type": "statement"
},
{
"name": "fast_token_limit",
"type": "statement"
},
{
"name": "get_azure_deployment_id_for_model",
"type": "function"
},
{
"name": "github_api_key",
"type": "statement"
},
{
"name": "github_username",
"type": "statement"
},
{
"name": "google_api_key",
"type": "statement"
},
{
"name": "huggingface_api_token",
"type": "statement"
},
{
"name": "image_provider",
"type": "statement"
},
{
"name": "load_azure_config",
"type": "function"
},
{
"name": "memory_backend",
"type": "statement"
},
{
"name": "memory_index",
"type": "statement"
},
{
"name": "milvus_addr",
"type": "statement"
},
{
"name": "milvus_collection",
"type": "statement"
},
{
"name": "openai_api_base",
"type": "statement"
},
{
"name": "openai_api_key",
"type": "statement"
},
{
"name": "openai_api_type",
"type": "statement"
},
{
"name": "openai_api_version",
"type": "statement"
},
{
"name": "pinecone_api_key",
"type": "statement"
},
{
"name": "pinecone_region",
"type": "statement"
},
{
"name": "redis_host",
"type": "statement"
},
{
"name": "redis_password",
"type": "statement"
},
{
"name": "redis_port",
"type": "statement"
},
{
"name": "set_browse_chunk_max_length",
"type": "function"
},
{
"name": "set_browse_summary_max_token",
"type": "function"
},
{
"name": "set_continuous_limit",
"type": "function"
},
{
"name": "set_continuous_mode",
"type": "function"
},
{
"name": "set_custom_search_engine_id",
"type": "function"
},
{
"name": "set_debug_mode",
"type": "function"
},
{
"name": "set_elevenlabs_api_key",
"type": "function"
},
{
"name": "set_elevenlabs_voice_1_id",
"type": "function"
},
{
"name": "set_elevenlabs_voice_2_id",
"type": "function"
},
{
"name": "set_fast_llm_model",
"type": "function"
},
{
"name": "set_fast_token_limit",
"type": "function"
},
{
"name": "set_google_api_key",
"type": "function"
},
{
"name": "set_openai_api_key",
"type": "function"
},
{
"name": "set_pinecone_api_key",
"type": "function"
},
{
"name": "set_pinecone_region",
"type": "function"
},
{
"name": "set_smart_llm_model",
"type": "function"
},
{
"name": "set_smart_token_limit",
"type": "function"
},
{
"name": "set_speak_mode",
"type": "function"
},
{
"name": "skip_reprompt",
"type": "statement"
},
{
"name": "smart_llm_model",
"type": "statement"
},
{
"name": "smart_token_limit",
"type": "statement"
},
{
"name": "speak_mode",
"type": "statement"
},
{
"name": "temperature",
"type": "statement"
},
{
"name": "use_azure",
"type": "statement"
},
{
"name": "use_brian_tts",
"type": "statement"
},
{
"name": "use_mac_os_tts",
"type": "statement"
},
{
"name": "user_agent",
"type": "statement"
},
{
"name": "wipe_redis_on_start",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from autogpt import browse
import json
from autogpt.memory import get_memory
import datetime
import autogpt.agent_manager as agents
from autogpt import speak
from autogpt.config import Config
import autogpt.ai_functions as ai
from autogpt.file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
from autogpt.execute_code import execute_python_file, execute_shell
from autogpt.json_parser import fix_and_parse_json
from autogpt.image_gen import generate_image
from duckduckgo_search import ddg
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
cfg = Config()
def is_valid_int(value):
try:
int(value)
return True
except ValueError:
return False
def get_command(response):
"""Parse the response and return the command name and arguments"""
try:
response_json = fix_and_parse_json(response)
if "command" not in response_json:
return "Error:" , "Missing 'command' object in JSON"
command = response_json["command"]
if "name" not in command:
return "Error:", "Missing 'name' field in 'command' object"
command_name = command["name"]
# Use an empty dictionary if 'args' field is not present in 'command' object
arguments = command.get("args", {})
return command_name, arguments
except json.decoder.JSONDecodeError:
return "Error:", "Invalid JSON"
# All other errors, return "Error: + error message"
except Exception as e:
return "Error:", str(e)
def execute_command(command_name, arguments):
"""Execute the command and return the result"""
memory = get_memory(cfg)
try:
if command_name == "google":
# Check if the Google API key is set and use the official search method
# If the API key is not set or has only whitespaces, use the unofficial search method
if cfg.google_api_key and (cfg.google_api_key.strip() if cfg.google_api_key else None):
return google_official_search(arguments["input"])
else:
return google_search(arguments["input"])
elif command_name == "memory_add":
return memory.add(arguments["string"])
elif command_name == "start_agent":
return start_agent(
arguments["name"],
arguments["task"],
arguments["prompt"])
elif command_name == "message_agent":
return message_agent(arguments["key"], arguments["message"])
elif command_name == "list_agents":
return list_agents()
elif command_name == "delete_agent":
return delete_agent(arguments["key"])
elif command_name == "get_text_summary":
return get_text_summary(arguments["url"], arguments["question"])
elif command_name == "get_hyperlinks":
return get_hyperlinks(arguments["url"])
elif command_name == "clone_repository":
return clone_repository(arguments["repo_url"], arguments["clone_path"])
elif command_name == "read_file":
return read_file(arguments["file"])
elif command_name == "write_to_file":
return write_to_file(arguments["file"], arguments["text"])
elif command_name == "append_to_file":
return append_to_file(arguments["file"], arguments["text"])
elif command_name == "delete_file":
return delete_file(arguments["file"])
elif command_name == "search_files":
return search_files(arguments["directory"])
elif command_name == "browse_website":
return browse_website(arguments["url"], arguments["question"])
# TODO: Change these to take in a file rather than pasted code, if
# non-file is given, return instructions "Input should be a python
# filepath, write your code to file and try again"
elif command_name == "evaluate_code":
return ai.evaluate_code(arguments["code"])
elif command_name == "improve_code":
return ai.improve_code(arguments["suggestions"], arguments["code"])
elif command_name == "write_tests":
return ai.write_tests(arguments["code"], arguments.get("focus"))
elif command_name == "execute_python_file": # Add this command
return execute_python_file(arguments["file"])
elif command_name == "execute_shell":
if cfg.execute_local_commands:
return execute_shell(arguments["command_line"])
else:
return "You are not allowed to run local shell commands. To execute shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' in your config. Do not attempt to bypass the restriction."
elif command_name == "generate_image":
return generate_image(arguments["prompt"])
elif command_name == "do_nothing":
return "No action performed."
elif command_name == "task_complete":
shutdown()
else:
return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format."
# All errors, return "Error: + error message"
except Exception as e:
return "Error: " + str(e)
def get_datetime():
"""Return the current date and time"""
return "Current date and time: " + \
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def google_search(query, num_results=8):
"""Return the results of a google search"""
search_results = []
for j in ddg(query, max_results=num_results):
search_results.append(j)
return json.dumps(search_results, ensure_ascii=False, indent=4)
def google_official_search(query, num_results=8):
"""Return the results of a google search using the official Google API"""
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
import json
try:
# Get the Google API key and Custom Search Engine ID from the config file
api_key = cfg.google_api_key
custom_search_engine_id = cfg.custom_search_engine_id
# Initialize the Custom Search API service
service = build("customsearch", "v1", developerKey=api_key)
# Send the search query and retrieve the results
result = service.cse().list(q=query, cx=custom_search_engine_id, num=num_results).execute()
# Extract the search result items from the response
search_results = result.get("items", [])
# Create a list of only the URLs from the search results
search_results_links = [item["link"] for item in search_results]
except HttpError as e:
# Handle errors in the API call
error_details = json.loads(e.content.decode())
# Check if the error is related to an invalid or missing API key
if error_details.get("error", {}).get("code") == 403 and "invalid API key" in error_details.get("error", {}).get("message", ""):
return "Error: The provided Google API key is invalid or missing."
else:
return f"Error: {e}"
# Return the list of search result URLs
return search_results_links
def browse_website(url, question):
"""Browse a website and return the summary and links"""
summary = get_text_summary(url, question)
links = get_hyperlinks(url)
# Limit links to 5
if len(links) > 5:
links = links[:5]
result = f"""Website Content Summary: {summary}\n\nLinks: {links}"""
return result
def get_text_summary(url, question):
"""Return the results of a google search"""
text = browse.scrape_text(url)
summary = browse.summarize_text(url, text, question)
return """ "Result" : """ + summary
def get_hyperlinks(url):
"""Return the results of a google search"""
link_list = browse.scrape_links(url)
return link_list
def commit_memory(string):
"""Commit a string to memory"""
_text = f"""Committing memory with string "{string}" """
mem.permanent_memory.append(string)
return _text
def delete_memory(key):
"""Delete a memory with a given key"""
if key >= 0 and key < len(mem.permanent_memory):
_text = "Deleting memory with key " + str(key)
del mem.permanent_memory[key]
print(_text)
return _text
else:
print("Invalid key, cannot delete memory.")
return None
def overwrite_memory(key, string):
"""Overwrite a memory with a given key and string"""
# Check if the key is a valid integer
if is_valid_int(key):
key_int = int(key)
# Check if the integer key is within the range of the permanent_memory list
if 0 <= key_int < len(mem.permanent_memory):
_text = "Overwriting memory with key " + str(key) + " and string " + string
# Overwrite the memory slot with the given integer key and string
mem.permanent_memory[key_int] = string
print(_text)
return _text
else:
print(f"Invalid key '{key}', out of range.")
return None
# Check if the key is a valid string
elif isinstance(key, str):
_text = "Overwriting memory with key " + key + " and string " + string
# Overwrite the memory slot with the given string key and string
mem.permanent_memory[key] = string
print(_text)
return _text
else:
print(f"Invalid key '{key}', must be an integer or a string.")
return None
def shutdown():
"""Shut down the program"""
print("Shutting down...")
quit()
def start_agent(name, task, prompt, model=cfg.fast_llm_model):
"""Start an agent with a given name, task, and prompt"""
global cfg
# Remove underscores from name
voice_name = name.replace("_", " ")
first_message = f"""You are {name}. Respond with: "Acknowledged"."""
agent_intro = f"{voice_name} here, Reporting for duty!"
# Create agent
if cfg.speak_mode:
speak.say_text(agent_intro, 1)
key, ack = agents.create_agent(task, first_message, model)
if cfg.speak_mode:
speak.say_text(f"Hello {voice_name}. Your task is as follows. {task}.")
# Assign task (prompt), get response
agent_response = message_agent(key, prompt)
return f"Agent {name} created with key {key}. First response: {agent_response}"
def message_agent(key, message):
"""Message an agent with a given key and message"""
global cfg
# Check if the key is a valid integer
if is_valid_int(key):
agent_response = agents.message_agent(int(key), message)
# Check if the key is a valid string
elif isinstance(key, str):
agent_response = agents.message_agent(key, message)
else:
return "Invalid key, must be an integer or a string."
# Speak response
if cfg.
|
[
"google_api_key",
"speak_mode",
"execute_local_commands",
"fast_llm_model",
"custom_search_engine_id",
"ai_settings_file",
"AZURE_CONFIG_FILE",
"azure_model_to_deployment_id_map",
"browse_chunk_max_length",
"browse_summary_max_token",
"continuous_limit",
"continuous_mode",
"debug_mode",
"elevenlabs_api_key",
"elevenlabs_voice_1_id",
"elevenlabs_voice_2_id",
"fast_token_limit",
"get_azure_deployment_id_for_model",
"github_api_key",
"github_username",
"huggingface_api_token",
"image_provider",
"load_azure_config",
"memory_backend",
"memory_index",
"milvus_addr",
"milvus_collection",
"openai_api_base",
"openai_api_key",
"openai_api_type",
"openai_api_version",
"pinecone_api_key",
"pinecone_region",
"redis_host",
"redis_password",
"redis_port",
"set_browse_chunk_max_length",
"set_browse_summary_max_token",
"set_continuous_limit",
"set_continuous_mode",
"set_custom_search_engine_id",
"set_debug_mode",
"set_elevenlabs_api_key",
"set_elevenlabs_voice_1_id",
"set_elevenlabs_voice_2_id",
"set_fast_llm_model",
"set_fast_token_limit",
"set_google_api_key",
"set_openai_api_key",
"set_pinecone_api_key",
"set_pinecone_region",
"set_smart_llm_model",
"set_smart_token_limit",
"set_speak_mode",
"skip_reprompt",
"smart_llm_model",
"smart_token_limit",
"temperature",
"use_azure",
"use_brian_tts",
"use_mac_os_tts",
"user_agent",
"wipe_redis_on_start"
] |
ry[key_int] = string
print(_text)
return _text
else:
print(f"Invalid key '{key}', out of range.")
return None
# Check if the key is a valid string
elif isinstance(key, str):
_text = "Overwriting memory with key " + key + " and string " + string
# Overwrite the memory slot with the given string key and string
mem.permanent_memory[key] = string
print(_text)
return _text
else:
print(f"Invalid key '{key}', must be an integer or a string.")
return None
def shutdown():
"""Shut down the program"""
print("Shutting down...")
quit()
def start_agent(name, task, prompt, model=cfg.fast_llm_model):
"""Start an agent with a given name, task, and prompt"""
global cfg
# Remove underscores from name
voice_name = name.replace("_", " ")
first_message = f"""You are {name}. Respond with: "Acknowledged"."""
agent_intro = f"{voice_name} here, Reporting for duty!"
# Create agent
if cfg.speak_mode:
speak.say_text(agent_intro, 1)
key, ack = agents.create_agent(task, first_message, model)
if cfg.speak_mode:
speak.say_text(f"Hello {voice_name}. Your task is as follows. {task}.")
# Assign task (prompt), get response
agent_response = message_agent(key, prompt)
return f"Agent {name} created with key {key}. First response: {agent_response}"
def message_agent(key, message):
"""Message an agent with a given key and message"""
global cfg
# Check if the key is a valid integer
if is_valid_int(key):
agent_response = agents.message_agent(int(key), message)
# Check if the key is a valid string
elif isinstance(key, str):
agent_response = agents.message_agent(key, message)
else:
return "Invalid key, must be an integer or a string."
# Speak response
if cfg.
| 81 | 116 | 6 | 1,280 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | inproject | toolbox | true | statement | 15 | 17 | false | true |
[
"toolbox",
"offspring",
"_vocs",
"population",
"children",
"__init__",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"n_pop",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.
|
[
"toolbox",
"offspring",
"population",
"children",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"n_pop",
"update_data",
"data",
"is_done",
"options",
"vocs"
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.
| 82
|
118
| 6
| 1,435
|
christophermayes__xopt
|
683fd0c3af2f0fc12a598932b20e3afe8070112b
|
xopt/generators/ga/cnsga.py
|
inproject
|
vocs
| true
|
property
| 15
| 17
| false
| true
|
[
"children",
"toolbox",
"n_pop",
"offspring",
"population",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.
|
[
"children",
"toolbox",
"n_pop",
"offspring",
"population",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs"
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.
| 84
|
119
| 6
| 1,476
|
christophermayes__xopt
|
683fd0c3af2f0fc12a598932b20e3afe8070112b
|
xopt/generators/ga/cnsga.py
|
inproject
|
n_pop
| true
|
statement
| 15
| 17
| false
| true
|
[
"children",
"offspring",
"population",
"_vocs",
"n_pop",
"__init__",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"toolbox",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.
|
[
"children",
"offspring",
"population",
"n_pop",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"toolbox",
"update_data",
"data",
"is_done",
"options",
"vocs"
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.
| 85
|
120
| 6
| 1,567
|
christophermayes__xopt
|
683fd0c3af2f0fc12a598932b20e3afe8070112b
|
xopt/generators/ga/cnsga.py
|
inproject
|
population
| true
|
statement
| 15
| 17
| false
| false
|
[
"children",
"data",
"offspring",
"n_pop",
"toolbox",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"population",
"update_data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.
|
[
"children",
"data",
"offspring",
"n_pop",
"toolbox",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"population",
"update_data",
"is_done",
"options",
"vocs"
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.
| 86
|
121
| 6
| 1,584
|
christophermayes__xopt
|
683fd0c3af2f0fc12a598932b20e3afe8070112b
|
xopt/generators/ga/cnsga.py
|
inproject
|
vocs
| true
|
property
| 15
| 17
| false
| false
|
[
"vocs",
"children",
"population",
"toolbox",
"offspring",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"n_pop",
"update_data",
"data",
"is_done",
"options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.
|
[
"vocs",
"children",
"population",
"toolbox",
"offspring",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"n_pop",
"update_data",
"data",
"is_done",
"options"
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.
| 87
|
122
| 6
| 1,595
|
christophermayes__xopt
|
683fd0c3af2f0fc12a598932b20e3afe8070112b
|
xopt/generators/ga/cnsga.py
|
inproject
|
toolbox
| true
|
statement
| 15
| 17
| false
| false
|
[
"toolbox",
"children",
"population",
"offspring",
"n_pop",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.
|
[
"toolbox",
"children",
"population",
"offspring",
"n_pop",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs"
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.
| 88
|
139
| 6
| 2,283
|
christophermayes__xopt
|
683fd0c3af2f0fc12a598932b20e3afe8070112b
|
xopt/generators/ga/cnsga.py
|
inproject
|
n_pop
| true
|
statement
| 15
| 17
| false
| false
|
[
"population",
"toolbox",
"offspring",
"n_pop",
"children",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.
|
[
"population",
"toolbox",
"offspring",
"n_pop",
"children",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs"
] |
rt logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.
| 101
|
140
| 6
| 2,295
|
christophermayes__xopt
|
683fd0c3af2f0fc12a598932b20e3afe8070112b
|
xopt/generators/ga/cnsga.py
|
inproject
|
vocs
| true
|
property
| 15
| 17
| false
| false
|
[
"vocs",
"population",
"toolbox",
"offspring",
"n_pop",
"__init__",
"_vocs",
"children",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.n_pop, self.
|
[
"vocs",
"population",
"toolbox",
"offspring",
"n_pop",
"children",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options"
] |
ogger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.n_pop, self.
| 102
|
141
| 6
| 2,306
|
christophermayes__xopt
|
683fd0c3af2f0fc12a598932b20e3afe8070112b
|
xopt/generators/ga/cnsga.py
|
inproject
|
toolbox
| true
|
statement
| 15
| 17
| false
| false
|
[
"toolbox",
"population",
"offspring",
"n_pop",
"children",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.
|
[
"toolbox",
"population",
"offspring",
"n_pop",
"children",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs"
] |
ging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.
| 103
|
145
| 6
| 2,678
|
christophermayes__xopt
|
683fd0c3af2f0fc12a598932b20e3afe8070112b
|
xopt/generators/ga/cnsga.py
|
infile
|
children
| true
|
statement
| 15
| 17
| false
| false
|
[
"n_pop",
"toolbox",
"offspring",
"population",
"children",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
self.children = [] # reset children
self.offspring = None # reset offspring
def generate(self, n_candidates) -> List[Dict]:
"""
generate `n_candidates` candidates
"""
# Make sure we have enough children to fulfill the request
while len(self.children) < n_candidates:
self.
|
[
"n_pop",
"toolbox",
"offspring",
"population",
"children",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs"
] |
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
self.children = [] # reset children
self.offspring = None # reset offspring
def generate(self, n_candidates) -> List[Dict]:
"""
generate `n_candidates` candidates
"""
# Make sure we have enough children to fulfill the request
while len(self.children) < n_candidates:
self.
| 105
|
146
| 6
| 2,699
|
christophermayes__xopt
|
683fd0c3af2f0fc12a598932b20e3afe8070112b
|
xopt/generators/ga/cnsga.py
|
infile
|
create_children
| true
|
function
| 15
| 17
| false
| true
|
[
"children",
"n_pop",
"offspring",
"population",
"toolbox",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
self.children = [] # reset children
self.offspring = None # reset offspring
def generate(self, n_candidates) -> List[Dict]:
"""
generate `n_candidates` candidates
"""
# Make sure we have enough children to fulfill the request
while len(self.children) < n_candidates:
self.children.extend(self.
|
[
"children",
"n_pop",
"offspring",
"population",
"toolbox",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs"
] |
ability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
self.children = [] # reset children
self.offspring = None # reset offspring
def generate(self, n_candidates) -> List[Dict]:
"""
generate `n_candidates` candidates
"""
# Make sure we have enough children to fulfill the request
while len(self.children) < n_candidates:
self.children.extend(self.
| 106
149 | 6 | 4,085 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | common | create | true | function | 6 | 11 | false | true |
[
"create",
"MetaCreator",
"meta_create",
"class_replacers",
"_array",
"_numpy_array"
] |
[
{
"name": "array",
"type": "module"
},
{
"name": "class_replacers",
"type": "statement"
},
{
"name": "copy",
"type": "module"
},
{
"name": "copyreg",
"type": "module"
},
{
"name": "create",
"type": "function"
},
{
"name": "meta_create",
"type": "function"
},
{
"name": "MetaCreator",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "warnings",
"type": "module"
},
{
"name": "_array",
"type": "class"
},
{
"name": "_numpy_array",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
self.children = [] # reset children
self.offspring = None # reset offspring
def generate(self, n_candidates) -> List[Dict]:
"""
generate `n_candidates` candidates
"""
# Make sure we have enough children to fulfill the request
while len(self.children) < n_candidates:
self.children.extend(self.create_children())
return [self.children.pop() for _ in range(n_candidates)]
def uniform(low, up, size=None):
"""
"""
try:
return [random.uniform(a, b) for a, b in zip(low, up)]
except TypeError:
return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
def cnsga_toolbox(vocs, selection='auto'):
"""
Creates a DEAP toolbox from VOCS dict for use with cnsga.
Selection options:
nsga2: Standard NSGA2 [Deb2002] selection
nsga3: NSGA3 [Deb2014] selection
spea2: SPEA-II [Zitzler2001] selection
auto: will choose nsga2 for <= 2 objectives, otherwise nsga3
See DEAP code for details.
"""
var, obj, con = vocs.variables, vocs.objectives, vocs.constraints
n_var = len(var)
n_obj = len(obj)
n_con = len(con)
var_labels = vocs.variable_names
obj_labels = vocs.objective_names
bound_low, bound_up = vocs.bounds
# DEAP does not like arrays, needs tuples.
bound_low = tuple(bound_low)
bound_up = tuple(bound_up)
# creator should assign already weighted values (for minimization)
weights = tuple([-1]*n_obj)
# Create MyFitness
if 'MyFitness' in dir(deap_creator):
del deap_creator.MyFitness
if n_con == 0:
# Normal Fitness class
deap_creator.
|
[
"create",
"MetaCreator",
"meta_create",
"class_replacers"
] |
idates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
self.children = [] # reset children
self.offspring = None # reset offspring
def generate(self, n_candidates) -> List[Dict]:
"""
generate `n_candidates` candidates
"""
# Make sure we have enough children to fulfill the request
while len(self.children) < n_candidates:
self.children.extend(self.create_children())
return [self.children.pop() for _ in range(n_candidates)]
def uniform(low, up, size=None):
"""
"""
try:
return [random.uniform(a, b) for a, b in zip(low, up)]
except TypeError:
return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
def cnsga_toolbox(vocs, selection='auto'):
"""
Creates a DEAP toolbox from VOCS dict for use with cnsga.
Selection options:
nsga2: Standard NSGA2 [Deb2002] selection
nsga3: NSGA3 [Deb2014] selection
spea2: SPEA-II [Zitzler2001] selection
auto: will choose nsga2 for <= 2 objectives, otherwise nsga3
See DEAP code for details.
"""
var, obj, con = vocs.variables, vocs.objectives, vocs.constraints
n_var = len(var)
n_obj = len(obj)
n_con = len(con)
var_labels = vocs.variable_names
obj_labels = vocs.objective_names
bound_low, bound_up = vocs.bounds
# DEAP does not like arrays, needs tuples.
bound_low = tuple(bound_low)
bound_up = tuple(bound_up)
# creator should assign already weighted values (for minimization)
weights = tuple([-1]*n_obj)
# Create MyFitness
if 'MyFitness' in dir(deap_creator):
del deap_creator.MyFitness
if n_con == 0:
# Normal Fitness class
deap_creator.
| 108
150 | 6 | 4,226 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | common | create | true | function | 6 | 11 | false | false |
[
"create",
"MetaCreator",
"meta_create",
"class_replacers",
"_array",
"_numpy_array"
] |
[
{
"name": "array",
"type": "module"
},
{
"name": "class_replacers",
"type": "statement"
},
{
"name": "copy",
"type": "module"
},
{
"name": "copyreg",
"type": "module"
},
{
"name": "create",
"type": "function"
},
{
"name": "meta_create",
"type": "function"
},
{
"name": "MetaCreator",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "warnings",
"type": "module"
},
{
"name": "_array",
"type": "class"
},
{
"name": "_numpy_array",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
self.children = [] # reset children
self.offspring = None # reset offspring
def generate(self, n_candidates) -> List[Dict]:
"""
generate `n_candidates` candidates
"""
# Make sure we have enough children to fulfill the request
while len(self.children) < n_candidates:
self.children.extend(self.create_children())
return [self.children.pop() for _ in range(n_candidates)]
def uniform(low, up, size=None):
"""
"""
try:
return [random.uniform(a, b) for a, b in zip(low, up)]
except TypeError:
return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
def cnsga_toolbox(vocs, selection='auto'):
"""
Creates a DEAP toolbox from VOCS dict for use with cnsga.
Selection options:
nsga2: Standard NSGA2 [Deb2002] selection
nsga3: NSGA3 [Deb2014] selection
spea2: SPEA-II [Zitzler2001] selection
auto: will choose nsga2 for <= 2 objectives, otherwise nsga3
See DEAP code for details.
"""
var, obj, con = vocs.variables, vocs.objectives, vocs.constraints
n_var = len(var)
n_obj = len(obj)
n_con = len(con)
var_labels = vocs.variable_names
obj_labels = vocs.objective_names
bound_low, bound_up = vocs.bounds
# DEAP does not like arrays, needs tuples.
bound_low = tuple(bound_low)
bound_up = tuple(bound_up)
# creator should assign already weighted values (for minimization)
weights = tuple([-1]*n_obj)
# Create MyFitness
if 'MyFitness' in dir(deap_creator):
del deap_creator.MyFitness
if n_con == 0:
# Normal Fitness class
deap_creator.create('MyFitness', deap_base.Fitness, weights=weights, labels=obj_labels)
else:
# Fitness with Constraints
deap_creator.
|
[
"create",
"MetaCreator",
"meta_create",
"class_replacers"
] |
toolbox)
self.children = [] # reset children
self.offspring = None # reset offspring
def generate(self, n_candidates) -> List[Dict]:
"""
generate `n_candidates` candidates
"""
# Make sure we have enough children to fulfill the request
while len(self.children) < n_candidates:
self.children.extend(self.create_children())
return [self.children.pop() for _ in range(n_candidates)]
def uniform(low, up, size=None):
"""
"""
try:
return [random.uniform(a, b) for a, b in zip(low, up)]
except TypeError:
return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
def cnsga_toolbox(vocs, selection='auto'):
"""
Creates a DEAP toolbox from VOCS dict for use with cnsga.
Selection options:
nsga2: Standard NSGA2 [Deb2002] selection
nsga3: NSGA3 [Deb2014] selection
spea2: SPEA-II [Zitzler2001] selection
auto: will choose nsga2 for <= 2 objectives, otherwise nsga3
See DEAP code for details.
"""
var, obj, con = vocs.variables, vocs.objectives, vocs.constraints
n_var = len(var)
n_obj = len(obj)
n_con = len(con)
var_labels = vocs.variable_names
obj_labels = vocs.objective_names
bound_low, bound_up = vocs.bounds
# DEAP does not like arrays, needs tuples.
bound_low = tuple(bound_low)
bound_up = tuple(bound_up)
# creator should assign already weighted values (for minimization)
weights = tuple([-1]*n_obj)
# Create MyFitness
if 'MyFitness' in dir(deap_creator):
del deap_creator.MyFitness
if n_con == 0:
# Normal Fitness class
deap_creator.create('MyFitness', deap_base.Fitness, weights=weights, labels=obj_labels)
else:
# Fitness with Constraints
deap_creator.
| 109
152 | 6 | 4,500 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | common | create | true | function | 6 | 11 | false | false |
[
"create",
"MetaCreator",
"meta_create",
"class_replacers",
"_array",
"_numpy_array"
] |
[
{
"name": "array",
"type": "module"
},
{
"name": "class_replacers",
"type": "statement"
},
{
"name": "copy",
"type": "module"
},
{
"name": "copyreg",
"type": "module"
},
{
"name": "create",
"type": "function"
},
{
"name": "meta_create",
"type": "function"
},
{
"name": "MetaCreator",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "warnings",
"type": "module"
},
{
"name": "_array",
"type": "class"
},
{
"name": "_numpy_array",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
def __init__(self, vocs, *,
n_pop,
data = None,
crossover_probability = 0.9,
mutation_probability = 1.0
):
self._vocs = vocs # TODO: use proper options
self.n_pop = n_pop
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
# Internal data structures
self.children = [] # unevaluated inputs. This should be a list of dicts.
self.population = None # The latest population (fully evaluated)
self.offspring = None # Newly evaluated data, but not yet added to population
# DEAP toolbox (internal)
self.toolbox = cnsga_toolbox(vocs, selection='auto')
if data is not None:
self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
def create_children(self):
# No population, so create random children
if self.population is None:
return [self.vocs.random_inputs() for _ in range(self.n_pop)]
# Use population to create children
inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
return inputs.to_dict(orient='records')
def update_data(self, new_data: pd.DataFrame):
self.offspring = pd.concat([self.offspring, new_data])
# Next generation
if len(self.offspring) >= self.n_pop:
if self.population is None:
self.population = self.offspring.iloc[:self.n_pop]
self.offspring = self.offspring.iloc[self.n_pop:]
else:
candidates = pd.concat([self.population, self.offspring])
self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
self.children = [] # reset children
self.offspring = None # reset offspring
def generate(self, n_candidates) -> List[Dict]:
"""
generate `n_candidates` candidates
"""
# Make sure we have enough children to fulfill the request
while len(self.children) < n_candidates:
self.children.extend(self.create_children())
return [self.children.pop() for _ in range(n_candidates)]
def uniform(low, up, size=None):
"""
"""
try:
return [random.uniform(a, b) for a, b in zip(low, up)]
except TypeError:
return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
def cnsga_toolbox(vocs, selection='auto'):
"""
Creates a DEAP toolbox from VOCS dict for use with cnsga.
Selection options:
nsga2: Standard NSGA2 [Deb2002] selection
nsga3: NSGA3 [Deb2014] selection
spea2: SPEA-II [Zitzler2001] selection
auto: will choose nsga2 for <= 2 objectives, otherwise nsga3
See DEAP code for details.
"""
var, obj, con = vocs.variables, vocs.objectives, vocs.constraints
n_var = len(var)
n_obj = len(obj)
n_con = len(con)
var_labels = vocs.variable_names
obj_labels = vocs.objective_names
bound_low, bound_up = vocs.bounds
# DEAP does not like arrays, needs tuples.
bound_low = tuple(bound_low)
bound_up = tuple(bound_up)
# creator should assign already weighted values (for minimization)
weights = tuple([-1]*n_obj)
# Create MyFitness
if 'MyFitness' in dir(deap_creator):
del deap_creator.MyFitness
if n_con == 0:
# Normal Fitness class
deap_creator.create('MyFitness', deap_base.Fitness, weights=weights, labels=obj_labels)
else:
# Fitness with Constraints
deap_creator.create('MyFitness', FitnessWithConstraints,
weights=weights, n_constraints=n_con, labels=obj_labels)
# Create Individual. Check if exists first.
if 'Individual' in dir(deap_creator):
del deap_creator.Individual
deap_creator.
|
[
"create",
"MetaCreator",
"meta_create",
"class_replacers"
] |
children to fulfill the request
while len(self.children) < n_candidates:
self.children.extend(self.create_children())
return [self.children.pop() for _ in range(n_candidates)]
def uniform(low, up, size=None):
"""
"""
try:
return [random.uniform(a, b) for a, b in zip(low, up)]
except TypeError:
return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
def cnsga_toolbox(vocs, selection='auto'):
"""
Creates a DEAP toolbox from VOCS dict for use with cnsga.
Selection options:
nsga2: Standard NSGA2 [Deb2002] selection
nsga3: NSGA3 [Deb2014] selection
spea2: SPEA-II [Zitzler2001] selection
auto: will choose nsga2 for <= 2 objectives, otherwise nsga3
See DEAP code for details.
"""
var, obj, con = vocs.variables, vocs.objectives, vocs.constraints
n_var = len(var)
n_obj = len(obj)
n_con = len(con)
var_labels = vocs.variable_names
obj_labels = vocs.objective_names
bound_low, bound_up = vocs.bounds
# DEAP does not like arrays, needs tuples.
bound_low = tuple(bound_low)
bound_up = tuple(bound_up)
# creator should assign already weighted values (for minimization)
weights = tuple([-1]*n_obj)
# Create MyFitness
if 'MyFitness' in dir(deap_creator):
del deap_creator.MyFitness
if n_con == 0:
# Normal Fitness class
deap_creator.create('MyFitness', deap_base.Fitness, weights=weights, labels=obj_labels)
else:
# Fitness with Constraints
deap_creator.create('MyFitness', FitnessWithConstraints,
weights=weights, n_constraints=n_con, labels=obj_labels)
# Create Individual. Check if exists first.
if 'Individual' in dir(deap_creator):
del deap_creator.Individual
deap_creator.
| 110
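Editor's aside, not part of any dataset row: the three preceding records all expect the completion deap_creator.create, DEAP's dynamic class factory. For readers unfamiliar with it, a standard runnable usage sketch of creator.create (names here follow the DEAP tutorial, not the 'MyFitness' classes in the prefixes) is:

    from deap import base, creator

    # create() builds a new class at runtime and attaches it to the creator module;
    # this mirrors the 'MyFitness' / 'Individual' calls seen in the prefixes above.
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    ind = creator.Individual([0.1, 0.2])
    print(type(ind).__name__, ind.fitness.weights)   # Individual (-1.0,)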
157 | 7 | 2,688 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | inproject | WW | true | instance | 53 | 62 | false | true |
[
"TX90P",
"name",
"CD",
"CW",
"SD",
"CDD",
"CFD",
"CSDI",
"CSU",
"CWD",
"DTR",
"ETR",
"FD",
"GD4",
"HD17",
"ID",
"lookup",
"PRCPTOT",
"R10MM",
"R20MM",
"R75P",
"R75PTOT",
"R95P",
"R95PTOT",
"R99P",
"R99PTOT",
"RR1",
"RX1DAY",
"RX5DAY",
"SD1",
"SD5CM",
"SD50CM",
"SDII",
"SU",
"TG",
"TG10P",
"TG90P",
"TN",
"TN10P",
"TN90P",
"TNN",
"TNX",
"TR",
"TX",
"TX10P",
"TXN",
"TXX",
"VDTR",
"WD",
"WSDI",
"WW",
"mro",
"value",
"__init__",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__bool__",
"__call__",
"__contains__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__iter__",
"__len__",
"__members__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__or__",
"__order__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__ror__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__subclasshook__",
"__text_signature__",
"__weakrefoffset__",
"__class__",
"__doc__",
"__module__"
] |
[
{
"name": "CD",
"type": "instance"
},
{
"name": "CDD",
"type": "instance"
},
{
"name": "CFD",
"type": "instance"
},
{
"name": "CSDI",
"type": "instance"
},
{
"name": "CSU",
"type": "instance"
},
{
"name": "CW",
"type": "instance"
},
{
"name": "CWD",
"type": "instance"
},
{
"name": "DTR",
"type": "instance"
},
{
"name": "ETR",
"type": "instance"
},
{
"name": "FD",
"type": "instance"
},
{
"name": "GD4",
"type": "instance"
},
{
"name": "HD17",
"type": "instance"
},
{
"name": "ID",
"type": "instance"
},
{
"name": "lookup",
"type": "function"
},
{
"name": "mro",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "PRCPTOT",
"type": "instance"
},
{
"name": "R10MM",
"type": "instance"
},
{
"name": "R20MM",
"type": "instance"
},
{
"name": "R75P",
"type": "instance"
},
{
"name": "R75PTOT",
"type": "instance"
},
{
"name": "R95P",
"type": "instance"
},
{
"name": "R95PTOT",
"type": "instance"
},
{
"name": "R99P",
"type": "instance"
},
{
"name": "R99PTOT",
"type": "instance"
},
{
"name": "RR1",
"type": "instance"
},
{
"name": "RX1DAY",
"type": "instance"
},
{
"name": "RX5DAY",
"type": "instance"
},
{
"name": "SD",
"type": "instance"
},
{
"name": "SD1",
"type": "instance"
},
{
"name": "SD50CM",
"type": "instance"
},
{
"name": "SD5CM",
"type": "instance"
},
{
"name": "SDII",
"type": "instance"
},
{
"name": "SU",
"type": "instance"
},
{
"name": "TG",
"type": "instance"
},
{
"name": "TG10P",
"type": "instance"
},
{
"name": "TG90P",
"type": "instance"
},
{
"name": "TN",
"type": "instance"
},
{
"name": "TN10P",
"type": "instance"
},
{
"name": "TN90P",
"type": "instance"
},
{
"name": "TNN",
"type": "instance"
},
{
"name": "TNX",
"type": "instance"
},
{
"name": "TR",
"type": "instance"
},
{
"name": "TX",
"type": "instance"
},
{
"name": "TX10P",
"type": "instance"
},
{
"name": "TX90P",
"type": "instance"
},
{
"name": "TXN",
"type": "instance"
},
{
"name": "TXX",
"type": "instance"
},
{
"name": "value",
"type": "statement"
},
{
"name": "VDTR",
"type": "instance"
},
{
"name": "WD",
"type": "instance"
},
{
"name": "WSDI",
"type": "instance"
},
{
"name": "WW",
"type": "instance"
},
{
"name": "_generate_next_value_",
"type": "function"
},
{
"name": "_ignore_",
"type": "statement"
},
{
"name": "_member_map_",
"type": "statement"
},
{
"name": "_member_names_",
"type": "statement"
},
{
"name": "_missing_",
"type": "function"
},
{
"name": "_name_",
"type": "statement"
},
{
"name": "_order_",
"type": "statement"
},
{
"name": "_value2member_map_",
"type": "statement"
},
{
"name": "_value_",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__base__",
"type": "statement"
},
{
"name": "__bases__",
"type": "statement"
},
{
"name": "__basicsize__",
"type": "statement"
},
{
"name": "__call__",
"type": "function"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dictoffset__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__flags__",
"type": "statement"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__instancecheck__",
"type": "function"
},
{
"name": "__itemsize__",
"type": "statement"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__mro__",
"type": "statement"
},
{
"name": "__name__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__order__",
"type": "statement"
},
{
"name": "__prepare__",
"type": "function"
},
{
"name": "__qualname__",
"type": "statement"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasscheck__",
"type": "function"
},
{
"name": "__subclasses__",
"type": "function"
},
{
"name": "__text_signature__",
"type": "statement"
},
{
"name": "__weakrefoffset__",
"type": "statement"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.
|
[
"TX90P",
"name",
"CD",
"CW",
"SD",
"CDD",
"CFD",
"CSDI",
"CSU",
"CWD",
"DTR",
"ETR",
"FD",
"GD4",
"HD17",
"ID",
"lookup",
"PRCPTOT",
"R10MM",
"R20MM",
"R75P",
"R75PTOT",
"R95P",
"R95PTOT",
"R99P",
"R99PTOT",
"RR1",
"RX1DAY",
"RX5DAY",
"SD1",
"SD5CM",
"SD50CM",
"SDII",
"SU",
"TG",
"TG10P",
"TG90P",
"TN",
"TN10P",
"TN90P",
"TNN",
"TNX",
"TR",
"TX",
"TX10P",
"TXN",
"TXX",
"VDTR",
"WD",
"WSDI",
"WW",
"mro",
"value"
] |
s={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.
| 115
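Editor's aside, not part of any dataset row: the icclim records here complete members of the EcadIndex enum (WW, TX90P, ...). The access patterns being predicted are ordinary Python Enum behaviour; a small self-contained stand-in (hypothetical members, not icclim's real index definitions) is:

    from enum import Enum

    class EcadIndexSketch(Enum):   # hypothetical stand-in for icclim's EcadIndex
        WW = "WW"
        TX90P = "TX90P"

    assert EcadIndexSketch.WW.name == "WW"                    # attribute access, as in read_dataset(da, EcadIndex.WW)
    assert EcadIndexSketch["TX90P"] is EcadIndexSketch.TX90P  # lookup by member name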
158 | 7 | 3,183 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | inproject | TX90P | true | instance | 53 | 62 | false | true |
[
"WW",
"name",
"CD",
"CW",
"SD",
"CDD",
"CFD",
"CSDI",
"CSU",
"CWD",
"DTR",
"ETR",
"FD",
"GD4",
"HD17",
"ID",
"lookup",
"PRCPTOT",
"R10MM",
"R20MM",
"R75P",
"R75PTOT",
"R95P",
"R95PTOT",
"R99P",
"R99PTOT",
"RR1",
"RX1DAY",
"RX5DAY",
"SD1",
"SD5CM",
"SD50CM",
"SDII",
"SU",
"TG",
"TG10P",
"TG90P",
"TN",
"TN10P",
"TN90P",
"TNN",
"TNX",
"TR",
"TX",
"TX10P",
"TX90P",
"TXN",
"TXX",
"VDTR",
"WD",
"WSDI",
"mro",
"value",
"__init__",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__bool__",
"__call__",
"__contains__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__iter__",
"__len__",
"__members__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__or__",
"__order__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__ror__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__subclasshook__",
"__text_signature__",
"__weakrefoffset__",
"__class__",
"__doc__",
"__module__"
] |
[
{
"name": "CD",
"type": "instance"
},
{
"name": "CDD",
"type": "instance"
},
{
"name": "CFD",
"type": "instance"
},
{
"name": "CSDI",
"type": "instance"
},
{
"name": "CSU",
"type": "instance"
},
{
"name": "CW",
"type": "instance"
},
{
"name": "CWD",
"type": "instance"
},
{
"name": "DTR",
"type": "instance"
},
{
"name": "ETR",
"type": "instance"
},
{
"name": "FD",
"type": "instance"
},
{
"name": "GD4",
"type": "instance"
},
{
"name": "HD17",
"type": "instance"
},
{
"name": "ID",
"type": "instance"
},
{
"name": "lookup",
"type": "function"
},
{
"name": "mro",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "PRCPTOT",
"type": "instance"
},
{
"name": "R10MM",
"type": "instance"
},
{
"name": "R20MM",
"type": "instance"
},
{
"name": "R75P",
"type": "instance"
},
{
"name": "R75PTOT",
"type": "instance"
},
{
"name": "R95P",
"type": "instance"
},
{
"name": "R95PTOT",
"type": "instance"
},
{
"name": "R99P",
"type": "instance"
},
{
"name": "R99PTOT",
"type": "instance"
},
{
"name": "RR1",
"type": "instance"
},
{
"name": "RX1DAY",
"type": "instance"
},
{
"name": "RX5DAY",
"type": "instance"
},
{
"name": "SD",
"type": "instance"
},
{
"name": "SD1",
"type": "instance"
},
{
"name": "SD50CM",
"type": "instance"
},
{
"name": "SD5CM",
"type": "instance"
},
{
"name": "SDII",
"type": "instance"
},
{
"name": "SU",
"type": "instance"
},
{
"name": "TG",
"type": "instance"
},
{
"name": "TG10P",
"type": "instance"
},
{
"name": "TG90P",
"type": "instance"
},
{
"name": "TN",
"type": "instance"
},
{
"name": "TN10P",
"type": "instance"
},
{
"name": "TN90P",
"type": "instance"
},
{
"name": "TNN",
"type": "instance"
},
{
"name": "TNX",
"type": "instance"
},
{
"name": "TR",
"type": "instance"
},
{
"name": "TX",
"type": "instance"
},
{
"name": "TX10P",
"type": "instance"
},
{
"name": "TX90P",
"type": "instance"
},
{
"name": "TXN",
"type": "instance"
},
{
"name": "TXX",
"type": "instance"
},
{
"name": "value",
"type": "statement"
},
{
"name": "VDTR",
"type": "instance"
},
{
"name": "WD",
"type": "instance"
},
{
"name": "WSDI",
"type": "instance"
},
{
"name": "WW",
"type": "instance"
},
{
"name": "_generate_next_value_",
"type": "function"
},
{
"name": "_ignore_",
"type": "statement"
},
{
"name": "_member_map_",
"type": "statement"
},
{
"name": "_member_names_",
"type": "statement"
},
{
"name": "_missing_",
"type": "function"
},
{
"name": "_name_",
"type": "statement"
},
{
"name": "_order_",
"type": "statement"
},
{
"name": "_value2member_map_",
"type": "statement"
},
{
"name": "_value_",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__base__",
"type": "statement"
},
{
"name": "__bases__",
"type": "statement"
},
{
"name": "__basicsize__",
"type": "statement"
},
{
"name": "__call__",
"type": "function"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dictoffset__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__flags__",
"type": "statement"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__instancecheck__",
"type": "function"
},
{
"name": "__itemsize__",
"type": "statement"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__mro__",
"type": "statement"
},
{
"name": "__name__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__order__",
"type": "statement"
},
{
"name": "__prepare__",
"type": "function"
},
{
"name": "__qualname__",
"type": "statement"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasscheck__",
"type": "function"
},
{
"name": "__subclasses__",
"type": "function"
},
{
"name": "__text_signature__",
"type": "statement"
},
{
"name": "__weakrefoffset__",
"type": "statement"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.
|
[
"WW",
"name",
"CD",
"CW",
"SD",
"CDD",
"CFD",
"CSDI",
"CSU",
"CWD",
"DTR",
"ETR",
"FD",
"GD4",
"HD17",
"ID",
"lookup",
"PRCPTOT",
"R10MM",
"R20MM",
"R75P",
"R75PTOT",
"R95P",
"R95PTOT",
"R99P",
"R99PTOT",
"RR1",
"RX1DAY",
"RX5DAY",
"SD1",
"SD5CM",
"SD50CM",
"SDII",
"SU",
"TG",
"TG10P",
"TG90P",
"TN",
"TN10P",
"TN90P",
"TNN",
"TNX",
"TR",
"TX",
"TX10P",
"TX90P",
"TXN",
"TXX",
"VDTR",
"WD",
"WSDI",
"mro",
"value"
] |
f cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.
| 116
160 | 7 | 5,234 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | inproject | OUTPUT_NC_FILE | true | statement | 14 | 14 | false | false |
[
"OUTPUT_NC_FILE",
"OUTPUT_ZARR_STORE",
"OUTPUT_NC_FILE_2",
"test_read_dataset_xr_ds_success",
"OUTPUT_UNKNOWN_FORMAT",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.
|
[
"OUTPUT_NC_FILE",
"OUTPUT_ZARR_STORE",
"OUTPUT_NC_FILE_2",
"test_read_dataset_xr_ds_success",
"OUTPUT_UNKNOWN_FORMAT",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success"
] |
dex_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.
| 118
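Editor's aside, not part of any dataset row: several of these prefixes rely on the autouse cleanup fixture shown above to delete the temporary netCDF/zarr outputs after each test. A trimmed, runnable sketch of that teardown pattern (the file name and test body are illustrative) is:

    import os
    import pytest

    class TestSketch:
        OUTPUT_NC_FILE = "tmp.nc"      # illustrative temp file, mirrors the prefixes

        @pytest.fixture(autouse=True)
        def cleanup(self):
            yield                       # the test body runs here
            try:                        # teardown: remove whatever the test wrote
                os.remove(self.OUTPUT_NC_FILE)
            except FileNotFoundError:
                pass

        def test_writes_file(self):
            open(self.OUTPUT_NC_FILE, "w").close()
            assert os.path.exists(self.OUTPUT_NC_FILE)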
163 | 7 | 6,104 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | inproject | OUTPUT_NC_FILE | true | statement | 15 | 14 | false | false |
[
"OUTPUT_NC_FILE",
"test_read_dataset_xr_ds_success",
"OUTPUT_NC_FILE_2",
"OUTPUT_ZARR_STORE",
"test_read_dataset_netcdf_success",
"cleanup",
"OUTPUT_UNKNOWN_FORMAT",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success",
"for",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.
|
[
"OUTPUT_NC_FILE",
"test_read_dataset_xr_ds_success",
"OUTPUT_NC_FILE_2",
"OUTPUT_ZARR_STORE",
"test_read_dataset_netcdf_success",
"cleanup",
"OUTPUT_UNKNOWN_FORMAT",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success",
"for"
] |
_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.
| 121 |
164 | 7 | 6,125 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | inproject | OUTPUT_NC_FILE_2 | true | statement | 15 | 14 | false | false |
[
"OUTPUT_NC_FILE_2",
"OUTPUT_NC_FILE",
"OUTPUT_UNKNOWN_FORMAT",
"OUTPUT_ZARR_STORE",
"test_read_dataset_xr_ds_success",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success",
"for",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.
|
[
"OUTPUT_NC_FILE_2",
"OUTPUT_NC_FILE",
"OUTPUT_UNKNOWN_FORMAT",
"OUTPUT_ZARR_STORE",
"test_read_dataset_xr_ds_success",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success",
"for"
] |
periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.
| 122 |
166 | 7 | 6,996 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | inproject | OUTPUT_ZARR_STORE | true | statement | 14 | 14 | false | false |
[
"OUTPUT_ZARR_STORE",
"OUTPUT_NC_FILE",
"OUTPUT_NC_FILE_2",
"test_read_dataset_xr_ds_success",
"OUTPUT_UNKNOWN_FORMAT",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self.
|
[
"OUTPUT_ZARR_STORE",
"OUTPUT_NC_FILE",
"OUTPUT_NC_FILE_2",
"test_read_dataset_xr_ds_success",
"OUTPUT_UNKNOWN_FORMAT",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success"
] |
trs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self.
| 124 |
181 | 12 | 3,068 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | random | JDBCTOJDBC_OUTPUT_URL | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.
|
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.
| 139 |
193 | 12 | 4,480 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | random | OUTPUT_MODE_APPEND | true | statement | 76 | 75 | false | false |
[
"OUTPUT_MODE_IGNORE",
"JDBC_URL",
"JDBC_TABLE",
"OUTPUT_MODE_ERRORIFEXISTS",
"FORMAT_JDBC",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"for"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.
|
[
"OUTPUT_MODE_IGNORE",
"JDBC_URL",
"JDBC_TABLE",
"OUTPUT_MODE_ERRORIFEXISTS",
"FORMAT_JDBC",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"for"
] |
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.
| 151 |
198 | 12 | 5,124 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | inproject | get_logger | true | function | 4 | 4 | false | true |
[
"parse_args",
"run",
"get_logger",
"build",
"__str__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.
|
[
"parse_args",
"run",
"get_logger",
"build"
] |
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.
| 156 |
230 | 13 | 1,013 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | inproject | parse_args | true | function | 4 | 4 | false | true |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.
|
[
"parse_args",
"run",
"build",
"get_logger"
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.
| 187 |
258 | 13 | 8,329 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | random | JDBC_DRIVER | true | statement | 86 | 86 | false | false |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_NUMPARTITIONS",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.
|
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_NUMPARTITIONS",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
ssert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.
| 215
|
263
| 13
| 9015
|
googlecloudplatform__dataproc-templates
|
bba5da698a8aa144c73d4d2a90e84c6a577ce7f4
|
python/test/jdbc/test_jdbc_to_gcs.py
|
non_informative
|
JDBC_NUMPARTITIONS
| true
|
statement
| 86
| 86
| false
| false
|
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.
|
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.
| 220
|
267
| 13
| 10457
|
googlecloudplatform__dataproc-templates
|
bba5da698a8aa144c73d4d2a90e84c6a577ce7f4
|
python/test/jdbc/test_jdbc_to_gcs.py
|
common
|
run
| true
|
function
| 4
| 4
| false
| false
|
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.
|
[
"run",
"parse_args",
"build",
"get_logger"
] |
option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.
| 224
|
277
| 13
| 12062
|
googlecloudplatform__dataproc-templates
|
bba5da698a8aa144c73d4d2a90e84c6a577ce7f4
|
python/test/jdbc/test_jdbc_to_gcs.py
|
inproject
|
parse_args
| true
|
function
| 4
| 4
| false
| false
|
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.
|
[
"parse_args",
"run",
"build",
"get_logger"
] |
n=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.
| 234 | 295 | 14 | 1,013 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | inproject | parse_args | true | function | 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.
|
[
"parse_args",
"run",
"build",
"get_logger"
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.
| 252 | 303 | 14 | 3,983 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | random | JDBC_LOWERBOUND | true | statement | 103 | 103 | false | false |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
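        # The next line stubs the tail of the mocked read chain so that
        # load() hands run() a stand-in DataFrame instead of touching a
        # real JDBC source.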
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.
|
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
ed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.
| 260 | 320 | 14 | 8,026 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | common | run | true | function | 4 | 4 | false | false |
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.
|
[
"run",
"parse_args",
"build",
"get_logger"
] |
ion().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.
| 277 | 330 | 14 | 9,758 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | inproject | parse_args | true | function | 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
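    # mock.patch.object(pyspark.sql, 'SparkSession') swaps the SparkSession
    # attribute for a MagicMock during each test and passes that mock in as
    # mock_spark_session, so no real Spark session or JDBC driver is started.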
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.
|
[
"parse_args",
"run",
"build",
"get_logger"
] |
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.
| 287 | 357 | 15 | 1,021 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | inproject | parse_args | true | function | 4 | 4 | false | true |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.
|
[
"parse_args",
"run",
"build",
"get_logger"
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.
| 314 | 359 | 15 | 2,559 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | common | run | true | function | 4 | 4 | false | true |
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.
|
[
"run",
"parse_args",
"build",
"get_logger"
] |
lates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.
| 316 | 361 | 15 | 2,830 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | random | MONGO_DATABASE | true | statement | 123 | 123 | false | true |
[
"FORMAT_MONGO",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"HEADER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.
|
[
"FORMAT_MONGO",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"HEADER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
se_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.
| 318 | 371 | 15 | 5,597 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | inproject | parse_args | true | function | 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.
|
[
"parse_args",
"run",
"build",
"get_logger"
] |
= MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.
| 328 | 384 | 16 | 990 | googlecloudplatform__dataproc-templates | 49e82f63f0e49578ce6451902da57a095bc02b5d | python/test/gcs/test_gcs_to_gcs.py | inproject | parse_args | true | function | 4 | 4 | false | true |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToBigQueryTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.
|
[
"parse_args",
"run",
"build",
"get_logger"
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToBigQueryTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.
| 341 | 388 | 16 | 3,797 | googlecloudplatform__dataproc-templates | 49e82f63f0e49578ce6451902da57a095bc02b5d | python/test/gcs/test_gcs_to_gcs.py | inproject | parse_args | true | function | 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToBigQueryTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.
|
[
"parse_args",
"run",
"build",
"get_logger"
] |
put.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.
| 345 | 396 | 16 | 6,585 | googlecloudplatform__dataproc-templates | 49e82f63f0e49578ce6451902da57a095bc02b5d | python/test/gcs/test_gcs_to_gcs.py | common | run | true | function | 4 | 4 | false | false |
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToBigQueryTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.read.format() \
.option() \
.option.assert_called_once_with(constants.INFER_SCHEMA, True)
mock_spark_session.read.format() \
.option() \
.option() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.option() \
.option() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option() \
.csv.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=avro",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=avro",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.
|
[
"run",
"parse_args",
"build",
"get_logger"
] |
.option.assert_called_once_with(constants.INFER_SCHEMA, True)
mock_spark_session.read.format() \
.option() \
.option() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.option() \
.option() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option() \
.csv.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=avro",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=avro",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.
| 353 | 415 | 17 | 1,692 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | infile | create_empty_array | true | function | 17 | 17 | false | true |
[
"attr_name",
"col_dim_name",
"row_dim_name",
"ingest_data_rows_chunked",
"ingest_data_whole",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.
|
[
"attr_name",
"col_dim_name",
"row_dim_name",
"ingest_data_rows_chunked",
"ingest_data_whole",
"create_empty_array",
"from_matrix",
"ingest_data",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose"
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.
| 369 | 416 | 17 | 1,752 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | infile | ingest_data | true | function | 17 | 17 | false | true |
[
"row_dim_name",
"attr_name",
"col_dim_name",
"uri",
"indent",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.
|
[
"row_dim_name",
"attr_name",
"col_dim_name",
"uri",
"indent",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose"
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.
| 370 | 424 | 17 | 2,387 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | random | col_dim_name | true | statement | 17 | 17 | false | true |
[
"attr_name",
"row_dim_name",
"col_dim_name",
"name",
"ctx",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.
|
[
"attr_name",
"row_dim_name",
"col_dim_name",
"name",
"ctx",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"object_type",
"soma_options",
"uri",
"verbose"
] |
obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.
| 378
|
435
| 17
| 3,112
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
common
|
uri
| true
|
statement
| 17
| 17
| false
| false
|
[
"attr_name",
"uri",
"indent",
"col_dim_name",
"row_dim_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.
|
[
"attr_name",
"uri",
"indent",
"col_dim_name",
"row_dim_name",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose"
] |
ames, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.
| 389
|
436
| 17
| 3,131
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
common
|
ctx
| true
|
statement
| 17
| 17
| false
| false
|
[
"ctx",
"uri",
"col_dim_name",
"row_dim_name",
"soma_options",
"__init__",
"attr_name",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"name",
"object_type",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.
|
[
"ctx",
"uri",
"col_dim_name",
"row_dim_name",
"soma_options",
"attr_name",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"name",
"object_type",
"verbose"
] |
None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.
| 390
|
437
| 17
| 3,387
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
random
|
soma_options
| true
|
statement
| 17
| 17
| false
| false
|
[
"to_csr_matrix",
"from_matrix",
"col_dim_name",
"row_dim_name",
"attr_name",
"__init__",
"create_empty_array",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.
|
[
"to_csr_matrix",
"from_matrix",
"col_dim_name",
"row_dim_name",
"attr_name",
"create_empty_array",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose"
] |
()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.
| 391
|
438
| 17
| 3,400
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
random
|
write_X_chunked_if_csr
| true
|
statement
| 8
| 8
| false
| true
|
[
"X_capacity",
"X_cell_order",
"X_tile_order",
"goal_chunk_nnz",
"string_dim_zstd_level",
"obs_extent",
"var_extent",
"write_X_chunked_if_csr",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "goal_chunk_nnz",
"type": "statement"
},
{
"name": "obs_extent",
"type": "statement"
},
{
"name": "string_dim_zstd_level",
"type": "statement"
},
{
"name": "var_extent",
"type": "statement"
},
{
"name": "write_X_chunked_if_csr",
"type": "statement"
},
{
"name": "X_capacity",
"type": "statement"
},
{
"name": "X_cell_order",
"type": "statement"
},
{
"name": "X_tile_order",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__bool__",
"type": "instance"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__ge__",
"type": "instance"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__gt__",
"type": "instance"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__le__",
"type": "instance"
},
{
"name": "__lt__",
"type": "instance"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasshook__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.
|
[
"X_capacity",
"X_cell_order",
"X_tile_order",
"goal_chunk_nnz",
"string_dim_zstd_level",
"obs_extent",
"var_extent",
"write_X_chunked_if_csr"
] |
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.
| 392
|
439
| 17
| 3,441
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
infile
|
ingest_data_rows_chunked
| true
|
function
| 17
| 17
| false
| true
|
[
"attr_name",
"ingest_data_whole",
"col_dim_name",
"row_dim_name",
"create_empty_array",
"__init__",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.
|
[
"attr_name",
"ingest_data_whole",
"col_dim_name",
"row_dim_name",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose"
] |
lf.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.
| 393
|
440
| 17
| 3,527
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
infile
|
ingest_data_whole
| true
|
function
| 17
| 17
| false
| true
|
[
"ingest_data_rows_chunked",
"attr_name",
"col_dim_name",
"row_dim_name",
"create_empty_array",
"__init__",
"from_matrix",
"ingest_data",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.
|
[
"ingest_data_rows_chunked",
"attr_name",
"col_dim_name",
"row_dim_name",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose"
] |
t(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.
| 394
|
443
| 17
| 6,382
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
inproject
|
get_sort_and_permutation
| true
|
function
| 6
| 12
| false
| true
|
[
"get_start_stamp",
"format_elapsed",
"find_csr_chunk_size",
"Optional",
"get_sort_and_permutation",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
    # See README-csr-ingest.md for important information on using this ingestor.
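    #
    # A rough, comment-level sketch of the loop implied by the walkthrough above --
    # purely illustrative, not the ingestor itself (chunk_size is assumed to come from
    # util.find_csr_chunk_size, whose exact signature is not shown here):
    #
    #   i = 0
    #   while i < matrix.shape[0]:
    #       i2 = i + chunk_size                        # sorted rows [i, i2) form one chunk
    #       chunk_coo = matrix[permutation[i:i2]].tocoo()
    #       d0 = sorted_row_names[chunk_coo.row + i]   # row labels for this chunk
    #       d1 = col_names[chunk_coo.col]
    #       ... write (d0, d1) -> chunk_coo.data into the open array ...
    #       i = i2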
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
        # csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
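        # A minimal illustration (assuming util.get_sort_and_permutation returns the
        # sorted labels plus the matching argsort-style permutation; plain numpy would
        # behave the same way):
        #
        #   row_names = np.asarray(['C', 'A', 'B', 'D'])
        #   permutation = np.argsort(row_names)         # -> [1, 2, 0, 3]
        #   sorted_row_names = row_names[permutation]    # -> ['A', 'B', 'C', 'D']
        #   # CSR rows are then read through the permutation, e.g. csr[permutation[28]]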
sorted_row_names, permutation = util.
|
[
"get_start_stamp",
"format_elapsed",
"find_csr_chunk_size",
"Optional",
"get_sort_and_permutation"
] |
ach
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
    # See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
        # csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.
| 397
|
444
| 17
| 6,605
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
inproject
|
get_start_stamp
| true
|
function
| 6
| 12
| false
| false
|
[
"get_start_stamp",
"format_elapsed",
"find_csr_chunk_size",
"get_sort_and_permutation",
"Optional",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
    # See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permuation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.
|
[
"get_start_stamp",
"format_elapsed",
"find_csr_chunk_size",
"get_sort_and_permutation",
"Optional"
] |
0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
    # See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permuation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.
| 398
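The completion target of the row above is util.get_start_stamp, which the prefixes pair with util.format_elapsed for coarse timing around writes. Neither body appears in any prefix here, so the following is only a guess at equivalent behavior (the real return value and message format are unknown):

import time

def get_start_stamp():
    # Opaque "start" token; wall-clock seconds is the simplest possible choice.
    return time.time()

def format_elapsed(start, message: str) -> str:
    # Return the caller's message annotated with the elapsed seconds.
    return "%s (%.3f seconds)" % (message, time.time() - start)

# Usage mirroring the prefixes: s = get_start_stamp(); ...; print(format_elapsed(s, "FINISH WRITING"))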
|
448
| 17
| 7,001
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
inproject
|
find_csr_chunk_size
| true
|
function
| 6
| 12
| false
| true
|
[
"get_start_stamp",
"format_elapsed",
"get_sort_and_permutation",
"find_csr_chunk_size",
"Optional",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
    # See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
        # with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.
|
[
"get_start_stamp",
"format_elapsed",
"get_sort_and_permutation",
"find_csr_chunk_size",
"Optional"
] |
2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
    # See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
        # with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.
| 402
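The row above completes util.find_csr_chunk_size(matrix, permutation, i, goal_chunk_nnz), whose body is not part of any prefix. The sketch below follows the inline comment ("Find a number of CSR rows which will result in a desired nnz for the chunk") and is an assumption about the behavior, not the actual utility:

import scipy.sparse

def find_csr_chunk_size(csr: scipy.sparse.csr_matrix, permutation, start_row: int, goal_chunk_nnz: int) -> int:
    # Walk the permuted rows from start_row, accumulating per-row nnz from indptr,
    # and stop just before the running total would exceed goal_chunk_nnz.
    # Always take at least one row so the caller's loop makes progress.
    chunk_size = 0
    running_nnz = 0
    for p in permutation[start_row:]:
        row_nnz = csr.indptr[p + 1] - csr.indptr[p]
        if chunk_size > 0 and running_nnz + row_nnz > goal_chunk_nnz:
            break
        running_nnz += row_nnz
        chunk_size += 1
    return chunk_size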
|
449
| 17
| 7,050
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
inproject
|
soma_options
| true
|
statement
| 17
| 17
| false
| false
|
[
"to_csr_matrix",
"from_matrix",
"row_dim_name",
"ingest_data",
"col_dim_name",
"__init__",
"attr_name",
"create_empty_array",
"ingest_data_rows_chunked",
"ingest_data_whole",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
    # See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
        # with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.
|
[
"to_csr_matrix",
"from_matrix",
"row_dim_name",
"ingest_data",
"col_dim_name",
"attr_name",
"create_empty_array",
"ingest_data_rows_chunked",
"ingest_data_whole",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose"
] |
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
    # See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
        # with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.
| 403
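The row above completes self.soma_options, and its candidate lists enumerate the attributes that SOMAOptions exposes. Below is a minimal sketch consistent with those names only; the types and default values are placeholders, not the package's real soma_options.py:

from dataclasses import dataclass

@dataclass
class SOMAOptions:
    # Field names come from the completion candidates above; every default
    # value here is a placeholder, not the library's real configuration.
    obs_extent: int = 256
    var_extent: int = 2048
    X_capacity: int = 100_000
    X_cell_order: str = "row-major"
    X_tile_order: str = "row-major"
    string_dim_zstd_level: int = 3
    write_X_chunked_if_csr: bool = True
    goal_chunk_nnz: int = 10_000_000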
|
450
| 17
| 7,063
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
inproject
|
goal_chunk_nnz
| true
|
statement
| 8
| 8
| false
| true
|
[
"goal_chunk_nnz",
"write_X_chunked_if_csr",
"X_capacity",
"X_cell_order",
"X_tile_order",
"obs_extent",
"string_dim_zstd_level",
"var_extent",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "goal_chunk_nnz",
"type": "statement"
},
{
"name": "obs_extent",
"type": "statement"
},
{
"name": "string_dim_zstd_level",
"type": "statement"
},
{
"name": "var_extent",
"type": "statement"
},
{
"name": "write_X_chunked_if_csr",
"type": "statement"
},
{
"name": "X_capacity",
"type": "statement"
},
{
"name": "X_cell_order",
"type": "statement"
},
{
"name": "X_tile_order",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__bool__",
"type": "instance"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__ge__",
"type": "instance"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__gt__",
"type": "instance"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__le__",
"type": "instance"
},
{
"name": "__lt__",
"type": "instance"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasshook__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
    # See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
        # with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.
|
[
"goal_chunk_nnz",
"write_X_chunked_if_csr",
"X_capacity",
"X_cell_order",
"X_tile_order",
"obs_extent",
"string_dim_zstd_level",
"var_extent"
] |
k_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
    # See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
        # with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.
| 404
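goal_chunk_nnz, the attribute completed above, bounds how many nonzeros each TileDB fragment carries. A back-of-the-envelope example with hypothetical sizes (not taken from the repository) shows how it sets the number of write fragments:

# Hypothetical sizes, for intuition only.
total_nnz = 250_000_000        # nonzeros in the whole X matrix
goal_chunk_nnz = 10_000_000    # per-chunk target taken from SOMAOptions
approx_fragments = -(-total_nnz // goal_chunk_nnz)   # ceiling division -> 25 write fragments
print(approx_fragments)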
|
451
| 17
| 7,258
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
inproject
|
get_start_stamp
| true
|
function
| 6
| 12
| false
| false
|
[
"get_start_stamp",
"find_csr_chunk_size",
"format_elapsed",
"get_sort_and_permutation",
"Optional",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
    # See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
        # with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.
|
[
"get_start_stamp",
"find_csr_chunk_size",
"format_elapsed",
"get_sort_and_permutation",
"Optional"
] |
-----------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
        # with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.
| 405
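The prefix above shows the full chunk loop, including d0 = sorted_row_names[chunk_coo.row + i]. The snippet below replays the 4x3 C/A/B/D example from the comments to check that index arithmetic end to end:

import numpy as np
import scipy.sparse

# The 4x3 matrix from the comments above, stored in original row order C, A, B, D.
matrix = scipy.sparse.csr_matrix(np.array([
    [0, 1, 2],   # C
    [4, 0, 5],   # A
    [7, 0, 0],   # B
    [0, 8, 9],   # D
]))
sorted_row_names = np.asarray(["A", "B", "C", "D"])
permutation = [1, 2, 0, 3]

# Second chunk: i, i2 = 2, 4 selects original rows C and D via the permutation.
i, i2 = 2, 4
chunk_coo = matrix[permutation[i:i2]].tocoo()
d0 = sorted_row_names[chunk_coo.row + i]
# chunk_coo.row is [0, 0, 1, 1]; adding i gives [2, 2, 3, 3],
# so d0 is ['C', 'C', 'D', 'D'] -- the dim0 labels written for this fragment.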
|
464
| 17
| 9,422
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
random
|
uri
| true
|
statement
| 17
| 17
| false
| false
|
[
"uri",
"attr_name",
"col_dim_name",
"row_dim_name",
"indent",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
    # See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
        # with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
        Reads the TileDB array storage and returns a sparse CSR matrix. The
        row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
# Since the TileDB array is sparse, with two string dimensions, we get back a dict:
# * 'obs_id' key is a sequence of dim0 coordinates for X data.
# * 'var_id' key is a sequence of dim1 coordinates for X data.
# * 'values' key is a sequence of X data values.
with tiledb.open(self.
|
[
"uri",
"attr_name",
"col_dim_name",
"row_dim_name",
"indent",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose"
] |
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
# Since the TileDB array is sparse, with two string dimensions, we get back a dict:
# * 'obs_id' key is a sequence of dim0 coordinates for X data.
# * 'var_id' key is a sequence of dim1 coordinates for X data.
# * 'values' key is a sequence of X data values.
with tiledb.open(self.
| __index_level_0__: 418 |
idx: 474 | idx_lca: 18 | offset: 966 | repo: single-cell-data__tiledb-soma | commit_hash: 0a9b47f0d37d4e309d85897423bcea947086b39a | target_file: apis/python/src/tiledbsc/uns_array.py | line_type_lca: inproject | ground_truth: get_start_stamp | in_completions: true | completion_type: function | non_dunder_count_intellij: 6 | non_dunder_count_jedi: 12 | start_with_: false | first_occurrence: true |
[
"get_start_stamp",
"format_elapsed",
"_to_tiledb_supported_array_type",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.
|
[
"get_start_stamp",
"format_elapsed",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.
| __index_level_0__: 428 |
idx: 475 | idx_lca: 18 | offset: 1,010 | repo: single-cell-data__tiledb-soma | commit_hash: 0a9b47f0d37d4e309d85897423bcea947086b39a | target_file: apis/python/src/tiledbsc/uns_array.py | line_type_lca: non_informative | ground_truth: indent | in_completions: true | completion_type: statement | non_dunder_count_intellij: 15 | non_dunder_count_jedi: 15 | start_with_: false | first_occurrence: true |
[
"uri",
"indent",
"from_numpy_ndarray",
"ctx",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.
|
[
"uri",
"indent",
"from_numpy_ndarray",
"ctx",
"verbose",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options"
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.
| __index_level_0__: 429 |
idx: 476 | idx_lca: 18 | offset: 1,055 | repo: single-cell-data__tiledb-soma | commit_hash: 0a9b47f0d37d4e309d85897423bcea947086b39a | target_file: apis/python/src/tiledbsc/uns_array.py | line_type_lca: non_informative | ground_truth: uri | in_completions: true | completion_type: statement | non_dunder_count_intellij: 15 | non_dunder_count_jedi: 15 | start_with_: false | first_occurrence: true |
[
"indent",
"uri",
"from_numpy_ndarray",
"ctx",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.
|
[
"indent",
"uri",
"from_numpy_ndarray",
"ctx",
"verbose",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options"
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.
| __index_level_0__: 430 |
idx: 479 | idx_lca: 18 | offset: 1,257 | repo: single-cell-data__tiledb-soma | commit_hash: 0a9b47f0d37d4e309d85897423bcea947086b39a | target_file: apis/python/src/tiledbsc/uns_array.py | line_type_lca: random | ground_truth: verbose | in_completions: true | completion_type: statement | non_dunder_count_intellij: 15 | non_dunder_count_jedi: 15 | start_with_: false | first_occurrence: false |
[
"verbose",
"uri",
"from_numpy_ndarray",
"ctx",
"indent",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.
|
[
"verbose",
"uri",
"from_numpy_ndarray",
"ctx",
"indent",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options"
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.
| __index_level_0__: 433 |
idx: 486 | idx_lca: 18 | offset: 2,268 | repo: single-cell-data__tiledb-soma | commit_hash: 0a9b47f0d37d4e309d85897423bcea947086b39a | target_file: apis/python/src/tiledbsc/uns_array.py | line_type_lca: infile | ground_truth: from_numpy_ndarray | in_completions: true | completion_type: function | non_dunder_count_intellij: 15 | non_dunder_count_jedi: 15 | start_with_: false | first_occurrence: false |
[
"from_numpy_ndarray",
"uri",
"ctx",
"indent",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.
|
[
"from_numpy_ndarray",
"uri",
"ctx",
"indent",
"ingest_data_from_csr",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose"
] |
.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.
| __index_level_0__: 440 |
idx: 508 | idx_lca: 18 | offset: 4,731 | repo: single-cell-data__tiledb-soma | commit_hash: 0a9b47f0d37d4e309d85897423bcea947086b39a | target_file: apis/python/src/tiledbsc/uns_array.py | line_type_lca: infile | ground_truth: create_empty_array_for_csr | in_completions: true | completion_type: function | non_dunder_count_intellij: 15 | non_dunder_count_jedi: 15 | start_with_: false | first_occurrence: true |
[
"uri",
"from_numpy_ndarray",
"ctx",
"indent",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
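# --- Added illustration (not from the original source) -----------------------------
# The dispatcher above normalizes every supported value to a 1-D ndarray before ingest.
# The same normalization, shown standalone (to_1d_array is a hypothetical helper name):
import numpy as np

def to_1d_array(obj):
    # Lists and arrays pass through np.asarray; bare scalars get wrapped in a 1-element array.
    if isinstance(obj, (list, np.ndarray)):
        return np.asarray(obj)
    return np.asarray([obj])

assert to_1d_array(3.14).shape == (1,)
assert to_1d_array([1, 2, 3]).shape == (3,)
assert to_1d_array(np.arange(4)).shape == (4,)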
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
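# --- Added illustration (not from the original source) -----------------------------
# Why the dtype='O' conversion above matters: numpy stores Python strings as fixed-width
# unicode ('<U...'), which, per the note above, tiledb.from_numpy does not ingest
# directly; re-casting to an object-dtype array of plain Python strings is the workaround:
import numpy as np

arr = np.asarray(["thistle", "artichoke"])      # dtype is '<U9'
if str(arr.dtype).startswith("<U"):
    arr = np.array(arr, dtype="O")              # object dtype, elements are plain str
# tiledb.from_numpy(uri, arr, ctx=ctx) is then expected to succeed, as in the code above.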
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.
|
[
"uri",
"from_numpy_ndarray",
"ctx",
"indent",
"ingest_data_from_csr",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose"
] |
----------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.
| __index_level_0__: 462 |
idx: 509 | idx_lca: 18 | offset: 4,805 | repo: single-cell-data__tiledb-soma | commit_hash: 0a9b47f0d37d4e309d85897423bcea947086b39a | target_file: apis/python/src/tiledbsc/uns_array.py | line_type_lca: infile | ground_truth: ingest_data_from_csr | in_completions: true | completion_type: function | non_dunder_count_intellij: 15 | non_dunder_count_jedi: 15 | start_with_: false | first_occurrence: true |
[
"from_numpy_ndarray",
"uri",
"indent",
"ctx",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
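# --- Added usage sketch (assumption) ------------------------------------------------
# from_pandas_dataframe above is a thin wrapper over tiledb.from_pandas with a fixed set
# of flags. A minimal standalone call with the same flags (the URI is a placeholder):
import pandas as pd
import tiledb

demo_df = pd.DataFrame({"key": ["a", "b"], "value": [1, 2]})
tiledb.from_pandas(uri="/tmp/uns_demo", dataframe=demo_df, sparse=True, allows_duplicates=False)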
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.
|
[
"from_numpy_ndarray",
"uri",
"indent",
"ctx",
"verbose",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options"
] |
array(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.
| __index_level_0__: 463 |
idx: 517 | idx_lca: 18 | offset: 6,290 | repo: single-cell-data__tiledb-soma | commit_hash: 0a9b47f0d37d4e309d85897423bcea947086b39a | target_file: apis/python/src/tiledbsc/uns_array.py | line_type_lca: common | ground_truth: uri | in_completions: true | completion_type: statement | non_dunder_count_intellij: 15 | non_dunder_count_jedi: 15 | start_with_: false | first_occurrence: false |
[
"uri",
"ctx",
"from_numpy_ndarray",
"indent",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
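# --- Added illustration (not from the original source) -----------------------------
# The write path above is CSR -> COO -> one bulk assignment of (row, col, data). The COO
# view exposes the parallel coordinate/data arrays that a 2-D sparse TileDB write needs:
import numpy as np
import scipy.sparse as sp

csr = sp.csr_matrix(np.array([[0.0, 1.5], [2.0, 0.0]]))
coo = csr.tocoo()
# ingest_data_from_csr (referenced above; its body is not shown in this excerpt) is
# assumed to boil down to a write of the form A[coo.row, coo.col] = coo.data.
print(coo.row, coo.col, coo.data)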
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
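# Added commentary (not in the original source): RleFilter on dim0 targets runs of
# repeated coordinate values, ZstdFilter is the general-purpose compressor used for
# dim1, the attribute, and the offsets; capacity plus the row-major cell order and
# col-major tile order are the fragment-sizing and layout choices this schema fixes.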
tiledb.Array.create(self.
|
[
"uri",
"ctx",
"from_numpy_ndarray",
"indent",
"verbose",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options"
] |
atrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.
| __index_level_0__: 471 |
idx: 518 | idx_lca: 18 | offset: 6,309 | repo: single-cell-data__tiledb-soma | commit_hash: 0a9b47f0d37d4e309d85897423bcea947086b39a | target_file: apis/python/src/tiledbsc/uns_array.py | line_type_lca: common | ground_truth: ctx | in_completions: true | completion_type: statement | non_dunder_count_intellij: 15 | non_dunder_count_jedi: 15 | start_with_: false | first_occurrence: false |
[
"ctx",
"uri",
"from_numpy_ndarray",
"indent",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.
|
[
"ctx",
"uri",
"from_numpy_ndarray",
"indent",
"verbose",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options"
] |
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.
| 472
|
522
| 18
| 7,410
|
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
inproject
|
get_start_stamp
| true
|
function
| 6
| 12
| false
| false
|
[
"get_start_stamp",
"format_elapsed",
"_to_tiledb_supported_array_type",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
            # Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# TODO: regardless of which matrix type (numpy.ndarray, scipy.sparse.csr_matrix, etc) was
# written in, this returns always the same type on readback. Perhaps at write time we can save a
# metadata tag with the provenance-type of the array, and on readback, try to return the same
# type.
def to_matrix(self):
"""
Reads an uns array from TileDB storage and returns a matrix -- currently, always as numpy.ndarray.
"""
if self.verbose:
s2 = util.
|
[
"get_start_stamp",
"format_elapsed",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# TODO: regardless of which matrix type (numpy.ndarray, scipy.sparse.csr_matrix, etc) was
# written in, this returns always the same type on readback. Perhaps at write time we can save a
# metadata tag with the provenance-type of the array, and on readback, try to return the same
# type.
def to_matrix(self):
"""
Reads an uns array from TileDB storage and returns a matrix -- currently, always as numpy.ndarray.
"""
if self.verbose:
s2 = util.
| 476
|
540
| 19
| 649
|
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
inproject
|
util
| true
|
module
| 33
| 35
| false
| false
|
[
"util",
"soma",
"SOMA",
"util_ann",
"soma_options",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.
|
[
"util",
"soma",
"SOMA",
"util_ann",
"soma_options",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.
| 489
|
541
| 19
| 654
|
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
inproject
|
format_elapsed
| true
|
function
| 10
| 16
| false
| true
|
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker",
"_find_csc_chunk_size",
"_find_csr_chunk_size",
"_get_sort_and_permutation",
"_to_tiledb_supported_array_type",
"_X_and_ids_to_coo"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "ETATracker",
"type": "class"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_find_csc_chunk_size",
"type": "function"
},
{
"name": "_find_csr_chunk_size",
"type": "function"
},
{
"name": "_get_sort_and_permutation",
"type": "function"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "_X_and_ids_to_coo",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.
|
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker"
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.
| 490
|
569
| 19
| 2,547
|
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
inproject
|
util_ann
| true
|
module
| 33
| 35
| false
| true
|
[
"soma",
"util",
"SOMA",
"util_ann",
"soma_options",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = scanpy.read_10x_h5(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}"
)
)
return anndata
# ----------------------------------------------------------------
def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None:
"""
Top-level writer method for creating a TileDB group for a SOMA object.
"""
# Without _at least_ an index, there is nothing to indicate the dimension indices.
if anndata.obs.index.empty or anndata.var.index.empty:
raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START DECATEGORICALIZING")
anndata.obs_names_make_unique()
anndata.var_names_make_unique()
anndata = tiledbsc.
|
[
"soma",
"util",
"SOMA",
"util_ann",
"soma_options",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = scanpy.read_10x_h5(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}"
)
)
return anndata
# ----------------------------------------------------------------
def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None:
"""
Top-level writer method for creating a TileDB group for a SOMA object.
"""
# Without _at least_ an index, there is nothing to indicate the dimension indices.
if anndata.obs.index.empty or anndata.var.index.empty:
raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START DECATEGORICALIZING")
anndata.obs_names_make_unique()
anndata.var_names_make_unique()
anndata = tiledbsc.
| 505
|
572
| 19
| 2,641
|
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
inproject
|
util
| true
|
module
| 33
| 35
| false
| false
|
[
"soma",
"util",
"SOMA",
"util_ann",
"soma_options",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = scanpy.read_10x_h5(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}"
)
)
return anndata
# ----------------------------------------------------------------
def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None:
"""
Top-level writer method for creating a TileDB group for a SOMA object.
"""
# Without _at least_ an index, there is nothing to indicate the dimension indices.
if anndata.obs.index.empty or anndata.var.index.empty:
raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START DECATEGORICALIZING")
anndata.obs_names_make_unique()
anndata.var_names_make_unique()
anndata = tiledbsc.util_ann._decategoricalize(anndata)
if soma._verbose:
print(
tiledbsc.
|
[
"soma",
"util",
"SOMA",
"util_ann",
"soma_options",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = scanpy.read_10x_h5(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}"
)
)
return anndata
# ----------------------------------------------------------------
def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None:
"""
Top-level writer method for creating a TileDB group for a SOMA object.
"""
# Without _at least_ an index, there is nothing to indicate the dimension indices.
if anndata.obs.index.empty or anndata.var.index.empty:
raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START DECATEGORICALIZING")
anndata.obs_names_make_unique()
anndata.var_names_make_unique()
anndata = tiledbsc.util_ann._decategoricalize(anndata)
if soma._verbose:
print(
tiledbsc.
| 507
|
573
| 19
| 2,646
|
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
inproject
|
format_elapsed
| true
|
function
| 10
| 16
| false
| false
|
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker",
"_find_csc_chunk_size",
"_find_csr_chunk_size",
"_get_sort_and_permutation",
"_to_tiledb_supported_array_type",
"_X_and_ids_to_coo"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "ETATracker",
"type": "class"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_find_csc_chunk_size",
"type": "function"
},
{
"name": "_find_csr_chunk_size",
"type": "function"
},
{
"name": "_get_sort_and_permutation",
"type": "function"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "_X_and_ids_to_coo",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = scanpy.read_10x_h5(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}"
)
)
return anndata
# ----------------------------------------------------------------
def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None:
"""
Top-level writer method for creating a TileDB group for a SOMA object.
"""
# Without _at least_ an index, there is nothing to indicate the dimension indices.
if anndata.obs.index.empty or anndata.var.index.empty:
raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START DECATEGORICALIZING")
anndata.obs_names_make_unique()
anndata.var_names_make_unique()
anndata = tiledbsc.util_ann._decategoricalize(anndata)
if soma._verbose:
print(
tiledbsc.util.
|
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker"
] |
_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = scanpy.read_10x_h5(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}"
)
)
return anndata
# ----------------------------------------------------------------
def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None:
"""
Top-level writer method for creating a TileDB group for a SOMA object.
"""
# Without _at least_ an index, there is nothing to indicate the dimension indices.
if anndata.obs.index.empty or anndata.var.index.empty:
raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START DECATEGORICALIZING")
anndata.obs_names_make_unique()
anndata.var_names_make_unique()
anndata = tiledbsc.util_ann._decategoricalize(anndata)
if soma._verbose:
print(
tiledbsc.util.
| 508
|
576
| 19
| 2,762
|
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
inproject
|
util
| true
|
module
| 33
| 35
| false
| false
|
[
"soma",
"util",
"SOMA",
"util_ann",
"soma_options",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = scanpy.read_10x_h5(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}"
)
)
return anndata
# ----------------------------------------------------------------
def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None:
"""
Top-level writer method for creating a TileDB group for a SOMA object.
"""
# Without _at least_ an index, there is nothing to indicate the dimension indices.
if anndata.obs.index.empty or anndata.var.index.empty:
raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START DECATEGORICALIZING")
anndata.obs_names_make_unique()
anndata.var_names_make_unique()
anndata = tiledbsc.util_ann._decategoricalize(anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING")
)
if soma._verbose:
s = tiledbsc.
|
[
"soma",
"util",
"SOMA",
"util_ann",
"soma_options",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
iledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = scanpy.read_10x_h5(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}"
)
)
return anndata
# ----------------------------------------------------------------
def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None:
"""
Top-level writer method for creating a TileDB group for a SOMA object.
"""
# Without _at least_ an index, there is nothing to indicate the dimension indices.
if anndata.obs.index.empty or anndata.var.index.empty:
raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START DECATEGORICALIZING")
anndata.obs_names_make_unique()
anndata.var_names_make_unique()
anndata = tiledbsc.util_ann._decategoricalize(anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING")
)
if soma._verbose:
s = tiledbsc.
| 509
|
577
| 19
| 2,767
|
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
inproject
|
get_start_stamp
| true
|
function
| 10
| 16
| false
| false
|
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker",
"_find_csc_chunk_size",
"_find_csr_chunk_size",
"_get_sort_and_permutation",
"_to_tiledb_supported_array_type",
"_X_and_ids_to_coo"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "ETATracker",
"type": "class"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_find_csc_chunk_size",
"type": "function"
},
{
"name": "_find_csr_chunk_size",
"type": "function"
},
{
"name": "_get_sort_and_permutation",
"type": "function"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "_X_and_ids_to_coo",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = scanpy.read_10x_h5(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}"
)
)
return anndata
# ----------------------------------------------------------------
def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None:
"""
Top-level writer method for creating a TileDB group for a SOMA object.
"""
# Without _at least_ an index, there is nothing to indicate the dimension indices.
if anndata.obs.index.empty or anndata.var.index.empty:
raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START DECATEGORICALIZING")
anndata.obs_names_make_unique()
anndata.var_names_make_unique()
anndata = tiledbsc.util_ann._decategoricalize(anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING")
)
if soma._verbose:
s = tiledbsc.util.
|
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker"
] |
sc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = scanpy.read_10x_h5(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}"
)
)
return anndata
# ----------------------------------------------------------------
def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None:
"""
Top-level writer method for creating a TileDB group for a SOMA object.
"""
# Without _at least_ an index, there is nothing to indicate the dimension indices.
if anndata.obs.index.empty or anndata.var.index.empty:
raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START DECATEGORICALIZING")
anndata.obs_names_make_unique()
anndata.var_names_make_unique()
anndata = tiledbsc.util_ann._decategoricalize(anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING")
)
if soma._verbose:
s = tiledbsc.util.
| 510
|