Schema (113 columns, name and type):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | int64 |
| qsc_code_cate_encoded_data | null |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
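Every record below follows this schema. As a minimal sketch (assuming nothing beyond the schema itself, not any dataset tooling), a row can be modeled as a plain Python dict, with the per-file quality signals recoverable by their shared `_quality_signal` suffix; the sample values here are abbreviated from the first record:

```python
# Minimal sketch: one record of this dataset modeled as a plain dict.
# Field names come from the schema above; values are abbreviated from
# the first row below. Illustrative only, not part of the dataset tooling.
row = {
    "hexsha": "5d36d6dbf217342990cb49eda55af38f42824619",
    "size": 4238,
    "ext": "py",
    "lang": "Python",
    "max_stars_repo_name": "wangyum/anaconda",
    "content": "import sys\nimport unittest\n...",
    "avg_line_length": 42.808081,
    "qsc_code_num_lines_quality_signal": 98.0,
    "qsc_codepython_cate_var_zero_quality_signal": True,
}

# Collect just the quality signals by their shared suffix.
signals = {k: v for k, v in row.items() if k.endswith("_quality_signal")}
print(sorted(signals))
```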
**Row 1**

| Field | Value |
|---|---|
| hexsha | 5d36d6dbf217342990cb49eda55af38f42824619 |
| size | 4,238 |
| ext | py |
| lang | Python |
| max_stars_repo_path | pkgs/dynd-python-0.7.2-py27_0/lib/python2.7/site-packages/dynd/tests/test_nd_fields.py |
| max_stars_repo_name | wangyum/anaconda |
| max_stars_repo_head_hexsha | 6e5a0dbead3327661d73a61e85414cf92aa52be6 |
| max_stars_repo_licenses | ["Apache-2.0", "BSD-3-Clause"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | pkgs/dynd-python-0.7.2-py27_0/lib/python2.7/site-packages/dynd/tests/test_nd_fields.py |
| max_issues_repo_name | wangyum/anaconda |
| max_issues_repo_head_hexsha | 6e5a0dbead3327661d73a61e85414cf92aa52be6 |
| max_issues_repo_licenses | ["Apache-2.0", "BSD-3-Clause"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | pkgs/dynd-python-0.7.2-py27_0/lib/python2.7/site-packages/dynd/tests/test_nd_fields.py |
| max_forks_repo_name | wangyum/anaconda |
| max_forks_repo_head_hexsha | 6e5a0dbead3327661d73a61e85414cf92aa52be6 |
| max_forks_repo_licenses | ["Apache-2.0", "BSD-3-Clause"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:
```python
import sys
import unittest
from dynd import nd, ndt

"""
class TestFields(unittest.TestCase):
    def test_simple(self):
        a = nd.array([
                (1, 2, 'a', 'b'),
                (3, 4, 'ab', 'cd'),
                (5, 6, 'def', 'ghi')],
                type='3 * {x: int32, y: int32, z: string, w: string}')
        # Selecting a single field
        b = nd.fields(a, 'x')
        self.assertEqual(nd.dtype_of(b), ndt.make_struct(
                [ndt.int32],
                ['x']))
        self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
        # Selecting two fields
        b = nd.fields(a, 'z', 'y')
        self.assertEqual(nd.dtype_of(b), ndt.make_struct(
                [ndt.string, ndt.int32],
                ['z', 'y']))
        self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
        self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
        # Selecting three fields
        b = nd.fields(a, 'w', 'y', 'z')
        self.assertEqual(nd.dtype_of(b), ndt.make_struct(
                [ndt.string, ndt.int32, ndt.string],
                ['w', 'y', 'z']))
        self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
        self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
        self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
        # Reordering all four fields
        b = nd.fields(a, 'w', 'y', 'x', 'z')
        self.assertEqual(nd.dtype_of(b), ndt.make_struct(
                [ndt.string, ndt.int32, ndt.int32, ndt.string],
                ['w', 'y', 'x', 'z']))
        self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
        self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
        self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
        self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))

    def test_fixed_var(self):
        a = nd.array([
                [(1, 2, 'a', 'b'),
                 (3, 4, 'ab', 'cd')],
                [(5, 6, 'def', 'ghi')],
                [(7, 8, 'alpha', 'beta'),
                 (9, 10, 'X', 'Y'),
                 (11, 12, 'the', 'end')]],
                type='3 * var * {x: int32, y: int32, z: string, w: string}')
        # Selecting a single field
        b = nd.fields(a, 'x')
        self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
                ndt.make_var_dim(ndt.make_struct(
                    [ndt.int32],
                    ['x']))))
        self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
        # Selecting two fields
        b = nd.fields(a, 'z', 'y')
        self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
                ndt.make_var_dim(ndt.make_struct(
                    [ndt.string, ndt.int32],
                    ['z', 'y']))))
        self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
        self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
        # Selecting three fields
        b = nd.fields(a, 'w', 'y', 'z')
        self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
                ndt.make_var_dim(ndt.make_struct(
                    [ndt.string, ndt.int32, ndt.string],
                    ['w', 'y', 'z']))))
        self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
        self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
        self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
        # Reordering all four fields
        b = nd.fields(a, 'w', 'y', 'x', 'z')
        self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
                ndt.make_var_dim(ndt.make_struct(
                    [ndt.string, ndt.int32, ndt.int32, ndt.string],
                    ['w', 'y', 'x', 'z']))))
        self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
        self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
        self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
        self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))

    def test_bad_field_name(self):
        a = nd.array([
                (1, 2, 'a', 'b'),
                (3, 4, 'ab', 'cd'),
                (5, 6, 'def', 'ghi')],
                type='3 * {x: int32, y: int32, z: string, w: string}')
        self.assertRaises(RuntimeError, nd.fields, a, 'y', 'v')
"""

if __name__ == '__main__':
    unittest.main()
```
Remaining columns (avg_line_length through hits, in schema order):

42.808081 | 76 | 0.464606 | 617 | 4,238 | 3.051864 | 0.111831 | 0.084971 | 0.127456 | 0.201806 |
0.893255 | 0.893255 | 0.893255 | 0.893255 | 0.893255 | 0.893255 | 0 | 0.02387 | 0.34757 | 4,238 |
98 | 77 | 43.244898 | 0.657143 | 0 | 0 | 0 | 0 | 0 | 0.078431 |
0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 |
0.6 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 |
1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 |
0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0 | 1 | 0 | 1 | 0 | 0 | 10
**Row 2**

| Field | Value |
|---|---|
| hexsha | 5d579c372853402ecfd7e953a09a9d04c6d7c725 |
| size | 114 |
| ext | py |
| lang | Python |
| max_stars_repo_path | nintendeals/noa/api/__init__.py |
| max_stars_repo_name | Pooroomoo/nintendeals |
| max_stars_repo_head_hexsha | 993f4d159ff405ed82cd2bb023c7b75d921d0acb |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 37 |
| max_stars_repo_stars_event_min_datetime | 2020-04-30T13:48:02.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-09T04:55:54.000Z |
| max_issues_repo_path | nintendeals/noa/api/__init__.py |
| max_issues_repo_name | Pooroomoo/nintendeals |
| max_issues_repo_head_hexsha | 993f4d159ff405ed82cd2bb023c7b75d921d0acb |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 4 |
| max_issues_repo_issues_event_min_datetime | 2020-05-09T03:17:44.000Z |
| max_issues_repo_issues_event_max_datetime | 2021-04-28T00:53:55.000Z |
| max_forks_repo_path | nintendeals/noa/api/__init__.py |
| max_forks_repo_name | Pooroomoo/nintendeals |
| max_forks_repo_head_hexsha | 993f4d159ff405ed82cd2bb023c7b75d921d0acb |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 5 |
| max_forks_repo_forks_event_min_datetime | 2020-07-22T06:42:27.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-02-07T22:35:57.000Z |

content:
```python
from .algolia import search_by_nsuid
from .algolia import search_by_platform
from .algolia import search_by_query
```
Remaining columns (avg_line_length through hits, in schema order):

28.5 | 39 | 0.868421 | 18 | 114 | 5.166667 | 0.444444 | 0.354839 | 0.548387 | 0.741935 |
0.806452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 114 |
3 | 40 | 38 | 0.911765 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 |
1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 |
1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0 | 1 | 0 | 1 | 0 | 0 | 9
**Row 3**

| Field | Value |
|---|---|
| hexsha | 538ed9ab23e9e71ee700c89f6a7e07b38fae61a0 |
| size | 50,485 |
| ext | py |
| lang | Python |
| max_stars_repo_path | cloudroast/objectstorage/smoke/object_smoke.py |
| max_stars_repo_name | RULCSoft/cloudroast |
| max_stars_repo_head_hexsha | 30f0e64672676c3f90b4a582fe90fac6621475b3 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | cloudroast/objectstorage/smoke/object_smoke.py |
| max_issues_repo_name | RULCSoft/cloudroast |
| max_issues_repo_head_hexsha | 30f0e64672676c3f90b4a582fe90fac6621475b3 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | cloudroast/objectstorage/smoke/object_smoke.py |
| max_forks_repo_name | RULCSoft/cloudroast |
| max_forks_repo_head_hexsha | 30f0e64672676c3f90b4a582fe90fac6621475b3 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import calendar
import time
import zlib
from hashlib import md5
import unittest
from cafe.drivers.unittest.decorators import (
DataDrivenFixture, data_driven_test)
from cloudcafe.objectstorage.objectstorage_api.common.constants import \
Constants
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
from cloudroast.objectstorage.generators import (
ObjectDatasetList, CONTENT_TYPES)
CONTAINER_DESCRIPTOR = 'object_smoke_test'
STATUS_CODE_MSG = ('{method} expected status code {expected}'
' received status code {received}')
@DataDrivenFixture
class ObjectSmokeTest(ObjectStorageFixture):
@classmethod
def setUpClass(cls):
super(ObjectSmokeTest, cls).setUpClass()
cls.default_obj_name = Constants.VALID_OBJECT_NAME_WITH_UNICODE
@staticmethod
def generate_chunk_data():
for i in range(10):
yield "Test chunk %s\r\n" % i
@data_driven_test(ObjectDatasetList())
def ddtest_object_retrieval_with_valid_object_name(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
response = self.client.get_object(container_name, object_name)
method = 'object creation with valid object name'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo']))
def ddtest_object_retrieval_with_if_match(
self, object_type, generate_object):
"""
Bug filed for dlo/slo support of If-match Header:
https://bugs.launchpad.net/swift/+bug/1279076
"""
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
obj_info = generate_object(container_name, object_name)
headers = {'If-Match': obj_info.get('etag')}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'object retrieval with if match header'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo']))
def ddtest_object_retrieval_with_if_none_match(
self, object_type, generate_object):
"""
Bug filed for dlo/slo support of If-match Header:
https://bugs.launchpad.net/swift/+bug/1279076
"""
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_info = generate_object(container_name, object_name)
headers = {'If-None-Match': 'grok'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'object retrieval with if none match header'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
headers = {'If-None-Match': object_info.get('etag')}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'object should be flagged as not modified'
expected = 304
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_retrieval_with_if_modified_since(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'If-Modified-Since': 'Fri, 17 Aug 2001 18:44:42 GMT'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'object retrieval with if modified since header (past date)'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_not_modified_with_if_modified_since(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'If-Modified-Since': 'Fri, 17 Aug 2101 18:44:42 GMT'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'object retrieval with if modified since header (future date)'
expected = 304
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_retrieval_with_if_unmodified_since(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'If-Unmodified-Since': 'Fri, 17 Aug 2101 18:44:42 GMT'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'object retrieval with if unmodified since header'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_retrieval_fails_with_if_unmodified_since(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'If-Unmodified-Since': 'Fri, 17 Aug 2001 18:44:42 GMT'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = ('object retrieval precondition fail with if unmodified'
' since header')
expected = 412
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_partial_object_retrieval_with_start_range(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'Range': 'bytes=5-'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'partial object retrieval with start range'
expected = 206
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method, expected=expected, received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_partial_object_retrieval_with_end_range(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'Range': 'bytes=-4'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'partial object retrieval with end range'
expected = 206
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_partial_object_retrieval_with_range(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'Range': 'bytes=5-8'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'partial object retrieval with start and end range'
expected = 206
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_partial_object_retrieval_with_complete_range(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'Range': 'bytes=99-0'}
response = self.client.get_object(
container_name,
self.default_obj_name,
headers=headers)
method = 'partial object retrieval with complete range'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_valid_object_name(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_info = generate_object(container_name, object_name)
response = object_info.get('response')
method = 'object creation with valid object name'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object(
container_name,
self.default_obj_name)
method = 'object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response_md5 = md5(response.content).hexdigest()
self.assertEqual(
object_info.get('md5'),
response_md5,
msg='should return identical object')
@data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo']))
def ddtest_object_update_with_valid_object_name(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
updated_object_data = 'Updated test file data'
updated_content_length = str(len(updated_object_data))
headers = {'Content-Length': updated_content_length,
'Content-Type': CONTENT_TYPES.get('text')}
response = self.client.create_object(
container_name,
self.default_obj_name,
headers=headers,
data=updated_object_data)
method = 'object update with valid object name'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_etag(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_info = generate_object(container_name, object_name)
response = object_info.get('response')
method = 'object creation with etag header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
response = self.client.get_object(
container_name,
self.default_obj_name)
self.assertIn(
'etag',
response.headers,
msg="Etag header was set")
if object_type == 'standard':
expected = object_info.get('etag')
else:
expected = '"{0}"'.format(object_info.get('etag'))
received = response.headers.get('etag')
self.assertEqual(
expected,
received,
msg='object created with Etag header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo']))
def test_object_creation_with_uppercase_etag(self):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_data = "valid_data"
data_md5 = md5(object_data).hexdigest()
upper_etag = data_md5.upper()
headers = {"ETag": upper_etag}
create_response = self.client.create_object(container_name,
object_name,
data=object_data,
headers=headers)
method = 'object creation with uppercase etag header'
expected = 201
received = create_response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
object_response = self.client.get_object(
container_name,
self.default_obj_name)
self.assertIn(
'etag',
object_response.headers,
msg="Etag header was set")
expected = data_md5
received = object_response.headers.get('etag')
self.assertEqual(
expected,
received,
msg='object created with Etag header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_allow_credentials(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Allow-Credentials': 'true'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Allow-Credentials header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Allow-Credentials',
response.headers,
msg="Access-Control-Allow-Credentials header was set")
expected = 'true'
received = response.headers.get('Access-Control-Allow-Credentials')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Allow-Credentials header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_allow_methods(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {
'Access-Control-Allow-Methods': 'GET, POST, OPTIONS'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Allow-Methods header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Allow-Methods',
response.headers,
msg="Access-Control-Allow-Methods header was set")
expected = 'GET, POST, OPTIONS'
received = response.headers.get('Access-Control-Allow-Methods')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Allow-Methods header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_allow_origin(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {
'Access-Control-Allow-Origin': 'http://example.com'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Allow-Origin header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name, self.default_obj_name)
self.assertIn(
'Access-Control-Allow-Origin',
response.headers,
msg="Access-Control-Allow-Origin header was set")
expected = 'http://example.com'
received = response.headers.get('Access-Control-Allow-Origin')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Allow-Origin header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_expose_headers(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Expose-Headers': 'X-Foo-Header'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Expose-Headers header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Expose-Headers',
response.headers,
msg="Access-Control-Expose-Headers header was set")
expected = 'X-Foo-Header'
received = response.headers.get('Access-Control-Expose-Headers')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Expose-Headers header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_controle_max_age(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Max-Age': '5'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Max-Age header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Max-Age',
response.headers,
msg="Access-Control-Max-Age header was set")
expected = '5'
received = response.headers.get('Access-Control-Max-Age')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Max-Age header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_request_headers(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Request-Headers': 'x-requested-with'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Request-Headers header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Request-Headers',
response.headers,
msg="Access-Control-Request-Headers header was set")
expected = 'x-requested-with'
received = response.headers.get('Access-Control-Request-Headers')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Request-Headers header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_request_method(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Request-Method': 'GET'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Request-Method header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Request-Method',
response.headers,
msg="Access-Control-Request-Method header was set")
expected = 'GET'
received = response.headers.get('Access-Control-Request-Method')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Request-Method header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_retrieval_with_origin(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
headers = {'access-control-allow-origin': 'http://example.com',
'access-control-expose-headers': 'X-Trans-Id'}
generate_object(container_name, object_name, headers=headers)
headers = {'Origin': 'http://example.com'}
response = self.client.get_object_metadata(
container_name, object_name, headers=headers)
self.assertIn(
'access-control-expose-headers',
response.headers,
msg="access-control-expose-headers header should be set")
self.assertIn(
'access-control-allow-origin',
response.headers,
msg="access-control-allow-origin header should be set")
expected = 'http://example.com'
received = response.headers.get('access-control-allow-origin')
self.assertEqual(
expected,
received,
msg='access-control-allow-origin header should reflect origin'
' expected: {0} received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo']))
def ddtest_object_creation_with_file_compression(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
def object_data_op(data, extra_data):
data = zlib.compress(data)
return (data, extra_data)
object_headers = {'Content-Encoding': 'gzip'}
object_info = generate_object(container_name, object_name,
data_op=object_data_op,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Content-Encoding header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Content-Encoding',
response.headers,
msg="Content-Encoding header was set")
expected = 'gzip'
received = response.headers.get('Content-Encoding')
self.assertEqual(
expected,
received,
msg='object created with Content-Encoding header value'
' expected: {0} received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_content_disposition(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {
'Content-Disposition': 'attachment; filename=testdata.txt'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with content disposition header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Content-Disposition',
response.headers,
msg="Content-Disposition header was set")
expected = 'attachment; filename=testdata.txt'
received = response.headers.get('Content-Disposition')
self.assertEqual(
expected,
received,
msg='object created with Content-Disposition header value'
' expected: {0} received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_x_delete_at(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
start_time = calendar.timegm(time.gmtime())
future_time = str(int(start_time + 60))
object_headers = {'X-Delete-At': future_time}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with X-Delete-At header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'X-Delete-At',
response.headers,
msg="X-Delete-At header was set")
expected = future_time
received = response.headers.get('X-Delete-At')
self.assertEqual(
expected,
received,
msg='object created with X-Delete-At header value'
' expected: {0} received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_delete_after(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'X-Delete-After': '60'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with X-Delete-After header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'X-Delete-At',
response.headers,
msg="X-Delete-At header was set")
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object_versioning')
def ddtest_versioned_container_creation_with_valid_data(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_history_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
headers = {'X-Versions-Location': object_history_container_name}
self.client.set_container_metadata(container_name, headers=headers)
# list objects in non-current container
response = self.client.list_objects(
object_history_container_name)
method = 'list on empty versioned container'
expected = 204
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
# Create an object (version 1)
object_name = self.default_obj_name
ver1_info = generate_object(container_name, object_name)
response = ver1_info.get('response')
method = 'object version one creation'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
# Update an object (version 2)
object_name = self.default_obj_name
ver2_info = generate_object(container_name, object_name)
response = ver2_info.get('response')
method = 'update version one object'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.list_objects(object_history_container_name)
method = 'list on versioned container'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@unittest.skip('Problem with this tests assertion, needs review')
@data_driven_test(ObjectDatasetList())
def ddtest_put_copy_object(self, object_type, generate_object):
src_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
dest_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
src_object_name = '{0}_source'.format(self.default_obj_name)
generate_object(src_container_name, src_object_name)
dest_obj_name = '{0}_destination'.format(self.default_obj_name)
source = '/{0}/{1}'.format(src_container_name, src_object_name)
hdrs = {'X-Copy-From': source, 'Content-Length': '0'}
response = self.client.copy_object(
dest_container_name,
dest_obj_name,
headers=hdrs)
method = 'put copy object'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object(
dest_container_name,
dest_obj_name)
method = 'copied object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_copy_object(self, object_type, generate_object):
src_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
dest_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
src_object_name = '{0}_source'.format(self.default_obj_name)
generate_object(src_container_name, src_object_name)
dest_object_name = '{0}_destination'.format(self.default_obj_name)
dest = '/{0}/{1}'.format(dest_container_name, dest_object_name)
headers = {'Destination': dest}
response = self.client.copy_object(
src_container_name,
src_object_name,
headers=headers)
method = 'copy object'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object(
dest_container_name,
dest_object_name)
method = 'copied object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_object_deletion_with_valid_object(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
response = self.client.delete_object(
container_name,
object_name)
method = 'delete object'
expected = 204
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object(
container_name,
self.default_obj_name)
method = 'object retrieval'
expected = 404
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_obj_metadata_update_with_object_possessing_metadata(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name,
headers={'X-Object-Meta-Grok': 'Drok'})
response = self.client.get_object_metadata(
container_name, object_name)
self.assertIn(
'X-Object-Meta-Grok',
response.headers,
msg="object not created with X-Object-Meta-Grok header")
expected = 'Drok'
received = response.headers.get('X-Object-Meta-Grok')
self.assertEqual(
expected,
received,
msg='object created with X-Object-Meta-Grok header value'
' expected: {0} received: {1}'.format(expected, received))
headers = {'X-Object-Meta-Foo': 'Bar'}
response = self.client.set_object_metadata(
container_name,
self.default_obj_name,
headers=headers)
method = 'set object metadata'
expected = 202
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'X-Object-Meta-Foo',
response.headers,
msg="object updated with X-Object-Meta-Foo header")
expected = 'Bar'
received = response.headers.get('X-Object-Meta-Foo')
self.assertEqual(
expected,
received,
msg='object X-Object-Meta-Foo header value expected: {0}'
' received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_obj_metadata_update(self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'X-Object-Meta-Grok': 'Drok'}
response = self.client.set_object_metadata(
container_name, object_name, headers=headers)
method = 'set object metadata X-Object-Meta-Grok: Drok'
expected = 202
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'X-Object-Meta-Grok',
response.headers,
msg="object updated with X-Object-Meta-Grok header")
expected = 'Drok'
received = response.headers.get('X-Object-Meta-Grok')
self.assertEqual(
expected,
received,
msg='object X-Object-Meta-Grok header value expected: {0}'
' received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_content_type_not_detected_without_detect_content_type_header(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object1_name = 'object1.txt'
object1_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
generate_object(container_name, object1_name, headers=object1_headers)
object2_name = 'object2.txt'
object2_headers = {'X-Detect-Content-Type': False,
'Content-Type': 'application/x-www-form-urlencoded'}
generate_object(container_name, object2_name, headers=object2_headers)
response = self.client.get_object(
container_name, object1_name)
expected = 'application/x-www-form-urlencoded'
received = response.headers.get('content-type')
self.assertEqual(
expected,
received,
msg='object created should have content type: {0}'
' received: {1}'.format(expected, received))
response = self.client.get_object(
container_name, object2_name)
self.assertEqual(
expected,
received,
msg='object created should have content type: {0}'
' received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_content_type_detected_with_detect_content_type(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object1_name = 'object1.txt'
object1_headers = {'X-Detect-Content-Type': True,
'Content-Type': 'application/x-www-form-urlencoded'}
generate_object(container_name, object1_name, headers=object1_headers)
response = self.client.get_object(
container_name, object1_name)
expected = 'text/plain'
received = response.headers.get('content-type')
self.assertEqual(
expected,
received,
msg='object created should have content type: {0}'
' received: {1}'.format(expected, received))
object2_name = 'object2.txt'
object2_headers = {'X-Detect-Content-Type': True}
generate_object(container_name, object2_name, headers=object2_headers)
response = self.client.get_object(
container_name, object2_name)
expected = 'text/plain'
received = response.headers.get('content-type')
self.assertEqual(
expected,
received,
msg='object created should have content type: {0}'
' received: {1}'.format(expected, received))
def test_object_creation_via_chunked_transfer(self):
"""
Scenario:
Create an object using chunked transfer encoding.
Expected Results:
Return a 201 status code and a single object should
be created.
"""
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
headers = {"Transfer-Encoding": "chunked"}
create_response = self.client.create_object(
container_name,
self.default_obj_name,
headers=headers,
data=self.generate_chunk_data())
method = 'Object creation via chunked transfer'
expected = 201
received = create_response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
object_response = self.client.get_object(container_name,
self.default_obj_name)
method = 'Object retrieval'
expected = 200
received = object_response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
```

Remaining columns (avg_line_length through hits, in schema order):

34.273591 | 79 | 0.605051 | 4,979 | 50,485 | 5.887728 | 0.060253 | 0.057206 | 0.058332 | 0.042367 |
0.883404 | 0.860583 | 0.836568 | 0.817602 | 0.797749 | 0.780488 | 0 | 0.00876 | 0.314886 | 50,485 |
1,472 | 80 | 34.296875 | 0.838788 | 0.019669 | 0 | 0.775993 | 0 | 0 | 0.131571 |
0.033242 | 0 | 0 | 0 | 0 | 0.069315 | 1 | 0.032967 | false | 0 |
0.007608 | 0 | 0.042265 | 0 | 0 | 0 | 0 | null | 0 | 0 |
0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 |
0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 |
0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 0 | 0 | 0 | 0 | 0 | 7
**Row 4**

| Field | Value |
|---|---|
| hexsha | 539324c139f4acda8b0dbb87e42e77a126f0fc1b |
| size | 155 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/__init__.py |
| max_stars_repo_name | egor43/PyImageComparsion |
| max_stars_repo_head_hexsha | 5270f5646c40391cc5ac225305d7be9b0b7de140 |
| max_stars_repo_licenses | ["BSD-2-Clause"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | tests/__init__.py |
| max_issues_repo_name | egor43/PyImageComparsion |
| max_issues_repo_head_hexsha | 5270f5646c40391cc5ac225305d7be9b0b7de140 |
| max_issues_repo_licenses | ["BSD-2-Clause"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | tests/__init__.py |
| max_forks_repo_name | egor43/PyImageComparsion |
| max_forks_repo_head_hexsha | 5270f5646c40391cc5ac225305d7be9b0b7de140 |
| max_forks_repo_licenses | ["BSD-2-Clause"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:
```python
from . import test_helpers
from . import test_image_opener
from . import test_image_metrick
from . import test_compare_tools
from . import test_compare_api
```
Remaining columns (avg_line_length through hits, in schema order):

31 | 32 | 0.845161 | 24 | 155 | 5.083333 | 0.416667 | 0.409836 | 0.57377 | 0.311475 |
0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.122581 | 155 |
5 | 33 | 31 | 0.897059 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 |
1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 |
1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0 | 1 | 0 | 0 | 0 | 0 | 7
**Row 5**

| Field | Value |
|---|---|
| hexsha | 53a287190d58a2db9d8427aaa2bd973ac3e2cd59 |
| size | 59 |
| ext | py |
| lang | Python |
| max_stars_repo_path | __init__.py |
| max_stars_repo_name | csalyk/nirspec |
| max_stars_repo_head_hexsha | 58661371871d29103afe42bfccc0bff9ff773914 |
| max_stars_repo_licenses | ["MIT-0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | __init__.py |
| max_issues_repo_name | csalyk/nirspec |
| max_issues_repo_head_hexsha | 58661371871d29103afe42bfccc0bff9ff773914 |
| max_issues_repo_licenses | ["MIT-0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | __init__.py |
| max_forks_repo_name | csalyk/nirspec |
| max_forks_repo_head_hexsha | 58661371871d29103afe42bfccc0bff9ff773914 |
| max_forks_repo_licenses | ["MIT-0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:
```python
from .nirspec import divspec
from .nirspec import gluespec
```
Remaining columns (avg_line_length through hits, in schema order):

19.666667 | 29 | 0.830508 | 8 | 59 | 6.125 | 0.625 | 0.44898 | 0.693878 | 0 |
0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.135593 | 59 |
2 | 30 | 29.5 | 0.960784 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 |
1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 |
0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0 | 1 | 0 | 1 | 0 | 0 | 7
**Row 6**

| Field | Value |
|---|---|
| hexsha | 53b0797fa1d2b73bd60c7d0448335bb8ff3970e6 |
| size | 2,995 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/bucket/test_bucket.py |
| max_stars_repo_name | WillChilds-Klein/mistress-mapreduce |
| max_stars_repo_head_hexsha | c991a502545bd0d3ec4f914cdc63faf6a40e77ae |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 2 |
| max_stars_repo_stars_event_min_datetime | 2018-12-02T11:10:15.000Z |
| max_stars_repo_stars_event_max_datetime | 2019-02-21T22:24:00.000Z |
| max_issues_repo_path | tests/bucket/test_bucket.py |
| max_issues_repo_name | WillChilds-Klein/mistress-mapreduce |
| max_issues_repo_head_hexsha | c991a502545bd0d3ec4f914cdc63faf6a40e77ae |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min_datetime | 2019-02-21T22:23:36.000Z |
| max_issues_repo_issues_event_max_datetime | 2019-02-21T22:23:36.000Z |
| max_forks_repo_path | tests/bucket/test_bucket.py |
| max_forks_repo_name | WillChilds-Klein/mistress-mapreduce |
| max_forks_repo_head_hexsha | c991a502545bd0d3ec4f914cdc63faf6a40e77ae |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 3 |
| max_forks_repo_forks_event_min_datetime | 2018-04-26T16:02:10.000Z |
| max_forks_repo_forks_event_max_datetime | 2018-12-02T11:10:16.000Z |

content:
```python
from mrs.bucket import WriteBucket
from mrs import BinWriter, HexWriter


def test_writebucket():
    b = WriteBucket(0, 0)
    b.addpair((4, 'test'))
    b.collect([(3, 'a'), (1, 'This'), (2, 'is')])
    values = ' '.join(value for key, value in b)
    assert values == 'test a This is'
    b.sort()
    values = ' '.join(value for key, value in b)
    assert values == 'This is a test'


def test_write_only():
    b = WriteBucket(0, 0)
    b.addpair((4, 'test'), write_only=True)
    b.collect([(3, 'a'), (1, 'This'), (2, 'is')], write_only=True)
    values = ' '.join(value for key, value in b)
    assert values == ''
    readonly_copy = b.readonly_copy()
    assert readonly_copy.url is None


def test_writing(tmpdir):
    b = WriteBucket(2, 4, dir=tmpdir.strpath, format=BinWriter)
    prefix = b.prefix()
    assert prefix == 'source_2_split_4_'
    listdir = tmpdir.listdir()
    assert listdir == []
    b.addpair((1, 2))
    filename = prefix + '.mrsb'
    path = tmpdir.join(filename).strpath
    listdir = tmpdir.listdir()
    assert listdir == [path]
    readonly_copy = b.readonly_copy()
    assert readonly_copy.url == path


def test_roundtrip(tmpdir):
    b = WriteBucket(2, 4, dir=tmpdir.strpath, format=BinWriter)
    prefix = b.prefix()
    assert prefix == 'source_2_split_4_'
    listdir = tmpdir.listdir()
    assert listdir == []
    b.addpair((4, 'test'))
    b.collect([(3, 'a'), (1, 'This'), (2, 'is')])
    values = ' '.join(value for key, value in b)
    assert values == 'test a This is'
    b.close_writer(do_sync=False)
    filename = prefix + '.mrsb'
    path = tmpdir.join(filename).strpath
    listdir = tmpdir.listdir()
    assert listdir == [path]
    readonly_copy = b.readonly_copy()
    assert readonly_copy.url == path
    values = ' '.join(value for key, value in readonly_copy)
    assert values == 'test a This is'
    values = ' '.join(value for key, value in readonly_copy.stream())
    assert values == 'test a This is'
    b.clean()
    listdir = tmpdir.listdir()
    assert listdir == []


def test_roundtrip_write_only(tmpdir):
    b = WriteBucket(7, 1, dir=tmpdir.strpath, format=HexWriter)
    prefix = b.prefix()
    assert prefix == 'source_7_split_1_'
    listdir = tmpdir.listdir()
    assert listdir == []
    b.addpair((4, 'test'), write_only=True)
    b.collect([(3, 'a'), (1, 'This'), (2, 'is')], write_only=True)
    values = ' '.join(value for key, value in b)
    assert values == ''
    b.close_writer(do_sync=False)
    filename = prefix + '.mrsx'
    path = tmpdir.join(filename).strpath
    listdir = tmpdir.listdir()
    assert listdir == [path]
    readonly_copy = b.readonly_copy()
    assert readonly_copy.url == path
    values = ' '.join(value for key, value in readonly_copy)
    assert values == ''
    values = ' '.join(value for key, value in readonly_copy.stream())
    assert values == 'test a This is'
    b.clean()
    listdir = tmpdir.listdir()
    assert listdir == []

# vim: et sw=4 sts=4
```
Remaining columns (avg_line_length through hits, in schema order):

26.741071 | 69 | 0.621035 | 409 | 2,995 | 4.440098 | 0.149144 | 0.105727 | 0.074339 | 0.089207 |
0.863987 | 0.863987 | 0.84196 | 0.84196 | 0.792952 | 0.749449 | 0 | 0.015544 | 0.226711 | 2,995 |
111 | 70 | 26.981982 | 0.768566 | 0.00601 | 0 | 0.825 | 0 | 0 | 0.068235 |
0 | 0 | 0 | 0 | 0 | 0.3 | 1 | 0.0625 | false | 0 |
0.025 | 0 | 0.0875 | 0 | 0 | 0 | 0 | null | 0 | 0 |
0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 |
0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 |
0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 0 | 0 | 0 | 0 | 0 | 7
**Row 7**

| Field | Value |
|---|---|
| hexsha | 53bf55da72ae86acb1c699435bc12016f38e84ea |
| size | 146 |
| ext | py |
| lang | Python |
| max_stars_repo_path | DataQualityTester/views/pages.py |
| max_stars_repo_name | pwyf/data-quality-tester |
| max_stars_repo_head_hexsha | d7674849c64d4d41ff4e4b6b12631994c7ce0a92 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | DataQualityTester/views/pages.py |
| max_issues_repo_name | pwyf/data-quality-tester |
| max_issues_repo_head_hexsha | d7674849c64d4d41ff4e4b6b12631994c7ce0a92 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 53 |
| max_issues_repo_issues_event_min_datetime | 2017-04-07T09:41:38.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-02-11T14:26:46.000Z |
| max_forks_repo_path | DataQualityTester/views/pages.py |
| max_forks_repo_name | pwyf/iati-simple-tester |
| max_forks_repo_head_hexsha | ef7f06ebbd4dd45e6ca76d93a3f624abc33d961c |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 3 |
| max_forks_repo_forks_event_min_datetime | 2017-07-19T13:43:14.000Z |
| max_forks_repo_forks_event_max_datetime | 2019-10-29T15:25:49.000Z |

content:
```python
from flask import render_template


def home():
    return render_template('upload.html')


def about():
    return render_template('about.html')
```
Remaining columns (avg_line_length through hits, in schema order):

14.6 | 41 | 0.726027 | 19 | 146 | 5.421053 | 0.578947 | 0.407767 | 0.38835 | 0 |
0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.164384 | 146 |
9 | 42 | 16.222222 | 0.844262 | 0 | 0 | 0 | 0 | 0 | 0.143836 |
0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | true | 0 |
0.2 | 0.4 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 |
0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
0 | 0 | 1 | 1 | 0 | 0 | 8
**Row 8**

| Field | Value |
|---|---|
| hexsha | 071593280ef30a4532ccbb4b6f3c6b4f7d728fa5 |
| size | 4,251 |
| ext | py |
| lang | Python |
| max_stars_repo_path | image_quality/handlers/data_generator.py |
| max_stars_repo_name | mbartoli/image-quality-assessment |
| max_stars_repo_head_hexsha | b957c781ac8a11f8668f58345524f33503338b3b |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2021-03-27T15:09:30.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-03-27T15:09:30.000Z |
| max_issues_repo_path | image_quality/handlers/data_generator.py |
| max_issues_repo_name | welcotravel/image-quality-assessment |
| max_issues_repo_head_hexsha | b9e17de93578220e5ae142725d9153098759e7c8 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | image_quality/handlers/data_generator.py |
| max_forks_repo_name | welcotravel/image-quality-assessment |
| max_forks_repo_head_hexsha | b9e17de93578220e5ae142725d9153098759e7c8 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2020-10-05T03:20:53.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-10-05T03:20:53.000Z |

content:
import os

import numpy as np
import tensorflow as tf

from image_quality.utils import utils


class TrainDataGenerator(tf.keras.utils.Sequence):
    '''Inherits from the Keras Sequence base class; allows multiprocessing in .fit_generator.'''
    def __init__(self, samples, img_dir, batch_size, n_classes, basenet_preprocess,
                 img_load_dims=(256, 256), img_crop_dims=(224, 224), shuffle=True):
        self.samples = samples
        self.img_dir = img_dir
        self.batch_size = batch_size
        self.n_classes = n_classes
        self.basenet_preprocess = basenet_preprocess  # Keras basenet-specific preprocessing function
        self.img_load_dims = img_load_dims  # dimensions that images are resized to when loaded
        self.img_crop_dims = img_crop_dims  # dimensions that images are randomly cropped to
        self.shuffle = shuffle
        self.on_epoch_end()  # ensures samples are shuffled in the first epoch if shuffle is True

    def __len__(self):
        return int(np.ceil(len(self.samples) / self.batch_size))  # number of batches per epoch

    def __getitem__(self, index):
        batch_indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]  # get batch indexes
        batch_samples = [self.samples[i] for i in batch_indexes]  # get batch samples
        X, y = self.__data_generator(batch_samples)
        return X, y

    def on_epoch_end(self):
        self.indexes = np.arange(len(self.samples))
        if self.shuffle is True:
            np.random.shuffle(self.indexes)

    def __data_generator(self, batch_samples):
        # initialize images and labels tensors for faster processing
        X = np.empty((len(batch_samples), *self.img_crop_dims, 3))
        y = np.empty((len(batch_samples), self.n_classes))

        for i, sample in enumerate(batch_samples):
            # load and randomly augment image
            img_file = os.path.join(self.img_dir, '{}'.format(sample['image_id']))
            img = utils.load_image(img_file, self.img_load_dims)
            if img is not None:
                img = utils.random_crop(img, self.img_crop_dims)
                img = utils.random_horizontal_flip(img)
                X[i, ] = img

            # normalize labels
            y[i, ] = utils.normalize_labels(sample['label'])

        # apply basenet specific preprocessing
        # input is 4D numpy array of RGB values within [0, 255]
        X = self.basenet_preprocess(X)

        return X, y


class TestDataGenerator(tf.keras.utils.Sequence):
    '''Inherits from the Keras Sequence base class; allows multiprocessing in .fit_generator.'''
    def __init__(self, samples, img_dir, batch_size, n_classes, basenet_preprocess,
                 img_load_dims=(224, 224)):
        self.samples = samples
        self.img_dir = img_dir
        self.batch_size = batch_size
        self.n_classes = n_classes
        self.basenet_preprocess = basenet_preprocess  # Keras basenet-specific preprocessing function
        self.img_load_dims = img_load_dims  # dimensions that images are resized to when loaded
        self.on_epoch_end()  # builds the index array (test data is not shuffled)

    def __len__(self):
        return int(np.ceil(len(self.samples) / self.batch_size))  # number of batches per epoch

    def __getitem__(self, index):
        batch_indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]  # get batch indexes
        batch_samples = [self.samples[i] for i in batch_indexes]  # get batch samples
        X, y = self.__data_generator(batch_samples)
        return X, y

    def on_epoch_end(self):
        self.indexes = np.arange(len(self.samples))

    def __data_generator(self, batch_samples):
        # initialize images and labels tensors for faster processing
        X = np.empty((len(batch_samples), *self.img_load_dims, 3))
        y = np.empty((len(batch_samples), self.n_classes))

        for i, sample in enumerate(batch_samples):
            # load image (no augmentation at test time)
            img_file = os.path.join(self.img_dir, '{}'.format(sample['image_id']))
            img = utils.load_image(img_file, self.img_load_dims)
            if img is not None:
                X[i, ] = img

            # normalize labels
            if sample.get('label') is not None:
                y[i, ] = utils.normalize_labels(sample['label'])

        # apply basenet specific preprocessing
        # input is 4D numpy array of RGB values within [0, 255]
        X = self.basenet_preprocess(X)

        return X, y
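# Note (editorial sketch; hypothetical sample records and paths, with only
# the constructor signature above taken from the source): building a
# training generator and handing it to a compiled tf.keras model.
samples = [
    {'image_id': 'img_001.jpg', 'label': [0, 1, 3, 5, 8, 5, 3, 1, 0, 0]},  # assumed record shape
]
train_generator = TrainDataGenerator(
    samples=samples,
    img_dir='/data/images',  # assumed image directory
    batch_size=32,
    n_classes=10,
    basenet_preprocess=tf.keras.applications.mobilenet.preprocess_input,
)
# model.fit(train_generator, epochs=5)  # with a compiled tf.keras model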
| 40.485714
| 106
| 0.713479
| 630
| 4,251
| 4.592063
| 0.188889
| 0.058071
| 0.034221
| 0.025925
| 0.887314
| 0.853094
| 0.853094
| 0.853094
| 0.853094
| 0.853094
| 0
| 0.009316
| 0.191955
| 4,251
| 104
| 107
| 40.875
| 0.832897
| 0.26088
| 0
| 0.760563
| 0
| 0
| 0.011261
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.140845
| false
| 0
| 0.056338
| 0.028169
| 0.309859
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
072bd117dea823ba3412148c4dbda51e774d2a1f
| 11,707
|
py
|
Python
|
cohorts_proj/datasets/migrations/0009_auto_20200824_0617.py
|
zferic/harmonization-website
|
f6a081481df3a3a62cb075fbb63ad0470b0d4e06
|
[
"MIT"
] | 1
|
2020-09-20T02:32:01.000Z
|
2020-09-20T02:32:01.000Z
|
cohorts_proj/datasets/migrations/0009_auto_20200824_0617.py
|
zferic/harmonization-website
|
f6a081481df3a3a62cb075fbb63ad0470b0d4e06
|
[
"MIT"
] | 20
|
2020-04-17T14:01:41.000Z
|
2022-03-12T00:30:23.000Z
|
cohorts_proj/datasets/migrations/0009_auto_20200824_0617.py
|
zferic/harmonization-website
|
f6a081481df3a3a62cb075fbb63ad0470b0d4e06
|
[
"MIT"
] | 3
|
2020-10-08T00:24:51.000Z
|
2021-06-02T20:07:30.000Z
|
# Generated by Django 3.0.7 on 2020-08-24 06:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('datasets', '0008_auto_20200821_1427'),
]
operations = [
migrations.AddField(
model_name='rawdar',
name='AsB',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='AsB_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='AsB_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='Ba',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='Ba_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='Ba_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='Cs',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='Cs_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='Cs_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='DMA',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='DMA_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='DMA_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='MMA',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='MMA_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='MMA_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='Sr',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='Sr_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='Sr_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='iAs',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='iAs_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='iAs_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Ag',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Ag_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Al',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Al_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='As',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='As_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Be',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Be_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Cd',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Cd_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Co',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Co_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Cr',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Cr_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Cu',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Cu_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Fe',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Fe_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Hg',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Hg_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Mn',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Mn_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Mo',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Mo_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Ni',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Ni_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Pb',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Pb_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Sb',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Sb_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Se',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Se_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Sn',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Sn_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Tl',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Tl_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='U',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='U_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='V',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='V_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='W',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='W_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Zn',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Zn_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='urine_specific_gravity',
field=models.FloatField(blank=True, null=True),
),
]
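# Note (editorial sketch; the datasets.models.RawDAR class itself is not
# shown in this record, and the field shapes below simply mirror the
# migration): each analyte gets a measurement value, a below-detection-level
# flag, and an instrument detection limit.
from django.db import models

BDL_CHOICES = [
    ('1', 'below detection level'),
    ('0', 'above detection level'),
    ('nan', 'invalid'),
]

class RawDAR(models.Model):
    AsB = models.FloatField(blank=True, null=True)
    AsB_BDL = models.CharField(choices=BDL_CHOICES, default=0, max_length=3)
    AsB_IDL = models.FloatField(blank=True, null=True)
    # ... repeated for Ba, Cs, DMA, MMA, Sr, iAs, and the altered analytes.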
| 33.353276
| 154
| 0.522337
| 1,096
| 11,707
| 5.468978
| 0.076642
| 0.099099
| 0.165165
| 0.209209
| 0.971471
| 0.971471
| 0.971471
| 0.954955
| 0.954955
| 0.954955
| 0
| 0.007698
| 0.345349
| 11,707
| 350
| 155
| 33.448571
| 0.7744
| 0.003844
| 0
| 0.787791
| 1
| 0
| 0.09494
| 0.003859
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.002907
| 0
| 0.011628
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4ae8e1876538896679e757644a54528296f6f24d
| 62,352
|
py
|
Python
|
gpMgmt/bin/gppylib/test/unit/test_unit_gpcrondump.py
|
nurikk/gpdb
|
04fe0202c59721826d1eda2b19d73e5572893fcb
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/test/unit/test_unit_gpcrondump.py
|
nurikk/gpdb
|
04fe0202c59721826d1eda2b19d73e5572893fcb
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/test/unit/test_unit_gpcrondump.py
|
nurikk/gpdb
|
04fe0202c59721826d1eda2b19d73e5572893fcb
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
import imp
gpcrondump_path = os.path.abspath('gpcrondump')
gpcrondump = imp.load_source('gpcrondump', gpcrondump_path)
import unittest2 as unittest
from datetime import datetime
from gppylib import gplog
from gpcrondump import GpCronDump
from gppylib.operations.utils import DEFAULT_NUM_WORKERS
from mock import patch, Mock
from gppylib.operations.dump import MailDumpEvent
from gppylib.operations.backup_utils import get_backup_directory, write_lines_to_file
import mock
logger = gplog.get_unittest_logger()
class GpCronDumpTestCase(unittest.TestCase):
class Options:
def __init__(self):
self.masterDataDirectory = ""
self.interactive = False
self.clear_dumps_only = False
self.post_script = None
self.dump_config = False
self.history = False
self.pre_vacuum = False
self.post_vacuum = False
self.rollback = False
self.compress = True
self.free_space_percent = None
self.clear_dumps = False
self.cleanup_date = None
self.cleanup_total = None
self.dump_schema = False
self.dump_databases = ['testdb']
self.bypass_disk_check = True
self.backup_set = None
self.dump_global = False
self.clear_catalog_dumps = False
self.batch_default = DEFAULT_NUM_WORKERS
self.include_dump_tables = None
self.exclude_dump_tables = None
self.include_dump_tables_file = None
self.exclude_dump_tables_file = None
self.backup_dir = None
self.encoding = None
self.output_options = None
self.report_dir = None
self.timestamp_key = None
self.list_backup_files = None
self.quiet = False
self.verbose = False
self.local_dump_prefix = ''
self.list_filter_tables = None
self.include_email_file = None
self.email_details = None
self.include_schema_file = None
self.exclude_schema_file = None
self.exclude_dump_schema = None
self.dump_stats = None
## Enterprise init
self.incremental = False
self.ddboost = False
self.ddboost_hosts = None
self.ddboost_user = None
self.ddboost_config_remove = False
self.ddboost_verify = False
self.ddboost_remote = None
self.ddboost_ping = None
self.ddboost_backupdir = None
self.replicate = None
self.max_streams = None
self.netbackup_service_host = None
self.netbackup_policy = None
self.netbackup_schedule = None
self.netbackup_block_size = None
self.netbackup_keyword = None
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.GpCronDump.validate_dump_schema')
@patch('gpcrondump.validate_current_timestamp')
def test_option_schema_filter_1(self, mock, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.include_schema_file = '/tmp/foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, '--schema-file option can not be selected with incremental backup'):
cron = GpCronDump(options, None)
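# Note (sketch, not from the original module): mock.patch decorators are
# applied bottom-up, so the decorator closest to the test method supplies the
# first mock argument. In test_option_schema_filter_1 above, `mock` is the
# validate_current_timestamp patch, `mock2` is validate_dump_schema, and
# `mock3` is _get_master_port. The same convention holds for every test below.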
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.GpCronDump.validate_dump_schema')
@patch('gpcrondump.validate_current_timestamp')
def test_option_schema_filter_2(self, mock, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = '/tmp/foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, '--exclude-schema-file option can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_3(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, '-S option can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_4(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, '-s option can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_5(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.exclude_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-s can not be selected with --exclude-schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_6(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.include_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-s can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_7(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.exclude_dump_schema = 'foo'
with self.assertRaisesRegexp(Exception, '-s can not be selected with -S option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_8(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.exclude_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-S can not be selected with --exclude-schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_9(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.include_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-S can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_10(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = 'foo'
options.include_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--exclude-schema-file can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_11(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = 'foo'
options.include_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --exclude-schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_12(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = 'foo'
options.exclude_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --exclude-schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_13(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_schema_file = 'foo'
options.exclude_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_14(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_schema_file = 'foo'
options.include_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_15(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.include_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with -s option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_16(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.exclude_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with -s option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_17(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.include_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with -S option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_18(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.exclude_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with -S option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_19(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = 'foo'
options.exclude_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with --exclude-schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_20(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = 'foo'
options.include_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with --exclude-schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_21(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_schema_file = 'foo'
options.exclude_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_22(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_schema_file = 'foo'
options.include_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_23(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.exclude_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with -s option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_24(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.include_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with -s option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_25(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.exclude_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with -S option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_26(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.include_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with -S option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_27(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = ['information_schema']
with self.assertRaisesRegexp(Exception, "can not specify catalog schema 'information_schema' using -s option"):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_28(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = ['information_schema']
with self.assertRaisesRegexp(Exception, "can not specify catalog schema 'information_schema' using -S option"):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema'])
def test_options_schema_filter_29(self, mock, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, "can not exclude catalog schema 'information_schema' in schema file '/tmp/foo'"):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema'])
def test_options_schema_filter_30(self, mock, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.include_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, "can not include catalog schema 'information_schema' in schema file '/tmp/foo'"):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_31(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
gpcd = GpCronDump(options, None)
dbname = 'foo'
timestamp = '20141016010101'
file = gpcd.get_schema_list_file(dbname)
self.assertEquals(file, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_32(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = ['public']
gpcd = GpCronDump(options, None)
dbname = 'foo'
timestamp = '20141016010101'
file = gpcd.get_schema_list_file(dbname)
self.assertTrue(file.startswith('/tmp/schema_list'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_33(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.include_schema_file = '/tmp/foo'
write_lines_to_file('/tmp/foo', ['public'])
gpcd = GpCronDump(options, None)
dbname = 'foo'
timestamp = '20141016010101'
file = gpcd.get_schema_list_file(dbname)
self.assertTrue(file.startswith('/tmp/schema_list'))
if os.path.exists('/tmp/foo'):
os.remove('/tmp/foo')
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public'])
def test_options_schema_filter_34(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = '/tmp/foo'
write_lines_to_file('/tmp/foo', ['public'])
gpcd = GpCronDump(options, None)
dbname = 'foo'
timestamp = '20141016010101'
file = gpcd.get_schema_list_file(dbname)
self.assertTrue(file.startswith('/tmp/schema_list'))
if os.path.exists('/tmp/foo'):
os.remove('/tmp/foo')
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public'])
def test_options_schema_filter_35(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'public'
gpcd = GpCronDump(options, None)
dbname = 'foo'
timestamp = '20141016010101'
file = gpcd.get_schema_list_file(dbname)
self.assertTrue(file.startswith('/tmp/schema_list'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_lines_from_file', return_value=['public'])
@patch('gpcrondump.get_user_table_list_for_schema', return_value=['public', 'table1', 'public', 'table2'])
def test_options_schema_filter_36(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
gpcd = GpCronDump(options, None)
dbname = 'foo'
schema_file = '/tmp/foo'
inc = gpcd.generate_include_table_list_from_schema_file(dbname, schema_file)
self.assertTrue(inc.startswith('/tmp/include_dump_tables_file'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options1(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_dump_tables = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, 'include table list can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options2(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_tables = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, 'exclude table list can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options3(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_dump_tables_file = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, 'include table file can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options4(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_tables_file = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, 'exclude table file can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options10(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.local_dump_prefix = 'foo'
options.incremental = False
options.list_filter_tables = True
try:
with self.assertRaisesRegexp(Exception, 'list filter tables option requires --prefix and --incremental'):
cron = GpCronDump(options, None)
finally:
options.list_filter_tables = False
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000')
def test_options11(self, mock, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.incremental = True
cron = GpCronDump(options, None)
self.assertEquals(cron.full_dump_timestamp, '20121225090000')
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options12(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.incremental = True
options.dump_databases = 'bkdb,fulldb'
with self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20120330090000')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.GpCronDump._get_master_port')
def test_options13(self, mock, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.incremental = True
options.dump_databases = ['bkdb']
# If construction succeeds here, GpCronDump should not raise an exception
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options14(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_databases = 'bkdb'
options.incremental = False
# If construction succeeds here, GpCronDump should not raise an exception
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options15(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_databases = 'bkdb,fulldb'
options.incremental = False
# If construction succeeds here, GpCronDump should not raise an exception
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options16(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.backup_dir = '/foo1'
gpcd = GpCronDump(options, None)
self.assertEquals(gpcd.getBackupDirectoryRoot(), '/foo1')
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options17(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.backup_dir = None
gpcd = GpCronDump(options, None)
self.assertEquals(gpcd.getBackupDirectoryRoot(), '/tmp/foobar')
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options18(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, '-s option can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options19(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.clear_dumps = True
options.incremental = True
with self.assertRaisesRegexp(Exception, '-c option can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options20(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_databases = []
options.incremental = True
with self.assertRaisesRegexp(Exception, 'Must supply -x <database name> with incremental option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options21(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.ddboost = True
options.replicate = False
options.max_streams = 20
with self.assertRaisesRegexp(Exception, '--max-streams must be specified along with --replicate'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options22(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.ddboost = True
options.replicate = True
options.max_streams = None
with self.assertRaisesRegexp(Exception, '--max-streams must be specified along with --replicate'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options23(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.ddboost = True
options.replicate = True
options.max_streams = 0
with self.assertRaisesRegexp(Exception, '--max-streams must be a number greater than zero'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options24(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.ddboost = True
options.replicate = True
options.max_streams = "abc"
with self.assertRaisesRegexp(Exception, '--max-streams must be a number greater than zero'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options25(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.ddboost = False
options.replicate = False
options.max_streams = 20
with self.assertRaisesRegexp(Exception, '--replicate and --max-streams cannot be used without --ddboost'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options26(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.list_backup_files = True
options.timestamp_key = None
with self.assertRaisesRegexp(Exception, 'Must supply -K option when listing backup files'):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options27(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_databases = 'bkdb,fulldb'
options.timestamp_key = True
with self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with -K option'):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options28(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_databases = ['bkdb']
options.timestamp_key = True
options.ddboost = True
options.list_backup_files = True
with self.assertRaisesRegexp(Exception, 'list backup files not supported with ddboost option'):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options29(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_databases = ['bkdb']
options.timestamp_key = True
options.ddboost = True
options.netbackup_service_host = "mdw"
options.netbackup_policy = "test_policy"
options.netbackup_schedule = "test_schedule"
with self.assertRaisesRegexp(Exception, '--ddboost is not supported with NetBackup'):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_include_exclude_for_dump_database00(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
gpcd = GpCronDump(options, None)
dirtyfile = '/tmp/dirty'
dbname = 'foo'
(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
self.assertEquals(inc, None)
self.assertEquals(exc, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file')
@patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2'])
def test_get_include_exclude_for_dump_database01(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.include_dump_tables_file = '/mydir/incfile'
gpcd = GpCronDump(options, None)
dirtyfile = '/tmp/dirty'
dbname = 'foo'
(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
self.assertTrue(inc.startswith('/tmp/include_dump_tables_file'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file')
@patch('gpcrondump.get_lines_from_file')
def test_get_include_exclude_for_dump_database02(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.include_dump_tables = ['public.t1', 'public.t2', 'public.t3']
gpcd = GpCronDump(options, None)
dirtyfile = '/tmp/dirty'
dbname = 'foo'
(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
self.assertTrue(inc.startswith('/tmp/include_dump_tables_file'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000')
def test_get_include_exclude_for_dump_database03(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.incremental = True
gpcd = GpCronDump(options, None)
dirtyfile = '/tmp/dirty'
dbname = 'foo'
(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
self.assertEquals(inc, '/tmp/dirty')
self.assertEquals(exc, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file')
@patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2'])
def test_get_include_exclude_for_dump_database04(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.exclude_dump_tables_file = '/odir/exfile'
gpcd = GpCronDump(options, None)
dirtyfile = '/tmp/dirty'
dbname = 'foo'
(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file')
@patch('gpcrondump.get_lines_from_file')
def test_get_include_exclude_for_dump_database06(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.exclude_dump_tables = ['public.t4', 'public.t5', 'public.t6']
gpcd = GpCronDump(options, None)
dirtyfile = '/tmp/dirty'
dbname = 'foo'
(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect=[['public.aot1', 'public.aot2'], ['public.cot1', 'public.cot2']])
def test_verify_tablenames_00(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
cron = GpCronDump(options, None)
ao_partition_list = ['public, aot1, 2190', 'public, aot2, 3190']
co_partition_list = ['public, cot1, 2190', 'public, cot2, 3190']
heap_partition_list = ['public.heapt1', 'public.heapt2']
cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list)  # should not raise an exception
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect=[['public.aot1:asd', 'public.aot2'], ['public.cot1', 'public.cot2:asd']])
def test_verify_tablenames_00_bad(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
cron = GpCronDump(options, None)
ao_partition_list = ['public, aot1!asd, 2190', 'public, aot2, 3190']
co_partition_list = ['public, cot1, 2190', 'public, cot2\nasd, 3190']
heap_partition_list = ['public, heapt1, 2190', 'public, heapt2!asdasd , 3190']
with self.assertRaisesRegexp(Exception, ''):
cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_inserts_with_incremental(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.output_options = ['--inserts']
options.incremental = True
with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_oids_with_incremental(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.output_options = ['--oids']
options.incremental = True
with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_column_inserts_with_incremental(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.output_options = ['--column-inserts']
options.incremental = True
with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_table_names_from_partition_list_00(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
cron = GpCronDump(options, None)
partition_list = ['public, aot1, 2190', 'public, aot2:aot, 3190']
expected_output = ['public.aot1', 'public.aot2:aot']
result = cron._get_table_names_from_partition_list(partition_list)
self.assertEqual(result, expected_output)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_table_names_from_partition_list_01(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
cron = GpCronDump(options, None)
partition_list = ['public, aot1, 2190', 'public, aot2,aot, 3190']
with self.assertRaisesRegexp(Exception, 'Invalid partition entry "public, aot2,aot, 3190"'):
cron._get_table_names_from_partition_list(partition_list)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_table_filter1(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_dump_tables = 'foo'
options.include_dump_tables_file = 'foo'
with self.assertRaisesRegexp(Exception, '-t can not be selected with --table-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_table_filter2(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_dump_tables = 'foo'
options.exclude_dump_tables_file = 'foo'
with self.assertRaisesRegexp(Exception, '-t can not be selected with --exclude-table-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_table_filter3(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_tables = 'foo'
options.exclude_dump_tables_file = 'foo'
with self.assertRaisesRegexp(Exception, '-T can not be selected with --exclude-table-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_table_filter4(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_tables = 'foo'
options.include_dump_tables_file = 'foo'
with self.assertRaisesRegexp(Exception, '-T can not be selected with --table-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_table_filter5(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_dump_tables = 'foo'
options.exclude_dump_tables = 'foo'
with self.assertRaisesRegexp(Exception, '-t can not be selected with -T option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_table_filter6(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_dump_tables_file = 'foo'
options.exclude_dump_tables_file = 'foo'
with self.assertRaisesRegexp(Exception, '--table-file can not be selected with --exclude-table-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_timestamp_object1(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = '20130101010101'
gpcd = GpCronDump(options, None)
timestamp = gpcd._get_timestamp_object(options.timestamp_key)
self.assertEquals(timestamp, datetime(2013, 1, 1, 1, 1, 1))
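# Note (sketch, not from the original module): timestamp keys are 14-digit
# YYYYMMDDHHMMSS strings, so '20130101010101' parses to
# datetime(2013, 1, 1, 1, 1, 1); the 11-digit key in the next test is
# rejected as an invalid timestamp key.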
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_timestamp_object2(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = '20130101010'
gpcd = GpCronDump(options, None)
with self.assertRaisesRegexp(Exception, 'Invalid timestamp key'):
gpcd._get_timestamp_object(options.timestamp_key)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_timestamp_object3(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
gpcd = GpCronDump(options, None)
timestamp = gpcd._get_timestamp_object(options.timestamp_key)
self.assertTrue(isinstance(timestamp, datetime))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_files_file_list1(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.masterDataDirectory = '/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory]
self.assertEqual(files_file_list, expected_files_list)
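# Note (sketch, not from the original module): the expected list encodes the
# gpcrondump naming scheme visible in these tests:
# <host>:<master_data_dir>/db_dumps/<YYYYMMDD>/gp_dump_*_<timestamp>, with an
# optional <prefix>_ and an extra _increments file for incremental backups
# (see the filter and prefix variants below).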
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_files_file_list2(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.masterDataDirectory = '/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo2'
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp)
expected_files_list = ['foo2:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,
'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory,
'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory,
'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory,
'foo2:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory,
'foo2:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory]
self.assertEqual(files_file_list, expected_files_list)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000')
def test_get_files_file_list3(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.timestamp_key = '20130101010101'
options.incremental = True
options.masterDataDirectory = '/data/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, None, gpcd.dump_dir, timestamp)
files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101000000_increments' % options.masterDataDirectory]
self.assertEqual(sorted(files_file_list), sorted(expected_files_list))
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gppylib.operations.backup_utils.get_latest_full_dump_timestamp', return_value='20130101000000')
def test_get_files_file_list_with_filter(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.timestamp_key = '20130101010101'
options.local_dump_prefix = 'metro'
options.include_dump_tables_file = 'bar'
options.masterDataDirectory = '/data/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/metro_gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_co_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_last_operation' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101.rpt' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_filter' % options.masterDataDirectory]
self.assertEqual(sorted(files_file_list), sorted(expected_files_list))
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000')
@patch('gpcrondump.GpCronDump._get_master_port')
def test_get_files_file_list_with_prefix(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.timestamp_key = '20130101010101'
options.incremental = True
options.local_dump_prefix = 'metro'
options.masterDataDirectory = '/data/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, None, gpcd.dump_dir, timestamp)
files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/metro_gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_co_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_last_operation' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101.rpt' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101000000_increments' % options.masterDataDirectory]
self.assertEqual(sorted(files_file_list), sorted(expected_files_list))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_pipes_file_list1(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.masterDataDirectory = '/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo2'
mock_segs = []
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp)
expected_files_list = ['foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory,
'foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory]
self.assertEqual(pipes_file_list, expected_files_list)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_pipes_file_list2(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.masterDataDirectory = '/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.getSegmentDataDirectory.return_value = '/bar'
seg.getSegmentHostName.return_value = 'foo1'
seg.getSegmentDbId.return_value = id + 1
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory,
'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz',
'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz']
self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_pipes_file_list3(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.dump_global = True
options.masterDataDirectory = '/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.getSegmentDataDirectory.return_value = '/bar'
seg.getSegmentHostName.return_value = 'foo1'
seg.getSegmentDbId.return_value = id + 1
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_global_1_1_20130101010101' % options.masterDataDirectory,
'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz',
'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz']
self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_pipes_file_list4(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.masterDataDirectory = '/foo'
options.dump_config = True
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.getSegmentDataDirectory.return_value = '/bar'
seg.getSegmentHostName.return_value = 'foo1'
seg.getSegmentDbId.return_value = id + 1
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_master_config_files_20130101010101.tar' % options.masterDataDirectory,
'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_1_20130101010101.tar',
'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_2_20130101010101.tar',
'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz',
'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz']
self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_gpcrondump_init0(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.local_dump_prefix = 'foo'
options.ddboost = False
options.ddboost_verify = False
options.ddboost_config_remove = False
options.ddboost_user = False
options.ddboost_host = False
options.max_streams = None
options.list_backup_files = False
gpcd = GpCronDump(options, None)
self.assertEqual(gpcd.dump_prefix, 'foo_')
@patch('gpcrondump.os.path.isfile', return_value=True)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.os.path.getsize', return_value=111)
@patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': 'testdb100', 'SUBJECT': "backup completed for Database 'testdb100'"}]})
def test_validate_parse_email_File00(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.include_email_file = "/tmp/abc.yaml"
m = mock.MagicMock()
with patch('__builtin__.open', m, create=True):
cron = GpCronDump(options, None)
@patch('gpcrondump.os.path.isfile', return_value=False)
@patch('gpcrondump.GpCronDump._get_master_port')
def test_validate_parse_email_File01(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.include_email_file = "/tmp/abc.yaml"
with self.assertRaisesRegexp(Exception, "\'%s\' file does not exist." % options.include_email_file):
cron = GpCronDump(options, None)
@patch('gpcrondump.os.path.isfile', return_value=True)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.os.path.getsize', return_value=111)
def test_validate_parse_email_File02(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.include_email_file = "/tmp/abc"
with self.assertRaisesRegexp(Exception, "'%s' is not '.yaml' file. File containing email details should be '.yaml' file." % options.include_email_file):
cron = GpCronDump(options, None)
@patch('gpcrondump.os.path.isfile', return_value=True)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.os.path.getsize', return_value=0)
def test_validate_parse_email_File03(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.include_email_file = "/tmp/abc.yaml"
with self.assertRaisesRegexp(Exception, "'%s' file is empty." % options.include_email_file):
cron = GpCronDump(options, None)
@patch('gpcrondump.os.path.isfile', return_value=True)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.os.path.getsize', return_value=111)
@patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'NAME': 'testdb100', 'SUBJECT': "backup completed for Database 'testdb100'"}]})
def test_validate_parse_email_File04(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.include_email_file = "/tmp/abc.yaml"
m = mock.MagicMock()
with self.assertRaisesRegexp(Exception, "\'%s\' file is not formatted properly." % options.include_email_file):
with patch('__builtin__.open', m, create=True):
cron = GpCronDump(options, None)
@patch('gpcrondump.os.path.isfile', return_value=True)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.os.path.getsize', return_value=111)
@patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': None, 'SUBJECT': "backup completed for Database 'testdb100'"}]})
def test_validate_parse_email_File05(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.include_email_file = "/tmp/abc.yaml"
m = mock.MagicMock()
with self.assertRaisesRegexp(Exception, "\'%s\' file is not formatted properly." % options.include_email_file):
with patch('__builtin__.open', m, create=True):
cron = GpCronDump(options, None)
@patch('gpcrondump.MailDumpEvent')
@patch('gpcrondump.GpCronDump._get_master_port')
def test_send_email00(self, mock1, MailDumpEvent):
options = GpCronDumpTestCase.Options()
dump_database = 'testdb1'
current_exit_status = 0
time_start = '12:07:09'
time_end = '12:08:18'
cron = GpCronDump(options, None)
cron._send_email(dump_database, current_exit_status, time_start, time_end)
#------------------------------- Mainline --------------------------------
if __name__ == '__main__':
unittest.main()
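A note on the mock plumbing in the tests above, since it is easy to misread: stacked @patch decorators are applied bottom-up, so the innermost decorator's mock is the first extra argument the test method receives. A minimal self-contained sketch of that convention (the module demo_mod and its attributes are hypothetical stand-ins, not gpcrondump's real API):

import sys
import types
import unittest
from unittest.mock import patch

# Hypothetical module so the patch targets resolve when this sketch runs.
demo_mod = types.ModuleType('demo_mod')
demo_mod.get_port = lambda: 0
demo_mod.validate_ts = lambda ts: True
sys.modules['demo_mod'] = demo_mod

class PatchOrderDemo(unittest.TestCase):
    @patch('demo_mod.get_port', return_value=5432)  # outermost decorator
    @patch('demo_mod.validate_ts')                  # innermost decorator
    def test_order(self, mock_validate, mock_get_port):
        # The innermost patch arrives first: mock_validate replaces validate_ts.
        self.assertEqual(demo_mod.get_port(), 5432)
        demo_mod.validate_ts('20130101010101')
        mock_validate.assert_called_once_with('20130101010101')

if __name__ == '__main__':
    unittest.main()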
[record trailer for the file above]
avg_line_length: 52.264878 | max_line_length: 175 | alphanum_fraction: 0.697203
qsc quality signals (schema order): 6899, 62352, 6.016814, 0.058269, 0.082751, 0.062033, 0.068128, 0.906697, 0.889207, 0.881306, 0.869164, 0.855167, 0.841845, 0, 0.041937, 0.200346, 62352, 1192, 176, 52.308725, 0.790594, 0.005052, 0, 0.713222, 0, 0.004655, 0.291444, 0.194425, 0, 0, 0, 0, 0.088454, 1, 0.09311, false, 0.000931, 0.010242, 0, 0.105214, 0, 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7

[next record]
hexsha: db874da91d4a01e76e9bd18e99b073b83ddddd62 | size: 6050 | ext: py | lang: Python
max_stars: path=AutomationFramework/tests/interfaces/test_if_subif.py, repo=sbarguil/Testing-framework, head=f3ef69f1c4f0aeafd02e222d846162c711783b15, licenses=["Apache-2.0"], count=1, events 2020-04-23T15:22:16.000Z to 2020-04-23T15:22:16.000Z
max_issues: same path/repo/head/licenses, count=44, events 2020-08-13T19:35:41.000Z to 2021-03-01T09:08:00.000Z
max_forks: same path/repo/head/licenses, count=6, events 2020-04-23T15:29:38.000Z to 2022-03-03T14:23:38.000Z
content:
import pytest
from AutomationFramework.page_objects.interfaces.interfaces import Interfaces
from AutomationFramework.tests.base_test import BaseTest
class TestInterfacesSubInterfaces(BaseTest):
test_case_file = 'if_subif.yml'
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_description',
'page_object_class': Interfaces}])
def test_if_subif_description(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_enabled',
'page_object_class': Interfaces}])
def test_if_subif_enabled(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_ip_prefix_length',
'page_object_class': Interfaces}])
def test_if_subif_ip_prefix_length(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('multiple_create_page_objects_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_ip_state',
'page_object_rpcs_classes': [Interfaces, Interfaces],
'rpc_clean_order': None,
}])
def test_if_subif_ip_state(self, multiple_create_page_objects):
for page_object in multiple_create_page_objects:
page_object.execute_interface_rpc()
assert page_object.validate_rpc(), page_object.get_test_case_description()
@pytest.mark.parametrize('multiple_create_page_objects_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_origin',
'page_object_rpcs_classes': [Interfaces, Interfaces],
'rpc_clean_order': None,
}])
def test_if_subif_origin(self, multiple_create_page_objects):
for page_object in multiple_create_page_objects:
page_object.execute_interface_rpc()
assert page_object.validate_rpc(), page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_dhcp_client',
'page_object_class': Interfaces}])
def test_if_subif_dhcp_client(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_mtu',
'page_object_class': Interfaces}])
def test_if_subif_mtu(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_vlan_id',
'page_object_class': Interfaces}])
def test_if_subif_vlan_id(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_inner_outer_vlan_id',
'page_object_class': Interfaces}])
def test_if_subif_inner_outer_vlan_id(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_match_vlan_id',
'page_object_class': Interfaces}])
def test_if_subif_match_vlan_id(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
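The parametrize calls above target create_page_object_arg, which is presumably a fixture supplied by a shared conftest; pytest allows @pytest.mark.parametrize to override any fixture in the test's fixture closure with a direct value. A minimal sketch of that pattern, with illustrative names rather than this framework's real conftest:

import pytest

@pytest.fixture
def page_object_arg():
    # Default value; a parametrize mark on 'page_object_arg' overrides this.
    return {'test_case_name': 'default'}

@pytest.fixture
def page_object(page_object_arg):
    # Stand-in for the real page-object construction.
    return dict(page_object_arg, built=True)

@pytest.mark.parametrize('page_object_arg', [{'test_case_name': 'if_subif_demo'}])
def test_fixture_override(page_object):
    assert page_object['test_case_name'] == 'if_subif_demo'
    assert page_object['built']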
[record trailer for AutomationFramework/tests/interfaces/test_if_subif.py]
avg_line_length: 72.02381 | max_line_length: 120 | alphanum_fraction: 0.615372
qsc quality signals (schema order): 638, 6050, 5.219436, 0.092476, 0.174174, 0.192192, 0.096096, 0.916517, 0.905105, 0.899099, 0.899099, 0.840541, 0.840541, 0, 0, 0.315207, 6050, 83, 121, 72.891566, 0.803765, 0, 0, 0.647887, 0, 0, 0.154876, 0.06, 0, 0, 0, 0, 0.140845, 1, 0.140845, false, 0, 0.042254, 0, 0.211268, 0, 0, 0, 0, null, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7

[next record]
hexsha: 915d6d3e43279c39fd9d72fc48c527f4f811ec46 | size: 180 | ext: py | lang: Python
max_stars: path=rta/provision/__init__.py, repo=XiaoguTech/rta-sandbox, head=2783a3ba8920bf64273761ce7392e51c9c8fb1f7, licenses=["MIT"], count=null, events null to null
max_issues: same path/repo/head/licenses, count=null, events null to null
max_forks: same path/repo/head/licenses, count=null, events null to null
content:
from rta.provision.utils import *
from rta.provision.passwd import *
from rta.provision.influxdb import *
from rta.provision.grafana import *
from rta.provision.kapacitor import *
[record trailer for rta/provision/__init__.py]
avg_line_length: 30 | max_line_length: 37 | alphanum_fraction: 0.805556
qsc quality signals (schema order): 25, 180, 5.8, 0.36, 0.241379, 0.551724, 0.606897, 0, 0, 0, 0, 0, 0, 0, 0, 0.111111, 180, 5, 38, 36, 0.90625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0.2, 1, 0, 1, 0, 1, 0, 0, null, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 9

[next record]
hexsha: 91820c594379b0529582b42b9cc165d4cd520738 | size: 33871 | ext: py | lang: Python
max_stars: path=tests/compute/test_sampler.py, repo=buaaqt/dgl, head=64f6f3c1a8c2c3e08ec0750b902f3e2c63fd2cd7, licenses=["Apache-2.0"], count=1, events 2020-07-21T03:03:15.000Z to 2020-07-21T03:03:15.000Z
max_issues: same path/repo/head/licenses, count=null, events null to null
max_forks: same path/repo/head/licenses, count=null, events null to null
content:
import backend as F
import numpy as np
import scipy as sp
import dgl
from dgl import utils
import unittest
from numpy.testing import assert_array_equal
np.random.seed(42)
def generate_rand_graph(n):
arr = (sp.sparse.random(n, n, density=0.1, format='coo') != 0).astype(np.int64)
return dgl.DGLGraph(arr, readonly=True)
def test_create_full():
g = generate_rand_graph(100)
full_nf = dgl.contrib.sampling.sampler.create_full_nodeflow(g, 5)
assert full_nf.number_of_nodes() == g.number_of_nodes() * 6
assert full_nf.number_of_edges() == g.number_of_edges() * 5
def test_1neighbor_sampler_all():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 1, g.number_of_nodes(), neighbor_type='in', num_workers=4)):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
src, dst, eid = g.in_edges(seed_ids, form='all')
assert subg.number_of_nodes() == len(src) + 1
assert subg.number_of_edges() == len(src)
assert seed_ids == subg.layer_parent_nid(-1)
child_src, child_dst, child_eid = subg.in_edges(subg.layer_nid(-1), form='all')
assert F.array_equal(child_src, subg.layer_nid(0))
src1 = subg.map_to_parent_nid(child_src)
assert F.array_equal(src1, src)
def is_sorted(arr):
return np.sum(np.sort(arr) == arr, 0) == len(arr)
def verify_subgraph(g, subg, seed_id):
seed_id = F.asnumpy(seed_id)
seeds = F.asnumpy(subg.map_to_parent_nid(subg.layer_nid(-1)))
assert seed_id in seeds
child_seed = F.asnumpy(subg.layer_nid(-1))[seeds == seed_id]
src, dst, eid = g.in_edges(seed_id, form='all')
child_src, child_dst, child_eid = subg.in_edges(child_seed, form='all')
child_src = F.asnumpy(child_src)
# We don't allow duplicate elements in the neighbor list.
assert(len(np.unique(child_src)) == len(child_src))
# The neighbor list also needs to be sorted.
assert(is_sorted(child_src))
# a neighbor in the subgraph must also exist in parent graph.
src = F.asnumpy(src)
for i in subg.map_to_parent_nid(child_src):
assert F.asnumpy(i) in src
def test_1neighbor_sampler():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 1, 5, neighbor_type='in',
num_workers=4):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
assert subg.number_of_nodes() <= 6
assert subg.number_of_edges() <= 5
verify_subgraph(g, subg, seed_ids)
def test_prefetch_neighbor_sampler():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 1, 5, neighbor_type='in',
num_workers=4, prefetch=True):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
assert subg.number_of_nodes() <= 6
assert subg.number_of_edges() <= 5
verify_subgraph(g, subg, seed_ids)
def test_10neighbor_sampler_all():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 10, g.number_of_nodes(),
neighbor_type='in', num_workers=4):
seed_ids = subg.layer_parent_nid(-1)
assert F.array_equal(seed_ids, subg.map_to_parent_nid(subg.layer_nid(-1)))
src, dst, eid = g.in_edges(seed_ids, form='all')
child_src, child_dst, child_eid = subg.in_edges(subg.layer_nid(-1), form='all')
src1 = subg.map_to_parent_nid(child_src)
assert F.array_equal(src1, src)
def check_10neighbor_sampler(g, seeds):
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 10, 5, neighbor_type='in',
num_workers=4, seed_nodes=seeds):
seed_ids = subg.layer_parent_nid(-1)
assert subg.number_of_nodes() <= 6 * len(seed_ids)
assert subg.number_of_edges() <= 5 * len(seed_ids)
for seed_id in seed_ids:
verify_subgraph(g, subg, seed_id)
def test_10neighbor_sampler():
g = generate_rand_graph(100)
check_10neighbor_sampler(g, None)
check_10neighbor_sampler(g, seeds=np.unique(np.random.randint(0, g.number_of_nodes(),
size=int(g.number_of_nodes() / 10))))
def _test_layer_sampler(prefetch=False):
g = generate_rand_graph(100)
nid = g.nodes()
src, dst, eid = g.all_edges(form='all', order='eid')
n_batches = 5
batch_size = 50
seed_batches = [np.sort(np.random.choice(F.asnumpy(nid), batch_size, replace=False))
for i in range(n_batches)]
seed_nodes = np.hstack(seed_batches)
layer_sizes = [50] * 3
LayerSampler = getattr(dgl.contrib.sampling, 'LayerSampler')
sampler = LayerSampler(g, batch_size, layer_sizes, 'in',
seed_nodes=seed_nodes, num_workers=4, prefetch=prefetch)
for sub_g in sampler:
assert all(sub_g.layer_size(i) < size for i, size in enumerate(layer_sizes))
sub_nid = F.arange(0, sub_g.number_of_nodes())
assert all(np.all(np.isin(F.asnumpy(sub_g.layer_nid(i)), F.asnumpy(sub_nid)))
for i in range(sub_g.num_layers))
assert np.all(np.isin(F.asnumpy(sub_g.map_to_parent_nid(sub_nid)),
F.asnumpy(nid)))
sub_eid = F.arange(0, sub_g.number_of_edges())
assert np.all(np.isin(F.asnumpy(sub_g.map_to_parent_eid(sub_eid)),
F.asnumpy(eid)))
assert any(np.all(np.sort(F.asnumpy(sub_g.layer_parent_nid(-1))) == seed_batch)
for seed_batch in seed_batches)
sub_src, sub_dst = sub_g.all_edges(order='eid')
for i in range(sub_g.num_blocks):
block_eid = sub_g.block_eid(i)
block_src = sub_g.map_to_parent_nid(F.gather_row(sub_src, block_eid))
block_dst = sub_g.map_to_parent_nid(F.gather_row(sub_dst, block_eid))
block_parent_eid = sub_g.block_parent_eid(i)
block_parent_src = F.gather_row(src, block_parent_eid)
block_parent_dst = F.gather_row(dst, block_parent_eid)
assert np.all(F.asnumpy(block_src == block_parent_src))
n_layers = sub_g.num_layers
sub_n = sub_g.number_of_nodes()
assert sum(F.shape(sub_g.layer_nid(i))[0] for i in range(n_layers)) == sub_n
n_blocks = sub_g.num_blocks
sub_m = sub_g.number_of_edges()
assert sum(F.shape(sub_g.block_eid(i))[0] for i in range(n_blocks)) == sub_m
def test_layer_sampler():
_test_layer_sampler()
_test_layer_sampler(prefetch=True)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="Error occurred when multiprocessing")
def test_nonuniform_neighbor_sampler():
# Construct a graph with
# (1) A path (0, 1, ..., 99) with weight 1
# (2) A bunch of random edges with weight 0.
edges = []
for i in range(99):
edges.append((i, i + 1))
for i in range(1000):
edge = (np.random.randint(100), np.random.randint(100))
if edge not in edges:
edges.append(edge)
src, dst = zip(*edges)
g = dgl.DGLGraph()
g.add_nodes(100)
g.add_edges(src, dst)
g.readonly()
g.edata['w'] = F.cat([
F.ones((99,), F.float64, F.cpu()),
F.zeros((len(edges) - 99,), F.float64, F.cpu())], 0)
# Test 1-neighbor NodeFlow with 99 as target node.
# The generated NodeFlow should only contain node i on layer i.
sampler = dgl.contrib.sampling.NeighborSampler(
g, 1, 1, 99, 'in', transition_prob='w', seed_nodes=[99])
nf = next(iter(sampler))
assert nf.num_layers == 100
for i in range(nf.num_layers):
assert nf.layer_size(i) == 1
assert F.asnumpy(nf.layer_parent_nid(i)[0]) == i
# Test the reverse direction
sampler = dgl.contrib.sampling.NeighborSampler(
g, 1, 1, 99, 'out', transition_prob='w', seed_nodes=[0])
nf = next(iter(sampler))
assert nf.num_layers == 100
for i in range(nf.num_layers):
assert nf.layer_size(i) == 1
assert F.asnumpy(nf.layer_parent_nid(i)[0]) == 99 - i
def test_setseed():
g = generate_rand_graph(100)
nids = []
dgl.random.seed(42)
for subg in dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=1):
nids.append(
tuple(tuple(F.asnumpy(subg.layer_parent_nid(i))) for i in range(3)))
# reinitialize
dgl.random.seed(42)
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=1)):
item = tuple(tuple(F.asnumpy(subg.layer_parent_nid(i))) for i in range(3))
assert item == nids[i]
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=4)):
pass
def check_head_tail(g):
lsrc, ldst, leid = g.all_edges(form='all', order='eid')
lsrc = np.unique(F.asnumpy(lsrc))
head_nid = np.unique(F.asnumpy(g.head_nid))
assert len(head_nid) == len(g.head_nid)
np.testing.assert_equal(lsrc, head_nid)
ldst = np.unique(F.asnumpy(ldst))
tail_nid = np.unique(F.asnumpy(g.tail_nid))
assert len(tail_nid) == len(g.tail_nid)
np.testing.assert_equal(tail_nid, ldst)
def check_negative_sampler(mode, exclude_positive, neg_size):
g = generate_rand_graph(100)
num_edges = g.number_of_edges()
etype = np.random.randint(0, 10, size=g.number_of_edges(), dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
pos_gsrc, pos_gdst, pos_geid = g.all_edges(form='all', order='eid')
pos_map = {}
for i in range(len(pos_geid)):
pos_d = int(F.asnumpy(pos_gdst[i]))
pos_e = int(F.asnumpy(pos_geid[i]))
pos_map[(pos_d, pos_e)] = int(F.asnumpy(pos_gsrc[i]))
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Test the homogeneous graph.
batch_size = 50
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
negative_mode=mode,
reset=False,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
pos_lsrc, pos_ldst, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert_array_equal(F.asnumpy(F.gather_row(pos_edges.parent_eid, pos_leid)),
F.asnumpy(g.edge_ids(F.gather_row(pos_edges.parent_nid, pos_lsrc),
F.gather_row(pos_edges.parent_nid, pos_ldst))))
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
for i in range(len(neg_eid)):
neg_d = int(F.asnumpy(neg_dst)[i])
neg_e = int(F.asnumpy(neg_eid)[i])
assert (neg_d, neg_e) in pos_map
if exclude_positive:
assert int(F.asnumpy(neg_src[i])) != pos_map[(neg_d, neg_e)]
check_head_tail(neg_edges)
pos_tails = F.gather_row(pos_edges.parent_nid, pos_edges.tail_nid)
neg_tails = F.gather_row(neg_edges.parent_nid, neg_edges.tail_nid)
pos_tails = np.sort(F.asnumpy(pos_tails))
neg_tails = np.sort(F.asnumpy(neg_tails))
np.testing.assert_equal(pos_tails, neg_tails)
exist = neg_edges.edata['false_neg']
if exclude_positive:
assert np.sum(F.asnumpy(exist) == 0) == len(exist)
else:
assert F.array_equal(g.has_edges_between(neg_src, neg_dst), exist)
total_samples += batch_size
assert total_samples <= num_edges
# check replacement = True
# with reset = False (default setting)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=False,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
# check replacement = False
# with reset = False (default setting)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=False,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
# check replacement = True
# with reset = True
total_samples = 0
max_samples = 2 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) <= batch_size
total_samples += len(pos_leid)
if (total_samples >= max_samples):
break
assert total_samples >= max_samples
# check replacement = False
# with reset = True
total_samples = 0
max_samples = 2 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) <= batch_size
total_samples += len(pos_leid)
if (total_samples >= max_samples):
break
assert total_samples >= max_samples
# Test the knowledge graph.
total_samples = 0
for _, neg_edges in EdgeSampler(g, batch_size,
negative_mode=mode,
reset=False,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
def check_weighted_negative_sampler(mode, exclude_positive, neg_size):
g = generate_rand_graph(100)
num_edges = g.number_of_edges()
num_nodes = g.number_of_nodes()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
node_weight = F.copy_to(F.tensor(np.full((num_nodes,), 1, dtype=np.float32)), F.cpu())
etype = np.random.randint(0, 10, size=num_edges, dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
pos_gsrc, pos_gdst, pos_geid = g.all_edges(form='all', order='eid')
pos_map = {}
for i in range(len(pos_geid)):
pos_d = int(F.asnumpy(pos_gdst[i]))
pos_e = int(F.asnumpy(pos_geid[i]))
pos_map[(pos_d, pos_e)] = int(F.asnumpy(pos_gsrc[i]))
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Correctness check
# Test the homogeneous graph.
batch_size = 50
# Test the homogeneous graph with edge weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
pos_lsrc, pos_ldst, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert_array_equal(F.asnumpy(F.gather_row(pos_edges.parent_eid, pos_leid)),
F.asnumpy(g.edge_ids(F.gather_row(pos_edges.parent_nid, pos_lsrc),
F.gather_row(pos_edges.parent_nid, pos_ldst))))
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
for i in range(len(neg_eid)):
neg_d = int(F.asnumpy(neg_dst[i]))
neg_e = int(F.asnumpy(neg_eid[i]))
assert (neg_d, neg_e) in pos_map
if exclude_positive:
assert int(F.asnumpy(neg_src[i])) != pos_map[(neg_d, neg_e)]
check_head_tail(neg_edges)
pos_tails = F.gather_row(pos_edges.parent_nid, pos_edges.tail_nid)
neg_tails = F.gather_row(neg_edges.parent_nid, neg_edges.tail_nid)
pos_tails = np.sort(F.asnumpy(pos_tails))
neg_tails = np.sort(F.asnumpy(neg_tails))
np.testing.assert_equal(pos_tails, neg_tails)
exist = neg_edges.edata['false_neg']
if exclude_positive:
assert np.sum(F.asnumpy(exist) == 0) == len(exist)
else:
assert F.array_equal(g.has_edges_between(neg_src, neg_dst), exist)
total_samples += batch_size
assert total_samples <= num_edges
# Test the knowledge graph with edge weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
# Test the knowledge graph with edge/node weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
# check replacement = True with non-uniform sampling of positive edges
# with reset = False
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
# check replacement = True with non-uniform sampling of positive edges
# with reset = True
total_samples = 0
max_samples = 4 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=True,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
if total_samples >= max_samples:
break
assert total_samples == max_samples
# check replacement = False with non-uniform sampling of pos/neg edges
# reset = False
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=False,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
# check replacement = False with non-uniform sampling of pos/neg edges
# reset = True
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=True,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
if total_samples >= max_samples:
break
assert total_samples == max_samples
# Check Rate
dgl.random.seed(0)
g = generate_rand_graph(1000)
num_edges = g.number_of_edges()
num_nodes = g.number_of_nodes()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
edge_weight[0] = F.sum(edge_weight, dim=0)
node_weight = F.copy_to(F.tensor(np.full((num_nodes,), 1, dtype=np.float32)), F.cpu())
node_weight[-1] = F.sum(node_weight, dim=0) / 200
etype = np.random.randint(0, 20, size=num_edges, dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
# Test w/o node weight.
max_samples = num_edges // 5
total_samples = 0
# Test the knowledge graph with edge weight provided.
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
node_sampled = np.full((num_nodes,), 0, dtype=np.int32)
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
edge_weight=edge_weight,
shuffle=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=False,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
neg_lsrc, neg_ldst, _ = neg_edges.all_edges(form='all', order='eid')
if 'head' in mode:
neg_src = neg_edges.parent_nid[neg_lsrc]
np.add.at(node_sampled, F.asnumpy(neg_src), 1)
else:
neg_dst = neg_edges.parent_nid[neg_ldst]
np.add.at(node_sampled, F.asnumpy(neg_dst), 1)
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
total_samples += batch_size
if total_samples > max_samples:
break
# Check rate here
edge_rate_0 = edge_sampled[0] / edge_sampled.sum()
edge_tail_half_cnt = edge_sampled[edge_sampled.shape[0] // 2:-1].sum()
edge_rate_tail_half = edge_tail_half_cnt / edge_sampled.sum()
assert np.allclose(edge_rate_0, 0.5, atol=0.05)
assert np.allclose(edge_rate_tail_half, 0.25, atol=0.05)
node_rate_0 = node_sampled[0] / node_sampled.sum()
node_tail_half_cnt = node_sampled[node_sampled.shape[0] // 2:-1].sum()
node_rate_tail_half = node_tail_half_cnt / node_sampled.sum()
assert node_rate_0 < 0.02
assert np.allclose(node_rate_tail_half, 0.5, atol=0.02)
# Test the knowledge graph with edge/node weight provied.
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
node_sampled = np.full((num_nodes,), 0, dtype=np.int32)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
edge_weight=edge_weight,
node_weight=node_weight,
shuffle=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=False,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
neg_lsrc, neg_ldst, _ = neg_edges.all_edges(form='all', order='eid')
if 'head' in mode:
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
np.add.at(node_sampled, F.asnumpy(neg_src), 1)
else:
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
np.add.at(node_sampled, F.asnumpy(neg_dst), 1)
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
total_samples += batch_size
if total_samples > max_samples:
break
# Check rate here
edge_rate_0 = edge_sampled[0] / edge_sampled.sum()
edge_tail_half_cnt = edge_sampled[edge_sampled.shape[0] // 2:-1].sum()
edge_rate_tail_half = edge_tail_half_cnt / edge_sampled.sum()
assert np.allclose(edge_rate_0, 0.5, atol=0.05)
assert np.allclose(edge_rate_tail_half, 0.25, atol=0.05)
node_rate = node_sampled[-1] / node_sampled.sum()
node_rate_a = np.average(node_sampled[:50]) / node_sampled.sum()
node_rate_b = np.average(node_sampled[50:100]) / node_sampled.sum()
# Since negative sampling does not produce duplicate nodes, this test
# allows some acceptable variation in the sample rate.
assert np.allclose(node_rate, node_rate_a * 5, atol=0.002)
assert np.allclose(node_rate_a, node_rate_b, atol=0.0002)
def check_positive_edge_sampler():
g = generate_rand_graph(1000)
num_edges = g.number_of_edges()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
edge_weight[num_edges-1] = num_edges ** 3
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Correctness check
# Test the homogeneous graph.
batch_size = 128
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
for pos_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
truth = np.full((num_edges,), 1, dtype=np.int32)
edge_sampled = edge_sampled[:num_edges]
assert np.array_equal(truth, edge_sampled)
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
for pos_edges in EdgeSampler(g, batch_size,
reset=False,
shuffle=True,
edge_weight=edge_weight):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
truth = np.full((num_edges,), 1, dtype=np.int32)
edge_sampled = edge_sampled[:num_edges]
assert np.array_equal(truth, edge_sampled)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support item assignment")
def test_negative_sampler():
check_negative_sampler('chunk-head', False, 10)
check_negative_sampler('head', True, 10)
check_negative_sampler('head', False, 10)
check_weighted_negative_sampler('chunk-head', False, 10)
check_weighted_negative_sampler('head', True, 10)
check_weighted_negative_sampler('head', False, 10)
check_positive_edge_sampler()
# This check is disabled for now; it might take too long.
#check_negative_sampler('head', False, 100)
if __name__ == '__main__':
test_create_full()
test_1neighbor_sampler_all()
test_10neighbor_sampler_all()
test_1neighbor_sampler()
test_10neighbor_sampler()
test_layer_sampler()
test_nonuniform_neighbor_sampler()
test_setseed()
test_negative_sampler()
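For orientation, a condensed usage sketch of the sampler API these tests exercise, assembled only from calls that already appear in this file (graph size and fanout values are arbitrary examples):

import numpy as np
import scipy as sp
import dgl

# Random readonly graph, as in generate_rand_graph() above.
arr = (sp.sparse.random(100, 100, density=0.1, format='coo') != 0).astype(np.int64)
g = dgl.DGLGraph(arr, readonly=True)

# One seed node per NodeFlow, at most 5 sampled in-neighbors per node.
for subg in dgl.contrib.sampling.NeighborSampler(g, 1, 5, neighbor_type='in',
                                                 num_workers=1):
    seed_ids = subg.layer_parent_nid(-1)  # parent-graph ids of the seed layer
    print(len(seed_ids), subg.number_of_nodes(), subg.number_of_edges())
    break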
[record trailer for tests/compute/test_sampler.py]
avg_line_length: 46.783149 | max_line_length: 103 | alphanum_fraction: 0.575507
qsc quality signals (schema order): 4447, 33871, 4.093996, 0.063414, 0.028562, 0.018675, 0.020598, 0.818412, 0.778919, 0.754092, 0.730913, 0.716522, 0.703614, 0, 0.016596, 0.322193, 33871, 723, 104, 46.847856, 0.776418, 0.059638, 0, 0.686242, 1, 0, 0.016981, 0, 0, 0, 0, 0, 0.147651, 1, 0.031879, false, 0.001678, 0.011745, 0.001678, 0.04698, 0, 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7

[next record]
hexsha: 9186884237c62f08e8e5c91cdb86f2cf165aa0f6 | size: 173 | ext: py | lang: Python
max_stars: path=examples/simple_lakehouse/simple_lakehouse/repo.py, repo=dbatten5/dagster, head=d76e50295054ffe5a72f9b292ef57febae499528, licenses=["Apache-2.0"], count=2, events 2021-06-21T17:50:26.000Z to 2021-06-21T19:14:23.000Z
max_issues: same path/repo/head/licenses, count=1, events 2021-06-21T18:30:02.000Z to 2021-06-25T21:18:39.000Z
max_forks: same path/repo/head/licenses, count=1, events 2021-08-18T17:21:57.000Z to 2021-08-18T17:21:57.000Z
content:
from dagster import repository
from simple_lakehouse.pipelines import simple_lakehouse_pipeline
@repository
def simple_lakehouse():
return [simple_lakehouse_pipeline]
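For context, the repository above follows dagster's legacy (pre-1.0) pipeline API: a @repository-decorated function simply returns the definitions it exposes. A minimal sketch under that assumption (the solid and pipeline here are illustrative, not the simple_lakehouse ones); such a file is typically served with a command along the lines of `dagit -f repo.py`:

from dagster import pipeline, repository, solid

@solid
def say_hello(context):
    # Trivial solid for illustration only.
    context.log.info('hello')

@pipeline
def hello_pipeline():
    say_hello()

@repository
def demo_repo():
    # A repository just returns the list of definitions it exposes.
    return [hello_pipeline]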
[record trailer for examples/simple_lakehouse/simple_lakehouse/repo.py]
avg_line_length: 21.625 | max_line_length: 64 | alphanum_fraction: 0.849711
qsc quality signals (schema order): 20, 173, 7.05, 0.5, 0.425532, 0.326241, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.109827, 173, 7, 65, 24.714286, 0.915584, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.2, true, 0, 0.4, 0.2, 0.8, 0, 1, 0, 0, null, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 8

[next record]
hexsha: 91b2c92f668693110e6ccdfb6fa82e177d314e5d | size: 8510 | ext: py | lang: Python
max_stars: path=z2/part2/interactive/jm/random_fuzzy_arrows_1/554539540.py, repo=kozakusek/ipp-2020-testy, head=09aa008fa53d159672cc7cbf969a6b237e15a7b8, licenses=["MIT"], count=1, events 2020-04-16T12:13:47.000Z to 2020-04-16T12:13:47.000Z
max_issues: same path/repo/head/licenses, count=18, events 2020-03-06T17:50:15.000Z to 2020-05-19T14:58:30.000Z
max_forks: same path/repo/head/licenses, count=18, events 2020-03-06T17:45:13.000Z to 2020-06-09T19:18:31.000Z
content:
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 554539540
"""
"""
random actions, total chaos
"""
board = gamma_new(6, 8, 3, 17)
assert board is not None
assert gamma_move(board, 1, 7, 4) == 0
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_busy_fields(board, 1) == 1
assert gamma_move(board, 2, 5, 1) == 1
assert gamma_move(board, 2, 1, 7) == 1
assert gamma_busy_fields(board, 2) == 2
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_golden_move(board, 3, 3, 4) == 0
assert gamma_busy_fields(board, 2) == 2
assert gamma_move(board, 3, 1, 3) == 1
assert gamma_move(board, 1, 3, 5) == 1
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 3, 2, 2) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 2, 5, 4) == 1
assert gamma_move(board, 3, 0, 4) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 2, 1, 4) == 1
assert gamma_move(board, 2, 1, 6) == 1
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 1, 0, 3) == 1
assert gamma_move(board, 1, 4, 2) == 1
board251673140 = gamma_board(board)
assert board251673140 is not None
assert board251673140 == (".2....\n"
".2....\n"
"...1..\n"
"32...2\n"
"131.1.\n"
"113.1.\n"
".1...2\n"
".3....\n")
del board251673140
board251673140 = None
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_move(board, 3, 4, 5) == 1
assert gamma_move(board, 3, 3, 0) == 1
assert gamma_free_fields(board, 3) == 29
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 3, 0, 5) == 1
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 0, 7) == 1
board281476409 = gamma_board(board)
assert board281476409 is not None
assert board281476409 == ("12....\n"
".2....\n"
"3..13.\n"
"32...2\n"
"131.1.\n"
"113.1.\n"
"31...2\n"
".3.3..\n")
del board281476409
board281476409 = None
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 5, 1) == 0
assert gamma_busy_fields(board, 3) == 8
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 6, 3) == 0
assert gamma_move(board, 2, 4, 4) == 1
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_free_fields(board, 3) == 24
assert gamma_move(board, 1, 1, 7) == 0
assert gamma_move(board, 1, 2, 1) == 1
board412285252 = gamma_board(board)
assert board412285252 is not None
assert board412285252 == ("12....\n"
".2....\n"
"3..13.\n"
"32..22\n"
"131.1.\n"
"113.1.\n"
"311..2\n"
"13.3..\n")
del board412285252
board412285252 = None
assert gamma_move(board, 2, 1, 6) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_free_fields(board, 3) == 23
assert gamma_golden_move(board, 3, 4, 4) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 3, 6) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_free_fields(board, 2) == 22
assert gamma_move(board, 3, 5, 5) == 1
assert gamma_move(board, 3, 5, 5) == 0
assert gamma_free_fields(board, 3) == 21
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 1, 5, 7) == 1
assert gamma_move(board, 2, 0, 6) == 1
assert gamma_move(board, 2, 5, 6) == 1
assert gamma_move(board, 3, 2, 2) == 0
assert gamma_move(board, 1, 5, 2) == 1
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 3, 3, 1) == 1
assert gamma_move(board, 1, 5, 1) == 0
assert gamma_free_fields(board, 1) == 16
assert gamma_move(board, 2, 4, 2) == 0
assert gamma_move(board, 3, 4, 1) == 1
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 7, 4) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_busy_fields(board, 2) == 7
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 1, 5) == 1
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 3, 1, 5) == 0
assert gamma_move(board, 1, 2, 4) == 1
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_busy_fields(board, 1) == 16
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 1, 0, 6) == 0
assert gamma_move(board, 2, 5, 5) == 0
assert gamma_golden_move(board, 2, 2, 2) == 1
assert gamma_move(board, 1, 5, 5) == 0
assert gamma_free_fields(board, 1) == 13
assert gamma_move(board, 2, 2, 6) == 1
assert gamma_move(board, 2, 5, 6) == 0
assert gamma_move(board, 3, 4, 3) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 2, 2, 0) == 1
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 7, 3) == 0
assert gamma_move(board, 2, 7, 3) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 1, 3, 3) == 1
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 5, 1) == 0
assert gamma_move(board, 1, 7, 2) == 0
board481507094 = gamma_board(board)
assert board481507094 is not None
assert board481507094 == ("12...1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board481507094
board481507094 = None
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 1, 7, 2) == 0
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_busy_fields(board, 3) == 11
assert gamma_golden_possible(board, 3) == 0
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_busy_fields(board, 3) == 11
assert gamma_move(board, 1, 7, 2) == 0
assert gamma_move(board, 1, 1, 6) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 1, 6, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 2, 2, 7) == 1
board984249076 = gamma_board(board)
assert board984249076 is not None
assert board984249076 == ("122..1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board984249076
board984249076 = None
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_golden_possible(board, 1) == 1
board492321582 = gamma_board(board)
assert board492321582 is not None
assert board492321582 == ("122..1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board492321582
board492321582 = None
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 1, 7, 3) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 2, 2, 6) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 2, 3, 2) == 1
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 5, 6) == 0
assert gamma_move(board, 3, 2, 1) == 0
gamma_delete(board)
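What the assertions above imply about the gamma API (inferred from this test alone): gamma_move and gamma_golden_move return 1 for a legal move and 0 for an illegal one, the query helpers return plain counts/flags, and every gamma_new is paired with a final gamma_delete. A minimal session in the same style, reusing the imports at the top of this file:

board = gamma_new(6, 8, 3, 17)           # presumably width 6, height 8, 3 players, area limit 17
assert board is not None
assert gamma_move(board, 1, 4, 3) == 1   # legal move by player 1 succeeds
assert gamma_move(board, 1, 7, 4) == 0   # x=7 falls outside a 6-column board
assert gamma_busy_fields(board, 1) == 1  # player 1 now occupies one field
gamma_delete(board)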
[record trailer for z2/part2/interactive/jm/random_fuzzy_arrows_1/554539540.py]
avg_line_length: 30.722022 | max_line_length: 46 | alphanum_fraction: 0.653114
qsc quality signals (schema order): 1575, 8510, 3.388571, 0.041905, 0.344201, 0.379427, 0.505902, 0.824433, 0.818999, 0.767285, 0.634814, 0.539254, 0.493536, 0, 0.155384, 0.177203, 8510, 276, 47, 30.833333, 0.606827, 0, 0, 0.436508, 0, 0, 0.045595, 0, 0, 0, 0, 0, 0.714286, 1, 0, false, 0, 0.003968, 0, 0.003968, 0, 0, 0, 0, null, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7

[next record]
hexsha: 91cb09a3e92988e65a39aed7bb0bc23d1f6a9538 | size: 20537 | ext: py | lang: Python
max_stars: path=util/hierarchical_primitive/cube_inclusion.py, repo=isunchy/cuboid_abstraction, head=afda6ca8516c2f5e5e7292b3b22a059a4f6c84ec, licenses=["MIT"], count=43, events 2019-09-20T07:45:08.000Z to 2022-03-23T04:07:21.000Z
max_issues: path=util/hierarchical_primitive/cube_inclusion.py, repo=SilenKZYoung/cuboid_abstraction, head=afda6ca8516c2f5e5e7292b3b22a059a4f6c84ec, licenses=["MIT"], count=4, events 2019-11-25T00:57:10.000Z to 2021-09-02T10:59:05.000Z
max_forks: path=util/hierarchical_primitive/cube_inclusion.py, repo=SilenKZYoung/cuboid_abstraction, head=afda6ca8516c2f5e5e7292b3b22a059a4f6c84ec, licenses=["MIT"], count=10, events 2019-09-10T02:19:47.000Z to 2021-06-16T05:23:43.000Z
content:
import numpy as np
import quaternion
# 11x11x11 grid of sample points covering the unit cube [-1, 1]^3,
# equivalent to the hard-coded table emitted by generate_sample_cube_points
# below (x varies slowest, z fastest).
_grid = np.linspace(-1.0, 1.0, num=11, dtype=np.float32)
_xs, _ys, _zs = np.meshgrid(_grid, _grid, _grid, indexing='ij')
sample_points = np.stack([_xs.ravel(), _ys.ravel(), _zs.ravel()], axis=0)  # [3, n]
sample_points = np.transpose(sample_points) # [n, 3]
def cube_inclusion(cube_param_1, cube_param_2):
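    """For each child cube (cube_param_1), return the index of the parent
    cube (cube_param_2) that best contains it, measured as the mean squared
    distance by which the child's sampled points fall outside the parent's
    extents."""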
n_cube_1 = cube_param_1['z'].shape[0] # child
n_cube_2 = cube_param_2['z'].shape[0] # parent
assert(n_cube_1 > n_cube_2)
assert(cube_param_1['q'].shape[0] == cube_param_1['t'].shape[0] == n_cube_1)
assert(cube_param_2['q'].shape[0] == cube_param_2['t'].shape[0] == n_cube_2)
n_point = sample_points.shape[0]
cube_cube_distance = np.zeros([n_cube_1, n_cube_2])
for i in range(n_cube_1):
z1, q1, t1 = [cube_param_1[v][i] for v in ['z', 'q', 't']]
for j in range(n_cube_2):
z2, q2, t2 = [cube_param_2[v][j] for v in ['z', 'q', 't']]
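            # Map the unit-cube samples into world space using the child's
            # half-extents z1, rotation q1 and translation t1, then into the
            # parent's local frame by inverting (q2, t2).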
points = sample_points * z1
rot1 = np.quaternion(q1[0], q1[1], q1[2], q1[3])
rot1 = quaternion.as_rotation_matrix(rot1)
points = np.transpose(np.matmul(rot1, np.transpose(points)))
points += t1
points -= t2
rot2 = np.quaternion(q2[0], q2[1], q2[2], q2[3]).conjugate()
rot2 = quaternion.as_rotation_matrix(rot2)
points = np.transpose(np.matmul(rot2, np.transpose(points)))
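            # Mean squared amount by which the samples exceed the parent's
            # half-extents z2 (zero when the child lies fully inside).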
distance = np.mean(np.sum(np.maximum(abs(points) - z2, 0)**2, axis=1))
cube_cube_distance[i, j] = distance
index = np.argmin(cube_cube_distance, axis=1)
return index
def generate_sample_cube_points(resolution=11):
    """Write a resolution^3 grid of points spanning [-1, 1]^3 to sample_points.txt."""
    sample_points = np.zeros([resolution, resolution, resolution, 3], dtype=np.float32)
    location_template = np.linspace(-1.0, 1.0, num=resolution)
    for i in range(resolution):
        for j in range(resolution):
            for k in range(resolution):
                sample_points[i, j, k, 0] = location_template[i]
                sample_points[i, j, k, 1] = location_template[j]
                sample_points[i, j, k, 2] = location_template[k]
    np.savetxt('sample_points.txt', np.transpose(np.reshape(sample_points, [-1, 3])),
               fmt='%1.1f', delimiter=',')
if __name__ == '__main__':
# generate_sample_cube_points()
z1 = np.array([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])
q1 = np.array([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]])
t1 = np.array([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1], [0.4, 0.4, 0.4]])
cube_param_1 = {'z': z1, 'q': q1, 't': t1}
z2 = np.array([[0.1, 0.1, 0.1], [0.2, 0.2, 0.2]])
q2 = np.array([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]])
t2 = np.array([[0.2, 0.2, 0.2], [0.3, 0.3, 0.3]])
cube_param_2 = {'z': z2, 'q': q2, 't': t2}
index = cube_inclusion(cube_param_1, cube_param_2)
print(index)
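    # Children 0 and 1 (t = 0.1) are assigned to parent 0, while child 2
    # (t = 0.4) fits entirely inside parent 1 (t = 0.3, z = 0.2).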
assert((index == np.array([0, 0, 1])).all())

# ==== src/tests/testdata.py (Doometnick/MaxiMin-2048, MIT) ====

from board import Direction
# Tuples of input, action, expected output.
moving_tests = [
(
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,0,2,0]],
Direction.UP,
[[8,0,2,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0]]
),
(
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,0,2,0]],
Direction.DOWN,
[[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[8,0,2,0]]
),
(
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,0,2,0]],
Direction.LEFT,
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,2,0,0]]
),
(
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,0,2,0]],
Direction.RIGHT,
[[0,0,0,0],
[0,0,0,4],
[0,0,0,0],
[0,0,4,2]]
),
(
[[4,4,4,4],
[8,0,8,4],
[32,16,0,16],
[16,8,2,4]],
Direction.RIGHT,
[[0,0,8,8],
[0,0,16,4],
[0,0,32,32],
[16,8,2,4]]
),
(
[[4,4,4,4],
[8,0,8,4],
[32,16,0,16],
[16,8,2,4]],
Direction.LEFT,
[[8,8,0,0],
[16,4,0,0],
[32,32,0,0],
[16,8,2,4]]
),
(
[[4,4,4,4],
[8,0,8,4],
[32,16,0,16],
[16,8,2,4]],
Direction.UP,
[[4,4,4,8],
[8,16,8,16],
[32,8,2,4],
[16,0,0,0]]
),
(
[[4,4,4,4],
[8,0,8,4],
[32,16,0,16],
[16,8,2,4]],
Direction.DOWN,
[[4,0,0,0],
[8,4,4,8],
[32,16,8,16],
[16,8,2,4]]
)
]
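
# A minimal, hypothetical sketch of how these fixtures might be consumed.
# It assumes a Board class in board.py with a constructor taking a grid and
# a move(direction) method; the real test harness may differ.
#
#   def test_moves():
#       for grid, direction, expected in moving_tests:
#           board = Board(grid)
#           board.move(direction)
#           assert board.grid == expected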

# ==== openprocurement/blade/tests/auctions.py (imaginal/openprocurement.blade, Apache-2.0) ====

# -*- coding: utf-8 -*-
import unittest
from uuid import uuid4
from copy import deepcopy
from openprocurement.api.models import get_now
from openprocurement.edge.tests.base import AuctionBaseWebTest, test_award, test_auction_data, test_document, ROUTE_PREFIX
try:
import openprocurement.auctions.core as auctions_core
except ImportError:
auctions_core = None
@unittest.skipUnless(auctions_core, "Auctions is not reachable")
class AuctionResourceTest(AuctionBaseWebTest):
def test_empty_listing(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
self.assertEqual(response.json['next_page']['offset'], '')
self.assertNotIn('prev_page', response.json)
response = self.app.get('/auctions?opt_jsonp=callback')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions?opt_pretty=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions?opt_jsonp=callback&opt_pretty=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions?offset=2015-01-01T00:00:00+02:00&descending=1&limit=10')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertIn('descending=1', response.json['next_page']['uri'])
self.assertIn('limit=10', response.json['next_page']['uri'])
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertIn('limit=10', response.json['prev_page']['uri'])
response = self.app.get('/auctions?feed=changes')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertEqual(response.json['next_page']['offset'], '')
self.assertNotIn('prev_page', response.json)
response = self.app.get('/auctions?feed=changes&offset=0', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Offset expired/invalid', u'location': u'params', u'name': u'offset'}
])
response = self.app.get('/auctions?feed=changes&descending=1&limit=10')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertIn('descending=1', response.json['next_page']['uri'])
self.assertIn('limit=10', response.json['next_page']['uri'])
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertIn('limit=10', response.json['prev_page']['uri'])
def test_listing(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
auctions = []
for i in range(3):
offset = get_now().isoformat()
auctions.append(self.create_auction())
ids = ','.join([i['id'] for i in auctions])
while True:
response = self.app.get('/auctions')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions]))
while True:
response = self.app.get('/auctions?offset={}'.format(offset))
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/auctions?limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('prev_page', response.json)
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/auctions', params=[('opt_fields', 'status')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status']))
self.assertIn('opt_fields=status', response.json['next_page']['uri'])
response = self.app.get('/auctions', params=[('opt_fields', 'status,enquiryPeriod')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status', u'enquiryPeriod']))
self.assertIn('opt_fields=status%2CenquiryPeriod', response.json['next_page']['uri'])
response = self.app.get('/auctions?descending=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions], reverse=True))
response = self.app.get('/auctions?descending=1&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
test_auction_data2 = test_auction_data.copy()
test_auction_data2['mode'] = 'test'
self.create_auction(test_auction_data2)
while True:
response = self.app.get('/auctions?mode=test')
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/auctions?mode=_all_')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 4)
def test_listing_changes(self):
response = self.app.get('/auctions?feed=changes')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
auctions = []
for i in range(3):
auctions.append(self.create_auction())
ids = ','.join([i['id'] for i in auctions])
while True:
response = self.app.get('/auctions?feed=changes')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions]))
response = self.app.get('/auctions?feed=changes&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('prev_page', response.json)
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/auctions?feed=changes', params=[('opt_fields', 'status')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status']))
self.assertIn('opt_fields=status', response.json['next_page']['uri'])
response = self.app.get('/auctions?feed=changes', params=[('opt_fields', 'status,enquiryPeriod')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status', u'enquiryPeriod']))
self.assertIn('opt_fields=status%2CenquiryPeriod', response.json['next_page']['uri'])
response = self.app.get('/auctions?feed=changes&descending=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions], reverse=True))
response = self.app.get('/auctions?feed=changes&descending=1&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
test_auction_data2 = test_auction_data.copy()
test_auction_data2['mode'] = 'test'
self.create_auction(test_auction_data2)
while True:
response = self.app.get('/auctions?feed=changes&mode=test')
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/auctions?feed=changes&mode=_all_')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 4)
def test_listing_draft(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
auctions = []
data = test_auction_data.copy()
data.update({'status': 'draft'})
for i in range(3):
auctions.append(self.create_auction(data))
ids = ','.join([i['id'] for i in auctions])
while True:
response = self.app.get('/auctions')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions]))
def test_get_auction(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertDictEqual(response.json['data'], auction)
response = self.app.get('/auctions/{}?opt_jsonp=callback'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('callback({"data": {"', response.body)
response = self.app.get('/auctions/{}?opt_pretty=1'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "data": {\n "', response.body)
def test_auction_not_found(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/auctions/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'auction_id'}
])
response = self.app.patch_json(
'/auctions/some_id', {'data': {}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'auction_id'}
])
# put custom document object into database to check auction construction on non-Auction data
data = {'contract': 'test', '_id': uuid4().hex}
self.db.save(data)
response = self.app.get('/auctions/{}'.format(data['_id']), status=404)
self.assertEqual(response.status, '404 Not Found')
@unittest.skipUnless(auctions_core, "Auctions is not reachable")
class AuctionAwardResourceTest(AuctionBaseWebTest):
def test_listing(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}/awards'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], auction['awards'])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards?opt_jsonp=callback'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards?opt_pretty=1'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards?opt_jsonp=callback&opt_pretty=1'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
def test_listing_changes(self):
auction = self.create_auction()
data = self.db[auction['id']]
awards = data['awards']
for i in range(3):
award = deepcopy(test_award)
award['date'] = get_now().isoformat()
award['id'] = uuid4().hex
awards.append(award)
self.db.save(data)
ids = ','.join([i['id'] for i in awards])
response = self.app.get('/auctions/{}/awards'.format(auction['id']))
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), len(awards))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in awards]))
self.assertEqual(set([i['date'] for i in response.json['data']]), set([i['date'] for i in awards]))
self.assertEqual([i['date'] for i in response.json['data']], sorted([i['date'] for i in awards]))
def test_get_award(self):
auction = self.create_auction()
award = auction['awards'][0]
response = self.app.get('/auctions/{}/awards/{}'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertDictEqual(response.json['data'], award)
response = self.app.get('/auctions/{}/awards/{}?opt_jsonp=callback'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('callback({"data": {"', response.body)
response = self.app.get('/auctions/{}/awards/{}?opt_pretty=1'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "data": {\n "', response.body)
def test_award_not_found(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}/awards/some_id'.format(auction['id']), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'award_id'}
])
def test_get_document_with_versions(self):
auction = self.create_auction()
data = self.db[auction['id']]
documents = data['documents']
for i in range(3):
document = deepcopy(test_document)
document['id'] = data['documents'][0]['id']
document['url'] += str(i)
document['dateModified'] = get_now().isoformat()
documents.append(document)
self.db.save(data)
versions = [{'dateModified': i['dateModified'], 'url': i['url']} for i in documents[:-1]]
response = self.app.get('/auctions/{}/documents/{}'.format(auction['id'], document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['previousVersions']), len(versions))
self.assertEqual(response.json['data']['previousVersions'], versions)
@unittest.skipUnless(auctions_core, "Auctions is not reachable")
class AuctionAwardDocumentResourceTest(AuctionBaseWebTest):
def test_listing(self):
auction = self.create_auction()
award = auction['awards'][0]
document = award['documents'][0]
response = self.app.get('/auctions/{}/awards/{}/documents'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], award['documents'])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents?opt_jsonp=callback'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents?opt_pretty=1'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents?opt_jsonp=callback&opt_pretty=1'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
def test_listing_changes(self):
auction = self.create_auction()
data = self.db[auction['id']]
award = data['awards'][0]
award_documents = award['documents']
for i in range(3):
document = deepcopy(test_document)
document['dateModified'] = get_now().isoformat()
document['id'] = uuid4().hex
award_documents.append(document)
self.db.save(data)
ids = ','.join([i['id'] for i in award_documents])
response = self.app.get('/auctions/{}/awards/{}/documents'.format(auction['id'], award['id']))
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), len(award_documents))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in award_documents]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in award_documents]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in award_documents]))
def test_get_award_document(self):
auction = self.create_auction()
award = auction['awards'][0]
award_document = award['documents'][0]
response = self.app.get('/auctions/{}/awards/{}/documents/{}'.format(auction['id'], award['id'], award_document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertDictEqual(response.json['data'], award_document)
        response = self.app.get('/auctions/{}/awards/{}/documents/{}?opt_jsonp=callback'.format(auction['id'], award['id'], award_document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('callback({"data": {"', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents/{}?opt_pretty=1'.format(auction['id'], award['id'], award_document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "data": {\n "', response.body)
def test_award_document_not_found(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}/awards/{}/documents/some_id'.format(auction['id'], auction['awards'][0]['id']), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'document_id'}
])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AuctionResourceTest))
suite.addTest(unittest.makeSuite(AuctionAwardResourceTest))
suite.addTest(unittest.makeSuite(AuctionAwardDocumentResourceTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')

# ==== tensorflow-ops-generator/resources/gen_ops/gen_math_ops.py (wumo/sim-world, MIT) ====

"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: math_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
def _abs(x, name=None):
r"""Computes the absolute value of a tensor.
Given a tensor `x`, this operation returns a tensor containing the absolute
value of each element in `x`. For example, if x is an input element and y is
an output element, this operation computes \\(y = |x|\\).
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Abs", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Abs", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Abs", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return _abs_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
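
# NOTE: every wrapper in this generated module follows the same shape as
# _abs above: in graph mode it registers the op through _op_def_lib and
# records a gradient; in eager mode it first tries the fast C execution
# path (TFE_Py_FastPathExecute) and, on _FallbackException, falls back to
# the corresponding *_eager_fallback helper.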
def _abs_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _abs
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Abs", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Abs", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def accumulate_nv2(inputs, shape, name=None):
r"""Returns the element-wise sum of a list of tensors.
`tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
wait for all of its inputs to be ready before beginning to sum. This can
save memory if inputs are ready at different times, since minimum temporary
storage is proportional to the output size rather than the inputs size.
Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
Returns a `Tensor` of same shape and type as the elements of `inputs`.
Args:
inputs: A list of at least 1 `Tensor` objects with the same type in: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
A list of `Tensor` objects, each with same shape and type.
shape: A `tf.TensorShape` or list of `ints`.
Shape of elements of `inputs`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `inputs`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if not isinstance(inputs, (list, tuple)):
raise TypeError(
"Expected list for 'inputs' argument to "
"'accumulate_nv2' Op, not %r." % inputs)
_attr_N = len(inputs)
shape = _execute.make_shape(shape, "shape")
_, _, _op = _op_def_lib._apply_op_helper(
"AccumulateNV2", inputs=inputs, shape=shape, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"), "shape",
_op.get_attr("shape"))
_execute.record_gradient(
"AccumulateNV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"AccumulateNV2", name, _ctx._post_execution_callbacks, inputs,
"shape", shape)
return _result
except _core._FallbackException:
return accumulate_nv2_eager_fallback(
inputs, shape=shape, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def accumulate_nv2_eager_fallback(inputs, shape, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function accumulate_nv2
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(inputs, (list, tuple)):
raise TypeError(
"Expected list for 'inputs' argument to "
"'accumulate_nv2' Op, not %r." % inputs)
_attr_N = len(inputs)
shape = _execute.make_shape(shape, "shape")
_attr_T, inputs = _execute.args_to_matching_eager(list(inputs), _ctx)
_inputs_flat = list(inputs)
_attrs = ("N", _attr_N, "T", _attr_T, "shape", shape)
_result = _execute.execute(b"AccumulateNV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"AccumulateNV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
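
# A hedged, commented-out usage sketch for accumulate_nv2. It assumes a
# plain `import tensorflow as tf`; user code would normally reach this op
# through a public wrapper such as tf.accumulate_n rather than this
# generated function.
#
#   a = tf.constant([1.0, 2.0])
#   b = tf.constant([3.0, 4.0])
#   total = accumulate_nv2([a, b], shape=[2])  # element-wise sum: [4.0, 6.0]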
@tf_export('math.acos', 'acos')
def acos(x, name=None):
r"""Computes acos of x element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Acos", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Acos", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Acos", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return acos_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def acos_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function acos
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Acos", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Acos", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.acosh', 'acosh')
def acosh(x, name=None):
r"""Computes inverse hyperbolic cosine of x element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Acosh", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Acosh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Acosh", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return acosh_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def acosh_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function acosh
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Acosh", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Acosh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.add', 'add')
def add(x, y, name=None):
r"""Returns x + y element-wise.
*NOTE*: `math.add` supports broadcasting. `AddN` does not. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `string`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Add", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Add", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Add", name,
_ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return add_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def add_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function add
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Add", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Add", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
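
# Unlike AddN, `Add` broadcasts its inputs NumPy-style: adding a
# [1, 2]-shaped tensor [[1., 2.]] to a [2, 1]-shaped tensor [[10.], [20.]]
# yields the [2, 2] result [[11., 12.], [21., 22.]].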
def add_n(inputs, name=None):
r"""Add all input tensors element wise.
Args:
inputs: A list of at least 1 `Tensor` objects with the same type in: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `variant`.
Must all be the same size and shape.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `inputs`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if not isinstance(inputs, (list, tuple)):
raise TypeError(
"Expected list for 'inputs' argument to "
"'add_n' Op, not %r." % inputs)
_attr_N = len(inputs)
_, _, _op = _op_def_lib._apply_op_helper(
"AddN", inputs=inputs, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"))
_execute.record_gradient(
"AddN", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "AddN", name,
_ctx._post_execution_callbacks, inputs)
return _result
except _core._FallbackException:
return add_n_eager_fallback(
inputs, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def add_n_eager_fallback(inputs, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function add_n
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(inputs, (list, tuple)):
raise TypeError(
"Expected list for 'inputs' argument to "
"'add_n' Op, not %r." % inputs)
_attr_N = len(inputs)
_attr_T, inputs = _execute.args_to_matching_eager(list(inputs), _ctx)
_inputs_flat = list(inputs)
_attrs = ("N", _attr_N, "T", _attr_T)
_result = _execute.execute(b"AddN", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"AddN", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def add_v2(x, y, name=None):
r"""Returns x + y element-wise.
*NOTE*: `AddV2` supports broadcasting (`AddN` does not). More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
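For example, broadcasting a scalar against a vector (an illustrative sketch):
```
# x is [1., 2., 3.], y is 1.
add_v2(x, y) ==> [2., 3., 4.]
```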
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"AddV2", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"AddV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "AddV2", name,
_ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return add_v2_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def add_v2_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function add_v2
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"AddV2", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"AddV2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _all(input, axis, keep_dims=False, name=None):
r"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
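For example (an illustrative sketch with a 2x2 `bool` tensor):
```
# 'input' is [[True, True], [False, True]]
_all(input, axis=1) ==> [True, False]
```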
Args:
input: A `Tensor` of type `bool`. The tensor to reduce.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
keep_dims: An optional `bool`. Defaults to `False`.
If true, retain reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_, _, _op = _op_def_lib._apply_op_helper(
"All", input=input, reduction_indices=axis, keep_dims=keep_dims,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("keep_dims", _op.get_attr("keep_dims"), "Tidx",
_op.get_attr("Tidx"))
_execute.record_gradient(
"All", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "All", name,
_ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
return _result
except _core._FallbackException:
return _all_eager_fallback(
input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _all_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _all
"""
_ctx = ctx if ctx else _context.context()
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
input = _ops.convert_to_tensor(input, _dtypes.bool)
_inputs_flat = [input, axis]
_attrs = ("keep_dims", keep_dims, "Tidx", _attr_Tidx)
_result = _execute.execute(b"All", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"All", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def angle(input, Tout=_dtypes.float32, name=None):
r"""Returns the argument of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float` that is the argument of each element in `input`. All elements in
`input` must be complex numbers of the form \\(a + bj\\), where *a*
is the real part and *b* is the imaginary part.
The argument returned by this operation is of the form \\(atan2(b, a)\\).
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.angle(input) ==> [2.0132, 1.056]
```
@compatibility(numpy)
Equivalent to np.angle.
@end_compatibility
Args:
input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `Tout`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if Tout is None:
Tout = _dtypes.float32
Tout = _execute.make_type(Tout, "Tout")
_, _, _op = _op_def_lib._apply_op_helper(
"Angle", input=input, Tout=Tout, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
_execute.record_gradient(
"Angle", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Angle", name,
_ctx._post_execution_callbacks, input, "Tout", Tout)
return _result
except _core._FallbackException:
return angle_eager_fallback(
input, Tout=Tout, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def angle_eager_fallback(input, Tout=_dtypes.float32, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function angle
"""
_ctx = ctx if ctx else _context.context()
if Tout is None:
Tout = _dtypes.float32
Tout = _execute.make_type(Tout, "Tout")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "Tout", Tout)
_result = _execute.execute(b"Angle", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Angle", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _any(input, axis, keep_dims=False, name=None):
r"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
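For example (an illustrative sketch):
```
# 'input' is [[True, False], [False, False]]
_any(input, axis=1) ==> [True, False]
```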
Args:
input: A `Tensor` of type `bool`. The tensor to reduce.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
keep_dims: An optional `bool`. Defaults to `False`.
If true, retain reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_, _, _op = _op_def_lib._apply_op_helper(
"Any", input=input, reduction_indices=axis, keep_dims=keep_dims,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("keep_dims", _op.get_attr("keep_dims"), "Tidx",
_op.get_attr("Tidx"))
_execute.record_gradient(
"Any", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Any", name,
_ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
return _result
except _core._FallbackException:
return _any_eager_fallback(
input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _any_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _any
"""
_ctx = ctx if ctx else _context.context()
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
input = _ops.convert_to_tensor(input, _dtypes.bool)
_inputs_flat = [input, axis]
_attrs = ("keep_dims", keep_dims, "Tidx", _attr_Tidx)
_result = _execute.execute(b"Any", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Any", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def approximate_equal(x, y, tolerance=1e-05, name=None):
r"""Returns the truth value of abs(x-y) < tolerance element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
y: A `Tensor`. Must have the same type as `x`.
tolerance: An optional `float`. Defaults to `1e-05`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if tolerance is None:
tolerance = 1e-05
tolerance = _execute.make_float(tolerance, "tolerance")
_, _, _op = _op_def_lib._apply_op_helper(
"ApproximateEqual", x=x, y=y, tolerance=tolerance, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "tolerance", _op.get_attr("tolerance"))
_execute.record_gradient(
"ApproximateEqual", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"ApproximateEqual", name, _ctx._post_execution_callbacks, x, y,
"tolerance", tolerance)
return _result
except _core._FallbackException:
return approximate_equal_eager_fallback(
x, y, tolerance=tolerance, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def approximate_equal_eager_fallback(x, y, tolerance=1e-05, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function approximate_equal
"""
_ctx = ctx if ctx else _context.context()
if tolerance is None:
tolerance = 1e-05
tolerance = _execute.make_float(tolerance, "tolerance")
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T, "tolerance", tolerance)
_result = _execute.execute(b"ApproximateEqual", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"ApproximateEqual", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def arg_max(input, dimension, output_type=_dtypes.int64, name=None):
r"""Returns the index with the largest value across dimensions of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
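For example (an illustrative sketch):
```
# 'input' is [[1, 5, 3], [4, 2, 6]]
arg_max(input, dimension=1) ==> [1, 2]
```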
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
dimension: A `Tensor`. Must be one of the following types: `int32`, `int64`.
int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which dimension of the input Tensor to reduce across. For vectors,
use dimension = 0.
output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `output_type`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if output_type is None:
output_type = _dtypes.int64
output_type = _execute.make_type(output_type, "output_type")
_, _, _op = _op_def_lib._apply_op_helper(
"ArgMax", input=input, dimension=dimension, output_type=output_type,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
"output_type", _op.get_attr("output_type"))
_execute.record_gradient(
"ArgMax", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "ArgMax", name,
_ctx._post_execution_callbacks, input, dimension, "output_type",
output_type)
return _result
except _core._FallbackException:
return arg_max_eager_fallback(
input, dimension, output_type=output_type, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def arg_max_eager_fallback(input, dimension, output_type=_dtypes.int64, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function arg_max
"""
_ctx = ctx if ctx else _context.context()
if output_type is None:
output_type = _dtypes.int64
output_type = _execute.make_type(output_type, "output_type")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_attr_Tidx, (dimension,) = _execute.args_to_matching_eager([dimension], _ctx, _dtypes.int32)
_inputs_flat = [input, dimension]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "output_type", output_type)
_result = _execute.execute(b"ArgMax", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"ArgMax", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def arg_min(input, dimension, output_type=_dtypes.int64, name=None):
r"""Returns the index with the smallest value across dimensions of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
dimension: A `Tensor`. Must be one of the following types: `int32`, `int64`.
int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which dimension of the input Tensor to reduce across. For vectors,
use dimension = 0.
output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `output_type`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if output_type is None:
output_type = _dtypes.int64
output_type = _execute.make_type(output_type, "output_type")
_, _, _op = _op_def_lib._apply_op_helper(
"ArgMin", input=input, dimension=dimension, output_type=output_type,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
"output_type", _op.get_attr("output_type"))
_execute.record_gradient(
"ArgMin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "ArgMin", name,
_ctx._post_execution_callbacks, input, dimension, "output_type",
output_type)
return _result
except _core._FallbackException:
return arg_min_eager_fallback(
input, dimension, output_type=output_type, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def arg_min_eager_fallback(input, dimension, output_type=_dtypes.int64, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function arg_min
"""
_ctx = ctx if ctx else _context.context()
if output_type is None:
output_type = _dtypes.int64
output_type = _execute.make_type(output_type, "output_type")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_attr_Tidx, (dimension,) = _execute.args_to_matching_eager([dimension], _ctx, _dtypes.int32)
_inputs_flat = [input, dimension]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "output_type", output_type)
_result = _execute.execute(b"ArgMin", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"ArgMin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.asin', 'asin')
def asin(x, name=None):
r"""Computes asin of x element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Asin", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Asin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Asin", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return asin_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def asin_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function asin
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Asin", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Asin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.asinh', 'asinh')
def asinh(x, name=None):
r"""Computes inverse hyperbolic sine of x element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Asinh", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Asinh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Asinh", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return asinh_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def asinh_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function asinh
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Asinh", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Asinh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.atan', 'atan')
def atan(x, name=None):
r"""Computes atan of x element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Atan", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Atan", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Atan", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return atan_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def atan_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function atan
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Atan", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Atan", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.atan2', 'atan2')
def atan2(y, x, name=None):
r"""Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
This is the angle \\( \theta \in [-\pi, \pi] \\) such that
\\[ x = r \cos(\theta) \\]
and
\\[ y = r \sin(\theta) \\]
where \\( r = \sqrt{x^2 + y^2} \\).
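For example, with equal arguments the angle is \\( \pi / 4 \\) (an illustrative sketch; value rounded):
```
atan2(1.0, 1.0) ==> 0.7853982
```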
Args:
y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
x: A `Tensor`. Must have the same type as `y`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `y`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Atan2", y=y, x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Atan2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Atan2", name,
_ctx._post_execution_callbacks, y, x)
return _result
except _core._FallbackException:
return atan2_eager_fallback(
y, x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def atan2_eager_fallback(y, x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function atan2
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([y, x], _ctx)
(y, x) = _inputs_T
_inputs_flat = [y, x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Atan2", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Atan2", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.atanh', 'atanh')
def atanh(x, name=None):
r"""Computes inverse hyperbolic tangent of x element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Atanh", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Atanh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Atanh", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return atanh_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def atanh_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function atanh
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Atanh", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Atanh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def batch_mat_mul(x, y, adj_x=False, adj_y=False, name=None):
r"""Multiplies slices of two tensors in batches.
Multiplies all slices of `Tensor` `x` and `y` (each slice can be
viewed as an element of a batch), and arranges the individual results
in a single output tensor of the same batch size. Each of the
individual slices can optionally be adjointed (to adjoint a matrix
means to transpose and conjugate it) before multiplication by setting
the `adj_x` or `adj_y` flag to `True`; both default to `False`.
The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
and `[..., r_y, c_y]`.
The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
r_o = c_x if adj_x else r_x
c_o = r_y if adj_y else c_y
It is computed as:
output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
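For example, only the two innermost dimensions are contracted; the batch dimensions pass through (an illustrative shape sketch):
```
# x has shape [2, 3, 4], y has shape [2, 4, 5]
batch_mat_mul(x, y)  # ==> a tensor of shape [2, 3, 5]
```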
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
2-D or higher with shape `[..., r_x, c_x]`.
y: A `Tensor`. Must have the same type as `x`.
2-D or higher with shape `[..., r_y, c_y]`.
adj_x: An optional `bool`. Defaults to `False`.
If `True`, adjoint the slices of `x`.
adj_y: An optional `bool`. Defaults to `False`.
If `True`, adjoint the slices of `y`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if adj_x is None:
adj_x = False
adj_x = _execute.make_bool(adj_x, "adj_x")
if adj_y is None:
adj_y = False
adj_y = _execute.make_bool(adj_y, "adj_y")
_, _, _op = _op_def_lib._apply_op_helper(
"BatchMatMul", x=x, y=y, adj_x=adj_x, adj_y=adj_y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "adj_x", _op.get_attr("adj_x"), "adj_y",
_op.get_attr("adj_y"))
_execute.record_gradient(
"BatchMatMul", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "BatchMatMul",
name, _ctx._post_execution_callbacks, x, y, "adj_x", adj_x, "adj_y",
adj_y)
return _result
except _core._FallbackException:
return batch_mat_mul_eager_fallback(
x, y, adj_x=adj_x, adj_y=adj_y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def batch_mat_mul_eager_fallback(x, y, adj_x=False, adj_y=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function batch_mat_mul
"""
_ctx = ctx if ctx else _context.context()
if adj_x is None:
adj_x = False
adj_x = _execute.make_bool(adj_x, "adj_x")
if adj_y is None:
adj_y = False
adj_y = _execute.make_bool(adj_y, "adj_y")
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T, "adj_x", adj_x, "adj_y", adj_y)
_result = _execute.execute(b"BatchMatMul", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BatchMatMul", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def bessel_i0e(x, name=None):
r"""Computes the Bessel i0e function of `x` element-wise.
Exponentially scaled modified Bessel function of order 0 defined as
`bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)`.
This function is faster and more numerically stable than `bessel_i0(x)`.
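For example (an illustrative sketch; values approximate):
```
bessel_i0e([0., 1.]) ==> [1., 0.4658]
```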
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"BesselI0e", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"BesselI0e", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "BesselI0e",
name, _ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return bessel_i0e_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def bessel_i0e_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function bessel_i0e
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"BesselI0e", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BesselI0e", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def bessel_i1e(x, name=None):
r"""Computes the Bessel i1e function of `x` element-wise.
Exponentially scaled modified Bessel function of order 1 defined as
`bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)`.
This function is faster and more numerically stable than `bessel_i1(x)`.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"BesselI1e", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"BesselI1e", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "BesselI1e",
name, _ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return bessel_i1e_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def bessel_i1e_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function bessel_i1e
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"BesselI1e", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BesselI1e", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.betainc', 'betainc')
def betainc(a, b, x, name=None):
r"""Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
The regularized incomplete beta integral is defined as:
\\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
where
\\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
is the incomplete beta function and \\(B(a, b)\\) is the *complete*
beta function.
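For example, \\(I_x(1, 1) = x\\), the CDF of the uniform distribution (an illustrative sketch):
```
betainc(1.0, 1.0, 0.3) ==> 0.3
```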
Args:
a: A `Tensor`. Must be one of the following types: `float32`, `float64`.
b: A `Tensor`. Must have the same type as `a`.
x: A `Tensor`. Must have the same type as `a`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `a`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Betainc", a=a, b=b, x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Betainc", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Betainc",
name, _ctx._post_execution_callbacks, a, b, x)
return _result
except _core._FallbackException:
return betainc_eager_fallback(
a, b, x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def betainc_eager_fallback(a, b, x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function betainc
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([a, b, x], _ctx)
(a, b, x) = _inputs_T
_inputs_flat = [a, b, x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Betainc", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Betainc", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def bincount(arr, size, weights, name=None):
r"""Counts the number of occurrences of each value in an integer array.
Outputs a vector with length `size` and the same dtype as `weights`. If
`weights` are empty, then index `i` stores the number of times the value `i`
occurs in `arr`. If `weights` are non-empty, then index `i` stores the sum of
the value in `weights` at each index where the corresponding value in `arr` is
`i`.
Values in `arr` outside of the range [0, size) are ignored.
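For example (an illustrative sketch; a length-0 `weights` tensor means plain counting):
```
# 'arr' is [1, 1, 2], 'size' is 4, 'weights' is an empty int32 tensor
bincount(arr, size, weights) ==> [0, 2, 1, 0]
```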
Args:
arr: A `Tensor` of type `int32`. int32 `Tensor`.
size: A `Tensor` of type `int32`. non-negative int32 scalar `Tensor`.
weights: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`.
An int32, int64, float32, or float64 `Tensor` with the same
shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
equal to 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `weights`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Bincount", arr=arr, size=size, weights=weights, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Bincount", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Bincount",
name, _ctx._post_execution_callbacks, arr, size, weights)
return _result
except _core._FallbackException:
return bincount_eager_fallback(
arr, size, weights, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def bincount_eager_fallback(arr, size, weights, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function bincount
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (weights,) = _execute.args_to_matching_eager([weights], _ctx)
arr = _ops.convert_to_tensor(arr, _dtypes.int32)
size = _ops.convert_to_tensor(size, _dtypes.int32)
_inputs_flat = [arr, size, weights]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Bincount", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"Bincount", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def bucketize(input, boundaries, name=None):
r"""Bucketizes 'input' based on 'boundaries'.
For example, if the inputs are
boundaries = [0, 10, 100]
input = [[-5, 10000]
[150, 10]
[5, 100]]
then the output will be
output = [[0, 3]
[3, 2]
[1, 3]]
Args:
input: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`.
A `Tensor` of any shape, with int or float type.
boundaries: A list of `floats`.
A sorted list of floats giving the boundaries of the buckets.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if not isinstance(boundaries, (list, tuple)):
raise TypeError(
"Expected list for 'boundaries' argument to "
"'bucketize' Op, not %r." % boundaries)
boundaries = [_execute.make_float(_f, "boundaries") for _f in boundaries]
_, _, _op = _op_def_lib._apply_op_helper(
"Bucketize", input=input, boundaries=boundaries, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "boundaries",
_op.get_attr("boundaries"))
_execute.record_gradient(
"Bucketize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Bucketize",
name, _ctx._post_execution_callbacks, input, "boundaries", boundaries)
return _result
except _core._FallbackException:
return bucketize_eager_fallback(
input, boundaries=boundaries, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def bucketize_eager_fallback(input, boundaries, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function bucketize
"""
_ctx = ctx if ctx else _context.context()
if not isinstance(boundaries, (list, tuple)):
raise TypeError(
"Expected list for 'boundaries' argument to "
"'bucketize' Op, not %r." % boundaries)
boundaries = [_execute.make_float(_f, "boundaries") for _f in boundaries]
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "boundaries", boundaries)
_result = _execute.execute(b"Bucketize", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"Bucketize", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def cast(x, DstT, name=None):
r"""Cast x of type SrcT to y of DstT.
Args:
x: A `Tensor`.
DstT: A `tf.DType`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `DstT`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
DstT = _execute.make_type(DstT, "DstT")
_, _, _op = _op_def_lib._apply_op_helper(
"Cast", x=x, DstT=DstT, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("SrcT", _op.get_attr("SrcT"), "DstT", _op.get_attr("DstT"))
_execute.record_gradient(
"Cast", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Cast", name,
_ctx._post_execution_callbacks, x, "DstT", DstT)
return _result
except _core._FallbackException:
return cast_eager_fallback(
x, DstT=DstT, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def cast_eager_fallback(x, DstT, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function cast
"""
_ctx = ctx if ctx else _context.context()
DstT = _execute.make_type(DstT, "DstT")
_attr_SrcT, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("SrcT", _attr_SrcT, "DstT", DstT)
_result = _execute.execute(b"Cast", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Cast", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.ceil', 'ceil')
def ceil(x, name=None):
r"""Returns element-wise smallest integer in not less than x.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Ceil", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Ceil", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Ceil", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return ceil_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def ceil_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function ceil
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Ceil", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Ceil", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _clip_by_value(t, clip_value_min, clip_value_max, name=None):
r"""Clips tensor values to a specified min and max.
Given a tensor `t`, this operation returns a tensor of the same type and
shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
Any values less than `clip_value_min` are set to `clip_value_min`. Any values
greater than `clip_value_max` are set to `clip_value_max`.
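For example (an illustrative sketch):
```
# 't' is [-1, 4, 2, 7]
_clip_by_value(t, 0, 5) ==> [0, 4, 2, 5]
```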
Args:
t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
A `Tensor`.
clip_value_min: A `Tensor`. Must have the same type as `t`.
A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
as `t`. The minimum value to clip by.
clip_value_max: A `Tensor`. Must have the same type as `t`.
A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
as `t`. The maximum value to clip by.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `t`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"ClipByValue", t=t, clip_value_min=clip_value_min,
clip_value_max=clip_value_max, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"ClipByValue", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "ClipByValue",
name, _ctx._post_execution_callbacks, t, clip_value_min,
clip_value_max)
return _result
except _core._FallbackException:
return _clip_by_value_eager_fallback(
t, clip_value_min, clip_value_max, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _clip_by_value_eager_fallback(t, clip_value_min, clip_value_max, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _clip_by_value
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([t, clip_value_min, clip_value_max], _ctx)
(t, clip_value_min, clip_value_max) = _inputs_T
_inputs_flat = [t, clip_value_min, clip_value_max]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"ClipByValue", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"ClipByValue", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def compare_and_bitpack(input, threshold, name=None):
r"""Compare values of `input` to `threshold` and pack resulting bits into a `uint8`.
Each comparison returns a boolean `true` (if `input_value > threshold`)
or `false` otherwise.
This operation is useful for Locality-Sensitive-Hashing (LSH) and other
algorithms that use hashing approximations of cosine and `L2` distances;
codes can be generated from an input via:
```python
codebook_size = 50
codebook_bits = codebook_size * 32
codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
dtype=x.dtype,
initializer=tf.orthogonal_initializer())
codes = compare_and_bitpack(tf.matmul(x, codebook), threshold=0.)
codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32
# now codes has shape x.shape[:-1] + [codebook_size]
```
**NOTE**: Currently, the innermost dimension of the tensor must be divisible
by 8.
Given an `input` shaped `[s0, s1, ..., s_n]`, the output is
a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.
Args:
input: A `Tensor`. Must be one of the following types: `bool`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`.
Values to compare against `threshold` and bitpack.
threshold: A `Tensor`. Must have the same type as `input`.
Threshold to compare against.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `uint8`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"CompareAndBitpack", input=input, threshold=threshold, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"CompareAndBitpack", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"CompareAndBitpack", name, _ctx._post_execution_callbacks, input,
threshold)
return _result
except _core._FallbackException:
return compare_and_bitpack_eager_fallback(
input, threshold, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def compare_and_bitpack_eager_fallback(input, threshold, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function compare_and_bitpack
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, threshold], _ctx)
(input, threshold) = _inputs_T
_inputs_flat = [input, threshold]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"CompareAndBitpack", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"CompareAndBitpack", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _complex(real, imag, Tout=_dtypes.complex64, name=None):
r"""Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \\(a + bj\\), where
*a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must have the same shape.
For example:
```
# tensor 'real' is [2.25, 3.25]
# tensor `imag` is [4.75, 5.75]
tf.complex(real, imag) ==> [2.25 + 4.75j, 3.25 + 5.75j]
```
Args:
real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
imag: A `Tensor`. Must have the same type as `real`.
Tout: An optional `tf.DType` from: `tf.complex64, tf.complex128`. Defaults to `tf.complex64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `Tout`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if Tout is None:
Tout = _dtypes.complex64
Tout = _execute.make_type(Tout, "Tout")
_, _, _op = _op_def_lib._apply_op_helper(
"Complex", real=real, imag=imag, Tout=Tout, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
_execute.record_gradient(
"Complex", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Complex",
name, _ctx._post_execution_callbacks, real, imag, "Tout", Tout)
return _result
except _core._FallbackException:
return _complex_eager_fallback(
real, imag, Tout=Tout, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _complex_eager_fallback(real, imag, Tout=_dtypes.complex64, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _complex
"""
_ctx = ctx if ctx else _context.context()
if Tout is None:
Tout = _dtypes.complex64
Tout = _execute.make_type(Tout, "Tout")
_attr_T, _inputs_T = _execute.args_to_matching_eager([real, imag], _ctx, _dtypes.float32)
(real, imag) = _inputs_T
_inputs_flat = [real, imag]
_attrs = ("T", _attr_T, "Tout", Tout)
_result = _execute.execute(b"Complex", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Complex", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def complex_abs(x, Tout=_dtypes.float32, name=None):
r"""Computes the complex absolute value of a tensor.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float` or `double` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
value is computed as \\( \sqrt{a^2 + b^2}\\).
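For example (an illustrative sketch):
```
# 'x' is [3 + 4j]
complex_abs(x) ==> [5.0]
```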
Args:
x: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `Tout`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if Tout is None:
Tout = _dtypes.float32
Tout = _execute.make_type(Tout, "Tout")
_, _, _op = _op_def_lib._apply_op_helper(
"ComplexAbs", x=x, Tout=Tout, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
_execute.record_gradient(
"ComplexAbs", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "ComplexAbs",
name, _ctx._post_execution_callbacks, x, "Tout", Tout)
return _result
except _core._FallbackException:
return complex_abs_eager_fallback(
x, Tout=Tout, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def complex_abs_eager_fallback(x, Tout=_dtypes.float32, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function complex_abs
"""
_ctx = ctx if ctx else _context.context()
if Tout is None:
Tout = _dtypes.float32
Tout = _execute.make_type(Tout, "Tout")
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx, _dtypes.complex64)
_inputs_flat = [x]
_attrs = ("T", _attr_T, "Tout", Tout)
_result = _execute.execute(b"ComplexAbs", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"ComplexAbs", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def conj(input, name=None):
r"""Returns the complex conjugate of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
complex numbers that are the complex conjugate of each element in `input`. The
complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
real part and *b* is the imaginary part.
The complex conjugate returned by this operation is of the form \\(a - bj\\).
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
```
Args:
input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`, `variant`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Conj", input=input, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Conj", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Conj", name,
_ctx._post_execution_callbacks, input)
return _result
except _core._FallbackException:
return conj_eager_fallback(
input, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def conj_eager_fallback(input, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function conj
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64)
_inputs_flat = [input]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Conj", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Conj", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.cos', 'cos')
def cos(x, name=None):
r"""Computes cos of x element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Cos", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Cos", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Cos", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return cos_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def cos_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function cos
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Cos", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Cos", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.cosh', 'cosh')
def cosh(x, name=None):
r"""Computes hyperbolic cosine of x element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Cosh", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Cosh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Cosh", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return cosh_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def cosh_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function cosh
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Cosh", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Cosh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('linalg.cross', 'cross')
def cross(a, b, name=None):
r"""Compute the pairwise cross product.
`a` and `b` must be the same shape; they can either be simple 3-element vectors,
or any shape where the innermost dimension is 3. In the latter case, each pair
of corresponding 3-element vectors is cross-multiplied independently.
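For example (illustrative values):
```
# tensor 'a' is [1.0, 0.0, 0.0], tensor 'b' is [0.0, 1.0, 0.0]
tf.cross(a, b) ==> [0.0, 0.0, 1.0]
```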
Args:
a: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
A tensor containing 3-element vectors.
b: A `Tensor`. Must have the same type as `a`.
Another tensor, of same type and shape as `a`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `a`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Cross", a=a, b=b, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Cross", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Cross", name,
_ctx._post_execution_callbacks, a, b)
return _result
except _core._FallbackException:
return cross_eager_fallback(
a, b, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def cross_eager_fallback(a, b, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function cross
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([a, b], _ctx)
(a, b) = _inputs_T
_inputs_flat = [a, b]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Cross", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Cross", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def cumprod(x, axis, exclusive=False, reverse=False, name=None):
r"""Compute the cumulative product of the tensor `x` along `axis`.
By default, this op performs an inclusive cumprod, which means that the first
element of the input is identical to the first element of the output:
```python
tf.cumprod([a, b, c]) # => [a, a * b, a * b * c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
performed instead:
```python
tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b]
```
By setting the `reverse` kwarg to `True`, the cumprod is performed in the
opposite direction:
```python
tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```python
tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the
range `[-rank(x), rank(x))`.
exclusive: An optional `bool`. Defaults to `False`.
If `True`, perform exclusive cumprod.
reverse: An optional `bool`. Defaults to `False`.
If `True`, perform the cumprod in the reverse direction.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if exclusive is None:
exclusive = False
exclusive = _execute.make_bool(exclusive, "exclusive")
if reverse is None:
reverse = False
reverse = _execute.make_bool(reverse, "reverse")
_, _, _op = _op_def_lib._apply_op_helper(
"Cumprod", x=x, axis=axis, exclusive=exclusive, reverse=reverse,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("exclusive", _op.get_attr("exclusive"), "reverse",
_op.get_attr("reverse"), "T", _op.get_attr("T"), "Tidx",
_op.get_attr("Tidx"))
_execute.record_gradient(
"Cumprod", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Cumprod",
name, _ctx._post_execution_callbacks, x, axis, "exclusive", exclusive,
"reverse", reverse)
return _result
except _core._FallbackException:
return cumprod_eager_fallback(
x, axis, exclusive=exclusive, reverse=reverse, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def cumprod_eager_fallback(x, axis, exclusive=False, reverse=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function cumprod
"""
_ctx = ctx if ctx else _context.context()
if exclusive is None:
exclusive = False
exclusive = _execute.make_bool(exclusive, "exclusive")
if reverse is None:
reverse = False
reverse = _execute.make_bool(reverse, "reverse")
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
_inputs_flat = [x, axis]
_attrs = ("exclusive", exclusive, "reverse", reverse, "T", _attr_T, "Tidx",
_attr_Tidx)
_result = _execute.execute(b"Cumprod", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Cumprod", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def cumsum(x, axis, exclusive=False, reverse=False, name=None):
r"""Compute the cumulative sum of the tensor `x` along `axis`.
By default, this op performs an inclusive cumsum, which means that the first
element of the input is identical to the first element of the output:
```python
tf.cumsum([a, b, c]) # => [a, a + b, a + b + c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
performed instead:
```python
tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b]
```
By setting the `reverse` kwarg to `True`, the cumsum is performed in the
opposite direction:
```python
tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```python
tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the
range `[-rank(x), rank(x))`.
exclusive: An optional `bool`. Defaults to `False`.
If `True`, perform exclusive cumsum.
reverse: An optional `bool`. Defaults to `False`.
If `True`, perform the cumsum in the reverse direction.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if exclusive is None:
exclusive = False
exclusive = _execute.make_bool(exclusive, "exclusive")
if reverse is None:
reverse = False
reverse = _execute.make_bool(reverse, "reverse")
_, _, _op = _op_def_lib._apply_op_helper(
"Cumsum", x=x, axis=axis, exclusive=exclusive, reverse=reverse,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("exclusive", _op.get_attr("exclusive"), "reverse",
_op.get_attr("reverse"), "T", _op.get_attr("T"), "Tidx",
_op.get_attr("Tidx"))
_execute.record_gradient(
"Cumsum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Cumsum", name,
_ctx._post_execution_callbacks, x, axis, "exclusive", exclusive,
"reverse", reverse)
return _result
except _core._FallbackException:
return cumsum_eager_fallback(
x, axis, exclusive=exclusive, reverse=reverse, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def cumsum_eager_fallback(x, axis, exclusive=False, reverse=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function cumsum
"""
_ctx = ctx if ctx else _context.context()
if exclusive is None:
exclusive = False
exclusive = _execute.make_bool(exclusive, "exclusive")
if reverse is None:
reverse = False
reverse = _execute.make_bool(reverse, "reverse")
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
_inputs_flat = [x, axis]
_attrs = ("exclusive", exclusive, "reverse", reverse, "T", _attr_T, "Tidx",
_attr_Tidx)
_result = _execute.execute(b"Cumsum", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Cumsum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.digamma', 'digamma')
def digamma(x, name=None):
r"""Computes Psi, the derivative of Lgamma (the log of the absolute value of
`Gamma(x)`), element-wise.
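For example (illustrative values; results shown approximately):
```
# tensor 'x' is [1.0, 2.0]
tf.digamma(x) ==> [-0.5772157, 0.4227843]
```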
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Digamma", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Digamma", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Digamma",
name, _ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return digamma_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def digamma_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function digamma
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Digamma", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Digamma", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def div(x, y, name=None):
r"""Returns x / y element-wise.
*NOTE*: `Div` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Div", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Div", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Div", name,
_ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return div_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def div_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function div
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Div", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Div", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.equal', 'equal')
def equal(x, y, name=None):
r"""Returns the truth value of (x == y) element-wise.
*NOTE*: `math.equal` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
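For example (illustrative values; note the broadcast in the second call):
```
# tensor 'x' is [1, 2, 3], tensor 'y' is [1, 0, 3]
tf.equal(x, y) ==> [True, False, True]
tf.equal(x, 1) ==> [True, False, False]
```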
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `quint8`, `qint8`, `qint32`, `string`, `bool`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Equal", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Equal", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Equal", name,
_ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return equal_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def equal_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function equal
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Equal", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Equal", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def erf(x, name=None):
r"""Computes the Gauss error function of `x` element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Erf", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Erf", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Erf", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return erf_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def erf_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function erf
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Erf", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Erf", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.erfc', 'erfc')
def erfc(x, name=None):
r"""Computes the complementary error function of `x` element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Erfc", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Erfc", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Erfc", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return erfc_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def erfc_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function erfc
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Erfc", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Erfc", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.exp', 'exp')
def exp(x, name=None):
r"""Computes exponential of x element-wise. \\(y = e^x\\).
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Exp", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Exp", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Exp", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return exp_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def exp_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function exp
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Exp", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Exp", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.expm1', 'expm1')
def expm1(x, name=None):
r"""Computes exponential of x - 1 element-wise.
I.e., \\(y = (\exp x) - 1\\).
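For small `x`, `expm1(x)` is more accurate than computing `exp(x) - 1`
directly. For example (illustrative values; results shown approximately):
```
# tensor 'x' is [0.0, 1.0]
tf.expm1(x) ==> [0.0, 1.7182818]
```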
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Expm1", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Expm1", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Expm1", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return expm1_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def expm1_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function expm1
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Expm1", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Expm1", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.floor', 'floor')
def floor(x, name=None):
r"""Returns element-wise largest integer not greater than x.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Floor", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Floor", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Floor", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return floor_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def floor_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function floor
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Floor", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Floor", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def floor_div(x, y, name=None):
r"""Returns x // y element-wise.
*NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
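Quotients round toward negative infinity, matching Python's `//` operator.
For example (illustrative values):
```
# tensor 'x' is [7, -7], tensor 'y' is [2, 2]
floor_div(x, y) ==> [3, -4]
```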
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"FloorDiv", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"FloorDiv", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "FloorDiv",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return floor_div_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def floor_div_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function floor_div
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"FloorDiv", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"FloorDiv", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def floor_mod(x, y, name=None):
r"""Returns element-wise remainder of division. When `x < 0` xor `y < 0` is
true, this follows Python semantics in that the result here is consistent
with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
*NOTE*: `FloorMod` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
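For example (illustrative values; a nonzero result takes the sign of `y`):
```
# tensor 'x' is [-7, 7], tensor 'y' is [5, -5]
floor_mod(x, y) ==> [3, -3]
```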
Args:
x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `bfloat16`, `half`, `float32`, `float64`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"FloorMod", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"FloorMod", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "FloorMod",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return floor_mod_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def floor_mod_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function floor_mod
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"FloorMod", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"FloorMod", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.greater', 'greater')
def greater(x, y, name=None):
r"""Returns the truth value of (x > y) element-wise.
*NOTE*: `math.greater` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
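For example (illustrative values):
```
# tensor 'x' is [5, 4, 6], tensor 'y' is [5, 2, 5]
tf.greater(x, y) ==> [False, True, True]
```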
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Greater", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Greater", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Greater",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return greater_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def greater_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function greater
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Greater", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Greater", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.greater_equal', 'greater_equal')
def greater_equal(x, y, name=None):
r"""Returns the truth value of (x >= y) element-wise.
*NOTE*: `math.greater_equal` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"GreaterEqual", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"GreaterEqual", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "GreaterEqual",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return greater_equal_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def greater_equal_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function greater_equal
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"GreaterEqual", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"GreaterEqual", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _histogram_fixed_width(values, value_range, nbins, dtype=_dtypes.int32, name=None):
r"""Return histogram of values.
Given the tensor `values`, this operation returns a rank 1 histogram counting
the number of entries in `values` that fall into every bin. The bins are
equal width and determined by the arguments `value_range` and `nbins`.
```python
# Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
nbins = 5
value_range = [0.0, 5.0]
new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
with tf.Session() as sess:
sess.run(hist)  # => [2, 1, 1, 0, 2]
```
Args:
values: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`.
Numeric `Tensor`.
value_range: A `Tensor`. Must have the same type as `values`.
Shape [2] `Tensor` of same `dtype` as `values`.
values <= value_range[0] will be mapped to hist[0],
values >= value_range[1] will be mapped to hist[-1].
nbins: A `Tensor` of type `int32`.
Scalar `int32 Tensor`. Number of histogram bins.
dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if dtype is None:
dtype = _dtypes.int32
dtype = _execute.make_type(dtype, "dtype")
_, _, _op = _op_def_lib._apply_op_helper(
"HistogramFixedWidth", values=values, value_range=value_range,
nbins=nbins, dtype=dtype, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "dtype", _op.get_attr("dtype"))
_execute.record_gradient(
"HistogramFixedWidth", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"HistogramFixedWidth", name, _ctx._post_execution_callbacks, values,
value_range, nbins, "dtype", dtype)
return _result
except _core._FallbackException:
return _histogram_fixed_width_eager_fallback(
values, value_range, nbins, dtype=dtype, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _histogram_fixed_width_eager_fallback(values, value_range, nbins, dtype=_dtypes.int32, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _histogram_fixed_width
"""
_ctx = ctx if ctx else _context.context()
if dtype is None:
dtype = _dtypes.int32
dtype = _execute.make_type(dtype, "dtype")
_attr_T, _inputs_T = _execute.args_to_matching_eager([values, value_range], _ctx)
(values, value_range) = _inputs_T
nbins = _ops.convert_to_tensor(nbins, _dtypes.int32)
_inputs_flat = [values, value_range, nbins]
_attrs = ("T", _attr_T, "dtype", dtype)
_result = _execute.execute(b"HistogramFixedWidth", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"HistogramFixedWidth", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.igamma', 'igamma')
def igamma(a, x, name=None):
r"""Compute the lower regularized incomplete Gamma function `Q(a, x)`.
The lower regularized incomplete Gamma function is defined as:
\\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
where
\\(gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt\\)
is the lower incomplete Gamma function.
Note, above, `Q(a, x)` (`Igammac`) is the upper regularized incomplete
Gamma function.
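For example, since \\(P(1, x) = 1 - e^{-x}\\), we have (illustrative values;
result shown approximately):
```
# tensor 'a' is [1.0], tensor 'x' is [1.0]
tf.igamma(a, x) ==> [0.6321206]
```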
Args:
a: A `Tensor`. Must be one of the following types: `float32`, `float64`.
x: A `Tensor`. Must have the same type as `a`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `a`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Igamma", a=a, x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Igamma", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Igamma", name,
_ctx._post_execution_callbacks, a, x)
return _result
except _core._FallbackException:
return igamma_eager_fallback(
a, x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def igamma_eager_fallback(a, x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function igamma
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], _ctx)
(a, x) = _inputs_T
_inputs_flat = [a, x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Igamma", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Igamma", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def igamma_grad_a(a, x, name=None):
r"""Computes the gradient of `igamma(a, x)` wrt `a`.
Args:
a: A `Tensor`. Must be one of the following types: `float32`, `float64`.
x: A `Tensor`. Must have the same type as `a`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `a`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"IgammaGradA", a=a, x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"IgammaGradA", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "IgammaGradA",
name, _ctx._post_execution_callbacks, a, x)
return _result
except _core._FallbackException:
return igamma_grad_a_eager_fallback(
a, x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def igamma_grad_a_eager_fallback(a, x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function igamma_grad_a
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], _ctx)
(a, x) = _inputs_T
_inputs_flat = [a, x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"IgammaGradA", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"IgammaGradA", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.igammac', 'igammac')
def igammac(a, x, name=None):
r"""Compute the upper regularized incomplete Gamma function `Q(a, x)`.
The upper regularized incomplete Gamma function is defined as:
\\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
where
\\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\\)
is the upper incomplete Gamma function.
Note, above, `P(a, x)` (`Igamma`) is the lower regularized incomplete
Gamma function.
Args:
a: A `Tensor`. Must be one of the following types: `float32`, `float64`.
x: A `Tensor`. Must have the same type as `a`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `a`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Igammac", a=a, x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Igammac", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Igammac",
name, _ctx._post_execution_callbacks, a, x)
return _result
except _core._FallbackException:
return igammac_eager_fallback(
a, x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def igammac_eager_fallback(a, x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function igammac
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], _ctx)
(a, x) = _inputs_T
_inputs_flat = [a, x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Igammac", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Igammac", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def imag(input, Tout=_dtypes.float32, name=None):
r"""Returns the imaginary part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float` that is the imaginary part of each element in `input`. All
elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
is the real part and *b* is the imaginary part returned by this operation.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.imag(input) ==> [4.75, 5.75]
```
Args:
input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `Tout`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if Tout is None:
Tout = _dtypes.float32
Tout = _execute.make_type(Tout, "Tout")
_, _, _op = _op_def_lib._apply_op_helper(
"Imag", input=input, Tout=Tout, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
_execute.record_gradient(
"Imag", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Imag", name,
_ctx._post_execution_callbacks, input, "Tout", Tout)
return _result
except _core._FallbackException:
return imag_eager_fallback(
input, Tout=Tout, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def imag_eager_fallback(input, Tout=_dtypes.float32, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function imag
"""
_ctx = ctx if ctx else _context.context()
if Tout is None:
Tout = _dtypes.float32
Tout = _execute.make_type(Tout, "Tout")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "Tout", Tout)
_result = _execute.execute(b"Imag", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Imag", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def inv(x, name=None):
r"""Computes the reciprocal of x element-wise.
I.e., \\(y = 1 / x\\).
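For example (illustrative values):
```
# tensor 'x' is [2.0, 0.5]
inv(x) ==> [0.5, 2.0]
```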
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Inv", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Inv", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Inv", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return inv_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def inv_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function inv
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Inv", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Inv", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def inv_grad(y, dy, name=None):
r"""Computes the gradient for the inverse of `x` wrt its input.
Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
is the corresponding input gradient.
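For example (illustrative values): with `y = 0.5` (i.e. `x = 2.0`) and
`dy = 1.0`, `grad = -1.0 * 0.5 * 0.5 = -0.25`, which matches
\\(\frac{d}{dx}(1/x) = -1/x^2\\) evaluated at `x = 2.0`.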
Args:
y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
dy: A `Tensor`. Must have the same type as `y`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `y`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"InvGrad", y=y, dy=dy, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"InvGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "InvGrad",
name, _ctx._post_execution_callbacks, y, dy)
return _result
except _core._FallbackException:
return inv_grad_eager_fallback(
y, dy, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def inv_grad_eager_fallback(y, dy, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function inv_grad
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx)
(y, dy) = _inputs_T
_inputs_flat = [y, dy]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"InvGrad", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"InvGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debugging.is_finite', 'is_finite')
def is_finite(x, name=None):
r"""Returns which elements of x are finite.
@compatibility(numpy)
Equivalent to np.isfinite
@end_compatibility
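For example (illustrative values):
```
# tensor 'x' is [1.0, inf, nan]
tf.is_finite(x) ==> [True, False, False]
```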
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"IsFinite", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"IsFinite", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "IsFinite",
name, _ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return is_finite_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def is_finite_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function is_finite
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"IsFinite", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"IsFinite", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debugging.is_inf', 'is_inf')
def is_inf(x, name=None):
r"""Returns which elements of x are Inf.
@compatibility(numpy)
Equivalent to np.isinf
@end_compatibility
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"IsInf", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"IsInf", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "IsInf", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return is_inf_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def is_inf_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function is_inf
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"IsInf", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"IsInf", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('debugging.is_nan', 'is_nan')
def is_nan(x, name=None):
r"""Returns which elements of x are NaN.
@compatibility(numpy)
Equivalent to np.isnan
@end_compatibility
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"IsNan", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"IsNan", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "IsNan", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return is_nan_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def is_nan_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function is_nan
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"IsNan", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"IsNan", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.less', 'less')
def less(x, y, name=None):
r"""Returns the truth value of (x < y) element-wise.
*NOTE*: `math.less` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Less", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Less", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Less", name,
_ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return less_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def less_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function less
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Less", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Less", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.less_equal', 'less_equal')
def less_equal(x, y, name=None):
r"""Returns the truth value of (x <= y) element-wise.
*NOTE*: `math.less_equal` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"LessEqual", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"LessEqual", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "LessEqual",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return less_equal_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def less_equal_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function less_equal
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"LessEqual", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"LessEqual", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.lgamma', 'lgamma')
def lgamma(x, name=None):
r"""Computes the log of the absolute value of `Gamma(x)` element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Lgamma", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Lgamma", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Lgamma", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return lgamma_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def lgamma_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function lgamma
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Lgamma", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Lgamma", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('lin_space', 'linspace')
def lin_space(start, stop, num, name=None):
r"""Generates values in an interval.
  A sequence of `num` evenly-spaced values is generated beginning at `start`.
  If `num > 1`, the values in the sequence increase by
  `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
For example:
```
tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
```
Args:
start: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `float64`.
0-D tensor. First entry in the range.
stop: A `Tensor`. Must have the same type as `start`.
0-D tensor. Last entry in the range.
num: A `Tensor`. Must be one of the following types: `int32`, `int64`.
0-D tensor. Number of values to generate.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `start`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"LinSpace", start=start, stop=stop, num=num, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
_execute.record_gradient(
"LinSpace", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "LinSpace",
name, _ctx._post_execution_callbacks, start, stop, num)
return _result
except _core._FallbackException:
return lin_space_eager_fallback(
start, stop, num, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def lin_space_eager_fallback(start, stop, num, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function lin_space
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([start, stop], _ctx)
(start, stop) = _inputs_T
_attr_Tidx, (num,) = _execute.args_to_matching_eager([num], _ctx, _dtypes.int32)
_inputs_flat = [start, stop, num]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
_result = _execute.execute(b"LinSpace", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"LinSpace", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.log', 'log')
def log(x, name=None):
r"""Computes natural logarithm of x element-wise.
I.e., \\(y = \log_e x\\).
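  For example (an illustrative sketch; outputs are approximate):
  ```
  tf.math.log([1.0, 10.0])  # => [0.0, 2.3025851]
  ```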
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Log", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Log", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Log", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return log_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def log_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function log
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Log", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Log", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.log1p', 'log1p')
def log1p(x, name=None):
r"""Computes natural logarithm of (1 + x) element-wise.
I.e., \\(y = \log_e (1 + x)\\).
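  For example (an illustrative sketch; outputs are approximate):
  ```
  tf.math.log1p([0.0, 1.0])  # => [0.0, 0.6931472]
  ```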
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Log1p", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Log1p", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Log1p", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return log1p_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def log1p_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function log1p
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Log1p", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Log1p", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.logical_and', 'logical_and')
def logical_and(x, y, name=None):
r"""Returns the truth value of x AND y element-wise.
*NOTE*: `math.logical_and` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
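  For example (illustrative values):
  ```
  tf.math.logical_and([True, False, True], [True, True, False])  # => [True, False, False]
  ```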
Args:
x: A `Tensor` of type `bool`.
y: A `Tensor` of type `bool`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"LogicalAnd", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"LogicalAnd", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "LogicalAnd",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return logical_and_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def logical_and_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function logical_and
"""
_ctx = ctx if ctx else _context.context()
x = _ops.convert_to_tensor(x, _dtypes.bool)
y = _ops.convert_to_tensor(y, _dtypes.bool)
_inputs_flat = [x, y]
_attrs = None
_result = _execute.execute(b"LogicalAnd", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"LogicalAnd", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.logical_not', 'logical_not')
def logical_not(x, name=None):
r"""Returns the truth value of NOT x element-wise.
Args:
x: A `Tensor` of type `bool`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"LogicalNot", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"LogicalNot", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "LogicalNot",
name, _ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return logical_not_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def logical_not_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function logical_not
"""
_ctx = ctx if ctx else _context.context()
x = _ops.convert_to_tensor(x, _dtypes.bool)
_inputs_flat = [x]
_attrs = None
_result = _execute.execute(b"LogicalNot", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"LogicalNot", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.logical_or', 'logical_or')
def logical_or(x, y, name=None):
r"""Returns the truth value of x OR y element-wise.
*NOTE*: `math.logical_or` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
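  For example (illustrative values):
  ```
  tf.math.logical_or([True, False, False], [False, False, True])  # => [True, False, True]
  ```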
Args:
x: A `Tensor` of type `bool`.
y: A `Tensor` of type `bool`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"LogicalOr", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"LogicalOr", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "LogicalOr",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return logical_or_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def logical_or_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function logical_or
"""
_ctx = ctx if ctx else _context.context()
x = _ops.convert_to_tensor(x, _dtypes.bool)
y = _ops.convert_to_tensor(y, _dtypes.bool)
_inputs_flat = [x, y]
_attrs = None
_result = _execute.execute(b"LogicalOr", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"LogicalOr", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def mat_mul(a, b, transpose_a=False, transpose_b=False, name=None):
r"""Multiply the matrix "a" by the matrix "b".
The inputs must be two-dimensional matrices and the inner dimension of
"a" (after being transposed if transpose_a is true) must match the
outer dimension of "b" (after being transposed if transposed_b is
true).
*Note*: The default kernel implementation for MatMul on GPUs uses
cublas.
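  For example (an illustrative sketch using the module-level wrapper above):
  ```
  # [[1*5 + 2*7, 1*6 + 2*8], [3*5 + 4*7, 3*6 + 4*8]]
  mat_mul([[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]])  # => [[19., 22.], [43., 50.]]
  ```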
Args:
a: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
b: A `Tensor`. Must have the same type as `a`.
transpose_a: An optional `bool`. Defaults to `False`.
If true, "a" is transposed before multiplication.
transpose_b: An optional `bool`. Defaults to `False`.
If true, "b" is transposed before multiplication.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `a`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
_, _, _op = _op_def_lib._apply_op_helper(
"MatMul", a=a, b=b, transpose_a=transpose_a, transpose_b=transpose_b,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("transpose_a", _op.get_attr("transpose_a"), "transpose_b",
_op.get_attr("transpose_b"), "T", _op.get_attr("T"))
_execute.record_gradient(
"MatMul", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "MatMul", name,
_ctx._post_execution_callbacks, a, b, "transpose_a", transpose_a,
"transpose_b", transpose_b)
return _result
except _core._FallbackException:
return mat_mul_eager_fallback(
a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name,
ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def mat_mul_eager_fallback(a, b, transpose_a=False, transpose_b=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function mat_mul
"""
_ctx = ctx if ctx else _context.context()
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
_attr_T, _inputs_T = _execute.args_to_matching_eager([a, b], _ctx)
(a, b) = _inputs_T
_inputs_flat = [a, b]
_attrs = ("transpose_a", transpose_a, "transpose_b", transpose_b, "T",
_attr_T)
_result = _execute.execute(b"MatMul", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"MatMul", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _max(input, axis, keep_dims=False, name=None):
r"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
The tensor to reduce.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
keep_dims: An optional `bool`. Defaults to `False`.
If true, retain reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_, _, _op = _op_def_lib._apply_op_helper(
"Max", input=input, reduction_indices=axis, keep_dims=keep_dims,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
"Tidx", _op.get_attr("Tidx"))
_execute.record_gradient(
"Max", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Max", name,
_ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
return _result
except _core._FallbackException:
return _max_eager_fallback(
input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _max_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _max
"""
_ctx = ctx if ctx else _context.context()
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
_inputs_flat = [input, axis]
_attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx)
_result = _execute.execute(b"Max", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Max", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.maximum', 'maximum')
def maximum(x, y, name=None):
r"""Returns the max of x and y (i.e. x > y ? x : y) element-wise.
*NOTE*: `math.maximum` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
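  For example (illustrative values):
  ```
  tf.math.maximum([1, 4, 3], [3, 2, 5])  # => [3, 4, 5]
  ```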
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Maximum", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Maximum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Maximum",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return maximum_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def maximum_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function maximum
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Maximum", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Maximum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def mean(input, axis, keep_dims=False, name=None):
r"""Computes the mean of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
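  For example (an illustrative sketch using the module-level wrapper above):
  ```
  mean([[1., 2.], [3., 4.]], axis=0)  # => [2., 3.]
  mean([[1., 2.], [3., 4.]], axis=1)  # => [1.5, 3.5]
  ```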
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
The tensor to reduce.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
keep_dims: An optional `bool`. Defaults to `False`.
If true, retain reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_, _, _op = _op_def_lib._apply_op_helper(
"Mean", input=input, reduction_indices=axis, keep_dims=keep_dims,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
"Tidx", _op.get_attr("Tidx"))
_execute.record_gradient(
"Mean", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Mean", name,
_ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
return _result
except _core._FallbackException:
return mean_eager_fallback(
input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def mean_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function mean
"""
_ctx = ctx if ctx else _context.context()
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
_inputs_flat = [input, axis]
_attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx)
_result = _execute.execute(b"Mean", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Mean", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _min(input, axis, keep_dims=False, name=None):
r"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
The tensor to reduce.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
keep_dims: An optional `bool`. Defaults to `False`.
If true, retain reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_, _, _op = _op_def_lib._apply_op_helper(
"Min", input=input, reduction_indices=axis, keep_dims=keep_dims,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
"Tidx", _op.get_attr("Tidx"))
_execute.record_gradient(
"Min", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Min", name,
_ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
return _result
except _core._FallbackException:
return _min_eager_fallback(
input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _min_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _min
"""
_ctx = ctx if ctx else _context.context()
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
_inputs_flat = [input, axis]
_attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx)
_result = _execute.execute(b"Min", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Min", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.minimum', 'minimum')
def minimum(x, y, name=None):
r"""Returns the min of x and y (i.e. x < y ? x : y) element-wise.
*NOTE*: `math.minimum` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Minimum", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Minimum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Minimum",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return minimum_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def minimum_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function minimum
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Minimum", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Minimum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def mod(x, y, name=None):
r"""Returns element-wise remainder of division. This emulates C semantics in that
the result here is consistent with a truncating divide. E.g.
`tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
*NOTE*: `Mod` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
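  For example (an illustrative sketch using the module-level wrapper above):
  ```
  mod(7, 3)   # => 1
  mod(-7, 3)  # => -1  (truncating semantics: truncatediv(-7, 3) is -2)
  ```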
Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `half`, `bfloat16`, `float32`, `float64`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Mod", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Mod", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Mod", name,
_ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return mod_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def mod_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function mod
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Mod", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Mod", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def mul(x, y, name=None):
r"""Returns x * y element-wise.
*NOTE*: `Multiply` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
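  For example (an illustrative sketch using the module-level wrapper above):
  ```
  mul([2, 3], [4, 5])  # => [8, 15]
  ```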
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Mul", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Mul", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Mul", name,
_ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return mul_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def mul_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function mul
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Mul", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Mul", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def neg(x, name=None):
r"""Computes numerical negative value element-wise.
I.e., \\(y = -x\\).
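  For example (an illustrative sketch using the module-level wrapper above):
  ```
  neg([1.0, -2.5])  # => [-1.0, 2.5]
  ```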
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Neg", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Neg", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Neg", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return neg_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def neg_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function neg
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Neg", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Neg", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.not_equal', 'not_equal')
def not_equal(x, y, name=None):
r"""Returns the truth value of (x != y) element-wise.
*NOTE*: `math.not_equal` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
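  For example (illustrative values):
  ```
  tf.math.not_equal([1, 2, 3], [1, 4, 3])  # => [False, True, False]
  ```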
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `quint8`, `qint8`, `qint32`, `string`, `bool`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"NotEqual", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"NotEqual", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "NotEqual",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return not_equal_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def not_equal_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function not_equal
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"NotEqual", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"NotEqual", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.polygamma', 'polygamma')
def polygamma(a, x, name=None):
r"""Compute the polygamma function \\(\psi^{(n)}(x)\\).
The polygamma function is defined as:
\\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\\)
where \\(\psi(x)\\) is the digamma function.
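  For example (an illustrative sketch; outputs are approximate):
  ```
  tf.math.polygamma(0.0, 1.0)  # => -0.5772157  (digamma(1))
  tf.math.polygamma(1.0, 1.0)  # => 1.6449341   (pi**2 / 6)
  ```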
Args:
a: A `Tensor`. Must be one of the following types: `float32`, `float64`.
x: A `Tensor`. Must have the same type as `a`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `a`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Polygamma", a=a, x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Polygamma", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Polygamma",
name, _ctx._post_execution_callbacks, a, x)
return _result
except _core._FallbackException:
return polygamma_eager_fallback(
a, x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def polygamma_eager_fallback(a, x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function polygamma
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], _ctx)
(a, x) = _inputs_T
_inputs_flat = [a, x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Polygamma", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"Polygamma", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _pow(x, y, name=None):
r"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
corresponding elements in `x` and `y`. For example:
```
  # tensor 'x' is [[2, 2], [3, 3]]
# tensor 'y' is [[8, 16], [2, 3]]
tf.pow(x, y) ==> [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Pow", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Pow", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Pow", name,
_ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return _pow_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _pow_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _pow
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Pow", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Pow", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def prod(input, axis, keep_dims=False, name=None):
r"""Computes the product of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
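  For example (an illustrative sketch using the module-level wrapper above):
  ```
  prod([[1, 2], [3, 4]], axis=0)  # => [3, 8]
  prod([[1, 2], [3, 4]], axis=1)  # => [2, 12]
  ```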
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
The tensor to reduce.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
keep_dims: An optional `bool`. Defaults to `False`.
If true, retain reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_, _, _op = _op_def_lib._apply_op_helper(
"Prod", input=input, reduction_indices=axis, keep_dims=keep_dims,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
"Tidx", _op.get_attr("Tidx"))
_execute.record_gradient(
"Prod", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Prod", name,
_ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
return _result
except _core._FallbackException:
return prod_eager_fallback(
input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def prod_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function prod
"""
_ctx = ctx if ctx else _context.context()
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
_inputs_flat = [input, axis]
_attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx)
_result = _execute.execute(b"Prod", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Prod", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_quantize_down_and_shrink_range_outputs = ["output", "output_min",
"output_max"]
_QuantizeDownAndShrinkRangeOutput = _collections.namedtuple(
"QuantizeDownAndShrinkRange", _quantize_down_and_shrink_range_outputs)
def quantize_down_and_shrink_range(input, input_min, input_max, out_type, name=None):
r"""Convert the quantized 'input' tensor into a lower-precision 'output', using the
actual distribution of the values to maximize the usage of the lower bit depth
and adjusting the output min and max ranges accordingly.
[input_min, input_max] are scalar floats that specify the range for the float
interpretation of the 'input' data. For example, if input_min is -1.0f and
input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
This operator tries to squeeze as much precision as possible into an output with
a lower bit depth by calculating the actual min and max values found in the
data. For example, maybe that quint16 input has no values lower than 16,384 and
none higher than 49,152. That means only half the range is actually needed, all
the float interpretations are between -0.5f and 0.5f, so if we want to compress
the data into a quint8 output, we can use that range rather than the theoretical
-1.0f to 1.0f that is suggested by the input min and max.
In practice, this is most useful for taking output from operations like
QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
may have large potential output ranges, but in practice have a distribution of
input values that only uses a small fraction of the possible range. By feeding
that output into this operator, we can reduce it from 32 bits down to 8 with
minimal loss of accuracy.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
input_min: A `Tensor` of type `float32`.
The float value that the minimum quantized input value represents.
input_max: A `Tensor` of type `float32`.
The float value that the maximum quantized input value represents.
out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
The type of the output. Should be a lower bit depth than Tinput.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, output_min, output_max).
output: A `Tensor` of type `out_type`.
output_min: A `Tensor` of type `float32`.
output_max: A `Tensor` of type `float32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
out_type = _execute.make_type(out_type, "out_type")
_, _, _op = _op_def_lib._apply_op_helper(
"QuantizeDownAndShrinkRange", input=input, input_min=input_min,
input_max=input_max, out_type=out_type, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("Tinput", _op.get_attr("Tinput"), "out_type",
_op.get_attr("out_type"))
_execute.record_gradient(
"QuantizeDownAndShrinkRange", _inputs_flat, _attrs, _result, name)
_result = _QuantizeDownAndShrinkRangeOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"QuantizeDownAndShrinkRange", name, _ctx._post_execution_callbacks,
input, input_min, input_max, "out_type", out_type)
_result = _QuantizeDownAndShrinkRangeOutput._make(_result)
return _result
except _core._FallbackException:
return quantize_down_and_shrink_range_eager_fallback(
input, input_min, input_max, out_type=out_type, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def quantize_down_and_shrink_range_eager_fallback(input, input_min, input_max, out_type, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function quantize_down_and_shrink_range
"""
_ctx = ctx if ctx else _context.context()
out_type = _execute.make_type(out_type, "out_type")
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
input_min = _ops.convert_to_tensor(input_min, _dtypes.float32)
input_max = _ops.convert_to_tensor(input_max, _dtypes.float32)
_inputs_flat = [input, input_min, input_max]
_attrs = ("Tinput", _attr_Tinput, "out_type", out_type)
_result = _execute.execute(b"QuantizeDownAndShrinkRange", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"QuantizeDownAndShrinkRange", _inputs_flat, _attrs, _result, name)
_result = _QuantizeDownAndShrinkRangeOutput._make(_result)
return _result
_quantized_add_outputs = ["z", "min_z", "max_z"]
_QuantizedAddOutput = _collections.namedtuple(
"QuantizedAdd", _quantized_add_outputs)
def quantized_add(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None):
r"""Returns x + y element-wise, working on quantized buffers.
Args:
x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
y: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
min_x: A `Tensor` of type `float32`.
The float value that the lowest quantized `x` value represents.
max_x: A `Tensor` of type `float32`.
The float value that the highest quantized `x` value represents.
min_y: A `Tensor` of type `float32`.
The float value that the lowest quantized `y` value represents.
max_y: A `Tensor` of type `float32`.
The float value that the highest quantized `y` value represents.
Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (z, min_z, max_z).
z: A `Tensor` of type `Toutput`.
min_z: A `Tensor` of type `float32`.
max_z: A `Tensor` of type `float32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if Toutput is None:
Toutput = _dtypes.qint32
Toutput = _execute.make_type(Toutput, "Toutput")
_, _, _op = _op_def_lib._apply_op_helper(
"QuantizedAdd", x=x, y=y, min_x=min_x, max_x=max_x, min_y=min_y,
max_y=max_y, Toutput=Toutput, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "Toutput",
_op.get_attr("Toutput"))
_execute.record_gradient(
"QuantizedAdd", _inputs_flat, _attrs, _result, name)
_result = _QuantizedAddOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "QuantizedAdd",
name, _ctx._post_execution_callbacks, x, y, min_x, max_x, min_y,
max_y, "Toutput", Toutput)
_result = _QuantizedAddOutput._make(_result)
return _result
except _core._FallbackException:
return quantized_add_eager_fallback(
x, y, min_x, max_x, min_y, max_y, Toutput=Toutput, name=name,
ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def quantized_add_eager_fallback(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function quantized_add
"""
_ctx = ctx if ctx else _context.context()
if Toutput is None:
Toutput = _dtypes.qint32
Toutput = _execute.make_type(Toutput, "Toutput")
_attr_T1, (x,) = _execute.args_to_matching_eager([x], _ctx)
_attr_T2, (y,) = _execute.args_to_matching_eager([y], _ctx)
min_x = _ops.convert_to_tensor(min_x, _dtypes.float32)
max_x = _ops.convert_to_tensor(max_x, _dtypes.float32)
min_y = _ops.convert_to_tensor(min_y, _dtypes.float32)
max_y = _ops.convert_to_tensor(max_y, _dtypes.float32)
_inputs_flat = [x, y, min_x, max_x, min_y, max_y]
_attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput)
_result = _execute.execute(b"QuantizedAdd", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"QuantizedAdd", _inputs_flat, _attrs, _result, name)
_result = _QuantizedAddOutput._make(_result)
return _result
_quantized_mat_mul_outputs = ["out", "min_out", "max_out"]
_QuantizedMatMulOutput = _collections.namedtuple(
"QuantizedMatMul", _quantized_mat_mul_outputs)
def quantized_mat_mul(a, b, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, Tactivation=_dtypes.quint8, name=None):
r"""Perform a quantized matrix multiplication of `a` by the matrix `b`.
The inputs must be two-dimensional matrices and the inner dimension of
`a` (after being transposed if `transpose_a` is non-zero) must match the
  outer dimension of `b` (after being transposed if `transpose_b` is
non-zero).
Args:
a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
Must be a two-dimensional tensor.
b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
Must be a two-dimensional tensor.
min_a: A `Tensor` of type `float32`.
The float value that the lowest quantized `a` value represents.
max_a: A `Tensor` of type `float32`.
The float value that the highest quantized `a` value represents.
min_b: A `Tensor` of type `float32`.
The float value that the lowest quantized `b` value represents.
max_b: A `Tensor` of type `float32`.
The float value that the highest quantized `b` value represents.
Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
transpose_a: An optional `bool`. Defaults to `False`.
If true, `a` is transposed before multiplication.
transpose_b: An optional `bool`. Defaults to `False`.
If true, `b` is transposed before multiplication.
Tactivation: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
The type of output produced by activation function
following this operation.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (out, min_out, max_out).
out: A `Tensor` of type `Toutput`.
min_out: A `Tensor` of type `float32`.
max_out: A `Tensor` of type `float32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if Toutput is None:
Toutput = _dtypes.qint32
Toutput = _execute.make_type(Toutput, "Toutput")
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if Tactivation is None:
Tactivation = _dtypes.quint8
Tactivation = _execute.make_type(Tactivation, "Tactivation")
_, _, _op = _op_def_lib._apply_op_helper(
"QuantizedMatMul", a=a, b=b, min_a=min_a, max_a=max_a, min_b=min_b,
max_b=max_b, Toutput=Toutput, transpose_a=transpose_a,
transpose_b=transpose_b, Tactivation=Tactivation, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "Toutput",
_op.get_attr("Toutput"), "transpose_a",
_op.get_attr("transpose_a"), "transpose_b",
_op.get_attr("transpose_b"), "Tactivation",
_op.get_attr("Tactivation"))
_execute.record_gradient(
"QuantizedMatMul", _inputs_flat, _attrs, _result, name)
_result = _QuantizedMatMulOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"QuantizedMatMul", name, _ctx._post_execution_callbacks, a, b, min_a,
max_a, min_b, max_b, "Toutput", Toutput, "transpose_a", transpose_a,
"transpose_b", transpose_b, "Tactivation", Tactivation)
_result = _QuantizedMatMulOutput._make(_result)
return _result
except _core._FallbackException:
return quantized_mat_mul_eager_fallback(
a, b, min_a, max_a, min_b, max_b, Toutput=Toutput,
transpose_a=transpose_a, transpose_b=transpose_b,
Tactivation=Tactivation, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def quantized_mat_mul_eager_fallback(a, b, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, Tactivation=_dtypes.quint8, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function quantized_mat_mul
"""
_ctx = ctx if ctx else _context.context()
if Toutput is None:
Toutput = _dtypes.qint32
Toutput = _execute.make_type(Toutput, "Toutput")
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if Tactivation is None:
Tactivation = _dtypes.quint8
Tactivation = _execute.make_type(Tactivation, "Tactivation")
_attr_T1, (a,) = _execute.args_to_matching_eager([a], _ctx)
_attr_T2, (b,) = _execute.args_to_matching_eager([b], _ctx)
min_a = _ops.convert_to_tensor(min_a, _dtypes.float32)
max_a = _ops.convert_to_tensor(max_a, _dtypes.float32)
min_b = _ops.convert_to_tensor(min_b, _dtypes.float32)
max_b = _ops.convert_to_tensor(max_b, _dtypes.float32)
_inputs_flat = [a, b, min_a, max_a, min_b, max_b]
_attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput, "transpose_a",
transpose_a, "transpose_b", transpose_b, "Tactivation", Tactivation)
_result = _execute.execute(b"QuantizedMatMul", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"QuantizedMatMul", _inputs_flat, _attrs, _result, name)
_result = _QuantizedMatMulOutput._make(_result)
return _result
_quantized_mul_outputs = ["z", "min_z", "max_z"]
_QuantizedMulOutput = _collections.namedtuple(
"QuantizedMul", _quantized_mul_outputs)
def quantized_mul(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None):
r"""Returns x * y element-wise, working on quantized buffers.
Args:
x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
y: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
min_x: A `Tensor` of type `float32`.
The float value that the lowest quantized `x` value represents.
max_x: A `Tensor` of type `float32`.
The float value that the highest quantized `x` value represents.
min_y: A `Tensor` of type `float32`.
The float value that the lowest quantized `y` value represents.
max_y: A `Tensor` of type `float32`.
The float value that the highest quantized `y` value represents.
Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (z, min_z, max_z).
z: A `Tensor` of type `Toutput`.
min_z: A `Tensor` of type `float32`.
max_z: A `Tensor` of type `float32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if Toutput is None:
Toutput = _dtypes.qint32
Toutput = _execute.make_type(Toutput, "Toutput")
_, _, _op = _op_def_lib._apply_op_helper(
"QuantizedMul", x=x, y=y, min_x=min_x, max_x=max_x, min_y=min_y,
max_y=max_y, Toutput=Toutput, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "Toutput",
_op.get_attr("Toutput"))
_execute.record_gradient(
"QuantizedMul", _inputs_flat, _attrs, _result, name)
_result = _QuantizedMulOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "QuantizedMul",
name, _ctx._post_execution_callbacks, x, y, min_x, max_x, min_y,
max_y, "Toutput", Toutput)
_result = _QuantizedMulOutput._make(_result)
return _result
except _core._FallbackException:
return quantized_mul_eager_fallback(
x, y, min_x, max_x, min_y, max_y, Toutput=Toutput, name=name,
ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def quantized_mul_eager_fallback(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function quantized_mul
"""
_ctx = ctx if ctx else _context.context()
if Toutput is None:
Toutput = _dtypes.qint32
Toutput = _execute.make_type(Toutput, "Toutput")
_attr_T1, (x,) = _execute.args_to_matching_eager([x], _ctx)
_attr_T2, (y,) = _execute.args_to_matching_eager([y], _ctx)
min_x = _ops.convert_to_tensor(min_x, _dtypes.float32)
max_x = _ops.convert_to_tensor(max_x, _dtypes.float32)
min_y = _ops.convert_to_tensor(min_y, _dtypes.float32)
max_y = _ops.convert_to_tensor(max_y, _dtypes.float32)
_inputs_flat = [x, y, min_x, max_x, min_y, max_y]
_attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput)
_result = _execute.execute(b"QuantizedMul", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"QuantizedMul", _inputs_flat, _attrs, _result, name)
_result = _QuantizedMulOutput._make(_result)
return _result
def _range(start, limit, delta, name=None):
r"""Creates a sequence of numbers.
This operation creates a sequence of numbers that begins at `start` and
extends by increments of `delta` up to but not including `limit`.
For example:
```
# 'start' is 3
# 'limit' is 18
# 'delta' is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
```
Args:
start: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `float64`, `int32`, `int64`.
0-D (scalar). First entry in the sequence.
limit: A `Tensor`. Must have the same type as `start`.
0-D (scalar). Upper limit of sequence, exclusive.
delta: A `Tensor`. Must have the same type as `start`.
0-D (scalar). Optional. Default is 1. Number that increments `start`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `start`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Range", start=start, limit=limit, delta=delta, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("Tidx", _op.get_attr("Tidx"))
_execute.record_gradient(
"Range", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Range", name,
_ctx._post_execution_callbacks, start, limit, delta)
return _result
except _core._FallbackException:
return _range_eager_fallback(
start, limit, delta, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _range_eager_fallback(start, limit, delta, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _range
"""
_ctx = ctx if ctx else _context.context()
_attr_Tidx, _inputs_Tidx = _execute.args_to_matching_eager([start, limit, delta], _ctx, _dtypes.int32)
(start, limit, delta) = _inputs_Tidx
_inputs_flat = [start, limit, delta]
_attrs = ("Tidx", _attr_Tidx)
_result = _execute.execute(b"Range", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Range", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def real(input, Tout=_dtypes.float32, name=None):
r"""Returns the real part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float` that is the real part of each element in `input`. All elements in
`input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
part returned by this operation and *b* is the imaginary part.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.real(input) ==> [-2.25, 3.25]
```
Args:
input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `Tout`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if Tout is None:
Tout = _dtypes.float32
Tout = _execute.make_type(Tout, "Tout")
_, _, _op = _op_def_lib._apply_op_helper(
"Real", input=input, Tout=Tout, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
_execute.record_gradient(
"Real", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Real", name,
_ctx._post_execution_callbacks, input, "Tout", Tout)
return _result
except _core._FallbackException:
return real_eager_fallback(
input, Tout=Tout, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def real_eager_fallback(input, Tout=_dtypes.float32, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function real
"""
_ctx = ctx if ctx else _context.context()
if Tout is None:
Tout = _dtypes.float32
Tout = _execute.make_type(Tout, "Tout")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "Tout", Tout)
_result = _execute.execute(b"Real", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Real", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def real_div(x, y, name=None):
r"""Returns x / y element-wise for real types.
If `x` and `y` are reals, this will return the floating-point division.
*NOTE*: `RealDiv` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
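For example (an illustrative sketch; `real_div` is the wrapper defined here):
```
real_div([4.0, 7.0], [2.0, 2.0]) ==> [2.0, 3.5]
```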
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"RealDiv", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"RealDiv", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "RealDiv",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return real_div_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def real_div_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function real_div
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"RealDiv", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"RealDiv", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.reciprocal', 'reciprocal')
def reciprocal(x, name=None):
r"""Computes the reciprocal of x element-wise.
I.e., \\(y = 1 / x\\).
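For example:
```
# `reciprocal` is exported as tf.reciprocal; the values here are exact.
tf.reciprocal([1.0, 2.0, 4.0]) ==> [1.0, 0.5, 0.25]
```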
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Reciprocal", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Reciprocal", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Reciprocal",
name, _ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return reciprocal_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def reciprocal_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function reciprocal
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Reciprocal", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"Reciprocal", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def reciprocal_grad(y, dy, name=None):
r"""Computes the gradient for the inverse of `x` wrt its input.
Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
is the corresponding input gradient.
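For example (a worked instance of the formula): for `x = 2.0`, `y = 1/x = 0.5`;
with `dy = 1.0`, `grad = -1.0 * 0.5 * 0.5 = -0.25`, which matches
\\(d(1/x)/dx = -1/x^2\\) at `x = 2.0`.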
Args:
y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
dy: A `Tensor`. Must have the same type as `y`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `y`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"ReciprocalGrad", y=y, dy=dy, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"ReciprocalGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"ReciprocalGrad", name, _ctx._post_execution_callbacks, y, dy)
return _result
except _core._FallbackException:
return reciprocal_grad_eager_fallback(
y, dy, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def reciprocal_grad_eager_fallback(y, dy, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function reciprocal_grad
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx)
(y, dy) = _inputs_T
_inputs_flat = [y, dy]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"ReciprocalGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"ReciprocalGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_requantization_range_outputs = ["output_min", "output_max"]
_RequantizationRangeOutput = _collections.namedtuple(
"RequantizationRange", _requantization_range_outputs)
def requantization_range(input, input_min, input_max, name=None):
r"""Given a quantized tensor described by (input, input_min, input_max), outputs a
range that covers the actual values present in that tensor. This op is
typically used to produce the requested_output_min and requested_output_max for
Requantize.
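For example (illustrative): if the values actually stored in a `qint32`
`input` dequantize to floats spanning roughly [-3.5, 8.0], then `output_min`
and `output_max` will be approximately -3.5 and 8.0, and can be fed to
`Requantize` as its requested output range.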
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
input_min: A `Tensor` of type `float32`.
The float value that the minimum quantized input value represents.
input_max: A `Tensor` of type `float32`.
The float value that the maximum quantized input value represents.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output_min, output_max).
output_min: A `Tensor` of type `float32`.
output_max: A `Tensor` of type `float32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"RequantizationRange", input=input, input_min=input_min,
input_max=input_max, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("Tinput", _op.get_attr("Tinput"))
_execute.record_gradient(
"RequantizationRange", _inputs_flat, _attrs, _result, name)
_result = _RequantizationRangeOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"RequantizationRange", name, _ctx._post_execution_callbacks, input,
input_min, input_max)
_result = _RequantizationRangeOutput._make(_result)
return _result
except _core._FallbackException:
return requantization_range_eager_fallback(
input, input_min, input_max, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def requantization_range_eager_fallback(input, input_min, input_max, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function requantization_range
"""
_ctx = ctx if ctx else _context.context()
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
input_min = _ops.convert_to_tensor(input_min, _dtypes.float32)
input_max = _ops.convert_to_tensor(input_max, _dtypes.float32)
_inputs_flat = [input, input_min, input_max]
_attrs = ("Tinput", _attr_Tinput)
_result = _execute.execute(b"RequantizationRange", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"RequantizationRange", _inputs_flat, _attrs, _result, name)
_result = _RequantizationRangeOutput._make(_result)
return _result
_requantize_outputs = ["output", "output_min", "output_max"]
_RequantizeOutput = _collections.namedtuple(
"Requantize", _requantize_outputs)
def requantize(input, input_min, input_max, requested_output_min, requested_output_max, out_type, name=None):
r"""Convert the quantized 'input' tensor into a lower-precision 'output', using the
output range specified with 'requested_output_min' and 'requested_output_max'.
[input_min, input_max] are scalar floats that specify the range for the float
interpretation of the 'input' data. For example, if input_min is -1.0f and
input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
input_min: A `Tensor` of type `float32`.
The float value that the minimum quantized input value represents.
input_max: A `Tensor` of type `float32`.
The float value that the maximum quantized input value represents.
requested_output_min: A `Tensor` of type `float32`.
The float value that the minimum quantized output value represents.
requested_output_max: A `Tensor` of type `float32`.
The float value that the maximum quantized output value represents.
out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
The type of the output. Should be a lower bit depth than Tinput.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, output_min, output_max).
output: A `Tensor` of type `out_type`.
output_min: A `Tensor` of type `float32`.
output_max: A `Tensor` of type `float32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
out_type = _execute.make_type(out_type, "out_type")
_, _, _op = _op_def_lib._apply_op_helper(
"Requantize", input=input, input_min=input_min, input_max=input_max,
requested_output_min=requested_output_min,
requested_output_max=requested_output_max, out_type=out_type,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("Tinput", _op.get_attr("Tinput"), "out_type",
_op.get_attr("out_type"))
_execute.record_gradient(
"Requantize", _inputs_flat, _attrs, _result, name)
_result = _RequantizeOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Requantize",
name, _ctx._post_execution_callbacks, input, input_min, input_max,
requested_output_min, requested_output_max, "out_type", out_type)
_result = _RequantizeOutput._make(_result)
return _result
except _core._FallbackException:
return requantize_eager_fallback(
input, input_min, input_max, requested_output_min,
requested_output_max, out_type=out_type, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def requantize_eager_fallback(input, input_min, input_max, requested_output_min, requested_output_max, out_type, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function requantize
"""
_ctx = ctx if ctx else _context.context()
out_type = _execute.make_type(out_type, "out_type")
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
input_min = _ops.convert_to_tensor(input_min, _dtypes.float32)
input_max = _ops.convert_to_tensor(input_max, _dtypes.float32)
requested_output_min = _ops.convert_to_tensor(requested_output_min, _dtypes.float32)
requested_output_max = _ops.convert_to_tensor(requested_output_max, _dtypes.float32)
_inputs_flat = [input, input_min, input_max, requested_output_min, requested_output_max]
_attrs = ("Tinput", _attr_Tinput, "out_type", out_type)
_result = _execute.execute(b"Requantize", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"Requantize", _inputs_flat, _attrs, _result, name)
_result = _RequantizeOutput._make(_result)
return _result
@tf_export('math.rint', 'rint')
def rint(x, name=None):
r"""Returns element-wise integer closest to x.
If the result is midway between two representable values,
the even representable is chosen.
For example:
```
rint(-1.5) ==> -2.0
rint(0.5000001) ==> 1.0
rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
```
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Rint", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Rint", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Rint", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return rint_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def rint_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function rint
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Rint", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Rint", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def round(x, name=None):
r"""Rounds the values of a tensor to the nearest integer, element-wise.
Rounds half to even, also known as banker's rounding. If you want to round
according to the current system rounding mode, use `std::rint` instead.
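For example:
```
# Half-to-even rounding: 2.5 rounds to 2.0 and -4.5 rounds to -4.0.
x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
tf.round(x) ==> [1.0, 2.0, 2.0, 2.0, -4.0]
```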
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Round", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Round", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Round", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return round_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def round_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function round
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Round", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Round", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.rsqrt', 'rsqrt')
def rsqrt(x, name=None):
r"""Computes reciprocal of square root of x element-wise.
I.e., \\(y = 1 / \sqrt{x}\\).
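For example:
```
tf.rsqrt([1.0, 4.0, 16.0]) ==> [1.0, 0.5, 0.25]
```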
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Rsqrt", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Rsqrt", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Rsqrt", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return rsqrt_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def rsqrt_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function rsqrt
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Rsqrt", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Rsqrt", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def rsqrt_grad(y, dy, name=None):
r"""Computes the gradient for the rsqrt of `x` wrt its input.
Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
is the corresponding input gradient.
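For example (a worked instance of the formula): for `x = 4.0`,
`y = rsqrt(x) = 0.5`; with `dy = 1.0`, `grad = 1.0 * -0.5 * 0.5^3 = -0.0625`.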
Args:
y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
dy: A `Tensor`. Must have the same type as `y`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `y`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"RsqrtGrad", y=y, dy=dy, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"RsqrtGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "RsqrtGrad",
name, _ctx._post_execution_callbacks, y, dy)
return _result
except _core._FallbackException:
return rsqrt_grad_eager_fallback(
y, dy, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def rsqrt_grad_eager_fallback(y, dy, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function rsqrt_grad
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx)
(y, dy) = _inputs_T
_inputs_flat = [y, dy]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"RsqrtGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"RsqrtGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.segment_max', 'segment_max')
def segment_max(data, segment_ids, name=None):
r"""Computes the maximum along segments of a tensor.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
Computes a tensor such that
\\(output_i = \max_j(data_j)\\) where `max` is over `j` such
that `segment_ids[j] == i`.
If the max is empty for a given segment ID `i`, `output[i] = 0`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
</div>
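For example:
```
c = tf.constant([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
# Rows 0 and 1 form segment 0; row 2 forms segment 1.
tf.segment_max(c, tf.constant([0, 0, 1]))
# ==> [[4, 3, 3, 4],
#      [5, 6, 7, 8]]
```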
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SegmentMax", data=data, segment_ids=segment_ids, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
_execute.record_gradient(
"SegmentMax", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "SegmentMax",
name, _ctx._post_execution_callbacks, data, segment_ids)
return _result
except _core._FallbackException:
return segment_max_eager_fallback(
data, segment_ids, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def segment_max_eager_fallback(data, segment_ids, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function segment_max
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
_inputs_flat = [data, segment_ids]
_attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
_result = _execute.execute(b"SegmentMax", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SegmentMax", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.segment_mean', 'segment_mean')
def segment_mean(data, segment_ids, name=None):
r"""Computes the mean along segments of a tensor.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
Computes a tensor such that
\\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is
over `j` such that `segment_ids[j] == i` and `N` is the total number of
values summed.
If the mean is empty for a given segment ID `i`, `output[i] = 0`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
</div>
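For example:
```
c = tf.constant([[1.0, 2.0, 3.0, 4.0], [4.0, 3.0, 2.0, 1.0], [5.0, 6.0, 7.0, 8.0]])
tf.segment_mean(c, tf.constant([0, 0, 1]))
# ==> [[2.5, 2.5, 2.5, 2.5],
#      [5.0, 6.0, 7.0, 8.0]]
```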
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SegmentMean", data=data, segment_ids=segment_ids, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
_execute.record_gradient(
"SegmentMean", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "SegmentMean",
name, _ctx._post_execution_callbacks, data, segment_ids)
return _result
except _core._FallbackException:
return segment_mean_eager_fallback(
data, segment_ids, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def segment_mean_eager_fallback(data, segment_ids, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function segment_mean
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
_inputs_flat = [data, segment_ids]
_attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
_result = _execute.execute(b"SegmentMean", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SegmentMean", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.segment_min', 'segment_min')
def segment_min(data, segment_ids, name=None):
r"""Computes the minimum along segments of a tensor.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
Computes a tensor such that
\\(output_i = \min_j(data_j)\\) where `min` is over `j` such
that `segment_ids[j] == i`.
If the min is empty for a given segment ID `i`, `output[i] = 0`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
</div>
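For example:
```
c = tf.constant([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
tf.segment_min(c, tf.constant([0, 0, 1]))
# ==> [[1, 2, 2, 1],
#      [5, 6, 7, 8]]
```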
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SegmentMin", data=data, segment_ids=segment_ids, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
_execute.record_gradient(
"SegmentMin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "SegmentMin",
name, _ctx._post_execution_callbacks, data, segment_ids)
return _result
except _core._FallbackException:
return segment_min_eager_fallback(
data, segment_ids, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def segment_min_eager_fallback(data, segment_ids, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function segment_min
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
_inputs_flat = [data, segment_ids]
_attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
_result = _execute.execute(b"SegmentMin", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SegmentMin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.segment_prod', 'segment_prod')
def segment_prod(data, segment_ids, name=None):
r"""Computes the product along segments of a tensor.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
Computes a tensor such that
\\(output_i = \prod_j data_j\\) where the product is over `j` such
that `segment_ids[j] == i`.
If the product is empty for a given segment ID `i`, `output[i] = 1`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
</div>
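For example:
```
c = tf.constant([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
tf.segment_prod(c, tf.constant([0, 0, 1]))
# ==> [[4, 6, 6, 4],
#      [5, 6, 7, 8]]
```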
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SegmentProd", data=data, segment_ids=segment_ids, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
_execute.record_gradient(
"SegmentProd", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "SegmentProd",
name, _ctx._post_execution_callbacks, data, segment_ids)
return _result
except _core._FallbackException:
return segment_prod_eager_fallback(
data, segment_ids, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def segment_prod_eager_fallback(data, segment_ids, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function segment_prod
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
_inputs_flat = [data, segment_ids]
_attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
_result = _execute.execute(b"SegmentProd", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SegmentProd", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.segment_sum', 'segment_sum')
def segment_sum(data, segment_ids, name=None):
r"""Computes the sum along segments of a tensor.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
Computes a tensor such that
\\(output_i = \sum_j data_j\\) where sum is over `j` such
that `segment_ids[j] == i`.
If the sum is empty for a given segment ID `i`, `output[i] = 0`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
</div>
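For example:
```
c = tf.constant([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
tf.segment_sum(c, tf.constant([0, 0, 1]))
# ==> [[5, 5, 5, 5],
#      [5, 6, 7, 8]]
```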
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor whose size is equal to the size of `data`'s
first dimension. Values should be sorted and can be repeated.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SegmentSum", data=data, segment_ids=segment_ids, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
_execute.record_gradient(
"SegmentSum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "SegmentSum",
name, _ctx._post_execution_callbacks, data, segment_ids)
return _result
except _core._FallbackException:
return segment_sum_eager_fallback(
data, segment_ids, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def segment_sum_eager_fallback(data, segment_ids, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function segment_sum
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
_inputs_flat = [data, segment_ids]
_attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
_result = _execute.execute(b"SegmentSum", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SegmentSum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def select(condition, x, y, name=None):
r"""Selects elements from `x` or `y`, depending on `condition`.
The `x` and `y` tensors must have the same shape, and the
output will also have that shape.
The `condition` tensor must be a scalar if `x` and `y` are scalars.
If `x` and `y` are vectors or higher rank, then `condition` must be either a
scalar, a vector with size matching the first dimension of `x`, or must have
the same shape as `x`.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be
taken from `x` (if true) or `y` (if false).
If `condition` is a vector and `x` and `y` are higher rank matrices, then
it chooses which row (outer dimension) to copy from `x` and `y`.
If `condition` has the same shape as `x` and `y`, then it chooses which
element to copy from `x` and `y`.
For example:
```python
# 'condition' tensor is [[True, False]
# [False, True]]
# 't' is [[1, 2],
# [3, 4]]
# 'e' is [[5, 6],
# [7, 8]]
select(condition, t, e) # => [[1, 6], [7, 4]]
# 'condition' tensor is [True, False]
# 't' is [[1, 2],
# [3, 4]]
# 'e' is [[5, 6],
# [7, 8]]
select(condition, t, e) ==> [[1, 2],
[7, 8]]
```
Args:
condition: A `Tensor` of type `bool`.
x: A `Tensor` which may have the same shape as `condition`.
If `condition` is rank 1, `x` may have higher rank,
but its first dimension must match the size of `condition`.
y: A `Tensor` with the same type and shape as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Select", condition=condition, t=x, e=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Select", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Select", name,
_ctx._post_execution_callbacks, condition, x, y)
return _result
except _core._FallbackException:
return select_eager_fallback(
condition, x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def select_eager_fallback(condition, x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function select
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
condition = _ops.convert_to_tensor(condition, _dtypes.bool)
_inputs_flat = [condition, x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Select", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Select", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sigmoid(x, name=None):
r"""Computes sigmoid of `x` element-wise.
Specifically, `y = 1 / (1 + exp(-x))`.
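For example (values are approximate):
```
tf.sigmoid([-1.0, 0.0, 1.0]) ==> [0.26894143, 0.5, 0.7310586]
```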
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Sigmoid", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Sigmoid", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Sigmoid",
name, _ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return sigmoid_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sigmoid_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sigmoid
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Sigmoid", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Sigmoid", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sigmoid_grad(y, dy, name=None):
r"""Computes the gradient of the sigmoid of `x` wrt its input.
Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
`dy` is the corresponding input gradient.
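For example (a worked instance of the formula): for `y = sigmoid(0.0) = 0.5`
and `dy = 1.0`, `grad = 1.0 * 0.5 * (1 - 0.5) = 0.25`.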
Args:
y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
dy: A `Tensor`. Must have the same type as `y`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `y`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SigmoidGrad", y=y, dy=dy, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"SigmoidGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "SigmoidGrad",
name, _ctx._post_execution_callbacks, y, dy)
return _result
except _core._FallbackException:
return sigmoid_grad_eager_fallback(
y, dy, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sigmoid_grad_eager_fallback(y, dy, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sigmoid_grad
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx)
(y, dy) = _inputs_T
_inputs_flat = [y, dy]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"SigmoidGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SigmoidGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sign(x, name=None):
r"""Returns an element-wise indication of the sign of a number.
`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
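For example:
```
tf.sign([-3.0, 0.0, 5.0]) ==> [-1.0, 0.0, 1.0]
```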
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Sign", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Sign", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Sign", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return sign_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sign_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sign
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Sign", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Sign", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.sin', 'sin')
def sin(x, name=None):
r"""Computes sin of x element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Sin", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Sin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Sin", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return sin_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sin_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sin
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Sin", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Sin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.sinh', 'sinh')
def sinh(x, name=None):
r"""Computes hyperbolic sine of x element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Sinh", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Sinh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Sinh", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return sinh_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sinh_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sinh
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Sinh", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Sinh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sparse_mat_mul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None):
r"""Multiply matrix "a" by matrix "b".
The inputs must be two-dimensional matrices and the inner dimension of "a" must
match the outer dimension of "b". Both "a" and "b" must be `Tensor`s, not
`SparseTensor`s. This op is optimized for the case where at least one of "a" or
"b" is sparse, in the sense that they have a large proportion of zero values.
The breakeven for using this versus a dense matrix multiply on one platform was
30% zero values in the sparse matrix.
The gradient computation of this operation will only take advantage of sparsity
in the input gradient when that gradient comes from a Relu.
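A minimal usage sketch (hedged; `a` and `b` are assumed to be 2-D `float32`
`Tensor`s, with `b` mostly zeros):
```
# Hint that `b` is sparse so the optimized kernel can exploit its zeros.
out = sparse_mat_mul(a, b, b_is_sparse=True)
```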
Args:
a: A `Tensor`. Must be one of the following types: `float32`, `bfloat16`.
b: A `Tensor`. Must be one of the following types: `float32`, `bfloat16`.
transpose_a: An optional `bool`. Defaults to `False`.
transpose_b: An optional `bool`. Defaults to `False`.
a_is_sparse: An optional `bool`. Defaults to `False`.
b_is_sparse: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if a_is_sparse is None:
a_is_sparse = False
a_is_sparse = _execute.make_bool(a_is_sparse, "a_is_sparse")
if b_is_sparse is None:
b_is_sparse = False
b_is_sparse = _execute.make_bool(b_is_sparse, "b_is_sparse")
_, _, _op = _op_def_lib._apply_op_helper(
"SparseMatMul", a=a, b=b, transpose_a=transpose_a,
transpose_b=transpose_b, a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("transpose_a", _op.get_attr("transpose_a"), "transpose_b",
_op.get_attr("transpose_b"), "a_is_sparse",
_op.get_attr("a_is_sparse"), "b_is_sparse",
_op.get_attr("b_is_sparse"), "Ta", _op.get_attr("Ta"), "Tb",
_op.get_attr("Tb"))
_execute.record_gradient(
"SparseMatMul", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "SparseMatMul",
name, _ctx._post_execution_callbacks, a, b, "transpose_a",
transpose_a, "transpose_b", transpose_b, "a_is_sparse", a_is_sparse,
"b_is_sparse", b_is_sparse)
return _result
except _core._FallbackException:
return sparse_mat_mul_eager_fallback(
a, b, transpose_a=transpose_a, transpose_b=transpose_b,
a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse, name=name,
ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_mat_mul_eager_fallback(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sparse_mat_mul
"""
_ctx = ctx if ctx else _context.context()
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if a_is_sparse is None:
a_is_sparse = False
a_is_sparse = _execute.make_bool(a_is_sparse, "a_is_sparse")
if b_is_sparse is None:
b_is_sparse = False
b_is_sparse = _execute.make_bool(b_is_sparse, "b_is_sparse")
_attr_Ta, (a,) = _execute.args_to_matching_eager([a], _ctx, _dtypes.float32)
_attr_Tb, (b,) = _execute.args_to_matching_eager([b], _ctx, _dtypes.float32)
_inputs_flat = [a, b]
_attrs = ("transpose_a", transpose_a, "transpose_b", transpose_b,
"a_is_sparse", a_is_sparse, "b_is_sparse", b_is_sparse, "Ta", _attr_Ta,
"Tb", _attr_Tb)
_result = _execute.execute(b"SparseMatMul", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SparseMatMul", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sparse_segment_mean(data, indices, segment_ids, name=None):
r"""Computes the mean along sparse segments of a tensor.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by `indices`.
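For example:
```
c = tf.constant([[1.0, 2.0, 3.0, 4.0],
[-1.0, -2.0, -3.0, -4.0],
[5.0, 6.0, 7.0, 8.0]])
# Rows 0 and 1 are averaged into segment 0; row 2 forms segment 1.
tf.sparse_segment_mean(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# ==> [[0.0, 0.0, 0.0, 0.0],
#      [5.0, 6.0, 7.0, 8.0]]
```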
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor. Has same rank as `segment_ids`.
segment_ids: A `Tensor` of type `int32`.
A 1-D tensor. Values should be sorted and can be repeated.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SparseSegmentMean", data=data, indices=indices,
segment_ids=segment_ids, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
_execute.record_gradient(
"SparseSegmentMean", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"SparseSegmentMean", name, _ctx._post_execution_callbacks, data,
indices, segment_ids)
return _result
except _core._FallbackException:
return sparse_segment_mean_eager_fallback(
data, indices, segment_ids, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_mean_eager_fallback(data, indices, segment_ids, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sparse_segment_mean
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32)
segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
_inputs_flat = [data, indices, segment_ids]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
_result = _execute.execute(b"SparseSegmentMean", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SparseSegmentMean", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sparse_segment_mean_grad(grad, indices, segment_ids, output_dim0, name=None):
r"""Computes gradients for SparseSegmentMean.
Returns tensor "output" with same shape as grad, except for dimension 0 whose
value is output_dim0.
Args:
grad: A `Tensor`. Must be one of the following types: `float32`, `float64`.
gradient propagated to the SparseSegmentMean op.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
indices passed to the corresponding SparseSegmentMean op.
segment_ids: A `Tensor` of type `int32`.
segment_ids passed to the corresponding SparseSegmentMean op.
output_dim0: A `Tensor` of type `int32`.
dimension 0 of "data" passed to SparseSegmentMean op.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `grad`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SparseSegmentMeanGrad", grad=grad, indices=indices,
segment_ids=segment_ids, output_dim0=output_dim0, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
_execute.record_gradient(
"SparseSegmentMeanGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"SparseSegmentMeanGrad", name, _ctx._post_execution_callbacks, grad,
indices, segment_ids, output_dim0)
return _result
except _core._FallbackException:
return sparse_segment_mean_grad_eager_fallback(
grad, indices, segment_ids, output_dim0, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_mean_grad_eager_fallback(grad, indices, segment_ids, output_dim0, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sparse_segment_mean_grad
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (grad,) = _execute.args_to_matching_eager([grad], _ctx)
_attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32)
segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
output_dim0 = _ops.convert_to_tensor(output_dim0, _dtypes.int32)
_inputs_flat = [grad, indices, segment_ids, output_dim0]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
_result = _execute.execute(b"SparseSegmentMeanGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SparseSegmentMeanGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sparse_segment_mean_with_num_segments(data, indices, segment_ids, num_segments, name=None):
r"""Computes the mean along sparse segments of a tensor.
Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
missing, the `output` tensor at that position will be zeroed.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
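For example (an illustrative sketch; the result is computed by hand from the
definition, calling the wrapper defined in this module):
```python
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
# Segment 1 receives no rows, so its output row is zeroed.
sparse_segment_mean_with_num_segments(
    c, tf.constant([0, 1]), tf.constant([0, 2]), num_segments=3)
# => [[1. 2.]
#     [0. 0.]
#     [3. 4.]]
```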
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor. Has same rank as `segment_ids`.
segment_ids: A `Tensor` of type `int32`.
A 1-D tensor. Values should be sorted and can be repeated.
num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Should equal the number of distinct segment IDs.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SparseSegmentMeanWithNumSegments", data=data, indices=indices,
segment_ids=segment_ids, num_segments=num_segments, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
"Tnumsegments", _op.get_attr("Tnumsegments"))
_execute.record_gradient(
"SparseSegmentMeanWithNumSegments", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"SparseSegmentMeanWithNumSegments", name,
_ctx._post_execution_callbacks, data, indices, segment_ids,
num_segments)
return _result
except _core._FallbackException:
return sparse_segment_mean_with_num_segments_eager_fallback(
data, indices, segment_ids, num_segments, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_mean_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sparse_segment_mean_with_num_segments
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32)
_attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
_inputs_flat = [data, indices, segment_ids, num_segments]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments",
_attr_Tnumsegments)
_result = _execute.execute(b"SparseSegmentMeanWithNumSegments", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"SparseSegmentMeanWithNumSegments", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sparse_segment_sqrt_n(data, indices, segment_ids, name=None):
r"""Computes the sum along sparse segments of a tensor divided by the sqrt of N.
N is the size of the segment being reduced.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
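For example (illustrative; values computed by hand from the definition, using
the public `tf.sparse_segment_sqrt_n` wrapper for this op):
```python
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
# Two rows in one segment: column sums [4, 6] divided by sqrt(2).
tf.sparse_segment_sqrt_n(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[2.828..., 4.242...]]
```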
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor. Has same rank as `segment_ids`.
segment_ids: A `Tensor` of type `int32`.
A 1-D tensor. Values should be sorted and can be repeated.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SparseSegmentSqrtN", data=data, indices=indices,
segment_ids=segment_ids, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
_execute.record_gradient(
"SparseSegmentSqrtN", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"SparseSegmentSqrtN", name, _ctx._post_execution_callbacks, data,
indices, segment_ids)
return _result
except _core._FallbackException:
return sparse_segment_sqrt_n_eager_fallback(
data, indices, segment_ids, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_sqrt_n_eager_fallback(data, indices, segment_ids, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sparse_segment_sqrt_n
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32)
segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
_inputs_flat = [data, indices, segment_ids]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
_result = _execute.execute(b"SparseSegmentSqrtN", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SparseSegmentSqrtN", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sparse_segment_sqrt_n_grad(grad, indices, segment_ids, output_dim0, name=None):
r"""Computes gradients for SparseSegmentSqrtN.
Returns a tensor "output" with the same shape as `grad`, except for dimension 0,
whose size is `output_dim0`.
Args:
grad: A `Tensor`. Must be one of the following types: `float32`, `float64`.
gradient propagated to the SparseSegmentSqrtN op.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
indices passed to the corresponding SparseSegmentSqrtN op.
segment_ids: A `Tensor` of type `int32`.
segment_ids passed to the corresponding SparseSegmentSqrtN op.
output_dim0: A `Tensor` of type `int32`.
dimension 0 of "data" passed to SparseSegmentSqrtN op.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `grad`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SparseSegmentSqrtNGrad", grad=grad, indices=indices,
segment_ids=segment_ids, output_dim0=output_dim0, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
_execute.record_gradient(
"SparseSegmentSqrtNGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"SparseSegmentSqrtNGrad", name, _ctx._post_execution_callbacks, grad,
indices, segment_ids, output_dim0)
return _result
except _core._FallbackException:
return sparse_segment_sqrt_n_grad_eager_fallback(
grad, indices, segment_ids, output_dim0, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_sqrt_n_grad_eager_fallback(grad, indices, segment_ids, output_dim0, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sparse_segment_sqrt_n_grad
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (grad,) = _execute.args_to_matching_eager([grad], _ctx)
_attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32)
segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
output_dim0 = _ops.convert_to_tensor(output_dim0, _dtypes.int32)
_inputs_flat = [grad, indices, segment_ids, output_dim0]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
_result = _execute.execute(b"SparseSegmentSqrtNGrad", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"SparseSegmentSqrtNGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sparse_segment_sqrt_n_with_num_segments(data, indices, segment_ids, num_segments, name=None):
r"""Computes the sum along sparse segments of a tensor divided by the sqrt of N.
N is the size of the segment being reduced.
Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
missing, the `output` tensor at that position will be zeroed.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor. Has same rank as `segment_ids`.
segment_ids: A `Tensor` of type `int32`.
A 1-D tensor. Values should be sorted and can be repeated.
num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Should equal the number of distinct segment IDs.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SparseSegmentSqrtNWithNumSegments", data=data, indices=indices,
segment_ids=segment_ids, num_segments=num_segments, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
"Tnumsegments", _op.get_attr("Tnumsegments"))
_execute.record_gradient(
"SparseSegmentSqrtNWithNumSegments", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"SparseSegmentSqrtNWithNumSegments", name,
_ctx._post_execution_callbacks, data, indices, segment_ids,
num_segments)
return _result
except _core._FallbackException:
return sparse_segment_sqrt_n_with_num_segments_eager_fallback(
data, indices, segment_ids, num_segments, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_sqrt_n_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sparse_segment_sqrt_n_with_num_segments
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32)
_attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
_inputs_flat = [data, indices, segment_ids, num_segments]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments",
_attr_Tnumsegments)
_result = _execute.execute(b"SparseSegmentSqrtNWithNumSegments", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"SparseSegmentSqrtNWithNumSegments", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sparse_segment_sum(data, indices, segment_ids, name=None):
r"""Computes the sum along sparse segments of a tensor.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by `indices`.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
# Select two rows, one segment.
tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[0 0 0 0]]
# Select two rows, two segments.
tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# => [[ 1 2 3 4]
# [-1 -2 -3 -4]]
# Select all rows, two segments.
tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# => [[0 0 0 0]
# [5 6 7 8]]
# Which is equivalent to:
tf.segment_sum(c, tf.constant([0, 0, 1]))
```
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor. Has same rank as `segment_ids`.
segment_ids: A `Tensor` of type `int32`.
A 1-D tensor. Values should be sorted and can be repeated.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SparseSegmentSum", data=data, indices=indices,
segment_ids=segment_ids, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
_execute.record_gradient(
"SparseSegmentSum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"SparseSegmentSum", name, _ctx._post_execution_callbacks, data,
indices, segment_ids)
return _result
except _core._FallbackException:
return sparse_segment_sum_eager_fallback(
data, indices, segment_ids, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_sum_eager_fallback(data, indices, segment_ids, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sparse_segment_sum
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32)
segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
_inputs_flat = [data, indices, segment_ids]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
_result = _execute.execute(b"SparseSegmentSum", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SparseSegmentSum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sparse_segment_sum_with_num_segments(data, indices, segment_ids, num_segments, name=None):
r"""Computes the sum along sparse segments of a tensor.
Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
missing, the `output` tensor at that position will be zeroed.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.sparse_segment_sum_with_num_segments(
c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
# => [[0 0 0 0]
# [0 0 0 0]
# [0 0 0 0]]
tf.sparse_segment_sum_with_num_segments(c,
tf.constant([0, 1]),
tf.constant([0, 2]),
num_segments=4)
# => [[ 1 2 3 4]
# [ 0 0 0 0]
# [-1 -2 -3 -4]
# [ 0 0 0 0]]
```
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor. Has same rank as `segment_ids`.
segment_ids: A `Tensor` of type `int32`.
A 1-D tensor. Values should be sorted and can be repeated.
num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Should equal the number of distinct segment IDs.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SparseSegmentSumWithNumSegments", data=data, indices=indices,
segment_ids=segment_ids, num_segments=num_segments, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
"Tnumsegments", _op.get_attr("Tnumsegments"))
_execute.record_gradient(
"SparseSegmentSumWithNumSegments", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"SparseSegmentSumWithNumSegments", name,
_ctx._post_execution_callbacks, data, indices, segment_ids,
num_segments)
return _result
except _core._FallbackException:
return sparse_segment_sum_with_num_segments_eager_fallback(
data, indices, segment_ids, num_segments, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sparse_segment_sum_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sparse_segment_sum_with_num_segments
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32)
_attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
_inputs_flat = [data, indices, segment_ids, num_segments]
_attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments",
_attr_Tnumsegments)
_result = _execute.execute(b"SparseSegmentSumWithNumSegments", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"SparseSegmentSumWithNumSegments", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sqrt(x, name=None):
r"""Computes square root of x element-wise.
I.e., \\(y = \sqrt{x} = x^{1/2}\\).
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Sqrt", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Sqrt", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Sqrt", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return sqrt_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sqrt_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sqrt
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Sqrt", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Sqrt", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sqrt_grad(y, dy, name=None):
r"""Computes the gradient for the sqrt of `x` wrt its input.
Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
is the corresponding input gradient.
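For example (a hand-computed sketch of the formula above, not a call to the op):
```python
y, dy = 2.0, 1.0         # y = sqrt(4.0), dy = upstream gradient
grad = dy * 0.5 / y      # => 0.25
```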
Args:
y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
dy: A `Tensor`. Must have the same type as `y`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `y`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SqrtGrad", y=y, dy=dy, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"SqrtGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "SqrtGrad",
name, _ctx._post_execution_callbacks, y, dy)
return _result
except _core._FallbackException:
return sqrt_grad_eager_fallback(
y, dy, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sqrt_grad_eager_fallback(y, dy, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sqrt_grad
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx)
(y, dy) = _inputs_T
_inputs_flat = [y, dy]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"SqrtGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SqrtGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def square(x, name=None):
r"""Computes square of x element-wise.
I.e., \\(y = x * x = x^2\\).
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Square", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Square", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Square", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return square_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def square_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function square
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Square", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Square", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.squared_difference', 'squared_difference')
def squared_difference(x, y, name=None):
r"""Returns (x - y)(x - y) element-wise.
*NOTE*: `math.squared_difference` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
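For example (illustrative; results computed by hand from the definition):
```python
x = tf.constant([2, 5])
y = tf.constant([7, 3])
tf.squared_difference(x, y)  # => [25, 4], i.e. (2-7)^2 and (5-3)^2
```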
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"SquaredDifference", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"SquaredDifference", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"SquaredDifference", name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return squared_difference_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def squared_difference_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function squared_difference
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"SquaredDifference", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"SquaredDifference", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def sub(x, y, name=None):
r"""Returns x - y element-wise.
*NOTE*: `Subtract` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
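For example (an illustrative broadcasting sketch, via the public `tf.subtract`
wrapper for this op):
```python
tf.subtract(tf.constant([[1, 2], [3, 4]]), tf.constant([1, 1]))
# => [[0, 1],
#     [2, 3]]  (the 1-D tensor broadcasts across both rows)
```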
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Sub", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Sub", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Sub", name,
_ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return sub_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def sub_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function sub
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Sub", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Sub", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _sum(input, axis, keep_dims=False, name=None):
r"""Computes the sum of elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
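For example (illustrative; shown via the public `tf.reduce_sum` wrapper, which
dispatches to this op):
```python
x = tf.constant([[1, 2], [3, 4]])
tf.reduce_sum(x, axis=1)                  # => [3, 7]
tf.reduce_sum(x, axis=1, keep_dims=True)  # => [[3], [7]] (rank is retained)
```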
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
The tensor to reduce.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.
keep_dims: An optional `bool`. Defaults to `False`.
If true, retain reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_, _, _op = _op_def_lib._apply_op_helper(
"Sum", input=input, reduction_indices=axis, keep_dims=keep_dims,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
"Tidx", _op.get_attr("Tidx"))
_execute.record_gradient(
"Sum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Sum", name,
_ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
return _result
except _core._FallbackException:
return _sum_eager_fallback(
input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def _sum_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function _sum
"""
_ctx = ctx if ctx else _context.context()
if keep_dims is None:
keep_dims = False
keep_dims = _execute.make_bool(keep_dims, "keep_dims")
_attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
_attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
_inputs_flat = [input, axis]
_attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx)
_result = _execute.execute(b"Sum", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Sum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.tan', 'tan')
def tan(x, name=None):
r"""Computes tan of x element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Tan", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Tan", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Tan", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return tan_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def tan_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tan
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Tan", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Tan", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def tanh(x, name=None):
r"""Computes hyperbolic tangent of `x` element-wise.
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Tanh", x=x, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Tanh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Tanh", name,
_ctx._post_execution_callbacks, x)
return _result
except _core._FallbackException:
return tanh_eager_fallback(
x, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def tanh_eager_fallback(x, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tanh
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
_inputs_flat = [x]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Tanh", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Tanh", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def tanh_grad(y, dy, name=None):
r"""Computes the gradient for the tanh of `x` wrt its input.
Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
is the corresponding input gradient.
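For example (a hand-computed sketch of the formula above, not a call to the op):
```python
y, dy = 0.5, 1.0           # y = tanh(x) for some x, dy = upstream gradient
grad = dy * (1 - y * y)    # => 0.75
```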
Args:
y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
dy: A `Tensor`. Must have the same type as `y`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `y`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"TanhGrad", y=y, dy=dy, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TanhGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "TanhGrad",
name, _ctx._post_execution_callbacks, y, dy)
return _result
except _core._FallbackException:
return tanh_grad_eager_fallback(
y, dy, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def tanh_grad_eager_fallback(y, dy, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function tanh_grad
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx)
(y, dy) = _inputs_T
_inputs_flat = [y, dy]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"TanhGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TanhGrad", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def truncate_div(x, y, name=None):
r"""Returns x / y element-wise for integer types.
Truncation designates that negative numbers will round fractional quantities
toward zero. I.e. -7 / 5 = -1. This matches C semantics but is different
from Python semantics. See `FloorDiv` for a division function that matches
Python semantics.
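For example (illustrative; results follow the C-style rule above, via the
public `tf.truncatediv` wrapper for this op):
```python
tf.truncatediv(tf.constant([-7, 7]), tf.constant([5, 5]))
# => [-1, 1]  (fractions are rounded toward zero)
```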
*NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"TruncateDiv", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TruncateDiv", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "TruncateDiv",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return truncate_div_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def truncate_div_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function truncate_div
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"TruncateDiv", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TruncateDiv", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def truncate_mod(x, y, name=None):
r"""Returns element-wise remainder of division. This emulates C semantics in that
the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
y + truncate_mod(x, y) = x`.
*NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
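For example (illustrative; results computed by hand from the identity above,
via the public `tf.truncatemod` wrapper for this op):
```python
tf.truncatemod(tf.constant([-7, 7]), tf.constant([5, -5]))
# => [-2, 2]  (the result takes the sign of x, as in C)
```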
Args:
x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `bfloat16`, `half`, `float32`, `float64`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"TruncateMod", x=x, y=y, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"TruncateMod", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "TruncateMod",
name, _ctx._post_execution_callbacks, x, y)
return _result
except _core._FallbackException:
return truncate_mod_eager_fallback(
x, y, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def truncate_mod_eager_fallback(x, y, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function truncate_mod
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
(x, y) = _inputs_T
_inputs_flat = [x, y]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"TruncateMod", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"TruncateMod", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.unsorted_segment_max', 'unsorted_segment_max')
def unsorted_segment_max(data, segment_ids, num_segments, name=None):
r"""Computes the maximum along segments of a tensor.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
This operator is similar to the unsorted segment sum operator found
[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Instead of computing the sum over segments, it computes the maximum such that:
\\(output_i = \max_j data_j\\) where max is over `j` such
that `segment_ids[j] == i`.
If the maximum is empty for a given segment ID `i`, it outputs the smallest
possible value for the specific numeric type,
`output[i] = numeric_limits<T>::lowest()`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
</div>
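For example (illustrative; values computed by hand from the definition):
```python
c = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# Rows 0 and 2 fall in segment 0; the elementwise maximum is taken.
tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2)
# => [[7, 8, 9],
#     [4, 5, 6]]
```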
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor whose rank is equal to the rank of `data`'s
first dimension.
num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"UnsortedSegmentMax", data=data, segment_ids=segment_ids,
num_segments=num_segments, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"),
"Tnumsegments", _op.get_attr("Tnumsegments"))
_execute.record_gradient(
"UnsortedSegmentMax", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"UnsortedSegmentMax", name, _ctx._post_execution_callbacks, data,
segment_ids, num_segments)
return _result
except _core._FallbackException:
return unsorted_segment_max_eager_fallback(
data, segment_ids, num_segments, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def unsorted_segment_max_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function unsorted_segment_max
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
_attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
_inputs_flat = [data, segment_ids, num_segments]
_attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments",
_attr_Tnumsegments)
_result = _execute.execute(b"UnsortedSegmentMax", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"UnsortedSegmentMax", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.unsorted_segment_min', 'unsorted_segment_min')
def unsorted_segment_min(data, segment_ids, num_segments, name=None):
r"""Computes the minimum along segments of a tensor.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
This operator is similar to the unsorted segment sum operator found
[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Instead of computing the sum over segments, it computes the minimum such that:
\\(output_i = \min_j data_j\\) where min is over `j` such
that `segment_ids[j] == i`.
If the minimum is empty for a given segment ID `i`, it outputs the largest
possible value for the specific numeric type,
`output[i] = numeric_limits<T>::max()`.
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor whose rank is equal to the rank of `data`'s
first dimension.
num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"UnsortedSegmentMin", data=data, segment_ids=segment_ids,
num_segments=num_segments, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"),
"Tnumsegments", _op.get_attr("Tnumsegments"))
_execute.record_gradient(
"UnsortedSegmentMin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"UnsortedSegmentMin", name, _ctx._post_execution_callbacks, data,
segment_ids, num_segments)
return _result
except _core._FallbackException:
return unsorted_segment_min_eager_fallback(
data, segment_ids, num_segments, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def unsorted_segment_min_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function unsorted_segment_min
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
_attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
_inputs_flat = [data, segment_ids, num_segments]
_attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments",
_attr_Tnumsegments)
_result = _execute.execute(b"UnsortedSegmentMin", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"UnsortedSegmentMin", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.unsorted_segment_prod', 'unsorted_segment_prod')
def unsorted_segment_prod(data, segment_ids, num_segments, name=None):
r"""Computes the product along segments of a tensor.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
This operator is similar to the unsorted segment sum operator found
[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Instead of computing the sum over segments, it computes the product of all
entries belonging to a segment such that:
\\(output_i = \prod_j data_j\\) where the product is over `j` such
that `segment_ids[j] == i`.
If there is no entry for a given segment ID `i`, it outputs 1.
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A 1-D tensor whose rank is equal to the rank of `data`'s
first dimension.
num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"UnsortedSegmentProd", data=data, segment_ids=segment_ids,
num_segments=num_segments, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"),
"Tnumsegments", _op.get_attr("Tnumsegments"))
_execute.record_gradient(
"UnsortedSegmentProd", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"UnsortedSegmentProd", name, _ctx._post_execution_callbacks, data,
segment_ids, num_segments)
return _result
except _core._FallbackException:
return unsorted_segment_prod_eager_fallback(
data, segment_ids, num_segments, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def unsorted_segment_prod_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function unsorted_segment_prod
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
_attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
_inputs_flat = [data, segment_ids, num_segments]
_attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments",
_attr_Tnumsegments)
_result = _execute.execute(b"UnsortedSegmentProd", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"UnsortedSegmentProd", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.unsorted_segment_sum', 'unsorted_segment_sum')
def unsorted_segment_sum(data, segment_ids, num_segments, name=None):
r"""Computes the sum along segments of a tensor.
Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of
segments.
Computes a tensor such that
\\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such
that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`
need not be sorted and need not cover all values in the full
range of valid values.
If the sum is empty for a given segment ID `i`, `output[i] = 0`.
If the given segment ID `i` is negative, the value is dropped and will not be
added to the sum of the segment.
`num_segments` should equal the number of distinct segment IDs.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
</div>
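For example (illustrative; values computed by hand from the definition):
```python
c = tf.constant([[1, 2], [3, 4], [5, 6]])
# segment_ids need not be sorted: rows 0 and 2 both land in segment 0.
tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# => [[6, 8],
#     [3, 4]]
```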
Args:
data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor whose shape is a prefix of `data.shape`.
num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `data`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"UnsortedSegmentSum", data=data, segment_ids=segment_ids,
num_segments=num_segments, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"),
"Tnumsegments", _op.get_attr("Tnumsegments"))
_execute.record_gradient(
"UnsortedSegmentSum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"UnsortedSegmentSum", name, _ctx._post_execution_callbacks, data,
segment_ids, num_segments)
return _result
except _core._FallbackException:
return unsorted_segment_sum_eager_fallback(
data, segment_ids, num_segments, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def unsorted_segment_sum_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function unsorted_segment_sum
"""
_ctx = ctx if ctx else _context.context()
_attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
_attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
_attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
_inputs_flat = [data, segment_ids, num_segments]
_attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments",
_attr_Tnumsegments)
_result = _execute.execute(b"UnsortedSegmentSum", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"UnsortedSegmentSum", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
@tf_export('math.zeta', 'zeta')
def zeta(x, q, name=None):
r"""Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
The Hurwitz zeta function is defined as:
\\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
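For example (illustrative; with q = 1 the Hurwitz zeta reduces to the Riemann
zeta function, so zeta(2, 1) = pi^2 / 6):
```python
tf.zeta(tf.constant([2.0]), tf.constant([1.0]))
# => [1.6449...]  (pi^2 / 6)
```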
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
q: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"Zeta", x=x, q=q, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
_execute.record_gradient(
"Zeta", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "Zeta", name,
_ctx._post_execution_callbacks, x, q)
return _result
except _core._FallbackException:
return zeta_eager_fallback(
x, q, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def zeta_eager_fallback(x, q, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function zeta
"""
_ctx = ctx if ctx else _context.context()
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, q], _ctx)
(x, q) = _inputs_T
_inputs_flat = [x, q]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Zeta", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=_ctx, name=name)
_execute.record_gradient(
"Zeta", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "Abs"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "AccumulateNV2"
# input_arg {
# name: "inputs"
# type_attr: "T"
# number_attr: "N"
# }
# output_arg {
# name: "sum"
# type_attr: "T"
# }
# attr {
# name: "N"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "shape"
# type: "shape"
# }
# is_aggregate: true
# is_commutative: true
# }
# op {
# name: "Acos"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Acosh"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Add"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# type: DT_STRING
# }
# }
# }
# }
# op {
# name: "AddN"
# input_arg {
# name: "inputs"
# type_attr: "T"
# number_attr: "N"
# }
# output_arg {
# name: "sum"
# type_attr: "T"
# }
# attr {
# name: "N"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# type: DT_VARIANT
# }
# }
# }
# is_aggregate: true
# is_commutative: true
# }
# op {
# name: "AddV2"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# is_aggregate: true
# is_commutative: true
# }
# op {
# name: "All"
# input_arg {
# name: "input"
# type: DT_BOOL
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type: DT_BOOL
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Angle"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "Tout"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# attr {
# name: "Tout"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Any"
# input_arg {
# name: "input"
# type: DT_BOOL
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type: DT_BOOL
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "ApproximateEqual"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "tolerance"
# type: "float"
# default_value {
# f: 1e-05
# }
# }
# is_commutative: true
# }
# op {
# name: "ArgMax"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "dimension"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "output_type"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "output_type"
# type: "type"
# default_value {
# type: DT_INT64
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "ArgMin"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "dimension"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "output_type"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "output_type"
# type: "type"
# default_value {
# type: DT_INT64
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Asin"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Asinh"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Atan"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Atan2"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Atanh"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "BatchMatMul"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# attr {
# name: "adj_x"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "adj_y"
# type: "bool"
# default_value {
# b: false
# }
# }
# }
# op {
# name: "BesselI0e"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "BesselI1e"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Betainc"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Bincount"
# input_arg {
# name: "arr"
# type: DT_INT32
# }
# input_arg {
# name: "size"
# type: DT_INT32
# }
# input_arg {
# name: "weights"
# type_attr: "T"
# }
# output_arg {
# name: "bins"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Bucketize"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_INT32
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "boundaries"
# type: "list(float)"
# }
# }
# op {
# name: "Cast"
# input_arg {
# name: "x"
# type_attr: "SrcT"
# }
# output_arg {
# name: "y"
# type_attr: "DstT"
# }
# attr {
# name: "SrcT"
# type: "type"
# }
# attr {
# name: "DstT"
# type: "type"
# }
# }
# op {
# name: "Ceil"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "ClipByValue"
# input_arg {
# name: "t"
# type_attr: "T"
# }
# input_arg {
# name: "clip_value_min"
# type_attr: "T"
# }
# input_arg {
# name: "clip_value_max"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "CompareAndBitpack"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "threshold"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type: DT_UINT8
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BOOL
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT8
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Complex"
# input_arg {
# name: "real"
# type_attr: "T"
# }
# input_arg {
# name: "imag"
# type_attr: "T"
# }
# output_arg {
# name: "out"
# type_attr: "Tout"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tout"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "ComplexAbs"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "Tout"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# attr {
# name: "Tout"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Conj"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# type: DT_VARIANT
# }
# }
# }
# }
# op {
# name: "Cos"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Cosh"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Cross"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# output_arg {
# name: "product"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "Cumprod"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "axis"
# type_attr: "Tidx"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# }
# attr {
# name: "exclusive"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "reverse"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Cumsum"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "axis"
# type_attr: "Tidx"
# }
# output_arg {
# name: "out"
# type_attr: "T"
# }
# attr {
# name: "exclusive"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "reverse"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Digamma"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Div"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Equal"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_QUINT8
# type: DT_QINT8
# type: DT_QINT32
# type: DT_STRING
# type: DT_BOOL
# type: DT_COMPLEX128
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Erf"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Erfc"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Exp"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Expm1"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Floor"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "FloorDiv"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "FloorMod"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Greater"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "GreaterEqual"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "HistogramFixedWidth"
# input_arg {
# name: "values"
# type_attr: "T"
# }
# input_arg {
# name: "value_range"
# type_attr: "T"
# }
# input_arg {
# name: "nbins"
# type: DT_INT32
# }
# output_arg {
# name: "out"
# type_attr: "dtype"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "dtype"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Igamma"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "IgammaGradA"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Igammac"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Imag"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "Tout"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# attr {
# name: "Tout"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Inv"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "InvGrad"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "dy"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "IsFinite"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "IsInf"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "IsNan"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Less"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "LessEqual"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "Lgamma"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "LinSpace"
# input_arg {
# name: "start"
# type_attr: "T"
# }
# input_arg {
# name: "stop"
# type_attr: "T"
# }
# input_arg {
# name: "num"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Log"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Log1p"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "LogicalAnd"
# input_arg {
# name: "x"
# type: DT_BOOL
# }
# input_arg {
# name: "y"
# type: DT_BOOL
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# is_commutative: true
# }
# op {
# name: "LogicalNot"
# input_arg {
# name: "x"
# type: DT_BOOL
# }
# output_arg {
# name: "y"
# type: DT_BOOL
# }
# }
# op {
# name: "LogicalOr"
# input_arg {
# name: "x"
# type: DT_BOOL
# }
# input_arg {
# name: "y"
# type: DT_BOOL
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# is_commutative: true
# }
# op {
# name: "MatMul"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# output_arg {
# name: "product"
# type_attr: "T"
# }
# attr {
# name: "transpose_a"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "transpose_b"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Max"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Maximum"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Mean"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Min"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Minimum"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Mod"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
#         type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Mul"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Neg"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "NotEqual"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_QUINT8
# type: DT_QINT8
# type: DT_QINT32
# type: DT_STRING
# type: DT_BOOL
# type: DT_COMPLEX128
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Polygamma"
# input_arg {
# name: "a"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Pow"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_HALF
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Prod"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "QuantizeDownAndShrinkRange"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "input_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "input_max"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "output_min"
# type: DT_FLOAT
# }
# output_arg {
# name: "output_max"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# }
# op {
# name: "QuantizedAdd"
# input_arg {
# name: "x"
# type_attr: "T1"
# }
# input_arg {
# name: "y"
# type_attr: "T2"
# }
# input_arg {
# name: "min_x"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_x"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_y"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_y"
# type: DT_FLOAT
# }
# output_arg {
# name: "z"
# type_attr: "Toutput"
# }
# output_arg {
# name: "min_z"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_z"
# type: DT_FLOAT
# }
# attr {
# name: "T1"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "T2"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Toutput"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "QuantizedMatMul"
# input_arg {
# name: "a"
# type_attr: "T1"
# }
# input_arg {
# name: "b"
# type_attr: "T2"
# }
# input_arg {
# name: "min_a"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_a"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_b"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_b"
# type: DT_FLOAT
# }
# output_arg {
# name: "out"
# type_attr: "Toutput"
# }
# output_arg {
# name: "min_out"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_out"
# type: DT_FLOAT
# }
# attr {
# name: "T1"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "T2"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Toutput"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "transpose_a"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "transpose_b"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "Tactivation"
# type: "type"
# default_value {
# type: DT_QUINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# }
# op {
# name: "QuantizedMul"
# input_arg {
# name: "x"
# type_attr: "T1"
# }
# input_arg {
# name: "y"
# type_attr: "T2"
# }
# input_arg {
# name: "min_x"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_x"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_y"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_y"
# type: DT_FLOAT
# }
# output_arg {
# name: "z"
# type_attr: "Toutput"
# }
# output_arg {
# name: "min_z"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_z"
# type: DT_FLOAT
# }
# attr {
# name: "T1"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "T2"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Toutput"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Range"
# input_arg {
# name: "start"
# type_attr: "Tidx"
# }
# input_arg {
# name: "limit"
# type_attr: "Tidx"
# }
# input_arg {
# name: "delta"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "Tidx"
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Real"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "Tout"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_COMPLEX64
# }
# allowed_values {
# list {
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# attr {
# name: "Tout"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "RealDiv"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Reciprocal"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "ReciprocalGrad"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "dy"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "RequantizationRange"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "input_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "input_max"
# type: DT_FLOAT
# }
# output_arg {
# name: "output_min"
# type: DT_FLOAT
# }
# output_arg {
# name: "output_max"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# }
# op {
# name: "Requantize"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "input_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "input_max"
# type: DT_FLOAT
# }
# input_arg {
# name: "requested_output_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "requested_output_max"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "output_min"
# type: DT_FLOAT
# }
# output_arg {
# name: "output_max"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# }
# op {
# name: "Rint"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Round"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Rsqrt"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "RsqrtGrad"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "dy"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "SegmentMax"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SegmentMean"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SegmentMin"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SegmentProd"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SegmentSum"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Select"
# input_arg {
# name: "condition"
# type: DT_BOOL
# }
# input_arg {
# name: "t"
# type_attr: "T"
# }
# input_arg {
# name: "e"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# }
# }
# op {
# name: "Sigmoid"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "SigmoidGrad"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "dy"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Sign"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Sin"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Sinh"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "SparseMatMul"
# input_arg {
# name: "a"
# type_attr: "Ta"
# }
# input_arg {
# name: "b"
# type_attr: "Tb"
# }
# output_arg {
# name: "product"
# type: DT_FLOAT
# }
# attr {
# name: "transpose_a"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "transpose_b"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "a_is_sparse"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "b_is_sparse"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "Ta"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_BFLOAT16
# }
# }
# }
# attr {
# name: "Tb"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_BFLOAT16
# }
# }
# }
# }
# op {
# name: "SparseSegmentMean"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentMeanGrad"
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# input_arg {
# name: "output_dim0"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentMeanWithNumSegments"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentSqrtN"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentSqrtNGrad"
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# input_arg {
# name: "output_dim0"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentSqrtNWithNumSegments"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentSum"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "SparseSegmentSumWithNumSegments"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "indices"
# type_attr: "Tidx"
# }
# input_arg {
# name: "segment_ids"
# type: DT_INT32
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Sqrt"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "SqrtGrad"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "dy"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Square"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "SquaredDifference"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# is_commutative: true
# }
# op {
# name: "Sub"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Sum"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "reduction_indices"
# type_attr: "Tidx"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "keep_dims"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tidx"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Tan"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "Tanh"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "TanhGrad"
# input_arg {
# name: "y"
# type_attr: "T"
# }
# input_arg {
# name: "dy"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "TruncateDiv"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_UINT8
# type: DT_INT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT32
# type: DT_INT64
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# }
# }
# }
# }
# op {
# name: "TruncateMod"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "UnsortedSegmentMax"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "UnsortedSegmentMin"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "UnsortedSegmentProd"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "UnsortedSegmentSum"
# input_arg {
# name: "data"
# type_attr: "T"
# }
# input_arg {
# name: "segment_ids"
# type_attr: "Tindices"
# }
# input_arg {
# name: "num_segments"
# type_attr: "Tnumsegments"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "Tindices"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "Tnumsegments"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "Zeta"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "q"
# type_attr: "T"
# }
# output_arg {
# name: "z"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
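# The commented OpDef protos above mirror the registrations serialized into
# _op_def_lib below. As one concrete example of the segment ops documented
# here, UnsortedSegmentSum scatter-adds rows of `data` into `num_segments`
# output rows keyed by `segment_ids` (a sketch, assuming the usual tf.math
# wrapper):
#
#   data = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
#   tf.math.unsorted_segment_sum(data, segment_ids=[0, 1, 0], num_segments=2)
#   # -> [[6., 8.], [3., 4.]]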
_op_def_lib = _InitOpDefLibrary(b"\n,\n\003Abs\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\no\n\rAccumulateNV2\022\016\n\006inputs\"\001T*\001N\032\010\n\003sum\"\001T\"\014\n\001N\022\003int(\0010\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\016\n\005shape\022\005shape\200\001\001\220\001\001\n/\n\004Acos\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Acosh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\003Add\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\005\003\t\010\022\007\nW\n\004AddN\022\016\n\006inputs\"\001T*\001N\032\010\n\003sum\"\001T\"\014\n\001N\022\003int(\0010\001\"!\n\001T\022\004type:\026\n\0242\022\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\025\200\001\001\220\001\001\nA\n\005AddV2\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\032\n\001T\022\004type:\017\n\r2\013\016\023\001\002\004\006\005\003\t\010\022\200\001\001\220\001\001\nh\n\003All\022\t\n\005input\030\n\022\031\n\021reduction_indices\"\004Tidx\032\n\n\006output\030\n\"\025\n\tkeep_dims\022\004bool\032\002(\000\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nT\n\005Angle\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\nh\n\003Any\022\t\n\005input\030\n\022\031\n\021reduction_indices\"\004Tidx\032\n\n\006output\030\n\"\025\n\tkeep_dims\022\004bool\032\002(\000\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\ni\n\020ApproximateEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\031\n\ttolerance\022\005float\032\005%\254\305\'7\220\001\001\n\233\001\n\006ArgMax\022\n\n\005input\"\001T\022\021\n\tdimension\"\004Tidx\032\025\n\006output\"\013output_type\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\037\n\013output_type\022\004type\032\0020\t:\006\n\0042\002\003\t\n\233\001\n\006ArgMin\022\n\n\005input\"\001T\022\021\n\tdimension\"\004Tidx\032\025\n\006output\"\013output_type\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\037\n\013output_type\022\004type\032\0020\t:\006\n\0042\002\003\t\n/\n\004Asin\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Asinh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n/\n\004Atan\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n4\n\005Atan2\022\006\n\001y\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n.\n\005Atanh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nh\n\013BatchMatMul\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\013\n\006output\"\001T\"\026\n\001T\022\004type:\013\n\t2\007\016\023\001\002\003\010\022\"\021\n\005adj_x\022\004bool\032\002(\000\"\021\n\005adj_y\022\004bool\032\002(\000\n0\n\tBesselI0e\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n0\n\tBesselI1e\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n<\n\007Betainc\022\006\n\001a\"\001T\022\006\n\001b\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\nK\n\010Bincount\022\007\n\003arr\030\003\022\010\n\004size\030\003\022\014\n\007weights\"\001T\032\t\n\004bins\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\nS\n\tBucketize\022\n\n\005input\"\001T\032\n\n\006output\030\003\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\"\031\n\nboundaries\022\013list(float)\n8\n\004Cast\022\t\n\001x\"\004SrcT\032\t\n\001y\"\004DstT\"\014\n\004SrcT\022\004type\"\014\n\004DstT\022\004type\n+\n\004Ceil\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\nn\n\013ClipByValue\022\006\n\001t\"\001T\022\023\n\016clip_value_min\"\001T\022\023\n\016clip_value_max\"\001T\032\013\n\006output\"\001T\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\nT\n\021CompareAndBitpack\022\n\n\005input\"\001T\022\016\n\tthreshold\"\001T\032\n\n\006output\030\004\"\027\n\001T\022\004type:\014\n\n2\010\n\023\001\002\006\005\003\t\n]\n\007Complex\022\t\n\004real\"\001T\022\t\n\004imag\"\001T\032\013\n\003out\"\004Tout\"\025\n\001T\022\004type\032\0020\001:\006\n\0042\002\001\002\"\030\n\004Tout\022\004type\032\0020\010:\006\n\0042\002\010\022\nP\n\nComplexAbs\022\006\n\001x\"\001T\032\t\n\001y\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n7\n\004Conj\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\026\n\001T\022\004type\032\0020\010:\007\n\0052\003\010\022\025\n,\n\003Cos\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n-\n\004Cosh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nB\n\005Cross\022\006\n\001a\"\001T\022\006\n\001b\"\001T\032\014\n\007product\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\221\001\n\007Cumprod\022\006\n\001x\"\001T\022\014\n\004axis\"\004Tidx\032\010\n\003out\"\001T\"\025\n\texclusive\022\004bool\032\002(\000\"\023\n\007reverse\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\220\001\n\006Cumsum\022\006\n\001x\"\001T\022\014\n\004axis\"\004Tidx\032\010\n\003out\"\001T\"\025\n\texclusive\022\004bool\032\002(\000\"\023\n\007reverse\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n.\n\007Digamma\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n:\n\003Div\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\nB\n\005Equal\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\037\n\001T\022\004type:\024\n\0222\020\016\023\001\002\004\006\005\003\t\010\014\013\r\007\n\022\220\001\001\n*\n\003Erf\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\004Erfc\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n,\n\003Exp\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\005Expm1\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n,\n\005Floor\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n?\n\010FloorDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n9\n\010FloorMod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\003\t\016\023\001\002\n=\n\007Greater\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\nB\n\014GreaterEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022
\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n}\n\023HistogramFixedWidth\022\013\n\006values\"\001T\022\020\n\013value_range\"\001T\022\t\n\005nbins\030\003\032\014\n\003out\"\005dtype\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\"\031\n\005dtype\022\004type\032\0020\003:\006\n\0042\002\003\t\n3\n\006Igamma\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n8\n\013IgammaGradA\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n4\n\007Igammac\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\nS\n\004Imag\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n.\n\003Inv\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n9\n\007InvGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\010IsFinite\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\005IsInf\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\005IsNan\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n:\n\004Less\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n?\n\tLessEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n-\n\006Lgamma\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\ni\n\010LinSpace\022\n\n\005start\"\001T\022\t\n\004stop\"\001T\022\013\n\003num\"\004Tidx\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\016\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n,\n\003Log\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\005Log1p\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n$\n\nLogicalAnd\022\005\n\001x\030\n\022\005\n\001y\030\n\032\005\n\001z\030\n\220\001\001\n\032\n\nLogicalNot\022\005\n\001x\030\n\032\005\n\001y\030\n\n#\n\tLogicalOr\022\005\n\001x\030\n\022\005\n\001y\030\n\032\005\n\001z\030\n\220\001\001\np\n\006MatMul\022\006\n\001a\"\001T\022\006\n\001b\"\001T\032\014\n\007product\"\001T\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\026\n\001T\022\004type:\013\n\t2\007\016\023\001\002\003\010\022\n\214\001\n\003Max\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n;\n\007Maximum\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\220\001\001\n\215\001\n\004Mean\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\214\001\n\003Min\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n;\n\007Minimum\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\220\001\001\n5\n\003Mod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\026\n\001T\022\004type:\013\n\t2\007\003\t\023\023\016\001\002\n=\n\003Mul\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\220\001\001\n.\n\003Neg\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\nE\n\010NotEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\037\n\001T\022\004type:\024\n\0222\020\016\023\001\002\004\006\005\003\t\010\014\013\r\007\n\022\220\001\001\n6\n\tPolygamma\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n6\n\003Pow\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\001\023\002\003\t\010\022\n\215\001\n\004Prod\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\267\001\n\032QuantizeDownAndShrinkRange\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\022\n\006output\"\010out_type\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\n\301\001\n\014QuantizedAdd\022\007\n\001x\"\002T1\022\007\n\001y\"\002T2\022\t\n\005min_x\030\001\022\t\n\005max_x\030\001\022\t\n\005min_y\030\001\022\t\n\005max_y\030\001\032\014\n\001z\"\007Toutput\032\t\n\005min_z\030\001\032\t\n\005max_z\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\220\001\001\n\235\002\n\017QuantizedMatMul\022\007\n\001a\"\002T1\022\007\n\001b\"\002T2\022\t\n\005min_a\030\001\022\t\n\005max_a\030\001\022\t\n\005min_b\030\001\022\t\n\005max_b\030\001\032\016\n\003out\"\007Toutput\032\013\n\007min_out\030\001\032\013\n\007max_out\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\"\n\013Tactivation\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\n\301\001\n\014QuantizedMul\022\007\n\001x\"\002T1\022\007\n\001y\"\002T2\022\t\n\005min_x\030\001\022\t\n\005max_x\030\001\022\t\n\005min_y\030\001\022\t\n\005max_y\030\001\032\014\n\001z\"\007Toutput\032\t\n\005min_z\030\001\032\t\n\005max_z\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\220\001\001\na\n\005Range\022\r\n\005start\"\004Tidx\022\r\n\005limit\"\004Tidx\022\r\n\005delta\"\004Tidx\032\016\n\006output\"\004Tidx\"\033\n\004Tidx\022\004type\032\0020\003:\t\n\0072\005\016\001\002\003\t\nS\n\004Real\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n>\n\007RealDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n5\n\nReciprocal\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n@\n\016ReciprocalGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n\177\n\023RequantizationRange\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\n\333\001\n\nRequantize\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\022\030\n\024requested_output_min\030\001\022\030\n\024requested_output_max\030\001\032\022\n\006output\"\010out_type\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\n+\n\004Rint\
022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n0\n\005Round\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Rsqrt\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n;\n\tRsqrtGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nt\n\nSegmentMax\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nz\n\013SegmentMean\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nt\n\nSegmentMin\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nz\n\013SegmentProd\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\ny\n\nSegmentSum\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\n?\n\006Select\022\r\n\tcondition\030\n\022\006\n\001t\"\001T\022\006\n\001e\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n0\n\007Sigmoid\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n=\n\013SigmoidGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n/\n\004Sign\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n,\n\003Sin\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n-\n\004Sinh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n\301\001\n\014SparseMatMul\022\007\n\001a\"\002Ta\022\007\n\001b\"\002Tb\032\013\n\007product\030\001\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\027\n\013a_is_sparse\022\004bool\032\002(\000\"\027\n\013b_is_sparse\022\004bool\032\002(\000\"\026\n\002Ta\022\004type\032\0020\001:\006\n\0042\002\001\016\"\026\n\002Tb\022\004type\032\0020\001:\006\n\0042\002\001\016\nz\n\021SparseSegmentMean\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\217\001\n\025SparseSegmentMeanGrad\022\t\n\004grad\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\017\n\013output_dim0\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\311\001\n 
SparseSegmentMeanWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n{\n\022SparseSegmentSqrtN\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\220\001\n\026SparseSegmentSqrtNGrad\022\t\n\004grad\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\017\n\013output_dim0\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\312\001\n!SparseSegmentSqrtNWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\203\001\n\020SparseSegmentSum\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\322\001\n\037SparseSegmentSumWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n-\n\004Sqrt\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\010SqrtGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n1\n\006Square\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\nG\n\021SquaredDifference\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\220\001\001\n:\n\003Sub\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n\214\001\n\003Sum\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n.\n\003Tan\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n-\n\004Tanh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\010TanhGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nB\n\013TruncateDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n<\n\013TruncateMod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\003\t\016\023\001\002\n\274\001\n\022UnsortedSegmentMax\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\274\001\n\022UnsortedSegmentMin\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\302\001\n\023UnsortedSegmentProd\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\301\001\n\022UnsortedSegmentSum\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n1\n\004Zeta\022\006\n\001x\"\001T\022\006\n\001q\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002")
| 31.749766
| 26,169
| 0.632458
| 60,078
| 440,655
| 4.339775
| 0.020074
| 0.028582
| 0.02261
| 0.021103
| 0.914316
| 0.903285
| 0.888799
| 0.877802
| 0.868954
| 0.863216
| 0
| 0.053219
| 0.236975
| 440,655
| 13,878
| 26,170
| 31.752054
| 0.722215
| 0.385267
| 0
| 0.776066
| 1
| 0.018832
| 0.096812
| 0.05617
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045027
| false
| 0
| 0.002568
| 0
| 0.137476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 5311bdf4dfe6e2813dcf2c28b40dad10195c1693
| 66,098
| py
| Python
| research/object_detection/data_decoders/tf_example_decoder_test.py
| akshit-protonn/models
| 38c8c6fe4144c93d6aadd19981c2b90570c29eba
| ["Apache-2.0"] | 18
| 2022-01-14T09:58:27.000Z
| 2022-01-14T09:58:37.000Z
| research/object_detection/data_decoders/tf_example_decoder_test.py
| akshit-protonn/models
| 38c8c6fe4144c93d6aadd19981c2b90570c29eba
| ["Apache-2.0"] | 62
| 2021-06-09T00:47:27.000Z
| 2021-09-24T09:06:58.000Z
| research/object_detection/data_decoders/tf_example_decoder_test.py
| akshit-protonn/models
| 38c8c6fe4144c93d6aadd19981c2b90570c29eba
| ["Apache-2.0"] | 2
| 2021-02-17T06:59:57.000Z
| 2021-03-18T10:12:30.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.data_decoders.tf_example_decoder."""
import os
import numpy as np
import six
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
from object_detection.utils import test_case
class TfExampleDecoderTest(test_case.TestCase):
def _create_encoded_and_decoded_data(self, data, encoding_type):
if encoding_type == 'jpeg':
encode_fn = tf.image.encode_jpeg
decode_fn = tf.image.decode_jpeg
elif encoding_type == 'png':
encode_fn = tf.image.encode_png
decode_fn = tf.image.decode_png
else:
raise ValueError('Invalid encoding type.')
def prepare_data_fn():
encoded_data = encode_fn(data)
decoded_data = decode_fn(encoded_data)
return encoded_data, decoded_data
return self.execute_cpu(prepare_data_fn, [])
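# Each test below round-trips raw pixels through the codec first, so the
# expected arrays already include any encoding loss (JPEG in particular is
# lossy); the decoder output is then compared against the codec's own decode
# rather than against the raw input.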
def testDecodeAdditionalChannels(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(image, 'jpeg')
additional_channel = np.random.randint(256, size=(4, 5, 1)).astype(np.uint8)
(encoded_additional_channel,
decoded_additional_channel) = self._create_encoded_and_decoded_data(
additional_channel, 'jpeg')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/additional_channels/encoded':
dataset_util.bytes_list_feature(
[encoded_additional_channel] * 2),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_additional_channels=2)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
np.concatenate([decoded_additional_channel] * 2, axis=2),
tensor_dict[fields.InputDataFields.image_additional_channels])
def testDecodeJpegImage(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, decoded_jpeg = self._create_encoded_and_decoded_data(
image, 'jpeg')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.image].get_shape().as_list()),
[None, None, 3])
self.assertAllEqual(
(output[fields.InputDataFields.original_image_spatial_shape]
.get_shape().as_list()), [2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image])
self.assertAllEqual([4, 5], tensor_dict[fields.InputDataFields.
original_image_spatial_shape])
self.assertEqual(
six.b('image_id'), tensor_dict[fields.InputDataFields.source_id])
def testDecodeImageKeyAndFilename(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(image, 'jpeg')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/key/sha256':
dataset_util.bytes_feature(six.b('abc')),
'image/filename':
dataset_util.bytes_feature(six.b('filename'))
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertEqual(six.b('abc'), tensor_dict[fields.InputDataFields.key])
self.assertEqual(
six.b('filename'), tensor_dict[fields.InputDataFields.filename])
def testDecodePngImage(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_png, decoded_png = self._create_encoded_and_decoded_data(
image, 'png')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_png),
'image/format':
dataset_util.bytes_feature(six.b('png')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id'))
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.image].get_shape().as_list()),
[None, None, 3])
self.assertAllEqual(
(output[fields.InputDataFields.original_image_spatial_shape]
.get_shape().as_list()), [2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(decoded_png, tensor_dict[fields.InputDataFields.image])
self.assertAllEqual([4, 5], tensor_dict[fields.InputDataFields.
original_image_spatial_shape])
self.assertEqual(
six.b('image_id'), tensor_dict[fields.InputDataFields.source_id])
def testDecodePngInstanceMasks(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_png, _ = self._create_encoded_and_decoded_data(image, 'png')
mask_1 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8)
mask_2 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8)
encoded_png_1, _ = self._create_encoded_and_decoded_data(mask_1, 'png')
decoded_png_1 = np.squeeze(mask_1.astype(np.float32))
encoded_png_2, _ = self._create_encoded_and_decoded_data(mask_2, 'png')
decoded_png_2 = np.squeeze(mask_2.astype(np.float32))
encoded_masks = [encoded_png_1, encoded_png_2]
decoded_masks = np.stack([decoded_png_1, decoded_png_2])
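# PNG-encoded instance masks decode to float32 arrays of {0, 1}, stacked
# along a leading [num_instances, height, width] dimension.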
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_png),
'image/format':
dataset_util.bytes_feature(six.b('png')),
'image/object/mask':
dataset_util.bytes_list_feature(encoded_masks)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True,
instance_mask_type=input_reader_pb2.PNG_MASKS)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
decoded_masks,
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
def testDecodeEmptyPngInstanceMasks(self):
image_tensor = np.random.randint(256, size=(10, 10, 3)).astype(np.uint8)
encoded_png, _ = self._create_encoded_and_decoded_data(image_tensor, 'png')
encoded_masks = []
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_png),
'image/format':
dataset_util.bytes_feature(six.b('png')),
'image/object/mask':
dataset_util.bytes_list_feature(encoded_masks),
'image/height':
dataset_util.int64_feature(10),
'image/width':
dataset_util.int64_feature(10),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True,
instance_mask_type=input_reader_pb2.PNG_MASKS)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
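# With no encoded masks present, the decoder falls back to the image/height
# and image/width features to shape an empty mask tensor.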
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_instance_masks].shape,
[0, 10, 10])
def testDecodeBoundingBox(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
def testDecodeKeypointDepth(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
keypoint_depths = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
keypoint_depth_weights = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/z':
dataset_util.float_list_feature(keypoint_depths),
'image/object/keypoint/z/weights':
dataset_util.float_list_feature(keypoint_depth_weights),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_keypoints=3, load_keypoint_depth_features=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_keypoint_depths].get_shape(
).as_list()), [2, 3])
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_keypoint_depth_weights]
.get_shape().as_list()), [2, 3])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
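# The six flat z values regroup into [num_instances=2, num_keypoints=3].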
expected_keypoint_depths = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
self.assertAllClose(
expected_keypoint_depths,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depths])
expected_keypoint_depth_weights = [[1.0, 0.9, 0.8], [0.7, 0.6, 0.5]]
self.assertAllClose(
expected_keypoint_depth_weights,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depth_weights])
def testDecodeKeypointDepthNoDepth(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_keypoints=3, load_keypoint_depth_features=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
return output
tensor_dict = self.execute_cpu(graph_fn, [])
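# When no image/object/keypoint/z features are provided, both the keypoint
# depths and their weights default to zeros.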
expected_keypoints_depth_default = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
self.assertAllClose(
expected_keypoints_depth_default,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depths])
self.assertAllClose(
expected_keypoints_depth_default,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depth_weights])
def testDecodeKeypoint(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()),
[2, 3, 2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
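# Keypoints whose visibility is 0 decode to NaN coordinates; the flat y/x
# lists regroup into [num_instances=2, num_keypoints=3, 2].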
expected_keypoints = [
[[0.0, 1.0], [1.0, 2.0], [np.nan, np.nan]],
[[3.0, 4.0], [np.nan, np.nan], [5.0, 6.0]]]
self.assertAllClose(
expected_keypoints,
tensor_dict[fields.InputDataFields.groundtruth_keypoints])
expected_visibility = (
(np.array(keypoint_visibility) > 0).reshape((2, 3)))
self.assertAllEqual(
expected_visibility,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_visibilities])
def testDecodeKeypointNoVisibilities(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()),
[2, 3, 2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
expected_keypoints = (
np.vstack([keypoint_ys, keypoint_xs]).transpose().reshape((2, 3, 2)))
self.assertAllEqual(
expected_keypoints,
tensor_dict[fields.InputDataFields.groundtruth_keypoints])
expected_visibility = np.ones((2, 3))
self.assertAllEqual(
expected_visibility,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_visibilities])
def testDecodeDefaultGroundtruthWeights(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
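# When image/object/weight is absent, groundtruth_weights defaults to a
# vector of ones, one per box.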
self.assertAllClose(tensor_dict[fields.InputDataFields.groundtruth_weights],
np.ones(2, dtype=np.float32))
def testDecodeObjectLabel(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeMultiClassScores(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
flattened_multiclass_scores = [100., 50.] + [20., 30.]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/multiclass_scores':
dataset_util.float_list_feature(
flattened_multiclass_scores),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_multiclass_scores=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(flattened_multiclass_scores,
tensor_dict[fields.InputDataFields.multiclass_scores])
def testDecodeEmptyMultiClassScores(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_multiclass_scores=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertEqual(
(0,), tensor_dict[fields.InputDataFields.multiclass_scores].shape)
def testDecodeObjectLabelNoText(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes = [1, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes),
})).SerializeToString()
label_map_string = """
item {
id:1
name:'cat'
}
item {
id:2
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithText(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('dog')]
# Annotation label gets overridden by labelmap id.
annotated_bbox_classes = [3, 4]
expected_bbox_classes = [1, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
'image/object/class/label':
dataset_util.int64_list_feature(annotated_bbox_classes),
})).SerializeToString()
label_map_string = """
item {
id:1
name:'cat'
}
item {
id:2
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(expected_bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelUnrecognizedName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('cheetah')]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
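# 'cat' maps to id 2 via the label map; 'cheetah' is absent, so it decodes
# to the default class -1.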
self.assertAllEqual([2, -1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithMappingWithDisplayName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('dog')]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:3
display_name:'cat'
}
item {
id:1
display_name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([3, 1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelUnrecognizedNameWithMappingWithDisplayName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('cheetah')]
bbox_classes_id = [5, 6]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes_id),
})).SerializeToString()
label_map_string = """
item {
name:'/m/cat'
id:3
display_name:'cat'
}
item {
name:'/m/dog'
id:1
display_name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([3, -1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithMappingWithName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('dog')]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([3, 1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectArea(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_area = [100., 174.]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/area':
dataset_util.float_list_feature(object_area),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_area].get_shape().as_list()), [2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(object_area,
tensor_dict[fields.InputDataFields.groundtruth_area])
def testDecodeVerifiedNegClasses(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
neg_category_ids = [0, 5, 8]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/neg_category_ids':
dataset_util.int64_list_feature(neg_category_ids),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
neg_category_ids,
tensor_dict[fields.InputDataFields.groundtruth_verified_neg_classes])
def testDecodeNotExhaustiveClasses(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
not_exhaustive_category_ids = [0, 5, 8]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/not_exhaustive_category_ids':
dataset_util.int64_list_feature(
not_exhaustive_category_ids),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
not_exhaustive_category_ids,
tensor_dict[fields.InputDataFields.groundtruth_not_exhaustive_classes])
def testDecodeObjectIsCrowd(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_is_crowd = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/is_crowd':
dataset_util.int64_list_feature(object_is_crowd),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_is_crowd].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[bool(item) for item in object_is_crowd],
tensor_dict[fields.InputDataFields.groundtruth_is_crowd])
def testDecodeObjectDifficult(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_difficult = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/difficult':
dataset_util.int64_list_feature(object_difficult),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_difficult].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[bool(item) for item in object_difficult],
tensor_dict[fields.InputDataFields.groundtruth_difficult])
def testDecodeObjectGroupOf(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_group_of = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/group_of':
dataset_util.int64_list_feature(object_group_of),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_group_of].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[bool(item) for item in object_group_of],
tensor_dict[fields.InputDataFields.groundtruth_group_of])
def testDecodeObjectWeight(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_weights = [0.75, 1.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/weight':
dataset_util.float_list_feature(object_weights),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_weights].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(object_weights,
tensor_dict[fields.InputDataFields.groundtruth_weights])
def testDecodeClassConfidence(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
class_confidence = [0.0, 1.0, 0.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/class/confidence':
dataset_util.float_list_feature(class_confidence),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_image_confidences]
.get_shape().as_list()), [3])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
class_confidence,
tensor_dict[fields.InputDataFields.groundtruth_image_confidences])
def testDecodeInstanceSegmentation(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(
256, size=(image_height, image_width, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances, image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/object/mask':
dataset_util.float_list_feature(instance_masks_flattened),
'image/object/class/label':
dataset_util.int64_list_feature(object_classes)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_instance_masks].get_shape(
).as_list()), [4, 5, 3])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
instance_masks.astype(np.float32),
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
self.assertAllEqual(object_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testInstancesNotAvailableByDefault(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(
256, size=(image_height, image_width, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances, image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/object/mask':
dataset_util.float_list_feature(instance_masks_flattened),
'image/object/class/label':
dataset_util.int64_list_feature(object_classes)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
tensor_dict)
def testDecodeImageLabels(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
def graph_fn_1():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
'image/format': dataset_util.bytes_feature(six.b('jpeg')),
'image/class/label': dataset_util.int64_list_feature([1, 2]),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn_1, [])
self.assertIn(fields.InputDataFields.groundtruth_image_classes, tensor_dict)
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_image_classes],
np.array([1, 2]))
def graph_fn_2():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/class/text':
dataset_util.bytes_list_feature(
[six.b('dog'), six.b('cat')]),
})).SerializeToString()
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn_2, [])
self.assertIn(fields.InputDataFields.groundtruth_image_classes, tensor_dict)
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_image_classes],
np.array([1, 3]))
def testDecodeContextFeatures(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
num_features = 8
context_feature_length = 10
context_features = np.random.random(num_features*context_feature_length)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/context_features':
dataset_util.float_list_feature(context_features),
'image/context_feature_length':
dataset_util.int64_feature(context_feature_length),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_context_features=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllClose(
context_features.reshape(num_features, context_feature_length),
tensor_dict[fields.InputDataFields.context_features])
self.assertAllEqual(
context_feature_length,
tensor_dict[fields.InputDataFields.context_feature_length])
def testContextFeaturesNotAvailableByDefault(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
num_features = 10
context_feature_length = 10
context_features = np.random.random(num_features*context_feature_length)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/context_features':
dataset_util.float_list_feature(context_features),
'image/context_feature_length':
dataset_util.int64_feature(context_feature_length),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertNotIn(fields.InputDataFields.context_features,
tensor_dict)
def testExpandLabels(self):
label_map_string = """
item {
id:1
name:'cat'
ancestor_ids: 2
}
item {
id:2
name:'animal'
descendant_ids: 1
}
item {
id:3
name:'man'
ancestor_ids: 5
}
item {
id:4
name:'woman'
display_name:'woman'
ancestor_ids: 5
}
item {
id:5
name:'person'
descendant_ids: 3
descendant_ids: 4
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
bbox_classes_text = [six.b('cat'), six.b('cat')]
bbox_group_of = [0, 1]
image_class_text = [six.b('cat'), six.b('person')]
image_confidence = [1.0, 0.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
'image/object/group_of':
dataset_util.int64_list_feature(bbox_group_of),
'image/class/text':
dataset_util.bytes_list_feature(image_class_text),
'image/class/confidence':
dataset_util.float_list_feature(image_confidence),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path, expand_hierarchy_labels=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
expected_boxes = np.stack(
[boxes[0, :], boxes[0, :], boxes[1, :], boxes[1, :]], axis=0)
expected_boxes_class = np.array([1, 2, 1, 2])
expected_boxes_group_of = np.array([0, 0, 1, 1])
expected_image_class = np.array([1, 2, 3, 4, 5])
expected_image_confidence = np.array([1.0, 1.0, 0.0, 0.0, 0.0])
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
self.assertAllEqual(expected_boxes_class,
tensor_dict[fields.InputDataFields.groundtruth_classes])
self.assertAllEqual(
expected_boxes_group_of,
tensor_dict[fields.InputDataFields.groundtruth_group_of])
self.assertAllEqual(
expected_image_class,
tensor_dict[fields.InputDataFields.groundtruth_image_classes])
self.assertAllEqual(
expected_image_confidence,
tensor_dict[fields.InputDataFields.groundtruth_image_confidences])
def testDecodeDensePose(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0, 2.0]
bbox_xmins = [1.0, 5.0, 8.0]
bbox_ymaxs = [2.0, 6.0, 1.0]
bbox_xmaxs = [3.0, 7.0, 3.3]
densepose_num = [0, 4, 2]
densepose_part_index = [2, 2, 3, 4, 2, 9]
densepose_x = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
densepose_y = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4]
densepose_u = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06]
densepose_v = [0.99, 0.98, 0.97, 0.96, 0.95, 0.94]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/densepose/num':
dataset_util.int64_list_feature(densepose_num),
'image/object/densepose/part_index':
dataset_util.int64_list_feature(densepose_part_index),
'image/object/densepose/x':
dataset_util.float_list_feature(densepose_x),
'image/object/densepose/y':
dataset_util.float_list_feature(densepose_y),
'image/object/densepose/u':
dataset_util.float_list_feature(densepose_u),
'image/object/densepose/v':
dataset_util.float_list_feature(densepose_v),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_dense_pose=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
dp_num_points = output[fields.InputDataFields.groundtruth_dp_num_points]
dp_part_ids = output[fields.InputDataFields.groundtruth_dp_part_ids]
dp_surface_coords = output[
fields.InputDataFields.groundtruth_dp_surface_coords]
return dp_num_points, dp_part_ids, dp_surface_coords
dp_num_points, dp_part_ids, dp_surface_coords = self.execute_cpu(
graph_fn, [])
expected_dp_num_points = [0, 4, 2]
expected_dp_part_ids = [
[0, 0, 0, 0],
[2, 2, 3, 4],
[2, 9, 0, 0]
]
expected_dp_surface_coords = np.array(
[
# Instance 0 (no points).
[[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
# Instance 1 (4 points).
[[0.9, 0.1, 0.99, 0.01],
[0.8, 0.2, 0.98, 0.02],
[0.7, 0.3, 0.97, 0.03],
[0.6, 0.4, 0.96, 0.04]],
# Instance 2 (2 points).
[[0.5, 0.5, 0.95, 0.05],
[0.4, 0.6, 0.94, 0.06],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
], dtype=np.float32)
self.assertAllEqual(dp_num_points, expected_dp_num_points)
self.assertAllEqual(dp_part_ids, expected_dp_part_ids)
self.assertAllClose(dp_surface_coords, expected_dp_surface_coords)
def testDecodeTrack(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0, 2.0]
bbox_xmins = [1.0, 5.0, 8.0]
bbox_ymaxs = [2.0, 6.0, 1.0]
bbox_xmaxs = [3.0, 7.0, 3.3]
track_labels = [0, 1, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/track/label':
dataset_util.int64_list_feature(track_labels),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_track_id=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
track_ids = output[fields.InputDataFields.groundtruth_track_ids]
return track_ids
track_ids = self.execute_cpu(graph_fn, [])
expected_track_labels = [0, 1, 2]
self.assertAllEqual(track_ids, expected_track_labels)
if __name__ == '__main__':
tf.test.main()
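# A minimal sketch of the shared decode path these tests exercise, assuming
# the test module's own imports (tf, six, dataset_util, tf_example_decoder)
# and a JPEG byte string `encoded_jpeg`; the feature values are illustrative.
def decode_single_example(encoded_jpeg):
    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
                'image/format': dataset_util.bytes_feature(six.b('jpeg')),
                'image/object/class/label':
                    dataset_util.int64_list_feature([1, 2]),
            })).SerializeToString()
    decoder = tf_example_decoder.TfExampleDecoder()
    # Returns a dict keyed by fields.InputDataFields (e.g. groundtruth_classes).
    return decoder.decode(tf.convert_to_tensor(example))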
| 40.03513 | 80 | 0.603089 | 7,438 | 66,098 | 5.061576 | 0.046383 | 0.056683 | 0.038674 | 0.048874 | 0.868944 | 0.846393 | 0.816564 | 0.798953 | 0.781157 | 0.765007 | 0 | 0.02502 | 0.285863 | 66,098 | 1,650 | 81 | 40.059394 | 0.772557 | 0.016415 | 0 | 0.776297 | 0 | 0 | 0.085575 | 0.03527 | 0 | 0 | 0 | 0 | 0.058906 | 1 | 0.053997 | false | 0 | 0.006311 | 0 | 0.08906 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
531e55e6be488ba1586f078680847b9d77b065ff | 4,416 | py | Python | tests/test_base_protocol.py | Qix-/aiohttp | aee067dccad3dc0e79778a1b213105f20bf39baf | ["Apache-2.0"] | 3 | 2019-01-15T04:17:33.000Z | 2019-03-13T13:12:15.000Z | tests/test_base_protocol.py | Qix-/aiohttp | aee067dccad3dc0e79778a1b213105f20bf39baf | ["Apache-2.0"] | 309 | 2019-08-20T21:49:50.000Z | 2021-07-31T13:27:18.000Z | tests/test_base_protocol.py | amenezes/aiohttp | e8049814a2161278bae178cb96334ce0c98e66f3 | ["Apache-2.0"] | 1 | 2020-12-02T16:06:16.000Z | 2020-12-02T16:06:16.000Z |
import asyncio
from contextlib import suppress
from unittest import mock
import pytest
from aiohttp.base_protocol import BaseProtocol
async def test_loop() -> None:
loop = asyncio.get_event_loop()
asyncio.set_event_loop(None)
pr = BaseProtocol(loop)
assert pr._loop is loop
async def test_pause_writing() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop)
assert not pr._paused
pr.pause_writing()
assert pr._paused
async def test_resume_writing_no_waiters() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
pr.pause_writing()
assert pr._paused
pr.resume_writing()
assert not pr._paused
async def test_connection_made() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
assert pr.transport is None
pr.connection_made(tr)
assert pr.transport is not None
async def test_connection_lost_not_paused() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
assert not pr._connection_lost
pr.connection_lost(None)
assert pr.transport is None
assert pr._connection_lost
async def test_connection_lost_paused_without_waiter() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
assert not pr._connection_lost
pr.pause_writing()
pr.connection_lost(None)
assert pr.transport is None
assert pr._connection_lost
async def test_drain_lost() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.connection_lost(None)
with pytest.raises(ConnectionResetError):
await pr._drain_helper()
async def test_drain_not_paused() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
assert pr._drain_waiter is None
await pr._drain_helper()
assert pr._drain_waiter is None
async def test_resume_drain_waited() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
t = loop.create_task(pr._drain_helper())
await asyncio.sleep(0)
assert pr._drain_waiter is not None
pr.resume_writing()
assert (await t) is None
assert pr._drain_waiter is None
async def test_lost_drain_waited_ok() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
t = loop.create_task(pr._drain_helper())
await asyncio.sleep(0)
assert pr._drain_waiter is not None
pr.connection_lost(None)
assert (await t) is None
assert pr._drain_waiter is None
async def test_lost_drain_waited_exception() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
t = loop.create_task(pr._drain_helper())
await asyncio.sleep(0)
assert pr._drain_waiter is not None
exc = RuntimeError()
pr.connection_lost(exc)
with pytest.raises(RuntimeError) as cm:
await t
assert cm.value is exc
assert pr._drain_waiter is None
async def test_lost_drain_cancelled() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
fut = loop.create_future()
async def wait():
fut.set_result(None)
await pr._drain_helper()
t = loop.create_task(wait())
await fut
t.cancel()
assert pr._drain_waiter is not None
pr.connection_lost(None)
with suppress(asyncio.CancelledError):
await t
assert pr._drain_waiter is None
async def test_resume_drain_cancelled() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
fut = loop.create_future()
async def wait():
fut.set_result(None)
await pr._drain_helper()
t = loop.create_task(wait())
await fut
t.cancel()
assert pr._drain_waiter is not None
pr.resume_writing()
with suppress(asyncio.CancelledError):
await t
assert pr._drain_waiter is None
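# A minimal sketch of the pause/drain lifecycle the tests above assert,
# reusing this module's imports (asyncio, mock, BaseProtocol). _drain_helper
# is private aiohttp API; it appears here only because the tests use it.
async def drain_demo() -> None:
    loop = asyncio.get_event_loop()
    pr = BaseProtocol(loop=loop)
    pr.connection_made(mock.Mock())
    pr.pause_writing()                    # transport signals a full buffer
    waiter = loop.create_task(pr._drain_helper())
    await asyncio.sleep(0)                # let the drain task park itself
    pr.resume_writing()                   # buffer drained; waiter is released
    assert (await waiter) is None
    pr.connection_lost(None)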
| 24.131148 | 63 | 0.688406 | 622 | 4,416 | 4.636656 | 0.101286 | 0.058252 | 0.054092 | 0.081137 | 0.818308 | 0.771498 | 0.727115 | 0.727115 | 0.727115 | 0.711512 | 0 | 0.000867 | 0.216259 | 4,416 | 182 | 64 | 24.263736 | 0.832418 | 0 | 0 | 0.814286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0 | false | 0 | 0.035714 | 0 | 0.035714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
5324fa73c034a05cd172d09f6d03e2153b7f495e | 35 | py | Python | nptweak/__init__.py | kmedian/nptweak | 222f46b8abb9b00f1ae8065d38d0514193aa8a4b | ["MIT"] | null | null | null | nptweak/__init__.py | kmedian/nptweak | 222f46b8abb9b00f1ae8065d38d0514193aa8a4b | ["MIT"] | 2 | 2019-12-03T12:37:17.000Z | 2019-12-03T12:37:45.000Z | nptweak/__init__.py | kmedian/nptweak | 222f46b8abb9b00f1ae8065d38d0514193aa8a4b | ["MIT"] | null | null | null |
from .to_2darray import to_2darray
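# A hedged usage sketch: judging only by the name, to_2darray presumably
# coerces array-like input into a 2-D numpy array (not verified against the
# package source):
#
#   from nptweak import to_2darray
#   X = to_2darray([1, 2, 3])  # expected: a 2-D np.ndarray built from the input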
| 17.5 | 34 | 0.857143 | 6 | 35 | 4.666667 | 0.666667 | 0.642857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064516 | 0.114286 | 35 | 1 | 35 | 35 | 0.83871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
53683ad065e876599c6cda203cf6ca253e4f6885 | 7,499 | py | Python | traffic_predict/model.py | Wangjw6/project | daae9de42fe7bf7ff29c20246e1164b62b7cef4a | ["MIT"] | null | null | null | traffic_predict/model.py | Wangjw6/project | daae9de42fe7bf7ff29c20246e1164b62b7cef4a | ["MIT"] | null | null | null | traffic_predict/model.py | Wangjw6/project | daae9de42fe7bf7ff29c20246e1164b62b7cef4a | ["MIT"] | null | null | null |
# -*- coding:utf-8 -*-
import tensorflow as tf
class CNN:
def __init__(self, save_or_load_path=None, trainable=True, learning_rate = 0.00002,timestep=9,road=189,predstep=1):
self.trainable = trainable
self.learning_rate = learning_rate
self.road = road
self.input_size = timestep * road
self.output_size = predstep * road
self.bottom = tf.placeholder(tf.float32, shape=[None, self.input_size], name='input') # 25*2*6
self.target = tf.placeholder(tf.float32, shape=[None, self.output_size], name='target')
self.timestep = timestep
def weight_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self,shape):
# Wrap in tf.Variable so the bias is a trainable parameter; returning the
# raw truncated_normal tensor would re-sample it on every session run.
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def conv2d(self,x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def conv1d(self,x, W):
return tf.nn.conv1d(x, W, stride=2, padding='SAME')
def max_pool_2x2(self,x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def build_CNN(self):
# conv first
bottom = tf.reshape(self.bottom, [-1, self.road, self.timestep, 1])
W_conv1 = self.weight_variable([3, 3, 1, 64])
b_conv1 = self.bias_variable([64])
h_conv1 = tf.nn.elu(self.conv2d(bottom, W_conv1) + b_conv1)
h_pool1 = self.max_pool_2x2(h_conv1)
h_flat3 = tf.reshape(h_pool1, [-1, 95 * 5 * 64])
W_fc2 = self.weight_variable([95 * 5 * 64, 1200])
b_fc2 = self.bias_variable([1200])
h = tf.nn.elu(tf.matmul(h_flat3, W_fc2) + b_fc2)
# h_flat3 = tf.reshape(h_pool3, [-1, 400])
W_fc2 = self.weight_variable([1200, self.output_size])
b_fc2 = self.bias_variable([self.output_size])
self.predict = tf.nn.elu(tf.matmul(h, W_fc2) + b_fc2)
global_step = tf.Variable(0, trainable=False)
self.learning_rate = 0.0002 #tf.train.exponential_decay(0.001, global_step, 500, 0.9,staircase=True)
self.loss = tf.reduce_mean(tf.squared_difference(self.target, self.predict))
self.accuracy = 1. - tf.reduce_mean(abs(self.target-self.predict)/self.target)
self.trainop = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=global_step)
# self.trainop = tf.train.RMSPropOptimizer(self.learning_rate, 0.99, 0.0, 1e-6).minimize(self.loss)
return self.predict
class CNN15:
def __init__(self, save_or_load_path=None, trainable=True, learning_rate = 0.00002,timestep=9,road=189,predstep=3):
self.trainable = trainable
self.learning_rate = learning_rate
self.road = road
self.input_size = timestep * road
self.output_size = predstep * road
self.bottom = tf.placeholder(tf.float32, shape=[None, self.input_size], name='input') # 25*2*6
self.target = tf.placeholder(tf.float32, shape=[None, self.output_size], name='target')
self.timestep = timestep
def weight_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self,shape):
# Wrap in tf.Variable so the bias is a trainable parameter; returning the
# raw truncated_normal tensor would re-sample it on every session run.
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def conv2d(self,x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def conv1d(self,x, W):
return tf.nn.conv1d(x, W, stride=2, padding='SAME')
def max_pool_2x2(self,x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def build_CNN(self):
# conv first
bottom = tf.reshape(self.bottom, [-1, self.road, self.timestep, 1])
W_conv1 = self.weight_variable([3, 3, 1, 64])
b_conv1 = self.bias_variable([64])
h_conv1 = tf.nn.elu(self.conv2d(bottom, W_conv1) + b_conv1)
h_pool1 = self.max_pool_2x2(h_conv1)
h_flat3 = tf.reshape(h_pool1, [-1, 95 * 5 * 64])
W_fc2 = self.weight_variable([95 * 5 * 64, 1200])
b_fc2 = self.bias_variable([1200])
h = tf.nn.elu(tf.matmul(h_flat3, W_fc2) + b_fc2)
# h_flat3 = tf.reshape(h_pool3, [-1, 400])
W_fc2 = self.weight_variable([1200, self.output_size])
b_fc2 = self.bias_variable([self.output_size])
self.predict = tf.nn.elu(tf.matmul(h, W_fc2) + b_fc2)
global_step = tf.Variable(0, trainable=False)
self.learning_rate = 0.0002 #tf.train.exponential_decay(0.001, global_step, 500, 0.9,staircase=True)
self.loss = tf.reduce_mean(tf.squared_difference(self.target, self.predict))
self.accuracy = 1. - tf.reduce_mean(abs(self.target-self.predict)/self.target)
self.trainop = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=global_step)
# self.trainop = tf.train.RMSPropOptimizer(self.learning_rate, 0.99, 0.0, 1e-6).minimize(self.loss)
return self.predict
class CNN30:
def __init__(self, save_or_load_path=None, trainable=True, learning_rate=0.00002,timestep=9,road=189,predstep=6):
self.trainable = trainable
self.learning_rate = learning_rate
self.road = road
self.input_size = timestep * road
self.output_size = predstep * road
self.bottom = tf.placeholder(tf.float32, shape=[None, self.input_size], name='input') # 25*2*6
self.target = tf.placeholder(tf.float32, shape=[None, self.output_size], name='target')
self.timestep = timestep
def weight_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self,shape):
# Wrap in tf.Variable so the bias is a trainable parameter; returning the
# raw truncated_normal tensor would re-sample it on every session run.
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def conv2d(self,x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def conv1d(self,x, W):
return tf.nn.conv1d(x, W, stride=2, padding='SAME')
def max_pool_2x2(self,x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def build_CNN(self):
# conv first
bottom = tf.reshape(self.bottom, [-1, self.road, self.timestep, 1])
W_conv1 = self.weight_variable([3, 3, 1, 64])
b_conv1 = self.bias_variable([64])
h_conv1 = tf.nn.elu(self.conv2d(bottom, W_conv1) + b_conv1)
h_pool1 = self.max_pool_2x2(h_conv1)
h_flat3 = tf.reshape(h_pool1, [-1, 95 * 5 * 64])
W_fc2 = self.weight_variable([95 * 5 * 64, 1200])
b_fc2 = self.bias_variable([1200])
h = tf.nn.elu(tf.matmul(h_flat3, W_fc2) + b_fc2)
# h_flat3 = tf.reshape(h_pool3, [-1, 400])
W_fc2 = self.weight_variable([1200, self.output_size])
b_fc2 = self.bias_variable([self.output_size])
self.predict = tf.nn.elu(tf.matmul(h, W_fc2) + b_fc2)
global_step = tf.Variable(0, trainable=False)
self.learning_rate = 0.0002 # tf.train.exponential_decay(0.001, global_step, 500, 0.9,staircase=True)
self.loss = tf.reduce_mean(tf.squared_difference(self.target, self.predict))
self.accuracy = 1. - tf.reduce_mean(abs(self.target - self.predict) / self.target)
self.trainop = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=global_step)
# self.trainop = tf.train.RMSPropOptimizer(self.learning_rate, 0.99, 0.0, 1e-6).minimize(self.loss)
return self.predict
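# A minimal TF1-style training sketch for the CNN class above, on synthetic
# data; the batch size, step count, and print cadence are illustrative
# assumptions, not taken from the original project.
if __name__ == '__main__':
    import numpy as np
    model = CNN(learning_rate=0.0002, timestep=9, road=189, predstep=1)
    model.build_CNN()
    x = np.random.rand(32, model.input_size).astype(np.float32)
    y = np.random.rand(32, model.output_size).astype(np.float32)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(100):
            _, loss = sess.run([model.trainop, model.loss],
                               feed_dict={model.bottom: x, model.target: y})
            if step % 10 == 0:
                print('step %d, loss %.4f' % (step, loss))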
| 43.346821 | 119 | 0.642619 | 1,132 | 7,499 | 4.090106 | 0.094523 | 0.046652 | 0.041469 | 0.02851 | 0.988985 | 0.988985 | 0.988985 | 0.988985 | 0.988985 | 0.988985 | 0 | 0.064072 | 0.215362 | 7,499 | 173 | 120 | 43.346821 | 0.722808 | 0.094813 | 0 | 0.943548 | 0 | 0 | 0.010189 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.169355 | false | 0 | 0.008065 | 0.072581 | 0.346774 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
729d6a65e6746aea2916773666e9ce787cb8c7de | 10,772 | py | Python | dataconnector.py | iamthinkking/COMP4217_FinalProject | 98cadb013bab52677bffb951b6d173caf4bb22b3 | ["MIT"] | null | null | null | dataconnector.py | iamthinkking/COMP4217_FinalProject | 98cadb013bab52677bffb951b6d173caf4bb22b3 | ["MIT"] | null | null | null | dataconnector.py | iamthinkking/COMP4217_FinalProject | 98cadb013bab52677bffb951b6d173caf4bb22b3 | ["MIT"] | null | null | null |
#!/usr/bin/python3
import pymysql
class Connection:
SQL_HOST = 'localhost'
SQL_USR = ''
SQL_PWD = ''
SQL_DB = 'HOSPITAL'
# initialize database object
def __init__(self, usr, pwd):
self.USR = usr
self.PWD = pwd
# return an database connection
def __enter__(self):
# Open database connection (note: the host and database name are hardcoded
# here rather than read from the SQL_HOST / SQL_DB class constants above)
self.CON = pymysql.connect("localhost", self.USR, self.PWD, "HOSPITAL", autocommit=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# make sure the database connection gets closed
self.CON.close()
def get_doctors(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_get_doctors();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def get_nurses(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_get_nurses();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def GetMedicineAllergyByMostPatients(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL GetMedicineAllergyByMostPatients();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def GetInternsByMostPatient(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL GetInternsByMostPatient();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def GetInternPerformanceData(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL GetInternPerformanceData();")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def get_patients(self, q=""):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL get_patients('"+str(q)+"');")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
print(e)
return data
finally:
return data
def GetPatientByDiagnosisAndDate(self, start_date, end_date, diagnosis=""):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL GetPatientByDiagnosisAndDate('" + str(start_date) + "', '"
+ str(end_date) + "', '" + str(diagnosis) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def get_allergens_of_patient(self, patID):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL get_allergens_of_patient('"+str(patID)+"');")
# Fetch all the tuples in a list of lists.
data = cursor.fetchall()
except pymysql.err.OperationalError as e:
print(e)
return data
finally:
return data
def add_patient(self, fname, lname, dob, address, phone):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_add_patient('" + fname + "', '" + lname + "', '" + str(dob) + "', '" + address +
"', " + str(phone) + ");")
self.CON.commit()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def make_diagnosis(self, docID, patID, icdID, icdDesc, icdname, specifics):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL make_diagnosis(" + str(docID) + ", " + str(patID) + ", " + str(icdID) + ", '" +
icdDesc + "', '" + str(icdname) + "', '" + specifics + "');")
except pymysql.err.OperationalError as e:
return data
finally:
self.CON.commit()
return data
def check_vitals(self, nurseID, patID, temp, pulse_arg, bp, resp):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL check_vitals(" + str(nurseID) + ", " + str(patID) + ", " + str(temp) + ", '" +
str(pulse_arg) + "', '" + str(bp) + "', '" + str(resp) + "');")
except pymysql.err.OperationalError as e:
return data
finally:
self.CON.commit()
return data
def login(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_get_currentuser('" + self.USR + "');")
# gets only one tuple from the database's response
data = cursor.fetchone()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def get_role(self):
data = ()
try:
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute() method.
cursor.execute("CALL sp_get_currentuser('" + self.USR + "');")
# gets only one tuple from the database's response
data = cursor.fetchone()
except pymysql.err.OperationalError as e:
return data
finally:
return data
def GetNursesByPatientAndDate(self, start_date, end_date, pat_ID):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL GetNursesByPatientAndDate('" + str(start_date) + "', '"
+ str(end_date) + "', '" + str(pat_ID) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
# NOTE: this redefinition shadows the earlier try/except version of
# get_allergens_of_patient declared above in this class.
def get_allergens_of_patient(self, patID):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL get_allergens_of_patient('" + str(patID) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def get_medicine_allergy_by_most_patients(self):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL get_medicine_allergy_by_most_patients();")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def GetResultsByPatient(self,patID):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL GetResultsByPatient('" + str(patID) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def get_nurses_by_patient_and_date(self,start_date, end_date, patID):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL get_nurses_by_patient_and_date('" + str(start_date) + "', '" + str(end_date) + "', '"
+ str(patID) + "');")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
def get_interns_by_most_patients(self):
data = ()
# prepare a cursor object using cursor() method
with self.CON.cursor() as cursor:
# execute SQL query using execute method
cursor.execute("CALL get_interns_by_most_patients();")
# fetch all the tuples in a list of lists
data = cursor.fetchall()
return data
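# A hedged usage sketch for the Connection class above; the credentials are
# placeholders. __enter__/__exit__ make it a context manager, so the
# connection is closed automatically:
#
#   with Connection('db_user', 'db_password') as db:
#       for doctor in db.get_doctors():
#           print(doctor)
#
# Two caveats in the methods above: the `finally: return data` pattern
# silences every exception, not just pymysql.err.OperationalError, and the
# CALL statements are assembled by string concatenation. With untrusted
# input, pymysql parameter binding is safer, e.g.
#   cursor.execute("CALL get_patients(%s);", (q,))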
| 30.602273 | 121 | 0.525529 | 1,115 | 10,772 | 4.991928 | 0.109417 | 0.088753 | 0.04779 | 0.068272 | 0.808839 | 0.794466 | 0.781171 | 0.768236 | 0.768236 | 0.768236 | 0 | 0.00015 | 0.383123 | 10,772 | 351 | 122 | 30.689459 | 0.837472 | 0.228463 | 0 | 0.717391 | 0 | 0 | 0.086698 | 0.040628 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119565 | false | 0 | 0.005435 | 0 | 0.326087 | 0.01087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
72c7cb9e21a63cc41a2a8dafac7960b8bc5acb97 | 370 | py | Python | launchpad_py/__init__.py | inniyah/launchpad-py | b8dd4815b05d7e75ba5ca09ced64ddc38f515bad | ["CC-BY-4.0"] | 1 | 2020-05-07T04:08:13.000Z | 2020-05-07T04:08:13.000Z | launchpad_py/__init__.py | inniyah/launchpad-py | b8dd4815b05d7e75ba5ca09ced64ddc38f515bad | ["CC-BY-4.0"] | null | null | null | launchpad_py/__init__.py | inniyah/launchpad-py | b8dd4815b05d7e75ba5ca09ced64ddc38f515bad | ["CC-BY-4.0"] | null | null | null |
# more specific selections for Python 3 (ASkr, 2/2018)
from launchpad_py.launchpad import Launchpad
from launchpad_py.launchpad import LaunchpadMk2
from launchpad_py.launchpad import LaunchpadPro
from launchpad_py.launchpad import LaunchControlXL
from launchpad_py.launchpad import LaunchKeyMini
from launchpad_py.launchpad import Dicer
from launchpad_py import charset
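# A hedged usage sketch; the method names follow the upstream launchpad.py
# API that this package re-exports (verify against the installed version):
#
#   import launchpad_py as launchpad
#   lp = launchpad.Launchpad()
#   if lp.Open():                  # attach to the first classic Launchpad found
#       lp.LedCtrlXY(0, 0, 3, 0)   # light the top-left pad red
#       lp.Reset()                 # turn all LEDs off
#       lp.Close()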
| 41.111111 | 54 | 0.87027 | 50 | 370 | 6.3 | 0.38 | 0.288889 | 0.333333 | 0.457143 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021021 | 0.1 | 370 | 8 | 55 | 46.25 | 0.924925 | 0.140541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
72ce4318d1d0f496564578d4caec5a73368d7bf6 | 68,544 | py | Python | system/indy-node-tests/TestAuthMapSuite.py | Toktar/indy-test-automation | 4d583dda7cbf2a9f451b3a01312a90e55c7bacc8 | ["Apache-2.0"] | null | null | null | system/indy-node-tests/TestAuthMapSuite.py | Toktar/indy-test-automation | 4d583dda7cbf2a9f451b3a01312a90e55c7bacc8 | ["Apache-2.0"] | null | null | null | system/indy-node-tests/TestAuthMapSuite.py | Toktar/indy-test-automation | 4d583dda7cbf2a9f451b3a01312a90e55c7bacc8 | ["Apache-2.0"] | null | null | null |
import pytest
import asyncio
from system.utils import *
from random import randrange as rr
import hashlib
import time
from datetime import datetime, timedelta, timezone
from indy import payment
import logging
logger = logging.getLogger(__name__)
@pytest.mark.usefixtures('docker_setup_and_teardown')
class TestAuthMapSuite:
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_nym(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
new_did, new_vk = await did.create_and_store_my_did(wallet_handler, '{}')
# add adder to add nym
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# add editor to edit nym
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
req = await ledger.build_auth_rule_request(trustee_did, '1', 'ADD', 'role', '*', '',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
req = await ledger.build_auth_rule_request(trustee_did, '1', 'EDIT', 'verkey', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add nym with verkey by adder
res4 = await send_nym(pool_handler, wallet_handler, adder_did, new_did, adder_vk) # push adder vk
print(res4)
assert res4['op'] == 'REPLY'
# edit verkey by editor
res5 = await send_nym(pool_handler, wallet_handler, editor_did, new_did, editor_vk) # push editor vk
print(res5)
assert res5['op'] == 'REPLY'
# negative cases
if adder_role != editor_role:
# try to add another nym with editor did - should be rejected
res6 = await send_nym(pool_handler, wallet_handler, editor_did, random_did_and_json()[0])
print(res6)
assert res6['op'] == 'REJECT'
# try to edit initial nym one more time with adder did - should be rejected
res7 = await send_nym(pool_handler, wallet_handler, adder_did, new_did, adder_vk)
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_attrib(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add target nym
target_did, target_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, target_did, target_vk)
assert res['op'] == 'REPLY'
# add adder to add attrib
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# add editor to edit attrib
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '100', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '100', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add attrib for target did by non-owner adder
res4 = await send_attrib(pool_handler, wallet_handler, adder_did, target_did,
None, json.dumps({'key1': 'value1'}), None)
print(res4)
assert res4['op'] == 'REPLY'
# edit attrib for target did by non-owner editor
res5 = await send_attrib(pool_handler, wallet_handler, editor_did, target_did,
None, json.dumps({'key1': 'value2'}), None)
print(res5)
assert res5['op'] == 'REPLY'
# negative cases
if adder_role != editor_role:
# try to add another attrib with editor did - should be rejected
res6 = await send_attrib(pool_handler, wallet_handler, editor_did, target_did,
None, json.dumps({'key2': 'value1'}), None)
print(res6)
assert res6['op'] == 'REJECT'
# try to edit initial attrib one more time with adder did - should be rejected
res7 = await send_attrib(pool_handler, wallet_handler, adder_did, target_did,
None, json.dumps({'key1': 'value3'}), None)
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.asyncio
async def test_case_schema(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num): # we can add schema only
trustee_did, _ = get_default_trustee
# add adder to add schema
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '101', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# add schema
res4 = await send_schema(pool_handler, wallet_handler, adder_did, 'schema1', '1.0', json.dumps(['attr1']))
print(res4)
assert res4[1]['op'] == 'REPLY'
# edit schema - nobody can edit schemas - should be rejected
res5 = await send_schema(pool_handler, wallet_handler, adder_did, 'schema1', '1.0',
json.dumps(['attr1', 'attr2']))
print(res5)
assert res5[1]['op'] == 'REJECT'
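# Every case in this suite repeats the same AUTH_RULE recipe: build a ROLE
# constraint, sign and submit it as trustee, then probe the ledger with dids
# of each role. A hedged helper sketch (the function name and shape are
# illustrative, not part of the original suite):
#
#   async def set_role_rule(pool_handler, wallet_handler, trustee_did,
#                           txn_type, action, field, old_value, new_value,
#                           role_num):
#       constraint = json.dumps({'constraint_id': 'ROLE', 'role': role_num,
#                                'sig_count': 1, 'need_to_be_owner': False,
#                                'metadata': {}})
#       req = await ledger.build_auth_rule_request(
#           trustee_did, txn_type, action, field, old_value, new_value,
#           constraint)
#       return json.loads(await ledger.sign_and_submit_request(
#           pool_handler, wallet_handler, trustee_did, req))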
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
# use the same did with different roles to ADD and EDIT since adder did is a part of unique cred def id
async def test_case_cred_def(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add adder to add cred def
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
schema_id, _ = await send_schema(pool_handler, wallet_handler, trustee_did,
'schema1', '1.0', json.dumps(["age", "sex", "height", "name"]))
await asyncio.sleep(1)
res = await get_schema(pool_handler, wallet_handler, trustee_did, schema_id)
schema_id, schema_json = await ledger.parse_get_schema_response(json.dumps(res))
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '102', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '102', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add cred def
cred_def_id, cred_def_json = \
await anoncreds.issuer_create_and_store_credential_def(wallet_handler, adder_did, schema_json, 'TAG1',
None, json.dumps({'support_revocation': False}))
request = await ledger.build_cred_def_request(adder_did, cred_def_json)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res4)
assert res4['op'] == 'REPLY'
if adder_role != editor_role:
# try to edit cred def as adder - should be rejected
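            # (note) writing the same cred def id again is what the ledger treats
            # as an EDIT; the reqId is bumped so the request is not dropped as a
            # duplicate of the original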
_request = json.loads(request)
_request['operation']['data']['primary']['n'] = '123456789'
_request['reqId'] += _request['reqId']
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(_request)))
print(res5)
assert res5['op'] == 'REJECT'
# change adder role to edit cred def
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, None, None, editor_role)
print(res)
assert res['op'] == 'REPLY'
# edit cred def
request = json.loads(request)
request['operation']['data']['primary']['n'] = '123456'
request['reqId'] += request['reqId']
res6 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(request)))
print(res6)
assert res6['op'] == 'REPLY'
if adder_role != editor_role:
# try to add another cred def as editor - should be rejected
cred_def_id, cred_def_json = \
await anoncreds.issuer_create_and_store_credential_def(wallet_handler, adder_did, schema_json, 'TAG2',
None, json.dumps({'support_revocation': True}))
request = await ledger.build_cred_def_request(adder_did, cred_def_json)
res7 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
# use the same did with different roles to ADD and EDIT since adder did is a part of unique revoc reg def id
async def test_case_revoc_reg_def(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add adder to add revoc reg def
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
schema_id, _ = await send_schema(pool_handler, wallet_handler, trustee_did,
'schema1', '1.0', json.dumps(['age', 'sex', 'height', 'name']))
await asyncio.sleep(1)
res = await get_schema(pool_handler, wallet_handler, trustee_did, schema_id)
schema_id, schema_json = await ledger.parse_get_schema_response(json.dumps(res))
cred_def_id, _, res = await send_cred_def(pool_handler, wallet_handler, trustee_did, schema_json,
'cred_def_tag', None, json.dumps({'support_revocation': True}))
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '113', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '113', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add revoc reg def
tails_writer_config = json.dumps({'base_dir': 'tails', 'uri_pattern': ''})
tails_writer_handle = await blob_storage.open_writer('default', tails_writer_config)
revoc_reg_def_id, revoc_reg_def_json, revoc_reg_entry_json = \
await anoncreds.issuer_create_and_store_revoc_reg(wallet_handler, adder_did, None, 'TAG1',
cred_def_id, json.dumps({
'max_cred_num': 1,
'issuance_type': 'ISSUANCE_BY_DEFAULT'}),
tails_writer_handle)
request = await ledger.build_revoc_reg_def_request(adder_did, revoc_reg_def_json)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res4)
assert res4['op'] == 'REPLY'
if adder_role != editor_role:
# try to edit revoc reg def as adder - should be rejected
_request = json.loads(request)
_request['operation']['value']['tailsHash'] = random_string(30)
_request['reqId'] += _request['reqId']
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(_request)))
print(res5)
assert res5['op'] == 'REJECT'
# change adder role to edit revoc reg def
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, None, None, editor_role)
print(res)
assert res['op'] == 'REPLY'
# edit revoc reg def
request = json.loads(request)
request['operation']['value']['tailsHash'] = random_string(20)
request['reqId'] += request['reqId']
res6 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(request)))
print(res6)
assert res6['op'] == 'REPLY'
if adder_role != editor_role:
# try to add another revoc reg def as editor - should be rejected
revoc_reg_def_id, revoc_reg_def_json, revoc_reg_entry_json = \
await anoncreds.issuer_create_and_store_revoc_reg(wallet_handler, adder_did, None, 'TAG2',
cred_def_id, json.dumps({
'max_cred_num': 2,
'issuance_type': 'ISSUANCE_BY_DEFAULT'}),
tails_writer_handle)
request = await ledger.build_revoc_reg_def_request(adder_did, revoc_reg_def_json)
res7 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_revoc_reg_entry(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add adder to add revoc reg entry
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
schema_id, _ = await send_schema(pool_handler, wallet_handler, trustee_did,
'schema1', '1.0', json.dumps(['age', 'sex', 'height', 'name']))
await asyncio.sleep(1)
res = await get_schema(pool_handler, wallet_handler, trustee_did, schema_id)
schema_id, schema_json = await ledger.parse_get_schema_response(json.dumps(res))
cred_def_id, _, res = await send_cred_def(pool_handler, wallet_handler, trustee_did, schema_json,
'cred_def_tag', None, json.dumps({'support_revocation': True}))
# set rule for revoc reg def adding - network monitor case
req = await ledger.build_auth_rule_request(trustee_did, '113', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': '*',
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res21 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res21)
assert res21['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '114', 'ADD', '*', None, '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res22 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res22)
assert res22['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '114', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add revoc reg entry
tails_writer_config = json.dumps({'base_dir': 'tails', 'uri_pattern': ''})
tails_writer_handle = await blob_storage.open_writer('default', tails_writer_config)
revoc_reg_def_id, revoc_reg_def_json, revoc_reg_entry_json = \
await anoncreds.issuer_create_and_store_revoc_reg(wallet_handler, adder_did, None, 'TAG1',
cred_def_id, json.dumps({
'max_cred_num': 10,
'issuance_type': 'ISSUANCE_BY_DEFAULT'}),
tails_writer_handle)
req = await ledger.build_revoc_reg_def_request(adder_did, revoc_reg_def_json)
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
assert res['op'] == 'REPLY'
request = await ledger.build_revoc_reg_entry_request(adder_did, revoc_reg_def_id, 'CL_ACCUM',
revoc_reg_entry_json)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res4)
assert res4['op'] == 'REPLY'
if adder_role != editor_role:
# try to edit revoc reg entry as adder - should be rejected
_request = json.loads(request)
_request['operation']['value']['prevAccum'] = _request['operation']['value']['accum']
_request['operation']['value']['accum'] = random_string(20)
_request['operation']['value']['revoked'] = [7, 8, 9]
_request['reqId'] += _request['reqId']
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(_request)))
print(res5)
assert res5['op'] == 'REJECT'
        # change adder role to edit revoc reg entry
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, None, None, editor_role)
print(res)
assert res['op'] == 'REPLY'
# edit revoc reg entry
request = json.loads(request)
request['operation']['value']['prevAccum'] = request['operation']['value']['accum']
request['operation']['value']['accum'] = random_string(10)
request['operation']['value']['revoked'] = [1, 2, 3]
request['reqId'] += request['reqId']
res6 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did,
json.dumps(request)))
print(res6)
assert res6['op'] == 'REPLY'
if adder_role != editor_role:
# try to add another revoc reg entry as editor - should be rejected
revoc_reg_def_id, revoc_reg_def_json, revoc_reg_entry_json = \
await anoncreds.issuer_create_and_store_revoc_reg(wallet_handler, adder_did, None, 'TAG2',
cred_def_id, json.dumps({
'max_cred_num': 20,
'issuance_type': 'ISSUANCE_BY_DEFAULT'}),
tails_writer_handle)
req = await ledger.build_revoc_reg_def_request(adder_did, revoc_reg_def_json)
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
assert res['op'] == 'REPLY'
request = await ledger.build_revoc_reg_entry_request(adder_did, revoc_reg_def_id, 'CL_ACCUM',
revoc_reg_entry_json)
res7 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, request))
print(res7)
assert res7['op'] == 'REJECT'
@pytest.mark.skip('INDY-2024')
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_node(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
# add adder to add node
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# add editor to edit node
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for adding
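        # note: the services values appear to be passed as Python-list strings
        # (str(['VALIDATOR']) below) to match the ledger's stored form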
req = await ledger.build_auth_rule_request(trustee_did, '0', 'ADD', 'services', '*', str(['VALIDATOR']),
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '0', 'EDIT', 'services', str(['VALIDATOR']), str([]),
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# add node
alias = random_string(5)
client_ip = '{}.{}.{}.{}'.format(rr(1, 255), 0, 0, rr(1, 255))
client_port = rr(1, 32767)
node_ip = '{}.{}.{}.{}'.format(rr(1, 255), 0, 0, rr(1, 255))
node_port = rr(1, 32767)
req = await ledger.build_node_request(adder_did, adder_vk, # adder_vk is used as node target did here
json.dumps(
{
'alias': alias,
'client_ip': client_ip,
'client_port': client_port,
'node_ip': node_ip,
'node_port': node_port,
'services': ['VALIDATOR']
}))
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res4)
assert res4['op'] == 'REPLY'
# edit node
req = await ledger.build_node_request(editor_did, adder_vk, # adder_vk is used as node target did here
json.dumps(
{
'alias': alias,
'services': []
}))
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res5)
assert res5['op'] == 'REPLY'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_pool_upgrade(self, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, editor_role, editor_role_num):
trustee_did, _ = get_default_trustee
        # add adder to start pool upgrade
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
# add editor to cancel pool upgrade
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '109', 'ADD', 'action', '*', 'start',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '109', 'EDIT', 'action', 'start', 'cancel',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# start pool upgrade
init_time = 30
version = '1.9.999'
name = 'upgrade' + '_' + version + '_' + datetime.now(tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%S%z')
_sha256 = hashlib.sha256().hexdigest()
_timeout = 5
reinstall = False
force = False
package = 'indy-node'
dests = ['Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv', '8ECVSk179mjsjKRLWiQtssMLgp6EPhWXtaYyStWPSGAb',
'DKVxG2fXXTU8yT5N7hGEbXB3dfdAnYv1JczDUHpmDxya', '4PS3EDQ3dW1tci1Bp6543CfuuebjFrg36kLAUcskGfaA',
'4SWokCJWJc69Tn74VvLS6t2G2ucvXqM9FDMsWJjmsUxe', 'Cv1Ehj43DDM5ttNBmC6VPpEfwXWwfGktHwjDJsTV5Fz8',
'BM8dTooz5uykCbYSAAFwKNkYfT4koomBHsSWHTDtkjhW']
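        # stagger the upgrade: each node is scheduled 5 minutes after the
        # previous one, starting init_time minutes from now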
        docker_7_schedule = json.dumps({
            dest: datetime.strftime(
                datetime.now(tz=timezone.utc) + timedelta(minutes=init_time + i * 5),
                '%Y-%m-%dT%H:%M:%S%z')
            for i, dest in enumerate(dests)
        })
req = await ledger.build_pool_upgrade_request(adder_did, name, version, 'start', _sha256, _timeout,
docker_7_schedule, None, reinstall, force, package)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res4)
assert res4['op'] == 'REPLY'
# cancel pool upgrade
req = await ledger.build_pool_upgrade_request(editor_did, name, version, 'cancel', _sha256, _timeout,
docker_7_schedule, None, reinstall, force, package)
res5 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res5)
assert res5['op'] == 'REPLY'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.asyncio
async def test_case_pool_restart(self, pool_handler, wallet_handler, get_default_trustee,
                                     adder_role, adder_role_num):  # pool restart supports ADD only
trustee_did, _ = get_default_trustee
# add adder to restart pool
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
await asyncio.sleep(15)
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '118', 'ADD', 'action', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# restart pool
        req = await ledger.build_pool_restart_request(
            adder_did, 'start',
            datetime.strftime(datetime.now(tz=timezone.utc) + timedelta(minutes=60),
                              '%Y-%m-%dT%H:%M:%S%z'))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
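        # pool restart is an "action" request: every node answers individually,
        # so the reply is a dict of node alias -> JSON string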
        res3 = [json.loads(v) for v in res3.values()]
print(res3)
assert all([res['op'] == 'REPLY' for res in res3])
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.asyncio
async def test_case_validator_info(self, pool_handler, wallet_handler, get_default_trustee,
                                       adder_role, adder_role_num):  # validator info supports ADD only
trustee_did, _ = get_default_trustee
# add adder to get validator info
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
await asyncio.sleep(15)
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '119', 'ADD', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
req = await ledger.build_get_validator_info_request(adder_did)
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
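        # validator info is also a per-node action: unpack one JSON reply per node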
        res3 = [json.loads(v) for v in res3.values()]
print(res3)
assert all([res['op'] == 'REPLY' for res in res3])
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_pool_config(self, pool_handler, wallet_handler, get_default_trustee,
                                    editor_role, editor_role_num):  # pool config supports EDIT only
trustee_did, _ = get_default_trustee
# add editor to edit pool config
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '111', 'EDIT', 'action', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
req = await ledger.build_pool_config_request(editor_did, False, False)
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res3)
assert res3['op'] == 'REPLY'
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.asyncio
async def test_case_auth_rule(self, pool_handler, wallet_handler, get_default_trustee,
                                  editor_role, editor_role_num):  # auth rules support EDIT only
trustee_did, _ = get_default_trustee
# add editor to edit auth rule
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
# set rule for editing
req = await ledger.build_auth_rule_request(trustee_did, '120', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
await asyncio.sleep(15)
req = await ledger.build_auth_rule_request(editor_did, '111', 'EDIT', 'action', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': '*',
'sig_count': 5,
'need_to_be_owner': True,
'metadata': {}
}))
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res3)
assert res3['op'] == 'REPLY'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('sig_count', [0, 1, 3])
@pytest.mark.asyncio
async def test_case_mint(self, payment_init, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, sig_count):
libsovtoken_payment_method = 'sov'
trustee_did, _ = get_default_trustee
address = await payment.create_payment_address(wallet_handler, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet0')}))
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '10000', 'ADD', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': sig_count,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
if sig_count == 0:
# add identity owner adder to mint tokens
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, None)
assert res['op'] == 'REPLY'
req, _ = await payment.build_mint_req(wallet_handler, adder_did,
json.dumps([{"recipient": address, "amount": 100}]), None)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res1)
assert res1['op'] == 'REPLY'
elif sig_count == 1:
# add adder to mint tokens
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
req, _ = await payment.build_mint_req(wallet_handler, adder_did,
json.dumps([{"recipient": address, "amount": 100}]), None)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res1)
assert res1['op'] == 'REPLY'
else:
# add adders to mint tokens
adder_did1, adder_vk1 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did1, adder_vk1, None, adder_role)
assert res['op'] == 'REPLY'
adder_did2, adder_vk2 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did2, adder_vk2, None, adder_role)
assert res['op'] == 'REPLY'
adder_did3, adder_vk3 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did3, adder_vk3, None, adder_role)
assert res['op'] == 'REPLY'
req, _ = await payment.build_mint_req(wallet_handler, adder_did1,
json.dumps([{"recipient": address, "amount": 100}]), None)
req = await ledger.multi_sign_request(wallet_handler, adder_did1, req)
req = await ledger.multi_sign_request(wallet_handler, adder_did2, req)
req = await ledger.multi_sign_request(wallet_handler, adder_did3, req)
res1 = json.loads(await ledger.submit_request(pool_handler, req))
print(res1)
assert res1['op'] == 'REPLY'
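    # Sketch (assumption, not part of the original suite): the multi-signature
    # flow used in the sig_count == 3 branch above, factored into one
    # illustrative static method. Each signer wraps the request with its own
    # signature before the final unsigned submit.
    @staticmethod
    async def _multi_sign_and_submit(pool_handler, wallet_handler, dids, req):
        for _did in dids:
            req = await ledger.multi_sign_request(wallet_handler, _did, req)
        return json.loads(await ledger.submit_request(pool_handler, req))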
@pytest.mark.parametrize('editor_role, editor_role_num', [
('NETWORK_MONITOR', '201'),
('TRUST_ANCHOR', '101'),
('STEWARD', '2'),
('TRUSTEE', '0')
])
@pytest.mark.parametrize('sig_count', [0, 1, 3])
@pytest.mark.asyncio
async def test_case_set_fees(self, payment_init, pool_handler, wallet_handler, get_default_trustee,
editor_role, editor_role_num, sig_count):
libsovtoken_payment_method = 'sov'
fees = {'1': 1, '100': 1, '101': 1, '102': 1, '113': 1, '114': 1, '10001': 1}
trustee_did, _ = get_default_trustee
        # set rule for editing fees
req = await ledger.build_auth_rule_request(trustee_did, '20000', 'EDIT', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': editor_role_num,
'sig_count': sig_count,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
if sig_count == 0:
# add identity owner editor to set fees
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, None)
assert res['op'] == 'REPLY'
req = await payment.build_set_txn_fees_req(wallet_handler, editor_did, libsovtoken_payment_method,
json.dumps(fees))
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res1)
assert res1['op'] == 'REPLY'
elif sig_count == 1:
# add editor to set fees
editor_did, editor_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did, editor_vk, None, editor_role)
assert res['op'] == 'REPLY'
req = await payment.build_set_txn_fees_req(wallet_handler, editor_did, libsovtoken_payment_method,
json.dumps(fees))
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, editor_did, req))
print(res1)
assert res1['op'] == 'REPLY'
else:
# add editors to set fees
editor_did1, editor_vk1 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did1, editor_vk1, None, editor_role)
assert res['op'] == 'REPLY'
editor_did2, editor_vk2 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did2, editor_vk2, None, editor_role)
assert res['op'] == 'REPLY'
editor_did3, editor_vk3 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, editor_did3, editor_vk3, None, editor_role)
assert res['op'] == 'REPLY'
req = await payment.build_set_txn_fees_req(wallet_handler, editor_did1, libsovtoken_payment_method,
json.dumps(fees))
req = await ledger.multi_sign_request(wallet_handler, editor_did1, req)
req = await ledger.multi_sign_request(wallet_handler, editor_did2, req)
req = await ledger.multi_sign_request(wallet_handler, editor_did3, req)
res1 = json.loads(await ledger.submit_request(pool_handler, req))
print(res1)
assert res1['op'] == 'REPLY'
@pytest.mark.parametrize('adder_role, adder_role_num', [
('TRUSTEE', '0'),
('STEWARD', '2'),
('TRUST_ANCHOR', '101'),
('NETWORK_MONITOR', '201')
])
@pytest.mark.parametrize('sig_count', [0, 1, 3])
@pytest.mark.asyncio
async def test_case_payment(self, payment_init, pool_handler, wallet_handler, get_default_trustee,
adder_role, adder_role_num, sig_count):
libsovtoken_payment_method = 'sov'
trustee_did, _ = get_default_trustee
address1 = await payment.create_payment_address(wallet_handler, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet1')}))
address2 = await payment.create_payment_address(wallet_handler, libsovtoken_payment_method, json.dumps(
{"seed": str('0000000000000000000000000Wallet2')}))
        # relax the mint rule so the initial minting below succeeds for any submitter
req = await ledger.build_auth_rule_request(trustee_did, '10000', 'ADD', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': '*',
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}))
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res1)
assert res1['op'] == 'REPLY'
# set rule for adding
req = await ledger.build_auth_rule_request(trustee_did, '10001', 'ADD', '*', '*', '*',
json.dumps({
'constraint_id': 'ROLE',
'role': adder_role_num,
'sig_count': sig_count,
'need_to_be_owner': False,
'metadata': {}
}))
res2 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res2)
assert res2['op'] == 'REPLY'
# initial minting
req, _ = await payment.build_mint_req(wallet_handler, trustee_did,
json.dumps([{"recipient": address1, "amount": 100}]), None)
res11 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res11)
assert res11['op'] == 'REPLY'
req, _ = await payment.build_get_payment_sources_request(wallet_handler, trustee_did, address1)
res111 = await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req)
        source1 = json.loads(await payment.parse_get_payment_sources_response(
            libsovtoken_payment_method, res111))[0]['source']
if sig_count == 0:
# add identity owner adder to send xfer
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, None)
assert res['op'] == 'REPLY'
req, _ = await payment.build_payment_req(wallet_handler, adder_did,
json.dumps([source1]),
json.dumps([{"recipient": address2, "amount": 100}]), None)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res1)
assert res1['op'] == 'REPLY'
elif sig_count == 1:
# add adder to send xfer
adder_did, adder_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did, adder_vk, None, adder_role)
assert res['op'] == 'REPLY'
req, _ = await payment.build_payment_req(wallet_handler, adder_did,
json.dumps([source1]),
json.dumps([{"recipient": address2, "amount": 100}]), None)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, adder_did, req))
print(res1)
assert res1['op'] == 'REPLY'
else:
# add adders to send xfer
adder_did1, adder_vk1 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did1, adder_vk1, None, adder_role)
assert res['op'] == 'REPLY'
adder_did2, adder_vk2 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did2, adder_vk2, None, adder_role)
assert res['op'] == 'REPLY'
adder_did3, adder_vk3 = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, adder_did3, adder_vk3, None, adder_role)
assert res['op'] == 'REPLY'
req, _ = await payment.build_payment_req(wallet_handler, adder_did1,
json.dumps([source1]),
json.dumps([{"recipient": address2, "amount": 100}]), None)
req = await ledger.multi_sign_request(wallet_handler, adder_did1, req)
req = await ledger.multi_sign_request(wallet_handler, adder_did2, req)
req = await ledger.multi_sign_request(wallet_handler, adder_did3, req)
res1 = json.loads(await ledger.submit_request(pool_handler, req))
print(res1)
assert res1['op'] == 'REPLY'
    # TODO: might make sense to move this test to a separate module, since the
    # other tests here are organized per txn type
@pytest.mark.asyncio
async def test_case_forbidden(self, pool_handler, wallet_handler, get_default_trustee):
trustee_did, _ = get_default_trustee
trustee_role, trustee_role_num = 'TRUSTEE', '0'
logger.info("1 Adding new trustee to ledger")
new_trustee_did, new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, trustee_did, new_trustee_did, new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REPLY'
logger.info("2 Setting forbidden auth rule for adding trustees")
req = await ledger.build_auth_rule_request(trustee_did, '1', 'ADD', 'role', '*', trustee_role_num,
json.dumps({
'constraint_id': 'FORBIDDEN',
}))
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res['op'] == 'REPLY'
logger.info("3 Getting newly set forbidden constraint")
req = await ledger.build_get_auth_rule_request(trustee_did, '1', 'ADD', 'role', '*', trustee_role_num)
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res['op'] == 'REPLY'
assert res['result']['data'][0]['constraint']['constraint_id'] == 'FORBIDDEN'
logger.info("4 Trying to add one more trustee")
one_more_new_trustee_did, one_more_new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, trustee_did, one_more_new_trustee_did, one_more_new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
    # TODO: might make sense to move this test to a separate module, since the
    # other tests here are organized per txn type
@pytest.mark.asyncio
async def test_case_auth_rules(self, pool_handler, wallet_handler, get_default_trustee):
trustee_did, _ = get_default_trustee
trustee_role, trustee_role_num = 'TRUSTEE', '0'
steward_role, steward_role_num = 'STEWARD', '2'
logger.info("1 Creating new steward")
steward_did, steward_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, steward_did, steward_vk, None, steward_role)
assert res['op'] == 'REPLY'
logger.info("2 Creating some new trustee")
_new_trustee_did, _new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(pool_handler, wallet_handler, trustee_did, _new_trustee_did, _new_trustee_vk, None, trustee_role)
assert res['op'] == 'REPLY'
logger.info("3 Trying to add new trustee using steward as submitter")
new_trustee_did, new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, steward_did, new_trustee_did, new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
logger.info("4 Trying to add new steward using steward as submitter")
new_steward_did, new_steward_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, steward_did, new_steward_did, new_steward_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
logger.info("5 Send auth rules txn to allow stewards to add new trustees and stewrds")
one_steward_constraint = {
'constraint_id': 'ROLE',
'role': steward_role_num,
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {}
}
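        # AUTH_RULES (plural) sets several auth rules atomically in a single txn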
req = await ledger.build_auth_rules_request(trustee_did, json.dumps([
{
'auth_type': '1',
'auth_action': 'ADD',
'field': 'role',
'old_value': '*',
'new_value': trustee_role_num,
'constraint': one_steward_constraint
}, {
'auth_type': '1',
'auth_action': 'ADD',
'field': 'role',
'old_value': '*',
'new_value': steward_role_num,
'constraint': one_steward_constraint
},
]))
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res['op'] == 'REPLY'
logger.info("6 Getting recently set auth rules")
for role_num in (trustee_role_num, steward_role_num):
req = await ledger.build_get_auth_rule_request(trustee_did, '1', 'ADD', 'role', '*', role_num)
res = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res['op'] == 'REPLY'
assert res['result']['data'][0]['constraint'] == one_steward_constraint
logger.info("7 Trying to add new trustee using trustee as submitter")
res = await send_nym(
pool_handler, wallet_handler, trustee_did, new_trustee_did, new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REJECT'
logger.info("8 Trying to add new steward using trustee as submitter")
        res = await send_nym(
            pool_handler, wallet_handler, trustee_did, new_steward_did, new_steward_vk, None, steward_role
        )
assert res['op'] == 'REJECT'
logger.info("9 Adding new trustee using steward as submitter")
new_trustee_did, new_trustee_vk = await did.create_and_store_my_did(wallet_handler, '{}')
res = await send_nym(
pool_handler, wallet_handler, steward_did, new_trustee_did, new_trustee_vk, None, trustee_role
)
assert res['op'] == 'REPLY'
logger.info("10 Adding new steward using steward as submitter")
new_steward_did, new_steward_vk = await did.create_and_store_my_did(wallet_handler, '{}')
        res = await send_nym(
            pool_handler, wallet_handler, steward_did, new_steward_did, new_steward_vk, None, steward_role
        )
assert res['op'] == 'REPLY'
# --- dataset row boundary (per-file quality-signal columns removed) ---
# next file: test/integration/test_reindex.py
# repo: jgough/opensearch-curator @ e8d7eb4d969eac551db9f99bd021d0c05e28dc35
# license: Apache-2.0 | lang: Python | sha: 72cf2ee1f1f114780fd41988e0ddaa2bfd651b27 | size: 18351 bytes
import opensearchpy
import curator
import os
import json
import string
import random
import tempfile
import click
from click import testing as clicktest
import time
from . import CuratorTestCase
from unittest.case import SkipTest
from . import testvars as testvars
import logging
logger = logging.getLogger(__name__)
host, port = os.environ.get('TEST_ES_SERVER', 'localhost:9200').split(':')
rhost, rport = os.environ.get('REMOTE_ES_SERVER', 'localhost:9201').split(':')
port = int(port) if port else 9200
rport = int(rport) if rport else 9201
class TestActionFileReindex(CuratorTestCase):
def test_reindex_manual(self):
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = 3
self.create_index(source)
self.add_docs(source)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, source, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, self.client.count(index=dest)['count'])
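    # Sketch (assumption, not part of the original suite): every test in this
    # class repeats the same write-config / CliRunner invocation; an
    # illustrative helper like this would shrink each test to setup and
    # assertions.
    def _run_curator(self, action_body):
        self.write_config(
            self.args['configfile'], testvars.client_config.format(host, port))
        self.write_config(self.args['actionfile'], action_body)
        return clicktest.CliRunner().invoke(
            curator.cli,
            ['--config', self.args['configfile'], self.args['actionfile']],
        )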
def test_reindex_selected(self):
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = 3
self.create_index(source)
self.add_docs(source)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, 'REINDEX_SELECTION', dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_empty_list(self):
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = '.tasks'
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, source, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, curator.get_indices(self.client)[0])
def test_reindex_selected_many_to_one(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
dest = 'my_dest'
expected = 6
self.create_index(source1)
self.add_docs(source1)
self.create_index(source2)
for i in ["4", "5", "6"]:
ver = curator.get_version(self.client)
            if ver >= (7, 0, 0):
                # doc_type is deprecated on 7.x+ clients, so omit it here
                self.client.create(
                    index=source2, id=i, body={"doc" + i: 'TEST DOCUMENT'})
            else:
                self.client.create(
                    index=source2, doc_type='doc', id=i, body={"doc" + i: 'TEST DOCUMENT'})
# Decorators make this pylint exception necessary
# pylint: disable=E1123
self.client.indices.flush(index=source2, force=True)
self.client.indices.refresh(index=source2)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(
self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, 'REINDEX_SELECTION', dest)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.client.indices.refresh(index=dest)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_selected_empty_list_fail(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
dest = 'my_dest'
expected = 6
self.create_index(source1)
self.add_docs(source1)
self.create_index(source2)
for i in ["4", "5", "6"]:
self.client.create(
index=source2, doc_type='log', id=i,
body={"doc" + i :'TEST DOCUMENT'},
)
# Decorators make this pylint exception necessary
# pylint: disable=E1123
self.client.indices.flush(index=source2, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex_empty_list.format('false', wait_interval, max_wait, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(_.exit_code, 1)
def test_reindex_selected_empty_list_pass(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
dest = 'my_dest'
expected = 6
self.create_index(source1)
self.add_docs(source1)
self.create_index(source2)
for i in ["4", "5", "6"]:
self.client.create(
index=source2, doc_type='log', id=i,
body={"doc" + i :'TEST DOCUMENT'},
)
# Decorators make this pylint exception necessary
# pylint: disable=E1123
self.client.indices.flush(index=source2, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex_empty_list.format('true', wait_interval, max_wait, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(_.exit_code, 0)
def test_reindex_from_remote(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
prefix = 'my_'
dest = 'my_dest'
expected = 6
# Build remote client
try:
rclient = curator.get_client(
host=rhost, port=rport, skip_version_test=True)
rclient.info()
        except Exception:
raise SkipTest(
'Unable to connect to host at {0}:{1}'.format(rhost, rport))
# Build indices remotely.
counter = 0
for rindex in [source1, source2]:
rclient.indices.create(index=rindex)
for i in range(0, 3):
rclient.create(
index=rindex, doc_type='log', id=str(counter+1),
body={"doc" + str(counter+i) :'TEST DOCUMENT'},
)
counter += 1
# Decorators make this pylint exception necessary
# pylint: disable=E1123
rclient.indices.flush(index=rindex, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.remote_reindex.format(
wait_interval,
max_wait,
'http://{0}:{1}'.format(rhost, rport),
'REINDEX_SELECTION',
dest,
prefix
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
# Do our own cleanup here.
rclient.indices.delete(index='{0},{1}'.format(source1, source2))
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_migrate_from_remote(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
prefix = 'my_'
dest = 'MIGRATION'
expected = 3
# Build remote client
try:
rclient = curator.get_client(
host=rhost, port=rport, skip_version_test=True)
rclient.info()
        except Exception:
raise SkipTest(
'Unable to connect to host at {0}:{1}'.format(rhost, rport))
# Build indices remotely.
counter = 0
for rindex in [source1, source2]:
rclient.indices.create(index=rindex)
for i in range(0, 3):
rclient.create(
index=rindex, doc_type='log', id=str(counter+1),
body={"doc" + str(counter+i) :'TEST DOCUMENT'},
)
counter += 1
# Decorators make this pylint exception necessary
# pylint: disable=E1123
rclient.indices.flush(index=rindex, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.remote_reindex.format(
wait_interval,
max_wait,
'http://{0}:{1}'.format(rhost, rport),
'REINDEX_SELECTION',
dest,
prefix
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
# Do our own cleanup here.
rclient.indices.delete(index='{0},{1}'.format(source1, source2))
# And now the neat trick of verifying that the reindex worked to both
# indices, and they preserved their names
self.assertEqual(expected, self.client.count(index=source1)['count'])
self.assertEqual(expected, self.client.count(index=source2)['count'])
def test_reindex_migrate_from_remote_with_pre_suf_fixes(self):
wait_interval = 1
max_wait = 3
source1 = 'my_source1'
source2 = 'my_source2'
prefix = 'my_'
dest = 'MIGRATION'
expected = 3
mpfx = 'pre-'
msfx = '-fix'
# Build remote client
try:
rclient = curator.get_client(
host=rhost, port=rport, skip_version_test=True)
rclient.info()
        except Exception:
raise SkipTest(
'Unable to connect to host at {0}:{1}'.format(rhost, rport))
# Build indices remotely.
counter = 0
for rindex in [source1, source2]:
rclient.indices.create(index=rindex)
for i in range(0, 3):
rclient.create(
index=rindex, doc_type='log', id=str(counter+1),
body={"doc" + str(counter+i) :'TEST DOCUMENT'},
)
counter += 1
# Decorators make this pylint exception necessary
# pylint: disable=E1123
rclient.indices.flush(index=rindex, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.migration_reindex.format(
wait_interval,
max_wait,
mpfx,
msfx,
'http://{0}:{1}'.format(rhost, rport),
'REINDEX_SELECTION',
dest,
prefix
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
# Do our own cleanup here.
rclient.indices.delete(index='{0},{1}'.format(source1, source2))
# And now the neat trick of verifying that the reindex worked to both
# indices, and they preserved their names
        self.assertEqual(expected, self.client.count(index='{0}{1}{2}'.format(mpfx, source1, msfx))['count'])
        self.assertEqual(expected, self.client.count(index='{0}{1}{2}'.format(mpfx, source2, msfx))['count'])
def test_reindex_from_remote_no_connection(self):
wait_interval = 1
max_wait = 3
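        # 70000 is outside the valid TCP port range (max 65535), so the remote
        # connection is guaranteed to fail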
bad_port = 70000
dest = 'my_dest'
expected = 1
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.remote_reindex.format(
wait_interval,
max_wait,
'http://{0}:{1}'.format(rhost, bad_port),
'REINDEX_SELECTION',
dest,
'my_'
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, _.exit_code)
def test_reindex_from_remote_no_indices(self):
wait_interval = 1
max_wait = 3
source1 = 'wrong1'
source2 = 'wrong2'
prefix = 'my_'
dest = 'my_dest'
expected = 1
# Build remote client
try:
rclient = curator.get_client(
host=rhost, port=rport, skip_version_test=True)
rclient.info()
        except Exception:
raise SkipTest(
'Unable to connect to host at {0}:{1}'.format(rhost, rport))
# Build indices remotely.
counter = 0
for rindex in [source1, source2]:
rclient.indices.create(index=rindex)
for i in range(0, 3):
rclient.create(
index=rindex, doc_type='log', id=str(counter+1),
body={"doc" + str(counter+i) :'TEST DOCUMENT'},
)
counter += 1
# Decorators make this pylint exception necessary
# pylint: disable=E1123
rclient.indices.flush(index=rindex, force=True)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.remote_reindex.format(
wait_interval,
max_wait,
'http://{0}:{1}'.format(rhost, rport),
'REINDEX_SELECTION',
dest,
prefix
)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
# Do our own cleanup here.
rclient.indices.delete(index='{0},{1}'.format(source1, source2))
self.assertEqual(expected, _.exit_code)
def test_reindex_into_alias(self):
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = 3
alias_body = {'aliases' : {dest : {}}}
self.client.indices.create(index='dummy', body=alias_body)
self.add_docs(source)
self.write_config(self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(
self.args['actionfile'], testvars.reindex.format(wait_interval, max_wait, source, dest)
)
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_manual_date_math(self):
wait_interval = 1
max_wait = 3
source = '<source-{now/d}>'
dest = '<target-{now/d}>'
expected = 3
self.create_index(source)
self.add_docs(source)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, source, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, self.client.count(index=dest)['count'])
def test_reindex_bad_mapping(self):
# This test addresses GitHub issue #1260
wait_interval = 1
max_wait = 3
source = 'my_source'
dest = 'my_dest'
expected = 1
ver = curator.get_version(self.client)
if ver < (7, 0, 0):
request_body = {
"settings": { "number_of_shards": 1, "number_of_replicas": 0},
"mappings": { "doc": { "properties": { "doc1": { "type": "keyword" }}}}
}
else:
request_body = {
"settings": { "number_of_shards": 1, "number_of_replicas": 0},
"mappings": { "properties": { "doc1": { "type": "keyword" }}}
}
self.client.indices.create(index=source, body=request_body)
self.add_docs(source)
# Create the dest index with a different mapping.
if ver < (7, 0, 0):
request_body['mappings']['doc']['properties']['doc1']['type'] = 'integer'
else:
request_body['mappings']['properties']['doc1']['type'] = 'integer'
self.client.indices.create(index=dest, body=request_body)
self.write_config(
self.args['configfile'], testvars.client_config.format(host, port))
self.write_config(self.args['actionfile'],
testvars.reindex.format(wait_interval, max_wait, source, dest))
test = clicktest.CliRunner()
_ = test.invoke(
curator.cli,
['--config', self.args['configfile'], self.args['actionfile']],
)
self.assertEqual(expected, _.exit_code)
# --- dataset row boundary (per-file quality-signal columns removed) ---
# next file: game/content/ghplots/lancemates.py
# repo: jwvhewitt/gearhead-caramel @ dfe1bc5dbf2960b82a97577f4bf687b60040d8bf
# license: Apache-2.0 | lang: Python | sha: 72dda3613656327f62f267160ac432bf2f3e78fb | size: 34924 bytes
import pbge
from game.content.plotutility import LMSkillsSelfIntro
from game.content import backstory
from pbge.plots import Plot
from pbge.dialogue import Offer, ContextTag
from game.ghdialogue import context
import gears
import game.content.gharchitecture
import game.content.ghterrain
import random
from game import memobrowser
Memo = memobrowser.Memo
# *******************
# *** UTILITIES ***
# *******************
def get_hire_cost(camp, npc):
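    # cost grows with the square of renown and shrinks as the NPC's reaction
    # score toward the PC rises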
return (npc.renown * npc.renown * (200 - npc.get_reaction_score(camp.pc, camp)))//10
# **************************
# *** RANDOM_LANCEMATE ***
# **************************
class UtterlyRandomLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(10, 50),random.randint(10, 50)),
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
specialties = [sk for sk in gears.stats.NONCOMBAT_SKILLS if sk in npc.statline]
if random.randint(-12,3) > len(specialties):
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class UtterlyGenericLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
JOBS = ("Mecha Pilot","Arena Pilot","Recon Pilot","Mercenary","Bounty Hunter")
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(10, 50),random.randint(10, 50)),
job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True)
if random.randint(1,20) == 1:
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class GiftedNewbieLancemate(Plot):
# Amazing stats, amazingly crap skills.
LABEL = "RANDOM_LANCEMATE"
JOBS = ("Mecha Pilot","Arena Pilot","Citizen","Explorer","Factory Worker")
UNIQUE = True
def custom_init(self, nart):
npc = gears.selector.random_character(statline=gears.base.Being.random_stats(random.randint(100, 110)),
rank=random.randint(5, 15),
job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True, birth_year=nart.camp.year - random.randint(18,23))
if random.randint(1,10) == 1:
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class OlderMentorLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
UNIQUE = True
def custom_init(self, nart):
npc = gears.selector.random_character(rank=random.randint(41, 85),
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True, birth_year=nart.camp.year - random.randint(32,50))
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1, 4)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class DeadzonerInGreenZoneLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
JOBS = ("Mercenary","Bandit","Scavenger","Aristo","Tekno","Sheriff")
UNIQUE = True
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return gears.personality.GreenZone in pstate.elements["METROSCENE"].attributes
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(20, 55),random.randint(20, 55)),
job=gears.jobs.ALL_JOBS[random.choice(self.JOBS)],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=(gears.personality.DeadZone,),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class GladiatorLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
UNIQUE = True
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return gears.personality.DeadZone in pstate.elements["METROSCENE"].attributes
def custom_init(self, nart):
npc = gears.selector.random_character(rank=min(random.randint(25, 65),random.randint(25, 65)),
can_cyberize=True,
job=gears.jobs.ALL_JOBS["Gladiator"],
mecha_colors=gears.color.random_mecha_colors(),
local_tags=(gears.personality.DeadZone,),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
    def _is_best_scene(self, nart, candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class MutantLancemate(Plot):
LABEL = "RANDOM_LANCEMATE"
UNIQUE = True
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return {gears.personality.GreenZone,gears.personality.DeadZone}.intersection(pstate.elements["METROSCENE"].attributes)
def custom_init(self, nart):
npc = gears.selector.random_character(rank=random.randint(20, 45),
mecha_colors=gears.color.random_mecha_colors(),
local_tags=tuple(self.elements["METROSCENE"].attributes),
combatant=True)
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
mutation = random.choice(gears.personality.MUTATIONS)
mutation.apply(npc)
npc.personality.add(mutation)
specialties = [sk for sk in gears.stats.NONCOMBAT_SKILLS if sk in npc.statline]
if random.randint(-12,3) > len(specialties):
npc.statline[random.choice(gears.stats.NONCOMBAT_SKILLS)] += random.randint(1,4)
self.register_element("NPC", npc, dident="LOCALE")
self.add_sub_plot(nart, "RLM_Relationship")
return True
def _is_best_scene(self,nart,candidate):
return isinstance(candidate, pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
class FormerLancemateReturns(Plot):
LABEL = "RANDOM_LANCEMATE"
active = True
scope = "METRO"
def custom_init(self, nart):
npc: gears.base.Character = nart.camp.egg.seek_dramatis_person(nart.camp, self._is_good_npc, self)
if npc:
scene = self.seek_element(nart, "LOCALE", self._is_best_scene, scope=self.elements["METROSCENE"])
self.register_element("NPC", npc, dident="LOCALE")
#print(npc,scene)
self.bs = backstory.Backstory(("LONGTIMENOSEE",),keywords=[t.name.upper() for t in npc.get_tags()])
return npc
def _is_good_npc(self,nart,candidate):
return isinstance(candidate, gears.base.Character) and candidate.relationship and gears.relationships.RT_LANCEMATE in candidate.relationship.tags
def _is_best_scene(self,nart,candidate):
return isinstance(candidate,gears.GearHeadScene) and gears.tags.SCENE_PUBLIC in candidate.attributes
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if npc is self.elements["NPC"]:
for k in self.bs.results.keys():
mygram[k] = [self.bs.get_one(k),]
else:
mygram["[News]"] = ["{NPC} has been hanging out at {LOCALE}".format(**self.elements), ]
return mygram
def NPC_offers(self, camp):
mylist = list()
mylist.append(Offer("[INFO_PERSONAL]",
context=ContextTag([context.PERSONAL]),
no_repeats=True, effect=self.end_plot))
return mylist
def t_START(self, camp):
if self.elements["NPC"] in camp.party:
self.end_plot(camp)
# **************************
# *** RLM_Relationship ***
# **************************
# Elements:
# NPC: The NPC who needs a personality
# METROSCENE: The city or whatever that the NPC calls home
#
# These subplots contain a personality for a random (potential) lancemate.
# Also include a means for the lancemate to gain the "RT_LANCEMATE" tag.
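# The skeleton below is illustrative only (its LABEL is hypothetical, so it is
# never selected in play); it condenses the shared pattern of the subplots in
# this section: custom_init attaches a Relationship, NPC_offers exposes a JOIN
# offer, and the join effect grants the RT_LANCEMATE tag.
class RLM_SkeletonExample(Plot):
    LABEL = "RLM_Relationship_EXAMPLE"
    active = True
    scope = True
    def custom_init(self, nart):
        # Real subplots pick an attitude or expectation for the relationship.
        self.elements["NPC"].relationship = gears.relationships.Relationship()
        return True
    def NPC_offers(self, camp):
        mylist = list()
        npc = self.elements["NPC"]
        if gears.relationships.RT_LANCEMATE not in npc.relationship.tags and camp.can_add_lancemate():
            mylist.append(Offer("[LETSGO]", context=ContextTag((context.JOIN,)),
                                effect=self._join_lance))
        return mylist
    def _join_lance(self, camp):
        # Granting RT_LANCEMATE is what marks the NPC as a recruitable lancemate.
        npc = self.elements["NPC"]
        npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
        game.content.plotutility.AutoJoiner(npc)(camp)
        self.end_plot(camp)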
class RLM_Beginner(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].renown < 25
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(attitude=gears.relationships.A_JUNIOR)
# This character gets fewer mecha points.
npc.relationship.data["mecha_level_bonus"] = -10
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer("I can't believe you asked me... [LETSGO]",
context=ContextTag((context.JOIN,)),
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] Some day I want to become a cavalier like you.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} has dreams of someday becoming a cavalier".format(self.elements["NPC"]), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="As far as I know {} usually hangs out at {}.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo( "{} dreams of becoming a cavalier.".format(mynpc)
, mynpc.get_scene()
)
class RLM_Friendly(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(attitude=gears.relationships.A_FRIENDLY)
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate() and npc.get_reaction_score(camp.pc, camp) > 0:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.JOIN,)),
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] [WAITINGFORMISSION]", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} is looking for a lance to join".format(self.elements["NPC"]), ]
return mygram
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can usually find {} at {}, if you're planning to invite {} to join your lance.".format(mynpc,mynpc.get_scene(),mynpc.gender.object_pronoun),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo( "{} is looking for a lance to join.".format(mynpc)
, mynpc.get_scene()
)
class RLM_Medic(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
VIRTUES = (gears.personality.Peace,gears.personality.Fellowship)
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].job and gears.tags.Medic in pstate.elements["NPC"].job.tags
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_GREATERGOOD)
new_virtue = random.choice(self.VIRTUES)
if new_virtue not in npc.personality:
npc.personality.add(new_virtue)
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.JOIN,)),
effect=self._join_lance
))
else:
mylist.append(Offer("You've got a full crew right now, but if you ever find yourself in need of a qualified medic come back and find me.",
context=ContextTag((context.JOIN,)),
effect=self._defer_join
))
mylist.append(Offer(
"[HELLO] Lately I've been spending too much time here, when I'd rather be out in the danger zone saving lives.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} wants to leave {} so {} can make a positive difference in the world".format(self.elements["NPC"],self.elements["NPC"].get_scene(),self.elements["NPC"].gender.subject_pronoun), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _defer_join(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
self.end_plot(camp)
class RLM_Mercenary(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].job and {gears.tags.Adventurer,gears.tags.Military}.intersection(pstate.elements["NPC"].job.tags)
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_MERCENARY)
# This character gets extra mecha points, showing their good investment sense.
npc.relationship.data["mecha_level_bonus"] = 10
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer("I'll join your lance for a mere ${}. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] I am a mercenary pilot, looking for my next contract.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} is hoping to make some quick cash".format(self.elements["NPC"]), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
camp.credits -= self.hire_cost
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="As far as I know {} can usually be found at {}.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo("{} is a mercenary pilot looking for a job.".format(mynpc)
, mynpc.get_scene()
)
class RLM_Professional(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
@classmethod
    def matches(cls, pstate):
"""Returns True if this plot matches the current plot state."""
return pstate.elements["NPC"].renown > 20
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_PROFESSIONAL)
# This character gets 10 extra stat points, showing their elite nature.
npc.roll_stats(10, clear_first=False)
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
mylist.append(Offer(
"[NOEXPOSURE] I think ${} is a fair signing price. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)), data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._join_lance
))
mylist.append(Offer(
"[HELLO] I see you are also a cavalier.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
# This is an NPC in Wujung. Give them some news.
mygram["[News]"] = ["{} is an experienced pilot looking for work".format(self.elements["NPC"]), ]
return mygram
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
camp.credits -= self.hire_cost
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can usually find {} at {}. Bring cash if you're planning to hire {}.".format(mynpc,mynpc.get_scene(),mynpc.gender.object_pronoun),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo( "{} is an experienced pilot looking for work.".format(mynpc)
, mynpc.get_scene()
)
class RLM_RatherGeneric(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship()
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
if npc.get_reaction_score(camp.pc, camp) > 60:
mylist.append(Offer("[IWOULDLOVETO] [THANKS_FOR_CHOOSING_ME]",
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
effect=self._join_lance
))
else:
mylist.append(Offer("My regular signing rate is ${}. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] [LETSGO]",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._pay_to_join
))
mylist.append(Offer(
"[HELLO] [WAITINGFORMISSION]", context=ContextTag((context.HELLO,))
))
else:
mylist.append(Offer(
"[HELLO] Must be nice going off, having adventures with your lancemates. I'd like to do that again someday.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
mygram["[News]"] = ["{} is looking for a new lance to join".format(self.elements["NPC"]), ]
return mygram
def _pay_to_join(self,camp):
camp.credits -= self.hire_cost
self._join_lance(camp)
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can find {} at {}.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo("{} is looking for a new lance.".format(mynpc)
, mynpc.get_scene()
)
class RLM_DamagedGoodsSale(Plot):
LABEL = "RLM_Relationship"
active = True
scope = True
UNIQUE = True
def custom_init(self, nart):
npc = self.elements["NPC"]
npc.relationship = gears.relationships.Relationship(expectation=gears.relationships.E_IMPROVER)
# This NPC gets a stat bonus but a crappy mech to show their history.
npc.relationship.data["mecha_level_bonus"] = -15
npc.roll_stats(5, clear_first=False)
self._got_rumor = False
return True
def NPC_offers(self, camp):
mylist = list()
npc = self.elements["NPC"]
self.hire_cost = get_hire_cost(camp,npc)//2
if gears.relationships.RT_LANCEMATE not in npc.relationship.tags:
if camp.can_add_lancemate():
if npc.get_reaction_score(camp.pc, camp) > 20:
mylist.append(Offer("[IWOULDLOVETO] I'll do my best to not let you down.",
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
effect=self._join_lance
))
else:
mylist.append(Offer("I'll sign up with you for just ${}. [DOYOUACCEPTMYOFFER]".format(self.hire_cost),
context=ContextTag((context.PROPOSAL, context.JOIN)),
data={"subject": "joining my lance"},
subject=self, subject_start=True,
))
mylist.append(Offer("[DENY_JOIN] [GOODBYE]",
context=ContextTag((context.DENY, context.JOIN)), subject=self
))
if camp.credits >= self.hire_cost:
mylist.append(Offer("[THANKS_FOR_CHOOSING_ME] I'll do my best to not let you down.",
context=ContextTag((context.ACCEPT, context.JOIN)), subject=self,
effect=self._pay_to_join
))
mylist.append(Offer(
"[HELLO] The life of a cavalier is full of ups and downs... right now I'm in one of those downs.", context=ContextTag((context.HELLO,))
))
else:
mylist.append(Offer(
"[HELLO] Be careful out there... all it takes is one little mistake to cost you everything.", context=ContextTag((context.HELLO,))
))
mylist.append(LMSkillsSelfIntro(npc))
return mylist
def _get_dialogue_grammar(self, npc, camp):
mygram = dict()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"]:
mygram["[News]"] = ["{NPC} is a down on {NPC.gender.possessive_determiner} luck cavalier looking for another chance".format(**self.elements), ]
return mygram
def _pay_to_join(self,camp):
camp.credits -= self.hire_cost
self._join_lance(camp)
def _join_lance(self, camp):
npc = self.elements["NPC"]
npc.relationship.tags.add(gears.relationships.RT_LANCEMATE)
effect = game.content.plotutility.AutoJoiner(npc)
effect(camp)
self.end_plot(camp)
def _get_generic_offers(self, npc, camp):
"""Get any offers that could apply to non-element NPCs."""
goffs = list()
if camp.scene.get_root_scene() is self.elements["METROSCENE"] and npc is not self.elements["NPC"] and not self._got_rumor:
mynpc = self.elements["NPC"]
goffs.append(Offer(
msg="You can find {} at {}. Don't say that you weren't warned.".format(mynpc,mynpc.get_scene()),
context=ContextTag((context.INFO,)), effect=self._get_rumor,
subject=str(mynpc), data={"subject": str(mynpc)}, no_repeats=True
))
return goffs
def _get_rumor(self,camp):
mynpc = self.elements["NPC"]
self._got_rumor = True
self.memo = Memo( "{} is looking for a new lance.".format(mynpc)
, mynpc.get_scene()
)
| 45.005155 | 213 | 0.583295 | 3,931 | 34,924 | 5.048079 | 0.107097 | 0.051401 | 0.043086 | 0.020863 | 0.824934 | 0.815662 | 0.807347 | 0.792582 | 0.777968 | 0.770712 | 0 | 0.004401 | 0.303803 | 34,924 | 775 | 214 | 45.063226 | 0.811755 | 0.050424 | 0 | 0.781403 | 0 | 0.006525 | 0.113389 | 0.006108 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115824 | false | 0 | 0.017945 | 0.016313 | 0.32137 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
72e3ac4fde0a35b1aa2592f2a98574d5dd8e6f76 | 10,192 | py | Python | nca47/api/controllers/v1/firewall/securityZone.py | WosunOO/nca_xianshu | bbb548cb67b755a57528796d4c5a66ee68df2678 | ["Apache-2.0"] | null | null | null | nca47/api/controllers/v1/firewall/securityZone.py | WosunOO/nca_xianshu | bbb548cb67b755a57528796d4c5a66ee68df2678 | ["Apache-2.0"] | null | null | null | nca47/api/controllers/v1/firewall/securityZone.py | WosunOO/nca_xianshu | bbb548cb67b755a57528796d4c5a66ee68df2678 | ["Apache-2.0"] | null | null | null |
from oslo_serialization import jsonutils as json
from nca47.api.controllers.v1 import base
from nca47.common.i18n import _
from nca47.common.i18n import _LI, _LE
from nca47.common.exception import Nca47Exception
from oslo_log import log
from nca47.api.controllers.v1 import tools
from nca47.manager.central import CentralManager
from nca47.common.exception import ParamFormatError
from amqp.five import string
from nca47.common.exception import BadRequest
from oslo_messaging import RemoteError
from nca47.common import exception
LOG = log.getLogger(__name__)
class SecurityZoneController(base.BaseRestController):
def __init__(self):
self.manager = CentralManager.get_instance()
super(SecurityZoneController, self).__init__()
def create(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone create", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name', 'network_zone',
'name', 'ifnames', 'priority', 'vfwname']
values = tools.validat_values(body_values, valid_attributes)
LOG.info(_LI("input the SecurityZone values with dic format \
is %(json)s"),
{"json": body_values})
values["name"] = (values["tenant_id"] + "_" +
values["network_zone"] +
"_" + values["name"])
response = self.manager.create_securityZone(context, values)
return response
        except Nca47Exception as e:
            self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
            LOG.exception(e)
            return tools.ret_info(e.code, e.message)
        except RemoteError as remote_err:
            # Named remote_err so it does not shadow the imported exception module.
            self.response.status = 500
            return tools.ret_info(self.response.status, remote_err.value)
        except Exception as e:
            LOG.exception(e)
            self.response.status = 500
            return tools.ret_info(self.response.status, e.message)
def remove(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone del", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name', 'network_zone', 'id']
values = tools.validat_values(body_values, valid_attributes)
# input the SecurityZone values with dic format
LOG.info(_LI("delete the SecurityZone values with dic forma \
is %(json)s"),
{"json": body_values})
response = self.manager.del_securityZone(context, values)
return response
        except Nca47Exception as e:
            self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
            LOG.exception(e)
            return tools.ret_info(e.code, e.message)
        except RemoteError as remote_err:
            self.response.status = 500
            return tools.ret_info(self.response.status, remote_err.value)
        except Exception as e:
            LOG.exception(e)
            self.response.status = 500
            return tools.ret_info(self.response.status, e.message)
def list(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone getAll", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name',
'network_zone', 'vfwname']
values = tools.validat_values(body_values, valid_attributes)
# get_all the SecurityZone values with dic format
LOG.info(_LI("get_all the SecurityZone values with dic format \
is %(json)s"),
{"json": body_values})
response = self.manager.get_securityZones(context, values)
return response
        except Nca47Exception as e:
            self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
            LOG.exception(e)
            return tools.ret_info(e.code, e.message)
        except RemoteError as remote_err:
            self.response.status = 500
            return tools.ret_info(self.response.status, remote_err.value)
        except Exception as e:
            LOG.exception(e)
            self.response.status = 500
            return tools.ret_info(self.response.status, e.message)
def show(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone get", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['id']
values = tools.validat_values(body_values, valid_attributes)
# get the staticnat values with dic format
LOG.info(_LI("get the SecurityZone values with dic format\
is %(json)s"),
{"json": body_values})
response = self.manager.get_securityZone(context, values)
return response
        except Nca47Exception as e:
            self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
            LOG.exception(e)
            return tools.ret_info(e.code, e.message)
        except RemoteError as remote_err:
            self.response.status = 500
            return tools.ret_info(self.response.status, remote_err.value)
        except Exception as e:
            LOG.exception(e)
            self.response.status = 500
            return tools.ret_info(self.response.status, e.message)
def addif(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone add vlan", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name', 'network_zone', 'id',
'ifname']
values = tools.validat_values(body_values, valid_attributes)
# input the SecurityZone values with dic format
LOG.info(_LI("input the SecurityZone values with dic formatO is\
%(json)s"),
{"json": body_values})
response = self.manager.get_securityZone(context, values)
if not isinstance(values["ifname"], string):
raise ParamFormatError(param_name="ifname")
if values["ifname"] in response.ifnames:
message = ("securityZone with ifname=" +
values["ifname"] + " already exists")
return tools.ret_info("400", message)
response.ifnames.append(values["ifname"])
values["ifnames"] = response.ifnames
response = self.manager.update_securityZone(context, values)
return response
        except Nca47Exception as e:
            self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
            LOG.exception(e)
            return tools.ret_info(e.code, e.message)
        except RemoteError as remote_err:
            self.response.status = 500
            return tools.ret_info(self.response.status, remote_err.value)
        except Exception as e:
            LOG.exception(e)
            self.response.status = 500
            return tools.ret_info(self.response.status, e.message)
def delif(self, req, *args, **kwargs):
try:
url = req.url
if len(args) > 1:
raise BadRequest(resource="SecurityZone del vlan", msg=url)
context = req.context
body_values = json.loads(req.body)
valid_attributes = ['tenant_id', 'dc_name', 'network_zone', 'id',
'ifname']
values = tools.validat_values(body_values, valid_attributes)
# input the SecurityZone values with dic format
LOG.info(_LI("input the SecurityZone values with dic format\
is %(json)s"),
{"json": body_values})
response = self.manager.get_securityZone(context, values)
if not isinstance(values["ifname"], string):
raise ParamFormatError(param_name="ifname")
if values["ifname"] not in response.ifnames:
message = ("securityZone with ifname=" +
values["ifname"]+" don't exist!")
return tools.ret_info("400", message)
response.ifnames.remove(values["ifname"])
values["ifnames"] = response.ifnames
response = self.manager.update_securityZone(context, values)
return response
        except Nca47Exception as e:
            self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
            LOG.exception(e)
            return tools.ret_info(e.code, e.message)
        except RemoteError as remote_err:
            self.response.status = 500
            return tools.ret_info(self.response.status, remote_err.value)
        except Exception as e:
            LOG.exception(e)
            self.response.status = 500
            return tools.ret_info(self.response.status, e.message)
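# Illustrative request body for create() (field names follow valid_attributes
# above; the values here are made up):
#
#     {
#         "tenant_id": "tenant01",
#         "dc_name": "dc1",
#         "network_zone": "zone_a",
#         "name": "web",
#         "ifnames": [],
#         "priority": "1",
#         "vfwname": "vfw1"
#     }
#
# create() stores the zone under the composite name
# tenant_id + "_" + network_zone + "_" + name, i.e. "tenant01_zone_a_web".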
| 45.097345 | 77 | 0.577414 | 1,111 | 10,192 | 5.190819 | 0.10441 | 0.074909 | 0.112363 | 0.062424 | 0.891625 | 0.862494 | 0.851743 | 0.851743 | 0.829894 | 0.766083 | 0 | 0.012547 | 0.327512 | 10,192 | 225 | 78 | 45.297778 | 0.828859 | 0.022174 | 0 | 0.748815 | 0 | 0 | 0.07008 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033175 | false | 0 | 0.061611 | 0 | 0.222749 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
f412b42dfc85a5a206a8dd5d9f02a0078c055cdd | 60,615 | py | Python | sdk/python/pulumi_gcp/accesscontextmanager/service_perimeter.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | ["ECL-2.0", "Apache-2.0"] | 121 | 2018-06-18T19:16:42.000Z | 2022-03-31T06:06:48.000Z | sdk/python/pulumi_gcp/accesscontextmanager/service_perimeter.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | ["ECL-2.0", "Apache-2.0"] | 492 | 2018-06-22T19:41:03.000Z | 2022-03-31T15:33:53.000Z | sdk/python/pulumi_gcp/accesscontextmanager/service_perimeter.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | ["ECL-2.0", "Apache-2.0"] | 43 | 2018-06-19T01:43:13.000Z | 2022-03-23T22:43:37.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ServicePerimeterArgs', 'ServicePerimeter']
@pulumi.input_type
class ServicePerimeterArgs:
def __init__(__self__, *,
parent: pulumi.Input[str],
title: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
perimeter_type: Optional[pulumi.Input[str]] = None,
spec: Optional[pulumi.Input['ServicePerimeterSpecArgs']] = None,
status: Optional[pulumi.Input['ServicePerimeterStatusArgs']] = None,
use_explicit_dry_run_spec: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a ServicePerimeter resource.
:param pulumi.Input[str] parent: The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
:param pulumi.Input[str] title: Human readable title. Must be unique within the Policy.
:param pulumi.Input[str] description: Description of the ServicePerimeter and its use. Does not affect
behavior.
:param pulumi.Input[str] name: Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
:param pulumi.Input[str] perimeter_type: Specifies the type of the Perimeter. There are two types: regular and
bridge. Regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
               resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). Perimeter
Bridge does not contain access levels or services: those are governed
entirely by the regular perimeter that resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
:param pulumi.Input['ServicePerimeterSpecArgs'] spec: Proposed (or dry run) ServicePerimeter configuration.
               This configuration allows you to specify and test ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
:param pulumi.Input['ServicePerimeterStatusArgs'] status: ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
:param pulumi.Input[bool] use_explicit_dry_run_spec: Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
               be set to True if any of the fields in the spec are set to non-default values.
"""
pulumi.set(__self__, "parent", parent)
pulumi.set(__self__, "title", title)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if perimeter_type is not None:
pulumi.set(__self__, "perimeter_type", perimeter_type)
if spec is not None:
pulumi.set(__self__, "spec", spec)
if status is not None:
pulumi.set(__self__, "status", status)
if use_explicit_dry_run_spec is not None:
pulumi.set(__self__, "use_explicit_dry_run_spec", use_explicit_dry_run_spec)
@property
@pulumi.getter
def parent(self) -> pulumi.Input[str]:
"""
The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
"""
return pulumi.get(self, "parent")
@parent.setter
def parent(self, value: pulumi.Input[str]):
pulumi.set(self, "parent", value)
@property
@pulumi.getter
def title(self) -> pulumi.Input[str]:
"""
Human readable title. Must be unique within the Policy.
"""
return pulumi.get(self, "title")
@title.setter
def title(self, value: pulumi.Input[str]):
pulumi.set(self, "title", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the ServicePerimeter and its use. Does not affect
behavior.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="perimeterType")
def perimeter_type(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the type of the Perimeter. There are two types: regular and
bridge. Regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
        resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). Perimeter
Bridge does not contain access levels or services: those are governed
entirely by the regular perimeter that resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
"""
return pulumi.get(self, "perimeter_type")
@perimeter_type.setter
def perimeter_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "perimeter_type", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['ServicePerimeterSpecArgs']]:
"""
Proposed (or dry run) ServicePerimeter configuration.
        This configuration allows you to specify and test ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['ServicePerimeterSpecArgs']]):
pulumi.set(self, "spec", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input['ServicePerimeterStatusArgs']]:
"""
ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input['ServicePerimeterStatusArgs']]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="useExplicitDryRunSpec")
def use_explicit_dry_run_spec(self) -> Optional[pulumi.Input[bool]]:
"""
Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
        be set to True if any of the fields in the spec are set to non-default values.
"""
return pulumi.get(self, "use_explicit_dry_run_spec")
@use_explicit_dry_run_spec.setter
def use_explicit_dry_run_spec(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_explicit_dry_run_spec", value)
@pulumi.input_type
class _ServicePerimeterState:
def __init__(__self__, *,
create_time: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None,
perimeter_type: Optional[pulumi.Input[str]] = None,
spec: Optional[pulumi.Input['ServicePerimeterSpecArgs']] = None,
status: Optional[pulumi.Input['ServicePerimeterStatusArgs']] = None,
title: Optional[pulumi.Input[str]] = None,
update_time: Optional[pulumi.Input[str]] = None,
use_explicit_dry_run_spec: Optional[pulumi.Input[bool]] = None):
"""
Input properties used for looking up and filtering ServicePerimeter resources.
:param pulumi.Input[str] create_time: Time the AccessPolicy was created in UTC.
:param pulumi.Input[str] description: Description of the ServicePerimeter and its use. Does not affect
behavior.
:param pulumi.Input[str] name: Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
:param pulumi.Input[str] parent: The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
:param pulumi.Input[str] perimeter_type: Specifies the type of the Perimeter. There are two types: regular and
bridge. Regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
               resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). Perimeter
Bridge does not contain access levels or services: those are governed
entirely by the regular perimeter that resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
:param pulumi.Input['ServicePerimeterSpecArgs'] spec: Proposed (or dry run) ServicePerimeter configuration.
               This configuration allows you to specify and test ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
:param pulumi.Input['ServicePerimeterStatusArgs'] status: ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
:param pulumi.Input[str] title: Human readable title. Must be unique within the Policy.
:param pulumi.Input[str] update_time: Time the AccessPolicy was updated in UTC.
:param pulumi.Input[bool] use_explicit_dry_run_spec: Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
               be set to True if any of the fields in the spec are set to non-default values.
"""
if create_time is not None:
pulumi.set(__self__, "create_time", create_time)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if parent is not None:
pulumi.set(__self__, "parent", parent)
if perimeter_type is not None:
pulumi.set(__self__, "perimeter_type", perimeter_type)
if spec is not None:
pulumi.set(__self__, "spec", spec)
if status is not None:
pulumi.set(__self__, "status", status)
if title is not None:
pulumi.set(__self__, "title", title)
if update_time is not None:
pulumi.set(__self__, "update_time", update_time)
if use_explicit_dry_run_spec is not None:
pulumi.set(__self__, "use_explicit_dry_run_spec", use_explicit_dry_run_spec)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> Optional[pulumi.Input[str]]:
"""
Time the AccessPolicy was created in UTC.
"""
return pulumi.get(self, "create_time")
@create_time.setter
def create_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_time", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the ServicePerimeter and its use. Does not affect
behavior.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parent(self) -> Optional[pulumi.Input[str]]:
"""
The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
"""
return pulumi.get(self, "parent")
@parent.setter
def parent(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parent", value)
@property
@pulumi.getter(name="perimeterType")
def perimeter_type(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the type of the Perimeter. There are two types: regular and
bridge. Regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
        resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). Perimeter
Bridge does not contain access levels or services: those are governed
entirely by the regular perimeter that resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
"""
return pulumi.get(self, "perimeter_type")
@perimeter_type.setter
def perimeter_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "perimeter_type", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['ServicePerimeterSpecArgs']]:
"""
Proposed (or dry run) ServicePerimeter configuration.
        This configuration allows you to specify and test ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['ServicePerimeterSpecArgs']]):
pulumi.set(self, "spec", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input['ServicePerimeterStatusArgs']]:
"""
ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input['ServicePerimeterStatusArgs']]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def title(self) -> Optional[pulumi.Input[str]]:
"""
Human readable title. Must be unique within the Policy.
"""
return pulumi.get(self, "title")
@title.setter
def title(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "title", value)
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> Optional[pulumi.Input[str]]:
"""
Time the AccessPolicy was updated in UTC.
"""
return pulumi.get(self, "update_time")
@update_time.setter
def update_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_time", value)
@property
@pulumi.getter(name="useExplicitDryRunSpec")
def use_explicit_dry_run_spec(self) -> Optional[pulumi.Input[bool]]:
"""
Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
        be set to True if any of the fields in the spec are set to non-default values.
"""
return pulumi.get(self, "use_explicit_dry_run_spec")
@use_explicit_dry_run_spec.setter
def use_explicit_dry_run_spec(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_explicit_dry_run_spec", value)
class ServicePerimeter(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None,
perimeter_type: Optional[pulumi.Input[str]] = None,
spec: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterSpecArgs']]] = None,
status: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterStatusArgs']]] = None,
title: Optional[pulumi.Input[str]] = None,
use_explicit_dry_run_spec: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
ServicePerimeter describes a set of GCP resources which can freely import
and export data amongst themselves, but not export outside of the
ServicePerimeter. If a request with a source within this ServicePerimeter
has a target outside of the ServicePerimeter, the request will be blocked.
Otherwise the request is allowed. There are two types of Service Perimeter
- Regular and Bridge. Regular Service Perimeters cannot overlap, a single
GCP project can only belong to a single regular Service Perimeter. Service
Perimeter Bridges can contain only GCP projects as members, a single GCP
project may belong to multiple Service Perimeter Bridges.
To get more information about ServicePerimeter, see:
* [API documentation](https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters)
* How-to Guides
* [Service Perimeter Quickstart](https://cloud.google.com/vpc-service-controls/docs/quickstart)
> **Warning:** If you are using User ADCs (Application Default Credentials) with this resource,
you must specify a `billing_project` and set `user_project_override` to true
in the provider configuration. Otherwise the ACM API will return a 403 error.
Your account must have the `serviceusage.services.use` permission on the
`billing_project` you defined.
## Example Usage
### Access Context Manager Service Perimeter Basic
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
service_perimeter = gcp.accesscontextmanager.ServicePerimeter("service-perimeter",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
status=gcp.accesscontextmanager.ServicePerimeterStatusArgs(
restricted_services=["storage.googleapis.com"],
),
title="restrict_storage")
access_level = gcp.accesscontextmanager.AccessLevel("access-level",
basic=gcp.accesscontextmanager.AccessLevelBasicArgs(
conditions=[gcp.accesscontextmanager.AccessLevelBasicConditionArgs(
device_policy=gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyArgs(
os_constraints=[gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyOsConstraintArgs(
os_type="DESKTOP_CHROME_OS",
)],
require_screen_lock=False,
),
regions=[
"CH",
"IT",
"US",
],
)],
),
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
title="chromeos_no_lock")
```
### Access Context Manager Service Perimeter Secure Data Exchange
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
secure_data_exchange = gcp.accesscontextmanager.ServicePerimeters("secure-data-exchange",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
service_perimeters=[
gcp.accesscontextmanager.ServicePerimetersServicePerimeterArgs(
name=access_policy.name.apply(lambda name: f"accessPolicies/{name}/servicePerimeters/"),
title="",
status=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusArgs(
restricted_services=["storage.googleapis.com"],
),
),
gcp.accesscontextmanager.ServicePerimetersServicePerimeterArgs(
name=access_policy.name.apply(lambda name: f"accessPolicies/{name}/servicePerimeters/"),
title="",
status=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusArgs(
restricted_services=["bigtable.googleapis.com"],
vpc_accessible_services=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusVpcAccessibleServicesArgs(
enable_restriction=True,
allowed_services=["bigquery.googleapis.com"],
),
),
),
])
access_level = gcp.accesscontextmanager.AccessLevel("access-level",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
title="secure_data_exchange",
basic=gcp.accesscontextmanager.AccessLevelBasicArgs(
conditions=[gcp.accesscontextmanager.AccessLevelBasicConditionArgs(
device_policy=gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyArgs(
require_screen_lock=False,
os_constraints=[gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyOsConstraintArgs(
os_type="DESKTOP_CHROME_OS",
)],
),
regions=[
"CH",
"IT",
"US",
],
)],
))
test_access = gcp.accesscontextmanager.ServicePerimeter("test-access",
parent=f"accessPolicies/{google_access_context_manager_access_policy['test-access']['name']}",
title="%s",
perimeter_type="PERIMETER_TYPE_REGULAR",
status=gcp.accesscontextmanager.ServicePerimeterStatusArgs(
restricted_services=[
"bigquery.googleapis.com",
"storage.googleapis.com",
],
access_levels=[access_level.name],
vpc_accessible_services=gcp.accesscontextmanager.ServicePerimeterStatusVpcAccessibleServicesArgs(
enable_restriction=True,
allowed_services=[
"bigquery.googleapis.com",
"storage.googleapis.com",
],
),
ingress_policies=[gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyArgs(
ingress_from=gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressFromArgs(
sources=[gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressFromSourceArgs(
access_level=google_access_context_manager_access_level["test-access"]["name"],
)],
identity_type="ANY_IDENTITY",
),
ingress_to=gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToArgs(
resources=["*"],
operations=[
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationArgs(
service_name="bigquery.googleapis.com",
method_selectors=[
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
method="BigQueryStorage.ReadRows",
),
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
method="TableService.ListTables",
),
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
permission="bigquery.jobs.get",
),
],
),
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationArgs(
service_name="storage.googleapis.com",
method_selectors=[gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
method="google.storage.objects.create",
)],
),
],
),
)],
egress_policies=[gcp.accesscontextmanager.ServicePerimeterStatusEgressPolicyArgs(
egress_from=gcp.accesscontextmanager.ServicePerimeterStatusEgressPolicyEgressFromArgs(
identity_type="ANY_USER_ACCOUNT",
),
)],
))
```
### Access Context Manager Service Perimeter Dry Run
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
service_perimeter = gcp.accesscontextmanager.ServicePerimeter("service-perimeter",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
spec=gcp.accesscontextmanager.ServicePerimeterSpecArgs(
restricted_services=["storage.googleapis.com"],
),
status=gcp.accesscontextmanager.ServicePerimeterStatusArgs(
restricted_services=["bigquery.googleapis.com"],
),
title="restrict_bigquery_dryrun_storage",
use_explicit_dry_run_spec=True)
```
## Import
ServicePerimeter can be imported using any of these accepted formats:
```sh
$ pulumi import gcp:accesscontextmanager/servicePerimeter:ServicePerimeter default {{name}}
```
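As a minimal sketch, an existing perimeter can also be adopted into a program with the `import_` resource option; the full resource name below is hypothetical:

```python
import pulumi
import pulumi_gcp as gcp

# Adopt an already-existing perimeter instead of creating a new one.
existing = gcp.accesscontextmanager.ServicePerimeter("existing-perimeter",
    parent="accessPolicies/123456789",
    title="existing perimeter",
    opts=pulumi.ResourceOptions(import_="accessPolicies/123456789/servicePerimeters/my_perimeter"))
```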
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description of the ServicePerimeter and its use. Does not affect
behavior.
:param pulumi.Input[str] name: Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
:param pulumi.Input[str] parent: The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
:param pulumi.Input[str] perimeter_type: Specifies the type of the Perimeter. There are two types: regular and
bridge. A regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). A Perimeter
Bridge does not contain access levels or services; those are governed
entirely by the regular perimeter that the resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
:param pulumi.Input[pulumi.InputType['ServicePerimeterSpecArgs']] spec: Proposed (or dry run) ServicePerimeter configuration.
This configuration allows you to specify and test a ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['ServicePerimeterStatusArgs']] status: ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
:param pulumi.Input[str] title: Human readable title. Must be unique within the Policy.
:param pulumi.Input[bool] use_explicit_dry_run_spec: Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
be set to True if any of the fields in the spec are set to non-default values.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ServicePerimeterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
ServicePerimeter describes a set of GCP resources which can freely import
and export data amongst themselves, but not export outside of the
ServicePerimeter. If a request with a source within this ServicePerimeter
has a target outside of the ServicePerimeter, the request will be blocked.
Otherwise the request is allowed. There are two types of Service Perimeter:
Regular and Bridge. Regular Service Perimeters cannot overlap; a single
GCP project can belong to only one regular Service Perimeter. Service
Perimeter Bridges can contain only GCP projects as members; a single GCP
project may belong to multiple Service Perimeter Bridges.
To get more information about ServicePerimeter, see:
* [API documentation](https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters)
* How-to Guides
* [Service Perimeter Quickstart](https://cloud.google.com/vpc-service-controls/docs/quickstart)
> **Warning:** If you are using User ADCs (Application Default Credentials) with this resource,
you must specify a `billing_project` and set `user_project_override` to true
in the provider configuration. Otherwise the ACM API will return a 403 error.
Your account must have the `serviceusage.services.use` permission on the
`billing_project` you defined.
## Example Usage
### Access Context Manager Service Perimeter Basic
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
service_perimeter = gcp.accesscontextmanager.ServicePerimeter("service-perimeter",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
status=gcp.accesscontextmanager.ServicePerimeterStatusArgs(
restricted_services=["storage.googleapis.com"],
),
title="restrict_storage")
access_level = gcp.accesscontextmanager.AccessLevel("access-level",
basic=gcp.accesscontextmanager.AccessLevelBasicArgs(
conditions=[gcp.accesscontextmanager.AccessLevelBasicConditionArgs(
device_policy=gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyArgs(
os_constraints=[gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyOsConstraintArgs(
os_type="DESKTOP_CHROME_OS",
)],
require_screen_lock=False,
),
regions=[
"CH",
"IT",
"US",
],
)],
),
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
title="chromeos_no_lock")
```
### Access Context Manager Service Perimeter Secure Data Exchange
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
secure_data_exchange = gcp.accesscontextmanager.ServicePerimeters("secure-data-exchange",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
service_perimeters=[
gcp.accesscontextmanager.ServicePerimetersServicePerimeterArgs(
name=access_policy.name.apply(lambda name: f"accessPolicies/{name}/servicePerimeters/"),
title="",
status=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusArgs(
restricted_services=["storage.googleapis.com"],
),
),
gcp.accesscontextmanager.ServicePerimetersServicePerimeterArgs(
name=access_policy.name.apply(lambda name: f"accessPolicies/{name}/servicePerimeters/"),
title="",
status=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusArgs(
restricted_services=["bigtable.googleapis.com"],
vpc_accessible_services=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusVpcAccessibleServicesArgs(
enable_restriction=True,
allowed_services=["bigquery.googleapis.com"],
),
),
),
])
access_level = gcp.accesscontextmanager.AccessLevel("access-level",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
title="secure_data_exchange",
basic=gcp.accesscontextmanager.AccessLevelBasicArgs(
conditions=[gcp.accesscontextmanager.AccessLevelBasicConditionArgs(
device_policy=gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyArgs(
require_screen_lock=False,
os_constraints=[gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyOsConstraintArgs(
os_type="DESKTOP_CHROME_OS",
)],
),
regions=[
"CH",
"IT",
"US",
],
)],
))
test_access = gcp.accesscontextmanager.ServicePerimeter("test-access",
parent=f"accessPolicies/{google_access_context_manager_access_policy['test-access']['name']}",
title="%s",
perimeter_type="PERIMETER_TYPE_REGULAR",
status=gcp.accesscontextmanager.ServicePerimeterStatusArgs(
restricted_services=[
"bigquery.googleapis.com",
"storage.googleapis.com",
],
access_levels=[access_level.name],
vpc_accessible_services=gcp.accesscontextmanager.ServicePerimeterStatusVpcAccessibleServicesArgs(
enable_restriction=True,
allowed_services=[
"bigquery.googleapis.com",
"storage.googleapis.com",
],
),
ingress_policies=[gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyArgs(
ingress_from=gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressFromArgs(
sources=[gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressFromSourceArgs(
access_level=google_access_context_manager_access_level["test-access"]["name"],
)],
identity_type="ANY_IDENTITY",
),
ingress_to=gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToArgs(
resources=["*"],
operations=[
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationArgs(
service_name="bigquery.googleapis.com",
method_selectors=[
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
method="BigQueryStorage.ReadRows",
),
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
method="TableService.ListTables",
),
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
permission="bigquery.jobs.get",
),
],
),
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationArgs(
service_name="storage.googleapis.com",
method_selectors=[gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
method="google.storage.objects.create",
)],
),
],
),
)],
egress_policies=[gcp.accesscontextmanager.ServicePerimeterStatusEgressPolicyArgs(
egress_from=gcp.accesscontextmanager.ServicePerimeterStatusEgressPolicyEgressFromArgs(
identity_type="ANY_USER_ACCOUNT",
),
)],
))
```
### Access Context Manager Service Perimeter Dry Run
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
service_perimeter = gcp.accesscontextmanager.ServicePerimeter("service-perimeter",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
spec=gcp.accesscontextmanager.ServicePerimeterSpecArgs(
restricted_services=["storage.googleapis.com"],
),
status=gcp.accesscontextmanager.ServicePerimeterStatusArgs(
restricted_services=["bigquery.googleapis.com"],
),
title="restrict_bigquery_dryrun_storage",
use_explicit_dry_run_spec=True)
```
## Import
ServicePerimeter can be imported using any of these accepted formats:
```sh
$ pulumi import gcp:accesscontextmanager/servicePerimeter:ServicePerimeter default {{name}}
```
:param str resource_name: The name of the resource.
:param ServicePerimeterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServicePerimeterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None,
perimeter_type: Optional[pulumi.Input[str]] = None,
spec: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterSpecArgs']]] = None,
status: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterStatusArgs']]] = None,
title: Optional[pulumi.Input[str]] = None,
use_explicit_dry_run_spec: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServicePerimeterArgs.__new__(ServicePerimeterArgs)
__props__.__dict__["description"] = description
__props__.__dict__["name"] = name
if parent is None and not opts.urn:
raise TypeError("Missing required property 'parent'")
__props__.__dict__["parent"] = parent
__props__.__dict__["perimeter_type"] = perimeter_type
__props__.__dict__["spec"] = spec
__props__.__dict__["status"] = status
if title is None and not opts.urn:
raise TypeError("Missing required property 'title'")
__props__.__dict__["title"] = title
__props__.__dict__["use_explicit_dry_run_spec"] = use_explicit_dry_run_spec
__props__.__dict__["create_time"] = None
__props__.__dict__["update_time"] = None
super(ServicePerimeter, __self__).__init__(
'gcp:accesscontextmanager/servicePerimeter:ServicePerimeter',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
create_time: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None,
perimeter_type: Optional[pulumi.Input[str]] = None,
spec: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterSpecArgs']]] = None,
status: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterStatusArgs']]] = None,
title: Optional[pulumi.Input[str]] = None,
update_time: Optional[pulumi.Input[str]] = None,
use_explicit_dry_run_spec: Optional[pulumi.Input[bool]] = None) -> 'ServicePerimeter':
"""
Get an existing ServicePerimeter resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] create_time: Time the ServicePerimeter was created in UTC.
:param pulumi.Input[str] description: Description of the ServicePerimeter and its use. Does not affect
behavior.
:param pulumi.Input[str] name: Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
:param pulumi.Input[str] parent: The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
:param pulumi.Input[str] perimeter_type: Specifies the type of the Perimeter. There are two types: regular and
bridge. A regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). A Perimeter
Bridge does not contain access levels or services; those are governed
entirely by the regular perimeter that the resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
:param pulumi.Input[pulumi.InputType['ServicePerimeterSpecArgs']] spec: Proposed (or dry run) ServicePerimeter configuration.
This configuration allows you to specify and test a ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['ServicePerimeterStatusArgs']] status: ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
:param pulumi.Input[str] title: Human readable title. Must be unique within the Policy.
:param pulumi.Input[str] update_time: Time the ServicePerimeter was updated in UTC.
:param pulumi.Input[bool] use_explicit_dry_run_spec: Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
be set to True if any of the fields in the spec are set to non-default values.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ServicePerimeterState.__new__(_ServicePerimeterState)
__props__.__dict__["create_time"] = create_time
__props__.__dict__["description"] = description
__props__.__dict__["name"] = name
__props__.__dict__["parent"] = parent
__props__.__dict__["perimeter_type"] = perimeter_type
__props__.__dict__["spec"] = spec
__props__.__dict__["status"] = status
__props__.__dict__["title"] = title
__props__.__dict__["update_time"] = update_time
__props__.__dict__["use_explicit_dry_run_spec"] = use_explicit_dry_run_spec
return ServicePerimeter(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
"""
Time the ServicePerimeter was created in UTC.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the ServicePerimeter and its use. Does not affect
behavior.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parent(self) -> pulumi.Output[str]:
"""
The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
"""
return pulumi.get(self, "parent")
@property
@pulumi.getter(name="perimeterType")
def perimeter_type(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the type of the Perimeter. There are two types: regular and
bridge. A regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). A Perimeter
Bridge does not contain access levels or services; those are governed
entirely by the regular perimeter that the resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
"""
return pulumi.get(self, "perimeter_type")
@property
@pulumi.getter
def spec(self) -> pulumi.Output[Optional['outputs.ServicePerimeterSpec']]:
"""
Proposed (or dry run) ServicePerimeter configuration.
This configuration allows you to specify and test a ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
"""
return pulumi.get(self, "spec")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional['outputs.ServicePerimeterStatus']]:
"""
ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def title(self) -> pulumi.Output[str]:
"""
Human readable title. Must be unique within the Policy.
"""
return pulumi.get(self, "title")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> pulumi.Output[str]:
"""
Time the ServicePerimeter was updated in UTC.
"""
return pulumi.get(self, "update_time")
@property
@pulumi.getter(name="useExplicitDryRunSpec")
def use_explicit_dry_run_spec(self) -> pulumi.Output[Optional[bool]]:
"""
Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
be set to True if any of the fields in the spec are set to non-default values.
"""
return pulumi.get(self, "use_explicit_dry_run_spec")
# ---------------------------------------------------------------------------
# File: venv/lib/python3.8/site-packages/arch/tests/univariate/test_recursions.py
# Repo: YileC928/finm-portfolio-2021 | License: MIT
# ---------------------------------------------------------------------------
import os
import timeit
from typing import List
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_allclose, assert_almost_equal
import pytest
from scipy.special import gamma
import arch.univariate.recursions_python as recpy
CYTHON_COVERAGE = os.environ.get("ARCH_CYTHON_COVERAGE", "0") in ("true", "1", "True")
try:
import arch.univariate.recursions as rec_cython
missing_extension = False
except ImportError:
missing_extension = True
if missing_extension:
rec = recpy
else:
rec = rec_cython
try:
import numba # noqa
missing_numba = False
except ImportError:
missing_numba = True
pytestmark = pytest.mark.filterwarnings("ignore::arch.compat.numba.PerformanceWarning")
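# The Timer class below is a small timeit harness that compares two code
# snippets and reports which is faster; a sketch of typical use (taken
# from the performance tests further down):
#
#     timer = Timer(first_code, "Numba", second_code, "Cython", "GARCH", setup)
#     timer.display()  # prints both timings and their ratio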
class Timer(object):
def __init__(
self,
first,
first_name,
second,
second_name,
model_name,
setup,
repeat=5,
number=10,
) -> None:
self.first_code = first
self.second_code = second
self.setup = setup
self.first_name = first_name
self.second_name = second_name
self.model_name = model_name
self.repeat = repeat
self.number = number
self._run = False
self.times: List[float] = []
self._codes = [first, second]
self.ratio = np.inf
def display(self):
if not self._run:
self.time()
self.ratio = self.times[0] / self.times[1]
title = self.model_name + " timing"
print("\n" + title)
print("-" * len(title))
print(self.first_name + ": " + "{:0.3f} ms".format(1000 * self.times[0]))
print(self.second_name + ": " + "{:0.3f} ms".format(1000 * self.times[1]))
if self.ratio < 1:
print(
"{0} is {1:0.1f}% faster".format(
self.first_name, 100 * (1 / self.ratio - 1)
)
)
else:
print(
"{0} is {1:0.1f}% faster".format(
self.second_name, 100 * (self.ratio - 1)
)
)
print(
self.first_name
+ "/"
+ self.second_name
+ " Ratio: {:0.3f}\n".format(self.ratio)
)
def time(self):
self.times = []
for code in self._codes:
timer = timeit.Timer(code, setup=self.setup)
self.times.append(min(timer.repeat(self.repeat, self.number)))
class TestRecursions(object):
@classmethod
def setup_class(cls):
cls.nobs = 1000
cls.rng = RandomState(12345)
cls.resids = cls.rng.standard_normal(cls.nobs)
cls.sigma2 = np.zeros_like(cls.resids)
var = cls.resids.var()
var_bounds = np.array([var / 1000000.0, var * 1000000.0])
cls.var_bounds = np.ones((cls.nobs, 2)) * var_bounds
cls.backcast = 1.0
cls.timer_setup = """
import numpy as np
import arch.univariate.recursions as rec
import arch.univariate.recursions_python as recpy
nobs = 10000
resids = np.random.standard_normal(nobs)
sigma2 = np.zeros_like(resids)
var = resids.var()
backcast = 1.0
var_bounds = np.array([var / 1000000.0, var * 1000000.0])
var_bounds = np.ones((nobs, 2)) * var_bounds
"""
def test_garch(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
fresids = resids ** 2.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_numba = sigma2.copy()
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([0.1, -0.4, 0.3, 0.2])
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 0.4, 3, 2])
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 0.4, 0.3, 0.2])
mod_fresids = fresids.copy()
mod_fresids[:1] = np.inf
recpy.garch_recursion_python(
parameters,
mod_fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.garch_recursion(
parameters,
mod_fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_harch(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
lags = np.array([1, 5, 22], dtype=np.int32)
recpy.harch_recursion_python(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
recpy.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
sigma2_numba = sigma2.copy()
rec.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([-0.1, -0.4, 0.3, 0.2])
recpy.harch_recursion_python(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 4e8, 3, 2])
recpy.harch_recursion_python(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 4e8, 3, 2])
mod_resids = resids.copy()
mod_resids[:10] = np.inf
recpy.harch_recursion_python(
parameters, mod_resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.harch_recursion(
parameters, mod_resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_arch(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
p = 3
recpy.arch_recursion_python(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
recpy.arch_recursion(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
sigma2_numba = sigma2.copy()
rec.arch_recursion(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([-0.1, -0.4, 0.3, 0.2])
recpy.arch_recursion_python(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 4e8, 3, 2])
recpy.arch_recursion_python(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
mod_resids = resids.copy()
mod_resids[:10] = np.inf
recpy.arch_recursion_python(
parameters, mod_resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.arch_recursion(
parameters, mod_resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_garch_power_1(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
fresids = np.abs(resids) ** 1.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
def test_garch_direct(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
fresids = np.abs(resids) ** 2.0
sresids = np.sign(resids)
for t in range(nobs):
if t == 0:
sigma2[t] = parameters.dot(
np.array([1.0, backcast, 0.5 * backcast, backcast])
)
else:
var = np.array(
[
1.0,
resids[t - 1] ** 2.0,
resids[t - 1] ** 2.0 * (resids[t - 1] < 0),
sigma2[t - 1],
]
)
sigma2[t] = parameters.dot(var)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
def test_garch_no_q(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3])
fresids = resids ** 2.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
0,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
0,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
def test_garch_no_p(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3])
fresids = resids ** 2.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
0,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
0,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
def test_garch_no_o(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
fresids = resids ** 2.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
0,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
0,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
def test_garch_arch(self):
backcast = self.backcast
nobs, resids, sigma2 = self.nobs, self.resids, self.sigma2
parameters = np.array([0.1, 0.4, 0.3, 0.2])
fresids = resids ** 2.0
sresids = np.sign(resids)
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
3,
0,
0,
nobs,
backcast,
self.var_bounds,
)
sigma2_garch = sigma2.copy()
rec.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_garch, sigma2)
def test_bounds(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([1e100, 0.4, 0.3, 0.2])
lags = np.array([1, 5, 22], dtype=np.int32)
recpy.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_python, sigma2)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([-1e100, 0.4, 0.3, 0.2])
recpy.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_python, sigma2)
assert_almost_equal(sigma2, self.var_bounds[:, 0])
parameters = np.array([1e100, 0.4, 0.3, 0.2])
fresids = resids ** 2.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([-1e100, 0.4, 0.3, 0.2])
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_python, sigma2)
assert_almost_equal(sigma2, self.var_bounds[:, 0])
parameters = np.array([1e100, 0.4, 0.3, 0.2])
recpy.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_python, sigma2)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([-1e100, 0.4, 0.3, 0.2])
recpy.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.arch_recursion(
parameters, resids, sigma2, 3, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_python, sigma2)
assert_almost_equal(sigma2, self.var_bounds[:, 0])
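# EGARCH updates the log-variance rather than the variance itself; the
# direct loop inside test_egarch below implements, as a sketch,
#
#     lnsigma2[t] = w + a * (|z[t-1]| - sqrt(2 / pi)) + g * z[t-1] + b * lnsigma2[t-1]
#
# where z[t] = resids[t] / sqrt(sigma2[t]) and parameters = [w, a, g, b].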
def test_egarch(self):
nobs = self.nobs
parameters = np.array([0.0, 0.1, -0.1, 0.95])
resids, sigma2 = self.resids, self.sigma2
p = o = q = 1
backcast = 0.0
var_bounds = self.var_bounds
lnsigma2 = np.empty_like(sigma2)
std_resids = np.empty_like(sigma2)
abs_std_resids = np.empty_like(sigma2)
recpy.egarch_recursion(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
sigma2_numba = sigma2.copy()
recpy.egarch_recursion_python(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
sigma2_python = sigma2.copy()
rec.egarch_recursion(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
norm_const = np.sqrt(2 / np.pi)
for t in range(nobs):
lnsigma2[t] = parameters[0]
if t == 0:
lnsigma2[t] += parameters[3] * backcast
else:
stdresid = resids[t - 1] / np.sqrt(sigma2[t - 1])
lnsigma2[t] += parameters[1] * (np.abs(stdresid) - norm_const)
lnsigma2[t] += parameters[2] * stdresid
lnsigma2[t] += parameters[3] * lnsigma2[t - 1]
sigma2[t] = np.exp(lnsigma2[t])
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([-100.0, 0.1, -0.1, 0.95])
recpy.egarch_recursion_python(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.0, 0.1, -0.1, 9.5])
recpy.egarch_recursion_python(
parameters,
resids,
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.0, 0.1, -0.1, 0.95])
mod_resids = resids.copy()
mod_resids[:1] = np.inf
recpy.egarch_recursion_python(
parameters,
mod_resids,  # pass the series actually modified above; mod_resids was otherwise unused
sigma2,
p,
o,
q,
nobs,
backcast,
var_bounds,
lnsigma2,
std_resids,
abs_std_resids,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
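# The MIDAS tests build hyperbolic lag weights from the gamma function,
#
#     w_j = gamma(j + theta) / (gamma(j + 1) * gamma(theta)),
#
# normalized to sum to one over a 22-lag window (theta = 0.6 below).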
def test_midas_hyperbolic(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.8, 0])
j = np.arange(1, 22 + 1)
weights = gamma(j + 0.6) / (gamma(j + 1) * gamma(0.6))
weights = weights / weights.sum()
recpy.midas_recursion(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
sigma2_numba = sigma2.copy()
recpy.midas_recursion_python(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
rec.midas_recursion(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
mod_resids = resids.copy()
mod_resids[:10] = np.inf
recpy.midas_recursion_python(
parameters, weights, mod_resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 10e10, 0])
j = np.arange(1, 22 + 1)
weights = gamma(j + 0.6) / (gamma(j + 1) * gamma(0.6))
weights = weights / weights.sum()
recpy.midas_recursion_python(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.midas_recursion(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, -0.4, 0])
recpy.midas_recursion_python(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.midas_recursion(
parameters, weights, resids, sigma2, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_figarch_recursion(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([1.0, 0.2, 0.4, 0.3])
fresids = resids ** 2
p = q = 1
trunc_lag = 1000
rec.figarch_recursion(
parameters,
fresids,
sigma2,
p,
q,
nobs,
trunc_lag,
backcast,
self.var_bounds,
)
lam = rec.figarch_weights(parameters[1:], p, q, trunc_lag=trunc_lag)
lam_rev = lam[::-1]
omega_tilde = parameters[0] / (1 - parameters[-1])
sigma2_direct = np.empty_like(sigma2)
for t in range(nobs):
backcasts = trunc_lag - t
sigma2_direct[t] = omega_tilde
if backcasts:
sigma2_direct[t] += backcast * lam_rev[:backcasts].sum()
if t:
sigma2_direct[t] += np.sum(lam_rev[-t:] * fresids[max(0, t - 1000) : t])
assert_almost_equal(sigma2_direct, sigma2)
recpy.figarch_recursion(
parameters,
fresids,
sigma2,
p,
q,
nobs,
trunc_lag,
backcast,
self.var_bounds,
)
sigma2_numba = sigma2.copy()
recpy.figarch_recursion_python(
parameters,
fresids,
sigma2,
p,
q,
nobs,
trunc_lag,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.figarch_recursion(
parameters,
fresids,
sigma2,
p,
q,
nobs,
trunc_lag,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
def test_figarch_weights(self):
parameters = np.array([1.0, 0.4])
lam = rec.figarch_weights(parameters[1:], 0, 0, trunc_lag=1000)
lam_direct = np.empty_like(lam)
lam_direct[0] = parameters[-1]
for i in range(1, 1000):
lam_direct[i] = (i - parameters[-1]) / (i + 1) * lam_direct[i - 1]
assert_almost_equal(lam, lam_direct)
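# A note on the check above: for p = q = 0 the weights follow
# lam[0] = d and lam[i] = lam[i-1] * (i - d) / (i + 1), which (assuming
# the usual FIGARCH derivation) are the hyperbolic ARCH(inf) coefficients
# of the fractional differencing filter (1 - L)**d.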
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba or the Cython extension not installed"
)
def test_garch_performance(self):
garch_setup = """
parameters = np.array([.1, .4, .3, .2])
fresids = resids ** 2.0
sresids = np.sign(resids)
"""
garch_first = """
recpy.garch_recursion(parameters, fresids, sresids, sigma2, 1, 1, 1, nobs,
backcast, var_bounds)
"""
garch_second = """
rec.garch_recursion(parameters, fresids, sresids, sigma2, 1, 1, 1, nobs, backcast,
var_bounds)
"""
timer = Timer(
garch_first,
"Numba",
garch_second,
"Cython",
"GARCH",
self.timer_setup + garch_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba or the Cython extension not installed"
)
def test_harch_performance(self):
harch_setup = """
parameters = np.array([.1, .4, .3, .2])
lags = np.array([1, 5, 22], dtype=np.int32)
"""
harch_first = """
recpy.harch_recursion(parameters, resids, sigma2, lags, nobs, backcast,
var_bounds)
"""
harch_second = """
rec.harch_recursion(parameters, resids, sigma2, lags, nobs, backcast, var_bounds)
"""
timer = Timer(
harch_first,
"Numba",
harch_second,
"Cython",
"HARCH",
self.timer_setup + harch_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba or the Cython extension not installed"
)
def test_egarch_performance(self):
egarch_setup = """
parameters = np.array([0.0, 0.1, -0.1, 0.95])
p = o = q = 1
backcast = 0.0
lnsigma2 = np.empty_like(sigma2)
std_resids = np.empty_like(sigma2)
abs_std_resids = np.empty_like(sigma2)
"""
egarch_first = """
recpy.egarch_recursion(parameters, resids, sigma2, p, o, q, nobs, backcast,
var_bounds, lnsigma2, std_resids, abs_std_resids)
"""
egarch_second = """
rec.egarch_recursion(parameters, resids, sigma2, p, o, q, nobs, backcast,
var_bounds, lnsigma2, std_resids, abs_std_resids)
"""
timer = Timer(
egarch_first,
"Numba",
egarch_second,
"Cython",
"EGARCH",
self.timer_setup + egarch_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba or the Cython extension not installed"
)
def test_midas_performance(self):
midas_setup = """
from scipy.special import gamma
parameters = np.array([.1, 0.8, 0])
j = np.arange(1,22+1)
weights = gamma(j+0.6) / (gamma(j+1) * gamma(0.6))
weights = weights / weights.sum()
"""
midas_first = """
recpy.midas_recursion(parameters, weights, resids, sigma2, nobs, backcast, var_bounds)
"""
midas_second = """
rec.midas_recursion(parameters, weights, resids, sigma2, nobs, backcast, var_bounds)
"""
timer = Timer(
midas_first,
"Numba",
midas_second,
"Cython",
"MIDAS",
self.timer_setup + midas_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
@pytest.mark.skipif(
missing_numba or missing_extension, reason="numba or the Cython extension not installed"
)
def test_figarch_performance(self):
midas_setup = """
p = q = 1
trunc_lag = 1000
parameters = np.array([1.0, 0.2, 0.2, 0.04])
fresids = resids ** 2.0
"""
midas_first = """
recpy.figarch_recursion(parameters, fresids, sigma2, p, q, nobs, trunc_lag, backcast, var_bounds)
"""
midas_second = """
rec.figarch_recursion(parameters, fresids, sigma2, p, q, nobs, trunc_lag, backcast, var_bounds)
"""
timer = Timer(
midas_first,
"Numba",
midas_second,
"Cython",
"FIGARCH",
self.timer_setup + midas_setup,
)
timer.display()
assert timer.ratio < 10.0
if not (missing_numba or CYTHON_COVERAGE):
assert 0.1 < timer.ratio
def test_garch_aparch_equiv(self):
parameters = np.array([0.1, 0.1, 0.8])
fresids = self.resids ** 2
sresids = np.sign(self.resids)
sigma2 = np.empty(1000)
p = q = 1
o = 0
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
sigma2_garch = sigma2.copy()
parameters = np.array([0.1, 0.1, 0.8, 2])
sigma2[:] = np.nan
sigma2_delta = np.empty_like(sigma2)
recpy.aparch_recursion_python(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert_allclose(sigma2_garch, sigma2, atol=1e-6)
sigma2[:] = np.nan
recpy.aparch_recursion(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert_allclose(sigma2_garch, sigma2, atol=1e-6)
sigma2[:] = np.nan
rec.aparch_recursion(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert_allclose(sigma2_garch, sigma2, atol=1e-6)
def test_asym_aparch_smoke(self):
sigma2 = np.empty(1000)
p = o = q = 1
parameters = np.array([0.1, 0.1, 0.1, 0.8, 1.3])
sigma2[:] = np.nan
sigma2_delta = np.empty_like(sigma2)
recpy.aparch_recursion_python(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert np.all(np.isfinite(sigma2))
sigma2_py = sigma2.copy()
sigma2[:] = np.nan
recpy.aparch_recursion(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert np.all(np.isfinite(sigma2))
assert_allclose(sigma2_py, sigma2)
sigma2[:] = np.nan
rec.aparch_recursion(
parameters,
self.resids,
np.abs(self.resids),
sigma2,
sigma2_delta,
p,
o,
q,
self.nobs,
self.backcast,
self.var_bounds,
)
assert np.all(np.isfinite(sigma2))
assert_allclose(sigma2_py, sigma2)
def test_bounds_check():
var_bounds = np.array([0.1, 10])
assert_almost_equal(recpy.bounds_check_python(-1.0, var_bounds), 0.1)
assert_almost_equal(
recpy.bounds_check_python(20.0, var_bounds), 10 + np.log(20.0 / 10.0)
)
assert_almost_equal(recpy.bounds_check_python(np.inf, var_bounds), 1010.0)
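# A standalone sketch (not part of the suite) of the transform the
# assertions above exercise, inferred from the expected values: variances
# below the lower bound are clamped to it, finite values above the upper
# bound are log-dampened, and infinite values receive a fixed penalty of
# upper + 1000.
def _bounds_check_sketch(sigma2: float, var_bounds: np.ndarray) -> float:
    lower, upper = var_bounds
    if sigma2 < lower:
        return lower
    if sigma2 > upper:
        if not np.isfinite(sigma2):
            return upper + 1000.0
        return upper + np.log(sigma2 / upper)
    return sigma2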
# ---------------------------------------------------------------------------
# File: backend/server/tables/__init__.py
# Repo: shiv12095/realtimeviz | License: MIT
# ---------------------------------------------------------------------------
from .lime_bike_feed import LimeBikeFeed
from .lime_bike_trips import LimeBikeTrips
from .lime_bike_trips_analyze import LimeBikeTripsAnalyze
# ---------------------------------------------------------------------------
# File: deploy/trained_model.py
# Repo: Samyak005/Multi-Hop-QG | License: MIT
# ---------------------------------------------------------------------------
import torch
import logging
# transformers version 4.9.1; newer versions may not work.
from transformers import AutoTokenizer
from trained_gpt_model import get_inference2
def t5_supp_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # CPU path is untested and may not work.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 't5-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/t5_model_hotpot_supporting_facts_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def t5_full_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # CPU path is untested and may not work.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 't5-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/t5_model_hotpot_full_context_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def bart_supp_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # CPU path is untested and may not work.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 'facebook/bart-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/bart_model_hotpot_supporting_facts_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def bart_full_inference(review_text):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # CPU inference is untested and may not work
    # device = torch.device('cpu')
    print('Using device: ' + str(device))
PRETRAINED_MODEL = 'facebook/bart-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/bart_model_hotpot_full_context_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
# if __name__ == "__main__":
# review_text = "<answer> a fusional language <context> Typologically, Estonian represents a transitional form from an agglutinating language to a fusional language. The canonical word order is SVO (subject–verb–object)."
# t5_supp_inference(review_text, md2, device)
def get_inference(answer, context, model_name):
valuation_text = "<answer> " + answer + " <context> " + context
if model_name == 't5_supp':
return t5_supp_inference(valuation_text)
elif model_name == 't5_full':
return t5_full_inference(valuation_text)
elif model_name == 'bart_supp':
return bart_supp_inference(valuation_text)
elif model_name == 'bart_full':
return bart_full_inference(valuation_text)
elif model_name == 'gpt2':
return get_inference2(answer, context)
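# Usage sketch added for illustration (not part of the original file). It
# assumes the trained .pth files referenced above exist on disk, and reuses the
# example answer/context from the commented-out block earlier in this module.
if __name__ == "__main__":
    example_answer = "a fusional language"
    example_context = (
        "Typologically, Estonian represents a transitional form from an "
        "agglutinating language to a fusional language. The canonical word "
        "order is SVO (subject-verb-object)."
    )
    # Any of 't5_supp', 't5_full', 'bart_supp', 'bart_full', or 'gpt2' works here.
    print(get_inference(example_answer, example_context, 't5_supp'))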
| 33.534591
| 225
| 0.69036
| 659
| 5,332
| 5.31563
| 0.169954
| 0.074222
| 0.038824
| 0.028547
| 0.850699
| 0.842992
| 0.842992
| 0.796175
| 0.796175
| 0.796175
| 0
| 0.007976
| 0.200488
| 5,332
| 158
| 226
| 33.746835
| 0.813277
| 0.128095
| 0
| 0.774194
| 0
| 0
| 0.148132
| 0.071691
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040323
| false
| 0
| 0.032258
| 0
| 0.145161
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be763dff688768c2aba41209e3bec63f50ee2a53
| 19,099
|
py
|
Python
|
boa_test/tests/test_ico_template.py
|
mixbee/neo-boa
|
da7366c26c7b8e60afb9ac27439a1da37b0be355
|
[
"MIT"
] | 4
|
2018-08-22T03:30:34.000Z
|
2019-04-16T10:54:08.000Z
|
boa_test/tests/test_ico_template.py
|
mixbee/neo-boa
|
da7366c26c7b8e60afb9ac27439a1da37b0be355
|
[
"MIT"
] | 3
|
2018-09-03T09:19:26.000Z
|
2019-01-24T00:06:29.000Z
|
boa_test/tests/test_ico_template.py
|
mixbee/neo-boa
|
da7366c26c7b8e60afb9ac27439a1da37b0be355
|
[
"MIT"
] | 12
|
2018-07-19T06:36:44.000Z
|
2019-05-13T05:45:58.000Z
|
from boa_test.tests.boa_test import BoaFixtureTest
from boa.compiler import Compiler
from neo.Core.TX.Transaction import Transaction
from neo.Prompt.Commands.BuildNRun import TestBuild
from neo.EventHub import events
from neo.SmartContract.SmartContractEvent import SmartContractEvent, NotifyEvent
from neo.Settings import settings
from neo.Prompt.Utils import parse_param
from neo.Core.FunctionCode import FunctionCode
from neocore.Fixed8 import Fixed8
from boa_test.example.demo.nex.token import *
import shutil
import os
from logzero import logger
settings.USE_DEBUG_STORAGE = True
settings.DEBUG_STORAGE_PATH = './fixtures/debugstorage'
class TestContract(BoaFixtureTest):
dispatched_events = []
dispatched_logs = []
@classmethod
def tearDownClass(cls):
super(BoaFixtureTest, cls).tearDownClass()
try:
if os.path.exists(settings.debug_storage_leveldb_path):
shutil.rmtree(settings.debug_storage_leveldb_path)
else:
logger.error("debug storage path doesn't exist")
except Exception as e:
logger.error("couldn't remove debug storage %s " % e)
@classmethod
def setUpClass(cls):
super(TestContract, cls).setUpClass()
def on_notif(evt):
print(evt)
cls.dispatched_events.append(evt)
print("dispatched events %s " % cls.dispatched_events)
def on_log(evt):
print(evt)
cls.dispatched_logs.append(evt)
events.on(SmartContractEvent.RUNTIME_NOTIFY, on_notif)
events.on(SmartContractEvent.RUNTIME_LOG, on_log)
def test_ICOTemplate_1(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
# print(output.to_s())
tx, results, total_ops, engine = TestBuild(out, ['name', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetString(), TOKEN_NAME)
tx, results, total_ops, engine = TestBuild(out, ['symbol', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetString(), TOKEN_SYMBOL)
tx, results, total_ops, engine = TestBuild(out, ['decimals', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), TOKEN_DECIMALS)
tx, results, total_ops, engine = TestBuild(out, ['totalSupply', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
tx, results, total_ops, engine = TestBuild(out, ['nonexistentmethod', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetString(), 'unknown operation')
# deploy with wallet 2 should fail CheckWitness
tx, results, total_ops, engine = TestBuild(out, ['deploy', '[]'], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
tx, results, total_ops, engine = TestBuild(out, ['deploy', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
# second time, it should already be deployed and return false
tx, results, total_ops, engine = TestBuild(out, ['deploy', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# now total supply should be equal to the initial owner amount
tx, results, total_ops, engine = TestBuild(out, ['totalSupply', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), TOKEN_INITIAL_AMOUNT)
# now the owner should have a balance of the TOKEN_INITIAL_AMOUNT
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([bytearray(TOKEN_OWNER)])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), TOKEN_INITIAL_AMOUNT)
def test_ICOTemplate_2(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
# now transfer tokens to wallet 2
TestContract.dispatched_events = []
test_transfer_amount = 2400000001
tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([bytearray(TOKEN_OWNER), self.wallet_2_script_hash.Data, test_transfer_amount])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
self.assertEqual(len(TestContract.dispatched_events), 1)
evt = TestContract.dispatched_events[0]
self.assertIsInstance(evt, NotifyEvent)
self.assertEqual(evt.addr_from.Data, bytearray(TOKEN_OWNER))
self.assertEqual(evt.addr_to, self.wallet_2_script_hash)
self.assertEqual(evt.amount, test_transfer_amount)
# now get balance of wallet 2
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([self.wallet_2_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), test_transfer_amount)
# now the owner should have less
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([bytearray(TOKEN_OWNER)])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), TOKEN_INITIAL_AMOUNT - test_transfer_amount)
# now this transfer should fail
tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([bytearray(TOKEN_OWNER), self.wallet_2_script_hash.Data, TOKEN_INITIAL_AMOUNT])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# this transfer should fail because it is not signed by the 'from' address
tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([bytearray(TOKEN_OWNER), self.wallet_2_script_hash.Data, 10000])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# now this transfer should fail, this is from address with no tokens
tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 1000])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# get balance of bad data
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param(['abc'])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
# get balance no params
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
def test_ICOTemplate_3_KYC(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
print(output.to_s())
# now transfer tokens to wallet 2
TestContract.dispatched_events = []
# test mint tokens without being kyc verified
tx, results, total_ops, engine = TestBuild(out, ['mintTokens', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# Try to register as a non owner
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# Get status of non registered address
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_status', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
TestContract.dispatched_events = []
# register an address
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 1)
# it should dispatch an event
self.assertEqual(len(TestContract.dispatched_events), 1)
evt = TestContract.dispatched_events[0]
self.assertEqual(evt.event_payload.Value[0].Value, b'kyc_registration')
# register 2 addresses at once
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 2)
# now check reg status
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_status', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
def test_ICOTemplate_4_attachments(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
# test mint tokens without being kyc verified
tx, results, total_ops, engine = TestBuild(out, ['get_attachments', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
attachments = results[0].GetArray()
self.assertEqual(len(attachments), 4)
fn = FunctionCode(out, '0705', '05')
self.assertEqual(attachments[0].GetByteArray(), fn.ScriptHash().Data)
self.assertEqual(attachments[1].GetByteArray(), self.wallet_3_script_hash.Data)
self.assertEqual(attachments[2].GetBigInteger(), Fixed8.FromDecimal(10).value)
self.assertEqual(attachments[3].GetBigInteger(), 0)
tx, results, total_ops, engine = TestBuild(out, ['get_attachments', '[]'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
attachments = results[0].GetArray()
self.assertEqual(len(attachments), 4)
self.assertEqual(attachments[1].GetByteArray(), bytearray())
self.assertEqual(attachments[2].GetBigInteger(), 0)
self.assertEqual(attachments[3].GetBigInteger(), 0)
tx, results, total_ops, engine = TestBuild(out, ['get_attachments', '[]', '--attach-neo=3', '--attach-gas=3.12'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
attachments = results[0].GetArray()
self.assertEqual(len(attachments), 4)
self.assertEqual(attachments[1].GetByteArray(), self.wallet_1_script_hash.Data)
self.assertEqual(attachments[2].GetBigInteger(), Fixed8.FromDecimal(3).value)
self.assertEqual(attachments[3].GetBigInteger(), Fixed8.FromDecimal(3.12).value)
def test_ICOTemplate_5_mint(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
# register an address
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 1)
TestContract.dispatched_events = []
# test mint tokens, this should return true
tx, results, total_ops, engine = TestBuild(out, ['mintTokens', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
# it should dispatch an event
self.assertEqual(len(TestContract.dispatched_events), 1)
evt = TestContract.dispatched_events[0]
self.assertIsInstance(evt, NotifyEvent)
self.assertEqual(evt.amount, 10 * TOKENS_PER_NEO)
self.assertEqual(evt.addr_to, self.wallet_3_script_hash)
# test mint tokens again, this should be false since you can't do it twice
tx, results, total_ops, engine = TestBuild(out, ['mintTokens', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# now the minter should have a balance
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 10 * TOKENS_PER_NEO)
# now the total circulation should be bigger
tx, results, total_ops, engine = TestBuild(out, ['totalSupply', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), (10 * TOKENS_PER_NEO) + TOKEN_INITIAL_AMOUNT)
def test_ICOTemplate_6_approval(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
        # transfer_from, approve, allowance
tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
# try to transfer from
tx, results, total_ops, engine = TestBuild(out, ['transferFrom', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# try to approve from someone not yourself
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
# try to approve more than you have
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, TOKEN_INITIAL_AMOUNT])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
TestContract.dispatched_events = []
# approve should work
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 1234])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
# it should dispatch an event
self.assertEqual(len(TestContract.dispatched_events), 1)
evt = TestContract.dispatched_events[0]
self.assertIsInstance(evt, NotifyEvent)
self.assertEqual(evt.notify_type, b'approve')
self.assertEqual(evt.amount, 1234)
# check allowance
tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 1234)
# approve should not be additive, it should overwrite previous approvals
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 133234])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 133234)
# now you can transfer from
tx, results, total_ops, engine = TestBuild(out, ['transferFrom', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
        # now the receiver should have a balance
# it is equal to 10000 plus test_transfer_amount = 2400000001
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([self.wallet_2_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 10000 + 2400000001)
# now the allowance should be less
tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 133234 - 10000)
# try to transfer too much, even with approval
tx, results, total_ops, engine = TestBuild(out, ['transferFrom', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 14440000])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
        # can't approve negative amounts
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, -1000])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
def test_many_ops(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
        # call one of the template's additional operations
tx, results, total_ops, engine = TestBuild(out, ['another_op_5', bytearray()], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 6)
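# Illustrative helper (added for this write-up; not in the original file): every
# TestBuild call above repeats the same build-and-assert pattern, so it could be
# factored out as below. '0705' and '05' are the NEO contract parameter and
# return type codes used throughout this file (07 = String, 05 = ByteArray).
def invoke_single(testcase, out, operation, params, wallet):
    """Run one contract operation and return its single result item."""
    tx, results, total_ops, engine = TestBuild(
        out, [operation, params], wallet, '0705', '05')
    testcase.assertEqual(len(results), 1)
    return results[0]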
| 51.899457
| 202
| 0.673857
| 2,327
| 19,099
| 5.386334
| 0.103567
| 0.136429
| 0.076113
| 0.078746
| 0.806925
| 0.785862
| 0.770385
| 0.751795
| 0.748923
| 0.741184
| 0
| 0.041723
| 0.185559
| 19,099
| 367
| 203
| 52.040872
| 0.764063
| 0.084664
| 0
| 0.579167
| 0
| 0
| 0.075498
| 0.017383
| 0
| 0
| 0
| 0
| 0.4875
| 1
| 0.045833
| false
| 0
| 0.058333
| 0
| 0.116667
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be7fc184a7b92d4ec6db9908dc208989d6e4f546
| 23,144
|
py
|
Python
|
Mining_Projects/getAllProjects_Parallel.py
|
ai-se/heroes_compsci
|
613fd623a6da073b2c62c773ed902acb0c756809
|
[
"MIT"
] | null | null | null |
Mining_Projects/getAllProjects_Parallel.py
|
ai-se/heroes_compsci
|
613fd623a6da073b2c62c773ed902acb0c756809
|
[
"MIT"
] | 12
|
2019-12-17T04:04:19.000Z
|
2019-12-26T20:23:02.000Z
|
Mining_Projects/getAllProjects_Parallel.py
|
ai-se/heroes_compsci
|
613fd623a6da073b2c62c773ed902acb0c756809
|
[
"MIT"
] | 1
|
2020-03-12T22:19:48.000Z
|
2020-03-12T22:19:48.000Z
|
""" @Author Jchakra"""
""" This code is to download project information using GitHub API (Following Amrit's Hero paper criteria of how to find good projects) """
from multiprocessing import Process,Lock
import time
import json
import requests
## Downloading all the projects
def func1():
repo_result = []
    Token_list = ['**', '**', '**', '**', '**']  # GitHub API tokens (redacted in the source)
i = 0
api_url = 'https://api.github.com/'
while i < 10000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 1 ")
break
    ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file1.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 1 finished", len(repo_result))
def func2():
repo_result = []
    Token_list = ['**', '**', '**', '**', '**']  # GitHub API tokens (redacted in the source)
i = 10000
api_url = 'https://api.github.com/'
while i < 20000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 2 ")
break
    ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file2.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 2 finished", len(repo_result))
def func3():
repo_result = []
    Token_list = ['**', '**', '**', '**', '**']  # GitHub API tokens (redacted in the source)
i = 20000
api_url = 'https://api.github.com/'
while i < 30000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 3 ")
break
    ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file3.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 3 finished", len(repo_result))
def func4():
repo_result = []
    Token_list = ['**', '**', '**', '**', '**']  # GitHub API tokens (redacted in the source)
i = 30000
api_url = 'https://api.github.com/'
while i < 40000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 4 ")
break
    ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file4.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 4 finished", len(repo_result))
if __name__ == '__main__':
lock = Lock()
p1 = Process(target=func1)
p2 = Process(target=func2)
p3 = Process(target=func3)
p4 = Process(target=func4)
p1.start()
p2.start()
p3.start()
p4.start()
p1.join()
p2.join()
p3.join()
p4.join()
| 29.407878
| 169
| 0.527523
| 2,636
| 23,144
| 4.419196
| 0.055387
| 0.115375
| 0.061808
| 0.018886
| 0.959396
| 0.946347
| 0.946347
| 0.946347
| 0.936389
| 0.936389
| 0
| 0.021969
| 0.347045
| 23,144
| 787
| 170
| 29.407878
| 0.748875
| 0.061614
| 0
| 0.929293
| 0
| 0
| 0.098522
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006734
| false
| 0
| 0.006734
| 0
| 0.013468
| 0.020202
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe619b4bba8137e17d2356d7038bb205bbb3ddcb
| 8,074
|
py
|
Python
|
src/ralph/discovery/tests/plugins/samples/http_ibm_system_x.py
|
quamilek/ralph
|
bf7231ea096924332b874718b33cd1f43f9c783b
|
[
"Apache-2.0"
] | null | null | null |
src/ralph/discovery/tests/plugins/samples/http_ibm_system_x.py
|
quamilek/ralph
|
bf7231ea096924332b874718b33cd1f43f9c783b
|
[
"Apache-2.0"
] | null | null | null |
src/ralph/discovery/tests/plugins/samples/http_ibm_system_x.py
|
quamilek/ralph
|
bf7231ea096924332b874718b33cd1f43f9c783b
|
[
"Apache-2.0"
] | null | null | null |
macs_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetHostMacAddressesResponse</wsa:Action><wsa:RelatesTo>dt:1348742659504</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:111efb9a-f7d8-4977-8472-bcad40212a71</wsa:MessageID></s:Header><s:Body><GetHostMacAddressesResponse><HostMACaddress><HostMaddr><Description>Host Ethernet MAC Address 1</Description><Address>6E:F3:DD:E5:96:40</Address></HostMaddr><HostMaddr><Description>Host Ethernet MAC Address 2</Description><Address>6E:F3:DD:E5:96:42</Address></HostMaddr></HostMACaddress></GetHostMacAddressesResponse></s:Body></s:Envelope>
'''
memory_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetMemoryInfoResponse</wsa:Action><wsa:RelatesTo>dt:1348742659500</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:dc560696-2ba4-4917-b7e7-1aac1983b727</wsa:MessageID></s:Header><s:Body><GetMemoryInfoResponse><Memory><MemoryInfo><Description>DIMM 2</Description><PartNumber>HMT351R7BFR4A-H9</PartNumber><SerialNumber>33b8a62f</SerialNumber><ManufactureDate>4511</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo><MemoryInfo><Description>DIMM 3</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>b38aa385</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 6</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>a78aa385</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 9</Description><PartNumber>EBJ40RF4ECFA-DJ-F</PartNumber><SerialNumber>b524042b</SerialNumber><ManufactureDate>4711</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo><MemoryInfo><Description>DIMM 11</Description><PartNumber>EBJ40RF4ECFA-DJ-F</PartNumber><SerialNumber>ba24042b</SerialNumber><ManufactureDate>4711</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo><MemoryInfo><Description>DIMM 12</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>8e8aa385</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 15</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>7feda482</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 18</Description><PartNumber>EBJ40RF4ECFA-DJ-F</PartNumber><SerialNumber>d924042b</SerialNumber><ManufactureDate>4711</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo></Memory></GetMemoryInfoResponse></s:Body></s:Envelope>
'''
generic_data_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetVitalProductDataResponse</wsa:Action><wsa:RelatesTo>dt:1348742659499</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:e6829941-2510-4b3d-b9f3-61c7be372dfd</wsa:MessageID></s:Header><s:Body><GetVitalProductDataResponse><GetVitalProductDataResponse><MachineLevelVPD><ProductName>System x3550 M3</ProductName><MachineTypeAndModel>794452G</MachineTypeAndModel><SerialNumber>KD55ARA</SerialNumber><UUID>99A4E4A303023961B8E1561E33328996</UUID></MachineLevelVPD><ComponentLevelVPD><FRUNumber>59Y3915</FRUNumber><FRUName>DASD Backplane 1</FRUName><SerialNumber>Y010RW1AR1Y0</SerialNumber><MfgID>USIS</MfgID></ComponentLevelVPD><ComponentLevelVPD><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 1</FRUName><SerialNumber>K1411183222</SerialNumber><MfgID>ACBE</MfgID></ComponentLevelVPD><ComponentLevelVPD><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 2</FRUName><SerialNumber>K141115Y2BK</SerialNumber><MfgID>ACBE</MfgID></ComponentLevelVPD><ComponentActivityLog><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 1</FRUName><SerialNumber>K1411183222</SerialNumber><MfgID>ACBE</MfgID><Action>Added</Action><TimeStamp>11/25/2011:13:53:13</TimeStamp></ComponentActivityLog><ComponentActivityLog><FRUNumber>59Y3915</FRUNumber><FRUName>DASD Backplane 1</FRUName><SerialNumber>Y010RW1AR1Y0</SerialNumber><MfgID>USIS</MfgID><Action>Added</Action><TimeStamp>11/25/2011:13:53:13</TimeStamp></ComponentActivityLog><ComponentActivityLog><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 2</FRUName><SerialNumber>K141115Y2BK</SerialNumber><MfgID>ACBE</MfgID><Action>Added</Action><TimeStamp>01/27/2012:10:28:39</TimeStamp></ComponentActivityLog><VPD><FirmwareName>IMM</FirmwareName><VersionString>YUOOC7E</VersionString><ReleaseDate>09/30/2011</ReleaseDate></VPD><VPD><FirmwareName>UEFI</FirmwareName><VersionString>D6E154A</VersionString><ReleaseDate>09/23/2011</ReleaseDate></VPD><VPD><FirmwareName>DSA</FirmwareName><VersionString>DSYT89P </VersionString><ReleaseDate>10/28/2011</ReleaseDate></VPD></GetVitalProductDataResponse></GetVitalProductDataResponse></s:Body></s:Envelope>
'''
sn_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/iBMCControl/GetSPNameSettingsResponse</wsa:Action><wsa:RelatesTo>dt:1348742647137</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:d2ac4b59-9f60-456e-a182-6a077557e4c1</wsa:MessageID></s:Header><s:Body><GetSPNameSettingsResponse><SPName>SN# KD55ARA</SPName></GetSPNameSettingsResponse></s:Body></s:Envelope>
'''
processors_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetProcessorInfoResponse</wsa:Action><wsa:RelatesTo>dt:1348757382511</wsa:RelatesTo><wsa:From><wsa:Address>http://rack-605-12-mgmt.dc2/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:9e5ec08d-0fac-449a-80fa-37cc78290a21</wsa:MessageID></s:Header><s:Body><GetProcessorInfoResponse><Processor><ProcessorInfo><Description>Processor 1</Description><Speed>2666</Speed><Identifier>3030363735304141</Identifier><Type>Central</Type><Family>Intel Xeon</Family><Cores>8</Cores><Threads>1</Threads><Voltage>1.087000</Voltage><Datawidth>64</Datawidth></ProcessorInfo><ProcessorInfo><Description>Processor 2</Description><Speed>2666</Speed><Identifier>3030363735304141</Identifier><Type>Central</Type><Family>Intel Xeon</Family><Cores>8</Cores><Threads>1</Threads><Voltage>1.087000</Voltage><Datawidth>64</Datawidth></ProcessorInfo></Processor></GetProcessorInfoResponse></s:Body></s:Envelope>
'''
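# Hypothetical usage sketch (not in the original file): one way a test could
# pull fields out of these canned SOAP payloads with only the standard library.
# The element names mirror the responses above; namespaces are stripped for
# brevity and MAC addresses are matched by shape.
import re
import xml.etree.ElementTree as ET

MAC_RE = re.compile(r'^([0-9A-F]{2}:){5}[0-9A-F]{2}$', re.IGNORECASE)

def extract_macs(xml_text):
    """Return the host MAC addresses listed in a GetHostMacAddressesResponse."""
    root = ET.fromstring(xml_text)
    return [el.text for el in root.iter()
            if el.tag.split('}', 1)[-1] == 'Address'
            and el.text and MAC_RE.match(el.text)]

# extract_macs(macs_response) == ['6E:F3:DD:E5:96:40', '6E:F3:DD:E5:96:42']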
| 621.076923
| 2,572
| 0.792172
| 1,050
| 8,074
| 6.085714
| 0.198095
| 0.016432
| 0.042254
| 0.049296
| 0.746479
| 0.715493
| 0.647261
| 0.602973
| 0.592958
| 0.582316
| 0
| 0.100038
| 0.012014
| 8,074
| 12
| 2,573
| 672.833333
| 0.701015
| 0
| 1
| 0.5
| 0
| 0.5
| 0.983775
| 0.541244
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
feac612781029aac47e6d21c85d8519de53dcb55
| 7,188
|
py
|
Python
|
tests/test_installation.py
|
phdye/nimporter
|
64eccc74950811e03efdde50649e84ca1fe87ae4
|
[
"MIT"
] | null | null | null |
tests/test_installation.py
|
phdye/nimporter
|
64eccc74950811e03efdde50649e84ca1fe87ae4
|
[
"MIT"
] | null | null | null |
tests/test_installation.py
|
phdye/nimporter
|
64eccc74950811e03efdde50649e84ca1fe87ae4
|
[
"MIT"
] | null | null | null |
"""
Test to make sure that libraries built with Nimporter can be installed via Pip.
"""
import sys, os, subprocess, shutil, pkg_resources, json, warnings
from pathlib import Path
import pytest
import nimporter
PYTHON = 'python' if sys.platform == 'win32' else 'python3'
PIP = 'pip' if shutil.which('pip') else 'pip3'
@pytest.mark.integration_test
def test_ensure_nimporter_installed():
"Make sure that Nimporter is installed before running integration tests."
libs = {lib.key.lower() for lib in pkg_resources.working_set}
    assert 'nimporter' in libs, (
        f'Nimporter is not installed. Please install via: '
        f'`{PIP} install .` before running the integration tests.'
    )
@pytest.mark.integration_test
def test_create_sdist():
"Test the successful creation of a source distribution."
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py sdist'.split()).wait()
dist = Path('dist')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert egg.exists()
targets = list(dist.glob('project1*'))
assert len(targets) == 1
assert targets[0].exists()
# Make sure the appropriate compiler is being used
for extension in Path('nim-extensions').iterdir():
(nim_build_data_file,) = extension.glob('*json')
nim_build_data = json.loads(nim_build_data_file.read_text())
expected = nimporter.NimCompiler.get_compatible_compiler()
installed_ccs = nimporter.NimCompiler.get_installed_compilers()
if not expected:
warnings.warn(
f'No compatible C compiler installed: {installed_ccs}'
)
else:
cc_path = installed_ccs[expected]
actual = nim_build_data['linkcmd'].split()[0].strip()
if not actual.startswith(cc_path.stem):
warnings.warn(
f'Nim used a different C compiler than what Python '
f'expects. Python uses {cc_path.stem} and Nim used '
f'{actual}'
)
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(egg.absolute()))
@pytest.mark.integration_test
def test_create_bdist():
"Test the successful create of a wheel."
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py bdist_wheel'.split()).wait()
dist = Path('dist')
build = Path('build')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert build.exists()
assert egg.exists()
targets = list(Path('dist').glob('project1*.whl'))
assert len(targets) == 1
assert targets[0].exists()
# Make sure the appropriate compiler is being used
for extension in Path('nim-extensions').iterdir():
(nim_build_data_file,) = extension.glob('*json')
nim_build_data = json.loads(nim_build_data_file.read_text())
expected = nimporter.NimCompiler.get_compatible_compiler()
installed_ccs = nimporter.NimCompiler.get_installed_compilers()
if not expected:
warnings.warn(
f'No compatible C compiler installed: {installed_ccs}'
)
else:
cc_path = installed_ccs[expected]
actual = nim_build_data['linkcmd'].split()[0].strip()
if not actual.startswith(cc_path.stem):
warnings.warn(
f'Nim used a different C compiler than what Python '
f'expects. Python uses {cc_path.stem} and Nim used '
f'{actual}'
)
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(build.absolute()))
shutil.rmtree(str(egg.absolute()))
@pytest.mark.slow_integration_test
def test_install_sdist():
"Make sure that the project can be installed by Pip"
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py sdist'.split()).wait()
dist = Path('dist')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert egg.exists()
targets = list(dist.glob('project1*'))
assert len(targets) == 1
(target,) = targets
assert target.exists()
subprocess.Popen(f'{PIP} install {target}'.split()).wait()
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(egg.absolute()))
# Make sure that `tests/proj1` is not imported as a SimpleNamespace and that
# the installed library in `site-packages` is used.
with nimporter.cd('../..'):
try:
import proj1
assert proj1
import proj1.performance
assert proj1.performance
import proj1.lib1
assert proj1.lib1
assert proj1.foo
assert proj1.bar
assert proj1.baz
assert proj1.baz() == 1
except Exception as e:
warnings.warn(str(e))
# Cannot delete a DLL in use by another process on Windows
if sys.platform != 'win32':
subprocess.Popen(f'{PIP} uninstall project1 -y'.split()).wait()
@pytest.mark.slow_integration_test
def test_install_bdist():
"Make sure that the wheel can be installed by Pip"
with nimporter.cd('tests/proj1'):
subprocess.Popen(f'{PYTHON} setup.py bdist_wheel'.split()).wait()
dist = Path('dist')
build = Path('build')
egg = Path('project1.egg-info')
try:
assert dist.exists()
assert build.exists()
assert egg.exists()
targets = list(Path('dist').glob('project1*.whl'))
assert len(targets) == 1
wheel = targets[0]
assert wheel.exists()
subprocess.Popen(f'{PIP} install {wheel}'.split()).wait()
finally:
shutil.rmtree(str(dist.absolute()))
shutil.rmtree(str(build.absolute()))
shutil.rmtree(str(egg.absolute()))
# Make sure that `tests/proj1` is not imported as a SimpleNamespace and that
# the installed library in `site-packages` is used.
with nimporter.cd('../..'):
try:
import proj1
assert proj1
import proj1.performance
assert proj1.performance
import proj1.lib1
assert proj1.lib1
assert proj1.foo
assert proj1.bar
assert proj1.baz
assert proj1.baz() == 1
except Exception as e:
warnings.warn(str(e))
# Cannot delete a DLL in use by another process on Windows
if sys.platform != 'win32':
subprocess.Popen(f'{PIP} uninstall project1 -y'.split()).wait()
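# How these tests would typically be selected (illustrative; assumes the custom
# markers above are registered in the project's pytest configuration):
#
#   pytest -m integration_test tests/test_installation.py
#   pytest -m slow_integration_test tests/test_installation.py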
| 38.854054
| 80
| 0.564969
| 805
| 7,188
| 4.963975
| 0.191304
| 0.038539
| 0.037538
| 0.034535
| 0.832833
| 0.832833
| 0.808809
| 0.792292
| 0.768268
| 0.768268
| 0
| 0.012233
| 0.329021
| 7,188
| 184
| 81
| 39.065217
| 0.816297
| 0.11241
| 0
| 0.801282
| 0
| 0
| 0.1775
| 0
| 0
| 0
| 0
| 0
| 0.211538
| 1
| 0.032051
| false
| 0
| 0.153846
| 0
| 0.185897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2284f5a8afa9699354bd56f97faf33c044aeae81
| 160
|
py
|
Python
|
cnn/donas_utils/dataset/__init__.py
|
eric8607242/darts
|
34c79a0956039f56a6a87bfb7f4b1ae2af615bea
|
[
"Apache-2.0"
] | null | null | null |
cnn/donas_utils/dataset/__init__.py
|
eric8607242/darts
|
34c79a0956039f56a6a87bfb7f4b1ae2af615bea
|
[
"Apache-2.0"
] | null | null | null |
cnn/donas_utils/dataset/__init__.py
|
eric8607242/darts
|
34c79a0956039f56a6a87bfb7f4b1ae2af615bea
|
[
"Apache-2.0"
] | null | null | null |
from .dataset import get_cifar100, get_cifar10, get_imagenet_lmdb, get_imagenet
__all__ = ["get_cifar100", "get_cifar10", "get_imagenet_lmdb", "get_imagenet"]
| 40
| 79
| 0.8
| 22
| 160
| 5.181818
| 0.409091
| 0.385965
| 0.245614
| 0.368421
| 0.824561
| 0.824561
| 0.824561
| 0.824561
| 0.824561
| 0
| 0
| 0.068493
| 0.0875
| 160
| 3
| 80
| 53.333333
| 0.712329
| 0
| 0
| 0
| 0
| 0
| 0.325
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 11
|
22a8b0a10c5a619e3d02f83382579627b355c5a9
| 186
|
py
|
Python
|
.venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/__pyinstaller/__init__.py
|
RivtLib/replit01
|
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
|
[
"MIT"
] | 1
|
2020-08-07T16:09:57.000Z
|
2020-08-07T16:09:57.000Z
|
.venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/__pyinstaller/__init__.py
|
RivtLib/replit01
|
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
|
[
"MIT"
] | null | null | null |
.venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/__pyinstaller/__init__.py
|
RivtLib/replit01
|
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
|
[
"MIT"
] | null | null | null |
# For usage of lark with PyInstaller. See https://pyinstaller-sample-hook.readthedocs.io/en/latest/index.html
import os
def get_hook_dirs():
return [os.path.dirname(__file__)]
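# Illustrative note (not in the original file): PyInstaller discovers
# get_hook_dirs() through the 'pyinstaller40' entry point group, so the hooks
# shipped in this directory are picked up automatically when lark is installed;
# passing the directory by hand would look like:
#
#   pyinstaller --additional-hooks-dir=<path from get_hook_dirs()> your_app.py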
| 31
| 110
| 0.747312
| 28
| 186
| 4.75
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134409
| 186
| 6
| 111
| 31
| 0.826087
| 0.575269
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
22b050a05912835a15d1f775a59389484ca92826
| 142
|
py
|
Python
|
scripts/update_asp_l1.py
|
sot/mica
|
136a9b0d9521efda5208067b51cf0c8700b4def3
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/update_asp_l1.py
|
sot/mica
|
136a9b0d9521efda5208067b51cf0c8700b4def3
|
[
"BSD-3-Clause"
] | 150
|
2015-01-23T17:09:53.000Z
|
2022-01-10T00:50:54.000Z
|
scripts/update_asp_l1.py
|
sot/mica
|
136a9b0d9521efda5208067b51cf0c8700b4def3
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import mica.archive.asp_l1
mica.archive.asp_l1.main()
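The script above is a thin launcher: all logic lives in `mica.archive.asp_l1.main()`, and the file only dispatches to it. A self-contained sketch of the same pattern; the `main()` body is inlined and illustrative (in the real script it lives in the library), so its arguments are assumptions:

#!/usr/bin/env python
"""Thin launcher pattern: the script body only dispatches to a main()."""
import argparse


def main(argv=None):
    # Inlined here so the sketch runs standalone; a real launcher would
    # import this from the library package instead.
    parser = argparse.ArgumentParser(description='demo task')
    parser.add_argument('--dry-run', action='store_true')
    args = parser.parse_args(argv)
    print('dry run' if args.dry_run else 'running')


if __name__ == '__main__':
    main()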
| 20.285714 | 63 | 0.760563 | 25 | 142 | 4.24 | 0.8 | 0.207547 | 0.264151 | 0.301887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024194 | 0.126761 | 142 | 6 | 64 | 23.666667 | 0.830645 | 0.577465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
22f3df9c130fc202edc44714de04e929f4e7eab3 | 91,430 | py | Python | test/model/data/all_foreground_valid_data.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | ["BSD-3-Clause"] | 2 | 2021-03-17T11:25:46.000Z | 2021-11-18T04:20:54.000Z | test/model/data/all_foreground_valid_data.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | ["BSD-3-Clause"] | 2 | 2020-07-31T22:37:30.000Z | 2020-07-31T23:08:55.000Z | test/model/data/all_foreground_valid_data.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | ["BSD-3-Clause"] | 1 | 2020-02-04T15:39:06.000Z | 2020-02-04T15:39:06.000Z |
from __future__ import absolute_import, division, print_function
data = r"""cdials_array_family_flex_ext
shoebox
p1
(tRp2
(cscitbx_array_family_flex_ext
grid
p3
((I0
t(I8
tI01
tRp4
(I8
tbS'\x02\x01\x02\x08\x00\x03\\\x01\x03m\x01\x03\x04\x06\x03\x15\x06\x00\x02\x01\x02\x03\x02\x01\x02\x11\x02\x11\x02\x9c\x02\x06\x02\x80\x02\x06\x02\xe8\x02\x05\x02\x86\x02\x07\x02\xe0\x02\x03\x02\xa0\x02\x04\x02\x80\x02\x03\x02\x80\x02\x05\x02\xf4\x02\x06\x02\x88\x02\x06\x02\xe8\x02\x06\x02\xdc\x02\x06\x02\x85\x02\x08\x02\xc8\x02\x05\x02\x84\x02\x06\x02\xf0\x02\x04\x02\x90\x02\x06\x00\x02\x84\x02\x06\x02\x84\x02\x06\x02\xf0\x02\x06\x02\xf0\x02\x05\x02\x82\x02\x07\x02\xd8\x02\x05\x02\xd8\x02\x06\x02\x80\x02\x02\x02\xa0\x02\x04\x02\xa2\x02\x07\x02\xc0\x02\x04\x02\xe8\x02\x06\x02\xe0\x02\x03\x02\xa0\x02\x03\x02\x8c\x02\x06\x02\xac\x02\x06\x02\x9c\x02\x06\x02\xb8\x02\x06\x02\xc0\x02\x03\x02\xb4\x02\x06\x02\xc8\x02\x06\x02\xe0\x02\x03\x02\x90\x02\x04\x02\x88\x02\x06\x02\xc8\x02\x06\x02\xba\x02\x07\x82\xc0\x02\x02\x02\xb6\x02\x07\x02\x80\x02\x06\x02\x80\x02\x05\x02\xa0\x02\x04\x02\xf0\x02\x06\x02\xfc\x02\x06\x02\xc8\x02\x05\x02\xf4\x02\x06\x02\xb6\x02\x07\x02\x80\x02\x06\x02\x80\x02\x03\x02\xd8\x02\x05\x02\xf0\x02\x06\x02\x88\x02\x05\x02\xec\x02\x06\x00\x02\xc0\x02\x03\x02\xc0\x02\x04\x02\xe0\x02\x03\x02\xe0\x02\x04\x02\xd4\x02\x06\x02\xa2\x02\x07\x02\xa0\x02\x03\x02\xfc\x02\x07\x02\xc0\x02\x03\x02\x9c\x02\x07\x02\xe0\x02\x05\x02\xe0\x02\x05\x02\x8c\x02\x06\x02\xe0\x02\x03\x02\x80\x02\x07\x02\xe0\x02\x06\x02\xa4\x02\x06\x02\xf8\x02\x06\x02\xb8\x02\x05\x02\xee\x02\x07\x02\xe0\x02\x06\x02\xc4\x02\x06\x02\xc0\x02\x05\x02\xd0\x02\x06\x02\xc2\x02\x07\x02\xa0\x02\x03\x02\x90\x02\x05\x02\x9a\x02\x07\x02\xd0\x02\x05\x02\xd8\x02\x05\x02\x80\x02\x06\x02\xac\x02\x06\x02\x88\x02\x07\x02\xb0\x02\x04\x02\xa6\x02\x07\x02\xa0\x02\x05\x02\xa0\x02\x04\x02\x92\x02\x07\x02\xe2\x02\x07\x02\x94\x02\x06\x02\x90\x02\x04\x02\xc0\x02\x04\x02\x98\x02\x05\x02\xd4\x02\x06\x02\xb8\x02\x05\x02\xd0\x02\x05\x02\x90\x02\x06\x02\xd4\x02\x06\x02\xdc\x02\x06\x02\x90\x02\x04\x02\x90\x02\x06\x02\xa4\x02\x06\x02\xa0\x02\x07\x02\xe8\x02\x07\x02\xe0\x02\x06\x02\x96\x02\x07\x02\x98\x02\x06\x02\xd4\x02\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x03\x02\x01\x02\x11\x02\x11\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x03\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x02\x02\x02\x02\x02\x02\x02\x02
\x02\x02\x02\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x04\x02\x04\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x03\x02\x01\x02\x11\x02\x11\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\
x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x04\x98UU\x02\x06\x00\x02\xb5\x02\xc6\x03\xab\x05\x03\xbc\x05\x00\x02\x01\x02\x03\x02\x01\x02\x11\x02\x11\x02\xc0\x02\x02\x02\xa0\x02\x05\x82\x80\x02\x01\x02\xd0\x02\x05\x02\xa0\x02\x06\x02\x84\x02\x06\x02\xd0\x02\x06\x02\xa0\x02\x03\x02\x80\x02\x01\x02\xa0\x02\x03\x02\x80\x02\x01\x02\xf0\x02\x05\x02\xa8\x02\x06\x02\xd4\x02\x06\x02\xbc\x02\x06\x02\x8a\x02\x07\x02\xe4\x02\x06\x00\x82\x80\x02\x03\x02\xc0\x02\x02\x02\x90\x02\x04\x02
\xaa\x02\x07\x02\xc0\x02\x05\x02\xa0\x02\x06\x02\xf0\x02\x04\x02\xe0\x02\x03\x82\x80\x02\x01\x02\xa0\x02\x03\x02\xe0\x02\x04\x02\xd8\x02\x05\x02\xc4\x02\x06\x83\xc3P\x02\x11\x02\x80\x02\x04\x02\x92\x02\x07\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x02\xc0\x02\x03\x82\x80\x02\x02\x02\xf4\x02\x06\x02\xa4\x02\x06\x02\xf0\x02\x04\x02\x80\x02\x06\x02\x80\x02\x04\x02\xe6\x02\x07\x02\x94\x02\x07\x02\x98\x02\x06\x02\xb0\x02\x04\x02\xa8\x02\x07\x02\x98\x02\x06\x02\xa0\x02\x05\x02\xa4\x02\x07\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x02\xc0\x02\x02\x00\x02\xd0\x02\x04\x02\xf0\x02\x05\x02\x80\x02\x02\x02\xf8\x02\x05\x02\x94\x02\x06\x02\x96\x02\x07\x02\x80\x02\x01\x00\x02\xc0\x02\x04\x02\xc0\x02\x02\x02\xf0\x02\x06\x02\x80\x02\x06\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x82\xe0\x02\x03\x82\x80\x02\x01\x02\x80\x02\x02\x02\xc0\x02\x02\x02\xdc\x02\x06\x02\xf8\x02\x06\x02\xb8\x02\x05\x02\xa8\x02\x05\x02\x80\x02\x02\x02\x80\x02\x01\x82\xc0\x02\x02\x82\x80\x02\x02\x02\xb0\x02\x04\x02\xda\x02\x07\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x82\x80\x02\x02\x82\x80\x02\x01\x02\xd0\x02\x05\x02\x80\x02\x03\x02\x88\x02\x06\x02\x80\x02\x02\x02\x80\x02\x01\x82\x80\x02\x01\x00\x02\x88\x02\x05\x02\xf0\x02\x06\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x00\x82\xc0\x02\x02\x82\xc0\x02\x03\x02\xe8\x02\x05\x02\xa8\x02\x07\x02\x96\x02\x07\x02\x8e\x02\x07\x02\xbc\x02\x07\x02\xa8\x02\x05\x02\xb0\x02\x04\x82\x80\x02\x03\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x02\xc0\x02\x03\x02\xd0\x02\x05\x02\x80\x02\x07\x02\xc0\x02\x03\x02\xc4\x02\x07\x02\xc8\x02\x05\x02\xdc\x02\x06\x02\xc8\x02\x06\x02\xc0\x02\x02\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x02\xa0\x02\x03\x02\xf0\x02\x05\x02\xe8\x02\x05\x02\xa8\x02\x05\x02\xe8\x02\x05\x02\xa8\x02\x05\x02\xe0\x02\x03\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x00\x02\x98\x02\x05\x82\x80\x02\x01\x02\x80\x02\x02\x02\xb0\x02\x05\x02\x90\x02\x04\x02\xc0\x02\x04\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x82\xc0\x02\x02\x83\xc3P\x02\x11\x02\x80\x02\x02\x82\x80\x02\x01\x02\xb8\x02\x05\x02\xc8\x02\x05\x02\xc8\x02\x05\x02\x84\x02\x06\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x82\x80\x02\x02\x02\xc0\x02\x02\x02\xe0\x02\x05\x02\x88\x02\x06\x02\xd0\x02\x06\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x02\xa0\x02\x03\x02\xc0\x02\x02\x02\x9e\x02\x07\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x02\xe0\x02\x03\x83\xc3P\x02\x11\x02\xa0\x02\x04\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02
\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x82\x80\x02\x02\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x83\xc3P\x02\x11\x02\x03\x02\x01\x02\x11\x02\x11\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x02\x02\x13\x02\x13\x02\x02\x02\x02\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x03\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x02\x02\x02\x02\x02\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x02\x02\x02\x02\x02\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x03\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x04\x02\x04\x02\x04\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x04\x02\x04\x02\x04\x02\x04\x02\x05\x02\x04\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x04\x02\x04\x02\x04\x02\x04\x02\x02\x02\x02\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x13\x02\x13\x02\x13\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x13\x02\x02\x02\x13\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x13\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x03\x02\x01\x02\x11\x02\x11\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05
\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x0
4\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x04\xbc\xf2\xfc\x02\x05\x00\x03\x1f\x02\x03-\x02\x03\r\x06\x03\x1d\x06\x00\x02\x01\x02\x03\x02\x01\x02\x10\x02\x0e\x02\xc0\x02\x05\x02\xa0\x02\x07\x02\xb8\x02\x05\x02\xaa\x02\x07\x02\x9c\x02\x06\x02\xf8\x02\x06\x02\xd0\x02\x05\x02\x90\x02\x06\x02\x90\x02\x05\x02\xf0\x02\x05\x02\xc0\x02\x02\x02\xdc\x02\x06\x02\xd0\x02\x06\x02\xe8\x02\x06\x02\x80\x02\x05\x02\x90\x02\x06\x02\x9c\x02\x07\x02\x82\x02\x07\x02\x90\x02\x06\x02\xcc\x02\x06\x02\xd0\x02\x06\x02\x8a\x02\x07\x02\xe0\x02\x05\x02\x98\x02\x06\x02\xa0\x02\x06\x02\xd0\x02\x04\x82\xa0\x02\x03\x02\xc8\x0
2\x05\x02\xe0\x02\x05\x02\xc0\x02\x02\x02\xa8\x02\x05\x02\xb4\x02\x06\x02\xd8\x02\x07\x02\x86\x02\x07\x02\xb2\x02\x07\x02\x9c\x02\x06\x02\xf8\x02\x05\x02\xe4\x02\x06\x02\xd8\x02\x06\x02\x8a\x02\x07\x02\xf8\x02\x05\x02\x94\x02\x06\x02\x90\x02\x07\x02\xc8\x02\x05\x02\xf0\x02\x05\x02\xd0\x02\x04\x02\x8c\x02\x07\x02\x80\x02\x07\x02\x80\x02\x03\x02\xd8\x02\x05\x02\xe8\x02\x05\x02\x90\x02\x07\x02\x8c\x02\x07\x02\xe0\x02\x03\x02\x9c\x02\x06\x02\xdc\x02\x06\x02\x94\x02\x06\x02\x90\x02\x04\x02\x98\x02\x06\x02\xb8\x02\x05\x02\xf8\x02\x06\x02\xbc\x02\x06\x02\x80\x02\x04\x02\xc0\x02\x06\x02\xe4\x02\x06\x02\x90\x02\x06\x02\x80\x02\x05\x02\xec\x02\x06\x02\x8a\x02\x07\x02\x94\x02\x07\x02\x80\x02\x05\x02\xe0\x02\x04\x02\xb2\x02\x07\x02\x80\x02\x02\x82\x80\x02\x02\x02\xd0\x02\x04\x02\x80\x02\x04\x02\xd0\x02\x05\x02\xf0\x02\x05\x02\x80\x02\x04\x02\xb2\x02\x07\x02\xb0\x02\x06\x02\xf0\x02\x04\x02\x80\x02\x05\x02\x80\x02\x02\x02\x80\x02\x02\x02\xde\x02\x07\x02\xbc\x02\x06\x02\x8e\x02\x07\x02\xe0\x02\x06\x02\xc0\x02\x04\x02\xf0\x02\x04\x02\xe8\x02\x05\x02\xa0\x02\x03\x02\x8a\x02\x07\x02\xc0\x02\x05\x02\xec\x02\x06\x02\x9c\x02\x06\x02\xd0\x02\x05\x02\xb4\x02\x07\x02\x8e\x02\x07\x02\xca\x02\x07\x02\x86\x02\x07\x02\x80\x02\x02\x02\xa4\x02\x06\x02\x80\x02\x02\x02\xe8\x02\x05\x02\xa6\x02\x07\x02\x80\x02\x06\x02\xd8\x02\x06\x02\x9c\x02\x07\x02\x88\x02\x07\x02\xb8\x02\x05\x02\xf4\x02\x06\x02\xa4\x02\x06\x02\xcc\x02\x06\x02\xd0\x02\x04\x02\x88\x02\x07\x02\xbc\x02\x06\x02\xa0\x02\x06\x02\x84\x02\x06\x02\xcc\x02\x06\x02\xc0\x02\x06\x02\xc6\x02\x07\x02\xd4\x02\x06\x02\xec\x02\x06\x02\xa8\x02\x07\x02\x8a\x02\x08\x02\xf0\x02\x07\x02\x98\x02\x06\x02\x80\x02\x08\x02\xf8\x02\x07\x02\x8b\x02\x08\x02\x94\x02\x06\x02\xae\x02\x07\x02\xa8\x02\x05\x02\xbe\x02\x07\x02\x8a\x02\x08\x02\x91\x02\x08\x02\xfc\x02\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xa0\x02\x07\x02\xbb\x02\x08\x02\xfe\x02\x07\x02\x81\x02\x08\x02\xa0\x02\x06\x02\x8c\x02\x07\x02\xf0\x02\x06\x02\xfc\x02\x06\x02\xce\x02\x07\x02\x84\x02\x07\x02\xb0\x02\x08\x02\x8a\x02\x08\x02\x8c\x02\x07\x02\x96\x02\x07\x02\x90\x02\x06\x02\xa6\x02\x07\x02\x80\x02\x06\x02\xd0\x02\x06\x82\x80\x02\x01\x02\xd8\x02\x06\x82\x80\x02\x03\x02\xc8\x02\x05\x02\xae\x02\x07\x02\xe0\x02\x06\x02\x82\x02\x07\x02\xfc\x02\x06\x02\xf8\x02\x06\x02\xb4\x02\x06\x02\x98\x02\x06\x02\xb0\x02\x06\x02\xd6\x02\x07\x02\x80\x02\x06\x02\xb8\x02\x05\x02\xde\x02\x07\x02\xae\x02\x07\x02\x90\x02\x06\x02\xb0\x02\x04\x02\xa0\x02\x05\x02\x9c\x02\x06\x02\x94\x02\x06\x02\x80\x02\x05\x02\xa6\x02\x07\x02\x03\x02\x01\x02\x10\x02\x0e\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x
13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x02\x02\x02\x02\x02\x02\x02\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x04\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x04\x02\x04\x02\x04\x02\x04\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x03\x02\x01\x02\x10\x02\x0e\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb
5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x04\xb5Eu\x02\x06\x00\x03\x90\x01\x03\xa0\x01\x03\xb6\x05\x03\xc6\x05\x00\x02\x01\x02\x03\x02\x01\x02\x10\x02\x10\x02\x94\x02\x07\x02\xb4\x02\x06\x83\xc3P\x02\x11\x02\xa0\x02\x06\x02\xac\x02\x06\x02\x84\x02\x07\x02\xa2\x02\x07\x02\x94\x02\x06\x02\x9c\x02\x07\x02\xe8\x02\x05\x02\xb8\x02\x05\x02\xd0\x02\x06\x02\x94\x02\x06\x02\xec\x02\x06\x02\xd0\x02\x04\x02\x98\x02\x07\x02\xac\x02\x06\x02\xac\x02\x07\x02\xec\x02\x06\x02\xb0\x02\x06\x02\xa6\x02\x07\x02\xa8\x02\x06\x02\xf0\x02\x04\x02\x80\x02\x06\x02\x80\x02\x03\x02\x94\x02\x06\x02\xdc\x02\x06\x02\xe4\x02\x06\x02\xc0\x02\x05\x02\xa4\x02\x06\x02\xc0\x02\x05\x02\x98\x02\x06\x02\xa0\x02\x04\x02\x9c\x02\x06\x02\x9e\x02\x07\x02\xd8\x02\x06\x02\x85\x02\x08\x02\xa4\x02\x06\x02\xf4\x02\x06\x02\x88\x02\x06\x02\xc0\x02\x03\x02\xe8\x02\x05\x02\xa2\x02\x07\x02\xe8\x02\x05\x02\x88\x02\x07\x02\xa0\x02\x07\x02\xc4\x02\x06\x02\xb8\x02\x06\x02\x94\x02\x06\x02\xc8\x02\x06\x02\xf8\x02\x05\x02\xe0\x02\x07\x02\x80\x02\x03\x02\xc0\x02\x03\x02\x90\x02\x05\x02\xb0\x02\x04\x82\x80\x02\x01\x02\x80\x02\x02\x02\x98\x02\x07\x02\xa0\x02\x07\x02\xec\x02\x06\x02\x80\x02\x04\x02\xd4\x02\x06\x02\xac\x02\x07\x02\xc0\x02\x02\x02\xf0\x02\x06\x02\xc0\x02\x05\x02\xe0\x02\x05\x02\xd8\x02\x05\x02\xe0\x02\x05\x02\xbc\x02\x06\x02\xb8\x02\x05\x02\xf0\x02\x06\x02\x84\x02\x06\x02\xc8\x02\x05\x02\x8e\x02\x07\x02\x8c\x02\x07\x02\x82\x02\x07\x02\xe8\x02\x05\x02\x88\x02\x06\x02\x94\x02\x06\x02\x98\x02\x06\x02\x80\x02\x06\x02\xe0\x02\x03\x02\x90\x02\x05\x02\xb0\x02\x05\x02\x8d\x02\x08\x02\xa4\x02\x06\x02\xc0\x02\x07\x02\x94\x02\x07\x02\xb0\x02\x04\x02\xa8\x02\x07\x02\xb0\x02\x04\x02\xac\x02\x06\x02\x98\x02\x05\x02\xdc\x02\x06\x02\x98\x02\x06\x02\xf0\x02\x06\x02\x98\x02\x06\x02\xcc\x02\x06\x02\xbc\x02\x06\x02\xc8\x02\x07\x02\xc0\x02\x07\x02\x9c\x02\x07\x02\xc0\x02\x02\x02\xc8\x02\x07\x02\x80\x02\x06\x02\xa0\x02\x06\x02\xf0\x02\x05\x02\x98\x02\x05\x82\x80\x02\x01\x02\xd0\x02\x06\x02\x86\x02\x07\x02\x90\x02\x05\x02\xae\x02\x07\x02\xa4\x02\x07\x02\xbc\x02\x07\x02\x94\x02\x07\x02\x82\x02\x07\x02\x80\x02\x01\x02\xfc\x02\x06\x02\xd0\x02\x04\x02\xe0\x02\x05\x02\x84\x02\x08\x02\xd0\x02\x04\x02\x86\x02\x07\x02\x80\x02\x03\x02\xe0\x0
2\x07\x02\xc0\x02\x07\x02\x80\x02\x02\x02\xf4\x02\x06\x02\xc0\x02\x02\x02\xc0\x02\x05\x02\x82\x02\x08\x02\xd0\x02\x04\x83\xc3P\x02\x11\x02\xd6\x02\x07\x02\x90\x02\x08\x02\xc8\x02\x06\x02\xb4\x02\x07\x02\xf0\x02\x05\x02\xd4\x02\x07\x02\xf8\x02\x05\x02\x80\x02\x03\x02\xe8\x02\x06\x02\xc0\x02\x02\x02\xa0\x02\x05\x02\xf0\x02\x06\x02\xe8\x02\x05\x00\x02\xf0\x02\x04\x02\xd4\x02\x06\x02\xaa\x02\x07\x02\xf8\x02\x05\x02\xc8\x02\x05\x02\x8d\x02\x08\x02\xa4\x02\x06\x02\xf8\x02\x05\x02\xd0\x02\x06\x02\x86\x02\x07\x02\xe0\x02\x04\x02\xb4\x02\x07\x02\x80\x02\x07\x02\x84\x02\x07\x02\xe8\x02\x05\x02\xe0\x02\x04\x02\xdc\x02\x06\x02\xb6\x02\t\x02\xe8\x02\x07\x02\x80\x02\x02\x02\xb4\x02\x06\x02\x9a\x02\x07\x83\xc3P\x02\x11\x02\xb0\x02\x04\x02\x80\x02\x06\x02\xe0\x02\x03\x02\x80\x02\x01\x02\xd0\x02\x05\x02\xb0\x02\x05\x02\xa8\x02\x06\x02\xba\x02\x07\x02\x84\x02\x06\x02\x9c\x02\x06\x02\xb5\x02\x08\x02\xd0\x02\x06\x02\x9c\x02\x06\x02\xa0\x02\x06\x02\xf0\x02\x05\x02\xe0\x02\x05\x02\x80\x02\x03\x02\xb0\x02\x04\x02\xf2\x02\x07\x02\x80\x02\x01\x02\x9c\x02\x06\x02\xe8\x02\x05\x02\x80\x02\x01\x02\x82\x02\x07\x02\xa8\x02\x06\x02\xfe\x02\x07\x02\xd0\x02\x05\x02\xc0\x02\x02\x02\xd0\x02\x04\x02\x90\x02\x05\x02\xd0\x02\x05\x02\xd0\x02\x06\x02\xd8\x02\x06\x02\x80\x02\x03\x02\xd0\x02\x05\x02\xb8\x02\x05\x02\xf0\x02\x05\x02\x8c\x02\x07\x02\xe8\x02\x05\x02\xe6\x02\x07\x02\xf0\x02\x07\x02\xb0\x02\x05\x02\x96\x02\x07\x02\xb0\x02\x04\x02\xc4\x02\x06\x02\xa4\x02\x06\x00\x02\x86\x02\x07\x02\xe0\x02\x06\x02\x94\x02\x06\x02\xe8\x02\x05\x02\x80\x02\x04\x02\xe0\x02\x05\x00\x02\xb8\x02\x05\x02\xc0\x02\x06\x00\x02\x9c\x02\x06\x02\x80\x02\x06\x02\xd6\x02\x07\x02\xa4\x02\x06\x02\xec\x02\x07\x02\x84\x02\x07\x02\x92\x02\x07\x02\xb0\x02\x04\x02\xc8\x02\x06\x02\xa0\x02\x04\x02\xe4\x02\x06\x02\x98\x02\x05\x02\xd4\x02\x06\x02\x80\x02\x06\x02\xe0\x02\x03\x02\x80\x02\x05\x02\xdc\x02\x07\x02\xd8\x02\x05\x02\x84\x02\x07\x02\xba\x02\x07\x02\xd0\x02\x07\x02\xc8\x02\x05\x02\xc0\x02\x06\x02\x86\x02\x07\x02\x90\x02\x04\x02\xc0\x02\x03\x02\x03\x02\x01\x02\x10\x02\x10\x02\x13\x02\x13\x02\x02\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x04\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x02\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x03\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x
13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x03\x02\x01\x02\x10\x02\x10\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xf
b^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x04\xaf\xfb^\x02\x06\x00\x03\xbb\x01\x03\xcb\x01\x03I\x06\x03Z\x06\x00\x02\x01\x02\x03\x02\x01\x02\x11\x02\x10\x02\xe0\x02\x03\x02\xe8\x02\x05\x02\xb4\x02\x06\x02\xf8\x02\x05\x02\xd8\x02\x05\x02\x80\x02\x05\x02\xf0\x02\x05\x02\x84\x02\x06\x02\xe8\x02\x06\x02\xb0\x02\x05\x02\xa4\x02\x06\x82\x80\x02\x02\x02\x8c\x02\x07\x82\xa0\x02\x03\x02\xa2\x02\x07\x02\xf0\x02\x04\x02\xe8\x02\x05\x02\x80\x02\x04\x02\x80\x02\x02\x02\x84\x02\x06\x02\xd8\x02\x05\x02\x80\x02\x03\x02\x80\x02\x05\x02\xe0\x02\x05\x02\x8c\x02\x07\x02\xa0\x02\x04\x02\xb2\x02\x07\x02\xa8\x02\x05\x02\xe6\x02\x07\x02\xe8\x02\x06\x02\x80\x02\x03\x02\xe8\x02\x05\x02\xf0\x02\x04\x02\x88\x02\x05\x02\xa8\x02\x06\x02\xb4\x02\x06\x02\xb2\x02\x07\x02\x88\x02\x05\x02\xa8\x02\x06\x02\x88\x02\x05\x02\xac\x02\x06\x02\xf0\x02\x06\x02\x94\x02\x06\x02\xb0\x02\x05\x02\x9c\x02\x07\x02\xc6\x02\x07\x02\xb6\x02\x07\x02\x9e\x02\x07\x02\x88\x02\x06\x02\xc8\x02\x05\x02\x88\x02\x06\x02\xe6\x02\x07\x02\xac\x02\x07\x02\xc4\x02\x07\x02\xb8\x02\x06\x02\x8c\x02\x06\x02\xe0\x02\x06\x02\x90\x02\x04\x02\xe0\x02\x06\x02\xf0\x02\x05\x02\x8c\x02\x06\x02\xfc\x02\x06\x02\x80\x02\x01\x02\xd4\x02\x06\x02\x94\x02\x06\x02\xaa\x02\x07\x02\x8c\x02\x06\x02\x88\x
02\x05\x02\x8a\x02\x07\x02\xd4\x02\x07\x02\xe0\x02\x04\x02\x88\x02\x06\x02\x8e\x02\x07\x02\xae\x02\x07\x02\x80\x02\x04\x02\x98\x02\x05\x02\xf4\x02\x06\x02\x84\x02\x07\x02\xe0\x02\x03\x02\xc4\x02\x06\x02\xa0\x02\x04\x02\x95\x02\x08\x02\xf8\x02\x05\x02\xa0\x02\x05\x02\x80\x02\x05\x02\x84\x02\x06\x02\xa8\x02\x05\x02\xe0\x02\x03\x02\xc0\x02\x04\x02\xe0\x02\x03\x02\x96\x02\x07\x02\x8f\x02\x08\x02\x90\x02\x05\x02\xe0\x02\x04\x02\xb0\x02\x06\x02\xf8\x02\x05\x02\xa0\x02\x03\x02\xe8\x02\x06\x02\x84\x02\x08\x02\xd0\x02\x06\x02\xc2\x02\x07\x02\xa4\x02\x07\x02\x96\x02\x07\x02\xf4\x02\x06\x02\xb4\x02\x07\x02\xbc\x02\x06\x02\xa0\x02\x03\x02\x98\x02\x07\x02\x98\x02\x05\x02\xc0\x02\x02\x02\xac\x02\x06\x02\xc0\x02\x05\x02\xc0\x02\x05\x02\xf0\x02\x05\x02\xf4\x02\x06\x02\xc8\x02\x05\x02\xd0\x02\x05\x02\x82\x02\x07\x02\xd0\x02\x04\x02\xec\x02\x06\x02\xc0\x02\x05\x02\x90\x02\x07\x02\x90\x02\x05\x02\xa2\x02\x07\x02\xe8\x02\x05\x02\xc0\x02\x03\x02\xd4\x02\x06\x02\x90\x02\x05\x02\x94\x02\x06\x82\xc0\x02\x02\x02\x80\x02\x02\x02\x80\x02\x02\x02\x8c\x02\x07\x02\x98\x02\x06\x02\xa0\x02\x06\x02\xe0\x02\x04\x02\xb4\x02\x06\x02\xf4\x02\x06\x02\x8c\x02\x07\x02\xd4\x02\x06\x02\xec\x02\x06\x02\xe0\x02\x05\x82\x80\x02\x02\x02\xc0\x02\x02\x02\xb8\x02\x05\x02\x80\x02\x04\x02\xd8\x02\x05\x02\xb4\x02\x06\x02\xc0\x02\x05\x02\x90\x02\x05\x02\xa8\x02\x07\x02\x84\x02\x06\x02\xc8\x02\x05\x02\x88\x02\x06\x02\x88\x02\x06\x02\x96\x02\x07\x02\xb8\x02\x06\x00\x02\x80\x02\x03\x02\xe0\x02\x03\x02\x88\x02\x07\x02\xb0\x02\x04\x02\xc0\x02\x04\x02\x80\x02\x07\x02\xbc\x02\x06\x02\xc4\x02\x07\x02\xa0\x02\x03\x02\xe0\x02\x04\x02\x90\x02\x06\x02\x9c\x02\x07\x02\xf8\x02\x05\x02\x98\x02\x07\x02\xc0\x02\x04\x02\x80\x02\x03\x02\xc0\x02\x05\x02\xc8\x02\x05\x02\x94\x02\x07\x02\xf8\x02\x05\x02\xf0\x02\x05\x02\xd4\x02\x06\x02\xc0\x02\x04\x02\xa0\x02\x04\x02\xac\x02\x07\x02\xc0\x02\x04\x02\x80\x02\x02\x02\xa0\x02\x03\x02\xd0\x02\x05\x02\x9c\x02\x07\x02\xd0\x02\x05\x02\xb2\x02\x07\x02\xc0\x02\x05\x02\xf0\x02\x04\x02\xc0\x02\x04\x02\xea\x02\x07\x02\xcc\x02\x06\x02\xac\x02\x06\x02\x90\x02\x04\x02\x88\x02\x06\x02\xa8\x02\x06\x02\xc8\x02\x05\x02\xf0\x02\x05\x02\x80\x02\x01\x02\xf8\x02\x05\x02\xe8\x02\x05\x02\x84\x02\x07\x02\x90\x02\x06\x02\x9c\x02\x07\x02\x94\x02\x06\x02\xf8\x02\x06\x02\xa0\x02\x04\x02\xa8\x02\x05\x02\xc8\x02\x05\x02\x88\x02\x06\x02\xf4\x02\x06\x02\xea\x02\x07\x02\x82\x02\x07\x02\x80\x02\x04\x02\xe4\x02\x06\x02\x94\x02\x06\x02\x80\x02\x02\x02\x80\x02\x01\x02\x84\x02\x06\x02\xc0\x02\x06\x02\xa8\x02\x05\x02\xf8\x02\x05\x02\xe0\x02\x04\x02\x80\x02\x03\x02\x80\x02\x06\x02\xf0\x02\x06\x02\x98\x02\x05\x02\xf8\x02\x06\x02\xf0\x02\x06\x02\xa0\x02\x03\x82\x80\x02\x01\x02\xd0\x02\x05\x02\xb8\x02\x06\x02\xa2\x02\x07\x02\x80\x02\x03\x02\xe8\x02\x05\x02\xe8\x02\x05\x02\x98\x02\x05\x02\xf0\x02\x05\x02\xd0\x02\x05\x02\x80\x02\x07\x02\x88\x02\x05\x02\x82\x02\x07\x02\xa2\x02\x07\x02\xa0\x02\x07\x02\xa8\x02\x07\x02\xd8\x02\x05\x02\xe2\x02\x07\x02\xd4\x02\x06\x02\xc8\x02\x05\x02\xcc\x02\x06\x02\xc0\x02\x04\x02\x98\x02\x07\x02\x84\x02\x06\x02\x8c\x02\x07\x02\x80\x02\x03\x02\x80\x02\x08\x02\xa8\x02\x06\x02\xd8\x02\x05\x02\x80\x02\x04\x02\xf0\x02\x06\x02\x8c\x02\x06\x02\x9a\x02\x07\x02\x8c\x02\x06\x02\xc4\x02\x06\x02\xb0\x02\x06\x02\x84\x02\x06\x02\xc0\x02\x05\x02\xc8\x02\x05\x02\x03\x02\x01\x02\x11\x02\x10\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\
x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x03\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x03\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x03\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x05\x02\x05\x02\x05\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x13\x02\x03\x02\x01\x02\x11\x02\x10\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x02\x06\x04\x9a\x1a|\x0
…(long run of escaped binary font-table bytes elided; no human-readable content survives)…
tb."""
| 5,378.235294 | 91,236 | 0.747971 | 22,307 | 91,430 | 3.065092 | 0.006545 | 0.204203 | 0.231407 | 0.232372 | 0.99083 | 0.979202 | 0.973732 | 0.838547 | 0.83005 | 0.735861 | 0 | 0.420099 | 0.000252 | 91,430 | 16 | 91,237 | 5,714.375 | 0.327907 | 0 | 0 | 0 | 0 | 0.066667 | 0.999114 | 0.998513 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.066667 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 |
fe1a8e41b9a6dd96ffc12066b0bee8e9c0b3b6b6 | 438 | py | Python | fontslice/__init__.py | Arahabica/font-subset-css | 393b9a452af49c2168c7a9f84983e4170937ea67 | ["MIT"] | null | null | null | fontslice/__init__.py | Arahabica/font-subset-css | 393b9a452af49c2168c7a9f84983e4170937ea67 | ["MIT"] | null | null | null | fontslice/__init__.py | Arahabica/font-subset-css | 393b9a452af49c2168c7a9f84983e4170937ea67 | ["MIT"] | null | null | null |
import sys
from .main import (
_chunk_list,
_get_unicode_range_hash,
convert_unicode_range,
get_120_unicode_ranges,
get_unicode_ranges_from_text,
generate_css,
main,
)
__all__ = [
"_chunk_list",
"_get_unicode_range_hash",
"convert_unicode_range",
"get_120_unicode_ranges",
"get_unicode_ranges_from_text",
"generate_css",
"main",
]
if __name__ == "__main__":
sys.exit(main())
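The module above only re-exports helpers from fontslice.main; the names suggest the pipeline: collect the codepoints used by a text, chunk them into buckets (the name get_120_unicode_ranges suggests 120 of them), and emit one @font-face rule per chunk with a unicode-range descriptor. A minimal self-contained sketch of that idea follows; the helper names and signatures here are hypothetical, not the package's actual API.

# Sketch only: assumes nothing about fontslice's real signatures.
def chunk_list(items, size):
    """Yield successive fixed-size chunks of a list."""
    for start in range(0, len(items), size):
        yield items[start:start + size]

def to_unicode_range(codepoints):
    """Format codepoints as a CSS unicode-range value, e.g. 'U+61,U+62'."""
    return ",".join("U+%X" % cp for cp in codepoints)

text = "abcdef"
codepoints = sorted({ord(ch) for ch in text})
for chunk in chunk_list(codepoints, 2):
    print("@font-face { unicode-range: %s; }" % to_unicode_range(chunk))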
| 17.52 | 35 | 0.687215 | 54 | 438 | 4.759259 | 0.37037 | 0.155642 | 0.093385 | 0.14786 | 0.817121 | 0.817121 | 0.817121 | 0.817121 | 0.817121 | 0.817121 | 0 | 0.017391 | 0.212329 | 438 | 24 | 36 | 18.25 | 0.727536 | 0 | 0 | 0 | 1 | 0 | 0.294521 | 0.214612 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
a3ac4915a74b531c1dc0b8afb60e2d05592076cd | 61,910 | py | Python | SysPy_ver/funcs/_var_declaration.py | evlog/SysPy | d1ee6e2ca60492d20339c0016a9c24d027170553 | ["CNRI-Python"] | 4 | 2017-12-28T14:00:16.000Z | 2021-01-21T08:53:14.000Z | SysPy_ver/funcs/_var_declaration.py | evlog/SysPy | d1ee6e2ca60492d20339c0016a9c24d027170553 | ["CNRI-Python"] | 1 | 2018-07-31T16:27:00.000Z | 2018-07-31T16:27:37.000Z | SysPy_ver/funcs/_var_declaration.py | evlog/SysPy | d1ee6e2ca60492d20339c0016a9c24d027170553 | ["CNRI-Python"] | 2 | 2015-10-12T09:13:13.000Z | 2020-01-06T12:22:55.000Z |
"""
*****************************************************************************
*
H E A D E R I N F O R M A T I O N *
*
*****************************************************************************
Project Name: SysPy (System Python)
http://cgi.di.uoa.gr/~evlog/syspy.html
File Name: _var_declaration.py
Created by: Evangelos Logaras
*****************************************************************************
*
C O P Y R I G H T N O T I C E *
*
*****************************************************************************
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2.1 of the License, a copy of which is available from
http://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA
*****************************************************************************
*
D E S C R I P T I O N *
*
*****************************************************************************
Variable declaration when a variable assignment is tracked.
"""
from pdb import *
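Concretely, the function below walks tracked Python assignments and concatenates VHDL variable declarations into assign_lines, reading signal records keyed by 'N' (name), 'L' (a scalar width or an index pair), 'V' (optional initial value), plus 'T' and 'D' type/direction tags. A hedged, stand-alone illustration of the target output format (not SysPy's API) with an assumed signal record:

# Illustration only: builds the same kind of VHDL line the code below
# concatenates into assign_lines, for an assumed signal record.
signal = {'N': 'cnt', 'L': (7, 0), 'V': '00000000'}
hi, lo = signal['L']
direction = "downto" if hi > lo else "to"
print('variable %s: std_logic_vector(%d %s %d) := "%s";'
      % (signal['N'], hi, direction, lo, signal['V']))
# -> variable cnt: std_logic_vector(7 downto 0) := "00000000";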
def var_declaration(assign_lines_count, token_struct, assign_lines, signals, process_vars):
"""
FUNCTION: var_declaration(a int, b(), c[], d[], e[])
a: assign lines counter integer
b: token's tuple
c: list containing the VHDL code
d: list containing the signal statements
e: list containing
Variable declaration when a variable assignment is tracked.
"""
# Python's variable declarations
#----------------------------------------------------------------------------------------------------------------------------------
count0 = 0
count1 = 0
process_vars_d = []
vars0 = []
var0 = ''
var1 = ''
#----------------------------------------------------------------------------------------------------------------------------------
print("process_vars:", process_vars)
# Erasing duplicated registrations in "process_vars[]"
#----------------------------------------------------------------------------------------------------------------------------------
for i in range(len(process_vars)):
vars0 = []
#flag_process_vars = 0
if ((process_vars[i][0] == "name_left") or (process_vars[i][0] == "name_right")):
var0 = process_vars[i][1].replace('=', '')
var0 = var0.replace('! ', '')
var0 = var0.replace('>', '')
var0 = var0.replace('<', '')
var0 = var0.replace(' ', '')
vars0.append(var0)
elif (process_vars[i][0] == "name_right_binary_slice"):
var0 = process_vars[i][1][0]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_binary_slice_var0"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_binary_slice_var1"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_binary_slice_var01"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_item"):
var0 = process_vars[i][1][0]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_item_var"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_item"):
var0 = process_vars[i][1][0]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_item_var0"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_item_var1"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_item_var01"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice"):
var0 = process_vars[i][1][0]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice_var0"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice_var1"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice_var2"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][3]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice_var01"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice_var02"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][1]
vars0.append(var0)
var0 = process_vars[i][1][3]
vars0.append(var0)
elif (process_vars[i][0] == "name_right_array_binary_slice_var12"):
var0 = process_vars[i][1][0]
vars0.append(var0)
var0 = process_vars[i][1][2]
vars0.append(var0)
var0 = process_vars[i][1][3]
vars0.append(var0)
flag_process_vars = 0
for n in range(0, len(vars0)):
for j in range(len(process_vars_d)):
if ((process_vars_d[j][0] == "name_left") or (process_vars_d[j][0] == "name_right")):
var1 = process_vars_d[j][1].replace('=', '')
var1 = var1.replace('! ', '')
var1 = var1.replace('>', '')
var1 = var1.replace('<', '')
var1 = var1.replace(' ', '')
elif (process_vars_d[j][0] == "name_right_binary_slice"):
var1 = process_vars_d[j][1][0]
elif (process_vars_d[j][0] == "name_right_binary_slice_var0"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_binary_slice_var1"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_binary_slice_var01"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_item"):
var1 = process_vars_d[j][1][0]
elif (process_vars_d[j][0] == "name_right_item_var"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_item"):
var1 = process_vars_d[j][1][0]
elif (process_vars_d[j][0] == "name_right_array_binary_item_var0"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_item_var1"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_item_var01"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_slice"):
var1 = process_vars_d[j][1][0]
elif (process_vars_d[j][0] == "name_right_array_binary_slice_var0"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_slice_var1"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_slice_var2"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_slice_var01"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_slice_var02"):
var1 = process_vars_d[j][1]
elif (process_vars_d[j][0] == "name_right_array_binary_slice_var12"):
var1 = process_vars_d[j][1]
if (vars0[n] == var1):
if (n == 0):
flag_process_vars += 1
if (n == 1):
flag_process_vars += 2
if (n == 2):
flag_process_vars += 4
if ((process_vars[i][0] == "name_left") or (process_vars[i][0] == "name_right")):
if (flag_process_vars == 0):
process_vars_d.append(process_vars[i])
elif (process_vars[i][0] == "name_right_binary_slice"):
if (flag_process_vars == 0):
process_vars_d.append(process_vars[i])
elif (process_vars[i][0] == "name_right_binary_slice_var0"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_binary_slice_var0", process_vars[i][1][0]])
process_vars_d.append(["name_right_binary_slice_var0", process_vars[i][1][1]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_binary_slice_var0", process_vars[i][1][1]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_binary_slice_var0", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_binary_slice_var1"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_binary_slice_var1", process_vars[i][1][0]])
process_vars_d.append(["name_right_binary_slice_var1", process_vars[i][1][2]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_binary_slice_var1", process_vars[i][1][2]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_binary_slice_var1", process_vars[i][1][0]])
elif (flag_process_vars == 4):
pass
elif (process_vars[i][0] == "name_right_binary_slice_var01"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][1]])
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][1]])
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 3):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 4):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][1]])
elif (flag_process_vars == 5):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][1]])
elif (flag_process_vars == 6):
process_vars_d.append(["name_right_binary_slice_var01", process_vars[i][1][0]])
elif (flag_process_vars == 7):
pass
elif (process_vars[i][0] == "name_right_item"):
if (flag_process_vars == 0):
process_vars_d.append(process_vars[i])
elif (process_vars[i][0] == "name_right_item_var"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_item_var", process_vars[i][1][0]])
process_vars_d.append(["name_right_item_var", process_vars[i][1][1]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_item_var", process_vars[i][1][1]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_item_var", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_array_binary_item"):
if (flag_process_vars == 0):
process_vars_d.append(process_vars[i])
elif (process_vars[i][0] == "name_right_array_binary_item_var0"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_item_var0", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_item_var0", process_vars[i][1][1]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_item_var0", process_vars[i][1][1]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_item_var0", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_array_binary_item_var1"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_item_var1", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_item_var1", process_vars[i][1][2]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_item_var1", process_vars[i][1][2]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_item_var1", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_array_binary_item_var01"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][2]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][2]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][2]])
elif (flag_process_vars == 3):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][2]])
elif (flag_process_vars == 4):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][1]])
elif (flag_process_vars == 5):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][1]])
elif (flag_process_vars == 6):
process_vars_d.append(["name_right_array_binary_item_var01", process_vars[i][1][0]])
elif (flag_process_vars == 7):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice"):
if (flag_process_vars == 0):
process_vars_d.append(process_vars[i])
elif (process_vars[i][0] == "name_right_array_binary_slice_var0"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var0", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var0", process_vars[i][1][1]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var0", process_vars[i][1][1]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var0", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice_var1"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var1", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var1", process_vars[i][1][2]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var1", process_vars[i][1][2]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var1", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice_var2"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var2", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var2", process_vars[i][1][3]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var2", process_vars[i][1][3]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var2", process_vars[i][1][0]])
elif (flag_process_vars == 3):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice_var01"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 3):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][2]])
elif (flag_process_vars == 4):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][1]])
elif (flag_process_vars == 5):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][1]])
elif (flag_process_vars == 6):
process_vars_d.append(["name_right_array_binary_slice_var01", process_vars[i][1][0]])
elif (flag_process_vars == 7):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice_var02"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
elif (flag_process_vars == 3):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][3]])
elif (flag_process_vars == 4):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
elif (flag_process_vars == 5):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][1]])
elif (flag_process_vars == 6):
process_vars_d.append(["name_right_array_binary_slice_var02", process_vars[i][1][0]])
elif (flag_process_vars == 7):
pass
elif (process_vars[i][0] == "name_right_array_binary_slice_var12"):
if (flag_process_vars == 0):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
elif (flag_process_vars == 1):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
elif (flag_process_vars == 2):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
elif (flag_process_vars == 3):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][3]])
elif (flag_process_vars == 4):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
elif (flag_process_vars == 5):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][2]])
elif (flag_process_vars == 6):
process_vars_d.append(["name_right_array_binary_slice_var12", process_vars[i][1][0]])
elif (flag_process_vars == 7):
pass
process_vars = process_vars_d
#----------------------------------------------------------------------------------------------------------------------------------
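The pass above dedups process_vars: for each entry, vars0 collects up to three variable names, and flag_process_vars is a bitmask (bit 0 / 1 / 2 set when the first / second / third name has already been registered), so the elif ladder appends only the names not yet seen. The same idea in miniature, as a sketch rather than a drop-in replacement:

# Dedup-by-name sketch: keep each variable name once, first-seen order.
entries = [("name_left", "a ="), ("name_right", "a"), ("name_left", "b =")]
seen, deduped = set(), []
for kind, raw in entries:
    name = raw.replace("=", "").replace(" ", "")
    if name not in seen:
        seen.add(name)
        deduped.append((kind, name))
print(deduped)  # [('name_left', 'a'), ('name_left', 'b')]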
j = assign_lines_count
for m in range(0, len(process_vars)):
if ((process_vars[m][0] == "name_left") or (process_vars[m][0] == "name_right")):
t = process_vars[m][1].replace('=', '')
t = t.replace(' ', '')
elif (process_vars[m][0] == "name_right_binary_slice"):
t = process_vars[m][1][0]
elif (process_vars[m][0] == "name_right_binary_slice_var0"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_binary_slice_var1"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_binary_slice_var01"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_item"):
t = process_vars[m][1][0]
elif (process_vars[m][0] == "name_right_item_var"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_item"):
t = process_vars[m][1][0]
elif (process_vars[m][0] == "name_right_array_binary_item_var0"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_item_var1"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_item_var01"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice"):
t = process_vars[m][1][0]
elif (process_vars[m][0] == "name_right_array_binary_slice_var0"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var1"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var2"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var01"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var02"):
t = process_vars[m][1]
elif (process_vars[m][0] == "name_right_array_binary_slice_var12"):
t = process_vars[m][1]
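# Resolve the extracted name against the known signals; only entries with
# 'D' == 'v' (declared as process variables) get declarations emitted here.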
for i in range (0, len(signals)):
if (t == signals[i]['N']):
if (signals[i]['D'] == 'v'):
L = signals[i]['L'].__doc__
n = signals[i]['N'].__doc__
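# The .__doc__ lookups are a type-sniffing trick: int.__doc__, list.__doc__
# and str.__doc__ each begin with the type's own name, so tests such as
# L.find("int") == 0 act as isinstance checks (a Python 2 idiom used
# throughout this generator).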
if (m == 0):
sp = ''
while 1:
if (assign_lines[j][0] == "process_sens_list"):
assign_lines[j][0] = assign_lines[j][0] + "_var"
for k in range(0, assign_lines[j][4]):
sp = sp + ' '
assign_lines[j][1] = assign_lines[j][1].replace("begin", '')
assign_lines[j][1] = assign_lines[j][1] + "\n\n" + sp + "-- Variables"
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "-------------------------------------------------------------------"
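# Emit one declaration per matched name, dispatching on the signal's type
# tag: 'b' -> std_logic / std_logic_vector, "int" -> ranged integer,
# "arrb" / "arri" -> array types, 's' -> state enumeration.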
if (signals[i]['T'] == 'b'):
if (L.find("int") == 0):
if (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic;\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic := '" + signals[i]['V'] + "';\n"
elif (n.find("str") == 0):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic;\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic := '" + signals[i]['V'] + "';\n"
elif (L.find("list") == 0):
if (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
elif (signals[i].has_key('V') == True):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
elif (n.find("str") == 0):
if (signals[i].has_key('V') == False):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
elif (signals[i].has_key('V') == True):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
break
elif (signals[i]['T'] == "int"):
if (n.find("str") == 0):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
elif (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
break
elif (signals[i]['T'] == "arrb"):
if (n.find("str") == 0):
if (signals[i]['L'][1][0] > signals[i]['L'][1][1]):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type type" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals[i]['L'][1][0]) + " downto " + str(signals[i]['L'][1][1]) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type type" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals[i]['L'][1][0]) + " to " + str(signals[i]['L'][1][1]) + ");\n"
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ";\n"
elif (signals[i].has_key('V') == True):
v = signals[i]['V'].__doc__
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + " := \"" + signals[i]['V'] + "\";\n"
elif(v.find("list") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + " := ("
for k in range(0, (signals[i]['L'][0][1] + 1)):
if (k == signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\");\n"
elif (k != signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\", "
count0 = count0 + 1
break
elif (signals[i]['T'] == "arri"):
if (n.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type type" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of integer range " + str(signals[i]['L'][1][0]) + " to " + str(signals[i]['L'][1][1]) + ";\n"
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + ";\n"
elif (signals[i].has_key('V') == True):
v = signals[i]['V'].__doc__
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + " := " + str(signals[i]['V']) + ";\n"
elif(v.find("list") == 0):
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "variable " + signals[i]['N'] + ": " + "type" + str(count0) + " := ("
for k in range(0, (signals[i]['L'][0][1] + 1)):
if (k == signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + str(signals[i]['V'][k]) + ");\n"
elif (k != signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + str(signals[i]['V'][k]) + ", "
count0 = count0 + 1
break
elif (signals[i]['T'] == 's'):
v = signals[i]['V'].__doc__
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type state_type" + str(count1) + " is ("
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'] + ");\n"
elif (v.find("list") == 0):
for k in range(len(signals[i]['V'])):
if (k == (len(signals[i]['V']) - 1)):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ", "
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "signal " + signals[i]['N'] + ": state_type" + str(count1) + ";\n"
count1 = count1 + 1
break
elif (j == 0):
break
j = j - 1
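# For subsequent variables (m != 0) the header block already exists at
# assign_lines[j], so only the declaration text itself is appended.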
elif (m != 0):
if (signals[i]['T'] == 'b'):
if (L.find("int") == 0):
if (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic;\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic := '" + signals[i]['V'] + "';\n"
elif (n.find("str") == 0):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic;\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic := '" + signals[i]['V'] + "';\n"
elif (L.find("list") == 0):
if (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
elif (signals[i].has_key('V') == True):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
elif (n.find("str") == 0):
if (signals[i].has_key('V') == False):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ");\n"
elif (signals[i].has_key('V') == True):
if (signals[i]['L'][0] > signals[i]['L'][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " downto " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": std_logic_vector(" + str(int(signals[i]['L'][0])) + " to " + str(int(signals[i]['L'][1])) + ") := \"" + signals[i]['V'] + "\";\n"
elif (signals[i]['T'] == "int"):
if (n.find("str") == 0):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
elif (n.find("list") == 0):
for k in range(len(signals[i]['N'])):
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + ";\n"
elif (signals[i].has_key('V') == True):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'][k] + ": integer range " + str(signals[i]['L'][0]) + " to " + str(signals[i]['L'][1]) + " := " + str(signals[i]['V']) + ";\n"
elif (signals[i]['T'] == "arrb"):
if (n.find("str") == 0):
if (signals[i]['L'][1][0] > signals[i]['L'][1][1]):
assign_lines[j][1] = assign_lines[j][1] + sp + "type typev" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals[i]['L'][1][0]) + " downto " + str(signals[i]['L'][1][1]) + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + sp + "type typev" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of std_logic_vector(" + str(signals[i]['L'][1][0]) + " to " + str(signals[i]['L'][1][1]) + ");\n"
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ";\n"
elif (signals[i].has_key('V') == True):
v = signals[i]['V'].__doc__
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + " := \"" + signals[i]['V'] + "\";\n"
elif(v.find("list") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + " := ("
for k in range(0, (signals[i]['L'][0][1] + 1)):
if (k == signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\");\n"
elif (k != signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + "\"" + signals[i]['V'][k] + "\", "
count0 = count0 + 1
elif (signals[i]['T'] == "arri"):
if (n.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "type typev" + str(count0) + " is array (" + str(signals[i]['L'][0][0]) + " to " + str(signals[i]['L'][0][1]) + ") of integer range " + str(signals[i]['L'][1][0]) + " to " + str(signals[i]['L'][1][1]) + ";\n"
if (signals[i].has_key('V') == False):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + ";\n"
elif (signals[i].has_key('V') == True):
v = signals[i]['V'].__doc__
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + " := " + str(signals[i]['V']) + ";\n"
elif(v.find("list") == 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "variable " + signals[i]['N'] + ": " + "typev" + str(count0) + " := ("
for k in range(0, (signals[i]['L'][0][1] + 1)):
if (k == signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + str(signals[i]['V'][k]) + ");\n"
elif (k != signals[i]['L'][0][1]):
assign_lines[j][1] = assign_lines[j][1] + str(signals[i]['V'][k]) + ", "
count0 = count0 + 1
elif (signals[i]['T'] == 's'):
v = signals[i]['V'].__doc__
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "type state_typev" + str(count1) + " is ("
if (v.find("str") == 0):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'] + ");\n"
elif (v.find("list") == 0):
for k in range(len(signals[i]['V'])):
if (k == (len(signals[i]['V']) - 1)):
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ");\n"
else:
assign_lines[j][1] = assign_lines[j][1] + signals[i]['V'][k] + ", "
assign_lines[j][1] = assign_lines[j][1] + "\n" + sp + "signal " + signals[i]['N'] + ": state_typev" + str(count1) + ";\n"
count1 = count1 + 1
if (len(process_vars) > 0):
assign_lines[j][1] = assign_lines[j][1] + sp + "-------------------------------------------------------------------"
assign_lines[j][1] = assign_lines[j][1] + "\n\n" + sp + "begin\n\n"
| 85.866852
| 356
| 0.37351
| 6,064
| 61,910
| 3.53628
| 0.033476
| 0.223139
| 0.095691
| 0.08851
| 0.931962
| 0.926273
| 0.917413
| 0.910651
| 0.905055
| 0.901418
| 0
| 0.037814
| 0.471184
| 61,910
| 720
| 357
| 85.986111
| 0.617184
| 0.049992
| 0
| 0.7744
| 0
| 0
| 0.132626
| 0.084013
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0016
| false
| 0.0208
| 0.0016
| 0
| 0.0032
| 0.0016
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a3f5451025cc5163c68a3eea15dfa30712bf9362
| 17,929
|
py
|
Python
|
benchmark/my_argparser.py
|
victor-estrade/SystGradDescent
|
822e7094290301ec47a99433381a8d6406798aff
|
[
"MIT"
] | 2
|
2019-03-20T09:05:02.000Z
|
2019-03-20T15:23:44.000Z
|
benchmark/my_argparser.py
|
victor-estrade/SystGradDescent
|
822e7094290301ec47a99433381a8d6406798aff
|
[
"MIT"
] | null | null | null |
benchmark/my_argparser.py
|
victor-estrade/SystGradDescent
|
822e7094290301ec47a99433381a8d6406798aff
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
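# Argument-parser builders for the benchmark launchers. Each *_parse_args
# function returns the parsed namespace for one method (GB, REG, INFERNO,
# NET, TP, PIVOT, FF); all share the cross-validation, tolerance and
# run-control flags and differ only in model hyper-parameters.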
def parse_args_tolerance():
parser = argparse.ArgumentParser(description='just for tolerance')
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
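# parse_known_args (rather than parse_args) ignores the extra flags that
# the full launcher parsers define, so this helper can be called early on
# the same command line.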
args, _ = parser.parse_known_args()
return args.tolerance
def GB_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--n-estimators', help='number of estimators',
default=100, type=int)
parser.add_argument('--max-depth', help='maximum depth of trees',
default=3, type=int)
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-1, type=float)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
parser.add_argument('--skip-minuit', help='flag to skip minuit NLL minimization',
action='store_true')
args = parser.parse_args()
return args
def REG_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-4, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.5, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.9, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=20, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def INFERNO_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--temperature', help='control initial softmax steepness',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.5, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.9, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--n-bins', help='number of output bins',
default=10, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=20, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def NET_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def TP_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--trade-off', help='trade-off between classic loss and adversarial loss',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def PIVOT_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--trade-off', help='trade-off between classic loss and adversarial loss',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
parser.add_argument('--n-net-pre-training-steps', help='number of update steps for pretraining the classifier',
default=1000, type=int)
parser.add_argument('--n-adv-pre-training-steps', help='number of update steps for pretraining the adversarial',
default=1000, type=int)
parser.add_argument('--n-recovery-steps', help='number of update steps for adversarial recovery',
default=1, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def FF_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--feature-id', help='feature index to filter on',
default=0, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
parser.add_argument('--skip-minuit', help='flag to skip minuit NLL minimization',
action='store_true')
args = parser.parse_args()
return args
| 46.81201
| 116
| 0.593787
| 2,180
| 17,929
| 4.768349
| 0.077064
| 0.105628
| 0.199519
| 0.039827
| 0.945936
| 0.942953
| 0.942857
| 0.939875
| 0.931217
| 0.931217
| 0
| 0.017338
| 0.269731
| 17,929
| 382
| 117
| 46.934555
| 0.776598
| 0.012048
| 0
| 0.88968
| 0
| 0
| 0.329039
| 0.002938
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02847
| false
| 0
| 0.017794
| 0
| 0.074733
| 0.003559
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a3fc78d36ccfb5728f04880a3739b99e0d64d7a7
| 91,209
|
py
|
Python
|
angr/procedures/definitions/win32_wsmsvc.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/definitions/win32_wsmsvc.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/definitions/win32_wsmsvc.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("wsmsvc.dll")
prototypes = \
{
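# Maps each wsmsvc.dll export to its SimTypeFunction prototype (argument
# types, return type and argument names) so angr can model calls into the
# WinRM client library with the correct calling convention.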
#
'WSManInitialize': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_API", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["flags", "apiHandle"]),
#
'WSManDeinitialize': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_API", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["apiHandle", "flags"]),
#
'WSManGetErrorMessage': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_API", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["apiHandle", "flags", "languageCode", "errorCode", "messageLength", "message", "messageLengthUsed"]),
#
'WSManCreateSession': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_API", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"authenticationMechanism": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"userAccount": SimStruct({"username": SimTypePointer(SimTypeChar(label="Char"), offset=0), "password": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_USERNAME_PASSWORD_CREDS", pack=False, align=None), "certificateThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="<anon>", label="None")}, name="WSMAN_AUTHENTICATION_CREDENTIALS", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"accessType": SimTypeInt(signed=False, label="UInt32"), "authenticationCredentials": SimStruct({"authenticationMechanism": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"userAccount": SimStruct({"username": SimTypePointer(SimTypeChar(label="Char"), offset=0), "password": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_USERNAME_PASSWORD_CREDS", pack=False, align=None), "certificateThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="<anon>", label="None")}, name="WSMAN_AUTHENTICATION_CREDENTIALS", pack=False, align=None)}, name="WSMAN_PROXY_INFO", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["apiHandle", "connection", "flags", "serverAuthenticationCredentials", "proxyInfo", "session"]),
#
'WSManCloseSession': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["session", "flags"]),
#
'WSManSetSessionOption': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="WSManSessionOption"), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["session", "option", "data"]),
#
'WSManGetSessionOptionAsDword': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="WSManSessionOption"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["session", "option", "value"]),
#
'WSManGetSessionOptionAsString': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="WSManSessionOption"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["session", "option", "stringLength", "string", "stringLengthUsed"]),
#
'WSManCloseOperation': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["operationHandle", "flags"]),
#
'WSManCreateShell': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"__AnonymousBase_wsman_L665_C48": SimStruct({"inputStreamSet": SimTypePointer(SimStruct({"streamIDsCount": SimTypeInt(signed=False, label="UInt32"), "streamIDs": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_STREAM_ID_SET", pack=False, align=None), offset=0), "outputStreamSet": SimTypePointer(SimStruct({"streamIDsCount": SimTypeInt(signed=False, label="UInt32"), "streamIDs": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_STREAM_ID_SET", pack=False, align=None), offset=0), "idleTimeoutMs": SimTypeInt(signed=False, label="UInt32"), "workingDirectory": SimTypePointer(SimTypeChar(label="Char"), offset=0), "variableSet": SimTypePointer(SimStruct({"varsCount": SimTypeInt(signed=False, label="UInt32"), "vars": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ENVIRONMENT_VARIABLE", pack=False, align=None), offset=0)}, name="WSMAN_ENVIRONMENT_VARIABLE_SET", pack=False, align=None), offset=0)}, name="WSMAN_SHELL_STARTUP_INFO_V10", pack=False, align=None), "name": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SHELL_STARTUP_INFO_V11", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, 
align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["session", "flags", "resourceUri", "startupInfo", "options", "createXml", "async", "shell"]),
#
'WSManRunShellCommand': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"argsCount": SimTypeInt(signed=False, label="UInt32"), "args": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_COMMAND_ARG_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, 
label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "flags", "commandLine", "args", "options", "async", "command"]),
#
'WSManSignalShell': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), 
offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "command", "flags", "code", "async", "signalOperation"]),
#
'WSManReceiveShellOutput': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"streamIDsCount": SimTypeInt(signed=False, label="UInt32"), "streamIDs": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_STREAM_ID_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": 
SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "command", "flags", "desiredStreamSet", "async", "receiveOperation"]),
#
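# The comments below summarize each prototype per the Win32 wsman.h (WinRM Client Shell API) documentation.
# WSManSendShellInput: pipes an input stream to a running command, or to the shell itself.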
'WSManSendShellInput': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": 
SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "command", "flags", "streamId", "streamData", "endOfStream", "async", "sendOperation"]),
#
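# WSManCloseCommand: deletes a command object and frees the resources associated with it.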
'WSManCloseCommand': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), 
offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["commandHandle", "flags", "async"]),
#
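# WSManCloseShell: deletes a shell object and frees the resources associated with the shell.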
'WSManCloseShell': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), 
offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["shellHandle", "flags", "async"]),
#
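# WSManCreateShellEx: creates a shell on the target host; unlike WSManCreateShell, the caller supplies the shell ID.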
'WSManCreateShellEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"__AnonymousBase_wsman_L665_C48": SimStruct({"inputStreamSet": SimTypePointer(SimStruct({"streamIDsCount": SimTypeInt(signed=False, label="UInt32"), "streamIDs": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_STREAM_ID_SET", pack=False, align=None), offset=0), "outputStreamSet": SimTypePointer(SimStruct({"streamIDsCount": SimTypeInt(signed=False, label="UInt32"), "streamIDs": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_STREAM_ID_SET", pack=False, align=None), offset=0), "idleTimeoutMs": SimTypeInt(signed=False, label="UInt32"), "workingDirectory": SimTypePointer(SimTypeChar(label="Char"), offset=0), "variableSet": SimTypePointer(SimStruct({"varsCount": SimTypeInt(signed=False, label="UInt32"), "vars": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ENVIRONMENT_VARIABLE", pack=False, align=None), offset=0)}, name="WSMAN_ENVIRONMENT_VARIABLE_SET", pack=False, align=None), offset=0)}, name="WSMAN_SHELL_STARTUP_INFO_V10", pack=False, align=None), "name": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SHELL_STARTUP_INFO_V11", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), 
SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["session", "flags", "resourceUri", "shellId", "startupInfo", "options", "createXml", "async", "shell"]),
#
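# WSManRunShellCommandEx: starts execution of a command in an existing shell; unlike WSManRunShellCommand, the caller supplies the command ID.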
'WSManRunShellCommandEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"argsCount": SimTypeInt(signed=False, label="UInt32"), "args": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_COMMAND_ARG_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": 
SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "flags", "commandId", "commandLine", "args", "options", "async", "command"]),
#
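# WSManDisconnectShell: disconnects the network connection of an active shell and its commands; the shell persists on the server until its idle timeout expires.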
'WSManDisconnectShell': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"idleTimeoutMs": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_SHELL_DISCONNECT_INFO", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, 
name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "flags", "disconnectInfo", "async"]),
#
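# WSManReconnectShell: reconnects a previously disconnected shell session.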
'WSManReconnectShell': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), 
offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "flags", "async"]),
#
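# WSManReconnectShellCommand: reconnects a previously disconnected command.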
'WSManReconnectShellCommand': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", 
"data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["commandHandle", "flags", "async"]),
#
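# WSManConnectShell: connects to an existing shell session on the server, e.g. one created and then disconnected by another client.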
'WSManConnectShell': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": 
SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["session", "flags", "resourceUri", "shellID", "options", "connectXml", "async", "shell"]),
#
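# WSManConnectShellCommand: connects to an existing command running inside a shell session.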
'WSManConnectShellCommand': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, 
label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "flags", "commandID", "options", "connectXml", "async", "command"]),
#
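# WSManPluginReportContext: called by a plugin to report the shell or command context back to the WinRM infrastructure so that subsequent operations can target it.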
'WSManPluginReportContext': SimTypeFunction([SimTypePointer(SimStruct({"senderDetails": SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), "locale": SimTypePointer(SimTypeChar(label="Char"), offset=0), "resourceUri": SimTypePointer(SimTypeChar(label="Char"), offset=0), "operationInfo": SimTypePointer(SimStruct({"fragment": SimStruct({"path": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FRAGMENT", pack=False, align=None), "filter": SimStruct({"filter": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FILTER", pack=False, align=None), "selectorSet": SimStruct({"numberKeys": SimTypeInt(signed=False, label="UInt32"), "keys": SimTypePointer(SimStruct({"key": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_KEY", pack=False, align=None), offset=0)}, name="WSMAN_SELECTOR_SET", pack=False, align=None), "optionSet": SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), "reserved": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "version": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_OPERATION_INFO", pack=False, align=None), offset=0), "shutdownNotification": SimTypeInt(signed=True, label="Int32"), "shutdownNotificationHandle": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "dataLocale": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_PLUGIN_REQUEST", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["requestDetails", "flags", "context"]),
#
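# WSManPluginReceiveResult: called by a plugin to report results for a Receive operation (stream data, command state, and exit code).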
'WSManPluginReceiveResult': SimTypeFunction([SimTypePointer(SimStruct({"senderDetails": SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), "locale": SimTypePointer(SimTypeChar(label="Char"), offset=0), "resourceUri": SimTypePointer(SimTypeChar(label="Char"), offset=0), "operationInfo": SimTypePointer(SimStruct({"fragment": SimStruct({"path": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FRAGMENT", pack=False, align=None), "filter": SimStruct({"filter": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FILTER", pack=False, align=None), "selectorSet": SimStruct({"numberKeys": SimTypeInt(signed=False, label="UInt32"), "keys": SimTypePointer(SimStruct({"key": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_KEY", pack=False, align=None), offset=0)}, name="WSMAN_SELECTOR_SET", pack=False, align=None), "optionSet": SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), "reserved": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "version": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_OPERATION_INFO", pack=False, align=None), offset=0), "shutdownNotification": SimTypeInt(signed=True, label="Int32"), "shutdownNotificationHandle": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "dataLocale": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_PLUGIN_REQUEST", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, 
label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["requestDetails", "flags", "stream", "streamResult", "commandState", "exitCode"]),
#
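# WSManPluginOperationComplete: called by a plugin to report the completion of an operation, together with its final error code.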
'WSManPluginOperationComplete': SimTypeFunction([SimTypePointer(SimStruct({"senderDetails": SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), "locale": SimTypePointer(SimTypeChar(label="Char"), offset=0), "resourceUri": SimTypePointer(SimTypeChar(label="Char"), offset=0), "operationInfo": SimTypePointer(SimStruct({"fragment": SimStruct({"path": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FRAGMENT", pack=False, align=None), "filter": SimStruct({"filter": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FILTER", pack=False, align=None), "selectorSet": SimStruct({"numberKeys": SimTypeInt(signed=False, label="UInt32"), "keys": SimTypePointer(SimStruct({"key": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_KEY", pack=False, align=None), offset=0)}, name="WSMAN_SELECTOR_SET", pack=False, align=None), "optionSet": SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), "reserved": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "version": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_OPERATION_INFO", pack=False, align=None), offset=0), "shutdownNotification": SimTypeInt(signed=True, label="Int32"), "shutdownNotificationHandle": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "dataLocale": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_PLUGIN_REQUEST", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["requestDetails", "flags", "errorCode", "extendedInformation"]),
#
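# WSManPluginGetOperationParameters: retrieves operational settings (such as time-outs and data size limits) associated with a plugin request.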
'WSManPluginGetOperationParameters': SimTypeFunction([SimTypePointer(SimStruct({"senderDetails": SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), "locale": SimTypePointer(SimTypeChar(label="Char"), offset=0), "resourceUri": SimTypePointer(SimTypeChar(label="Char"), offset=0), "operationInfo": SimTypePointer(SimStruct({"fragment": SimStruct({"path": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FRAGMENT", pack=False, align=None), "filter": SimStruct({"filter": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FILTER", pack=False, align=None), "selectorSet": SimStruct({"numberKeys": SimTypeInt(signed=False, label="UInt32"), "keys": SimTypePointer(SimStruct({"key": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_KEY", pack=False, align=None), offset=0)}, name="WSMAN_SELECTOR_SET", pack=False, align=None), "optionSet": SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), "reserved": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "version": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_OPERATION_INFO", pack=False, align=None), offset=0), "shutdownNotification": SimTypeInt(signed=True, label="Int32"), "shutdownNotificationHandle": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "dataLocale": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_PLUGIN_REQUEST", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["requestDetails", "flags", "data"]),
#
'WSManPluginGetConfiguration': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pluginContext", "flags", "data"]),
#
'WSManPluginReportCompletion': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pluginContext", "flags"]),
#
'WSManPluginFreeRequestDetails': SimTypeFunction([SimTypePointer(SimStruct({"senderDetails": SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), "locale": SimTypePointer(SimTypeChar(label="Char"), offset=0), "resourceUri": SimTypePointer(SimTypeChar(label="Char"), offset=0), "operationInfo": SimTypePointer(SimStruct({"fragment": SimStruct({"path": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FRAGMENT", pack=False, align=None), "filter": SimStruct({"filter": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FILTER", pack=False, align=None), "selectorSet": SimStruct({"numberKeys": SimTypeInt(signed=False, label="UInt32"), "keys": SimTypePointer(SimStruct({"key": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_KEY", pack=False, align=None), offset=0)}, name="WSMAN_SELECTOR_SET", pack=False, align=None), "optionSet": SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), "reserved": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "version": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_OPERATION_INFO", pack=False, align=None), offset=0), "shutdownNotification": SimTypeInt(signed=True, label="Int32"), "shutdownNotificationHandle": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "dataLocale": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_PLUGIN_REQUEST", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["requestDetails"]),
#
'WSManPluginAuthzUserComplete': SimTypeFunction([SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=True, label="Int32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["senderDetails", "flags", "userAuthorizationContext", "impersonationToken", "userIsAdministrator", "errorCode", "extendedErrorInformation"]),
#
'WSManPluginAuthzOperationComplete': SimTypeFunction([SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["senderDetails", "flags", "userAuthorizationContext", "errorCode", "extendedErrorInformation"]),
#
'WSManPluginAuthzQueryQuotaComplete': SimTypeFunction([SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"maxAllowedConcurrentShells": SimTypeInt(signed=False, label="UInt32"), "maxAllowedConcurrentOperations": SimTypeInt(signed=False, label="UInt32"), "timeslotSize": SimTypeInt(signed=False, label="UInt32"), "maxAllowedOperationsPerTimeslot": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_AUTHZ_QUOTA", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["senderDetails", "flags", "quota", "errorCode", "extendedErrorInformation"]),
}
lib.set_prototypes(prototypes)
| 1,036.465909
| 6,433
| 0.744082
| 10,350
| 91,209
| 6.486377
| 0.024155
| 0.065898
| 0.081538
| 0.104835
| 0.96933
| 0.965085
| 0.964072
| 0.962195
| 0.961525
| 0.960214
| 0
| 0.01516
| 0.056913
| 91,209
| 87
| 6,434
| 1,048.37931
| 0.765305
| 0.000307
| 0
| 0
| 0
| 0
| 0.250601
| 0.029666
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.021277
| 0.106383
| 0
| 0.106383
| 0.191489
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4307d8063e06e64e479cf930dd3456b183590f95
| 98
|
py
|
Python
|
test/regression/features/arithmetic/mult.py
|
ppelleti/berp
|
30925288376a6464695341445688be64ac6b2600
|
[
"BSD-3-Clause"
] | 137
|
2015-02-13T21:03:23.000Z
|
2021-11-24T03:53:55.000Z
|
test/regression/features/arithmetic/mult.py
|
ppelleti/berp
|
30925288376a6464695341445688be64ac6b2600
|
[
"BSD-3-Clause"
] | 4
|
2015-04-01T13:49:13.000Z
|
2019-07-09T19:28:56.000Z
|
test/regression/features/arithmetic/mult.py
|
bjpop/berp
|
30925288376a6464695341445688be64ac6b2600
|
[
"BSD-3-Clause"
] | 8
|
2015-04-25T03:47:52.000Z
|
2019-07-27T06:33:56.000Z
|
print(18 * 1234)
print(18 * 1234 * 2)
print(0 * 1)
print(1 * 0)
print(0.0 * 1.0)
print(1.0 * 0.0)
| 14
| 20
| 0.561224
| 23
| 98
| 2.391304
| 0.26087
| 0.109091
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.320513
| 0.204082
| 98
| 6
| 21
| 16.333333
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
43233962745ef76d4115b7625720cc7b8baedc4d
| 178
|
py
|
Python
|
resource/pypi/cffi-1.9.1/testing/cffi0/snippets/distutils_module/setup.py
|
hipnusleo/Laserjet
|
f53e0b740f48f2feb0c0bb285ec6728b313b4ccc
|
[
"Apache-2.0"
] | null | null | null |
resource/pypi/cffi-1.9.1/testing/cffi0/snippets/distutils_module/setup.py
|
hipnusleo/Laserjet
|
f53e0b740f48f2feb0c0bb285ec6728b313b4ccc
|
[
"Apache-2.0"
] | null | null | null |
resource/pypi/cffi-1.9.1/testing/cffi0/snippets/distutils_module/setup.py
|
hipnusleo/Laserjet
|
f53e0b740f48f2feb0c0bb285ec6728b313b4ccc
|
[
"Apache-2.0"
] | null | null | null |
from distutils.core import setup
import snip_basic_verify
setup(
    py_modules=['snip_basic_verify'],
    ext_modules=[snip_basic_verify.ffi.verifier.get_extension()])
| 22.25
| 66
| 0.758427
| 24
| 178
| 5.25
| 0.625
| 0.214286
| 0.357143
| 0.349206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146067
| 178
| 7
| 67
| 25.428571
| 0.828947
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
4a4e581c499165152bc4c54e7fe90ad3b4939698
| 48,733
|
py
|
Python
|
src/ralph/deployment/migrations/0005_auto__add_field_archiveddeployment_service__add_field_archiveddeployme.py
|
vi4m/ralph
|
2af767ee23d89be9e6cec0a537350a1ce8840bd1
|
[
"Apache-2.0"
] | 1
|
2018-09-01T14:14:08.000Z
|
2018-09-01T14:14:08.000Z
|
src/ralph/deployment/migrations/0005_auto__add_field_archiveddeployment_service__add_field_archiveddeployme.py
|
srikanth4372/sample
|
127b5742ae464d42909a14d71e3c10c241ec3a23
|
[
"Apache-2.0"
] | 1
|
2019-08-14T10:03:45.000Z
|
2019-08-14T10:03:45.000Z
|
src/ralph/deployment/migrations/0005_auto__add_field_archiveddeployment_service__add_field_archiveddeployme.py
|
srikanth4372/sample
|
127b5742ae464d42909a14d71e3c10c241ec3a23
|
[
"Apache-2.0"
] | 1
|
2019-08-14T09:59:42.000Z
|
2019-08-14T09:59:42.000Z
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models

class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'ArchivedDeployment.service'
        db.add_column('deployment_archiveddeployment', 'service',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cmdb.CI'], null=True, on_delete=models.SET_NULL),
                      keep_default=False)

        # Adding field 'ArchivedDeployment.device_environment'
        db.add_column('deployment_archiveddeployment', 'device_environment',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cmdb.CI'], null=True, on_delete=models.SET_NULL),
                      keep_default=False)

        # Adding field 'Deployment.service'
        db.add_column('deployment_deployment', 'service',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cmdb.CI'], null=True, on_delete=models.SET_NULL),
                      keep_default=False)

        # Adding field 'Deployment.device_environment'
        db.add_column('deployment_deployment', 'device_environment',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['cmdb.CI'], null=True, on_delete=models.SET_NULL),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'ArchivedDeployment.service'
        db.delete_column('deployment_archiveddeployment', 'service_id')

        # Deleting field 'ArchivedDeployment.device_environment'
        db.delete_column('deployment_archiveddeployment', 'device_environment_id')

        # Deleting field 'Deployment.service'
        db.delete_column('deployment_deployment', 'service_id')

        # Deleting field 'Deployment.device_environment'
        db.delete_column('deployment_deployment', 'device_environment_id')

    models = {
        'account.profile': {
            'Meta': {'object_name': 'Profile'},
            'activation_token': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
            'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'company': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'cost_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'country': ('django.db.models.fields.PositiveIntegerField', [], {'default': '153'}),
            'department': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'employee_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'gender': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
            'home_page': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', u'default': '1', 'null': 'False', '_in_south': 'True', 'db_index': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'manager': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'nick': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '30', 'blank': 'True'}),
            'profit_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'time_zone': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'business.businesssegment': {
            'Meta': {'object_name': 'BusinessSegment'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
        },
        'business.department': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'Department'},
            'icon': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'True', u'default': 'None', 'null': 'True', '_in_south': 'True', 'db_index': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
        },
        'business.profitcenter': {
            'Meta': {'object_name': 'ProfitCenter'},
            'description': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
        },
        'business.venture': {
            'Meta': {'ordering': "(u'parent__symbol', u'symbol')", 'unique_together': "((u'parent', u'symbol'),)", 'object_name': 'Venture'},
            'business_segment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.BusinessSegment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'data_center': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.DataCenter']", 'null': 'True', 'blank': 'True'}),
            'department': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.Department']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_infrastructure': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'margin_kind': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.MarginKind']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "u'child_set'", 'null': 'True', 'blank': 'True', 'to': "orm['business.Venture']"}),
            'path': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'preboot': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['deployment.Preboot']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'profit_center': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.ProfitCenter']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'show_in_ralph': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'symbol': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32', 'blank': 'True'}),
            'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'business.venturerole': {
            'Meta': {'ordering': "(u'parent__name', u'name')", 'unique_together': "((u'name', u'venture'),)", 'object_name': 'VentureRole'},
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "u'child_set'", 'null': 'True', 'blank': 'True', 'to': "orm['business.VentureRole']"}),
            'path': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'preboot': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['deployment.Preboot']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'venture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.Venture']"})
        },
        'cmdb.ci': {
            'Meta': {'unique_together': "((u'content_type', u'object_id'),)", 'object_name': 'CI'},
            'added_manually': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'barcode': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'business_service': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'layers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CILayer']", 'symmetrical': 'False'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'owners': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CIOwner']", 'through': "orm['cmdb.CIOwnership']", 'symmetrical': 'False'}),
            'pci_scope': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'relations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CI']", 'through': "orm['cmdb.CIRelation']", 'symmetrical': 'False'}),
            'state': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '11'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '11'}),
            'technical_service': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIType']"}),
            'uid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'zabbix_id': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
        },
        'cmdb.cilayer': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'CILayer'},
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'connected_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CIType']", 'symmetrical': 'False', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'icon': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'True', u'default': 'None', 'null': 'True', '_in_south': 'True', 'db_index': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'cmdb.ciowner': {
            'Meta': {'object_name': 'CIOwner'},
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'profile': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['account.Profile']", 'unique': 'True'})
        },
        'cmdb.ciownership': {
            'Meta': {'object_name': 'CIOwnership'},
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIOwner']"}),
            'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
        },
        'cmdb.cirelation': {
            'Meta': {'unique_together': "((u'parent', u'child', u'type'),)", 'object_name': 'CIRelation'},
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'child'", 'to': "orm['cmdb.CI']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'parent'", 'to': "orm['cmdb.CI']"}),
            'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'type': ('django.db.models.fields.IntegerField', [], {'max_length': '11'})
        },
        'cmdb.citype': {
            'Meta': {'object_name': 'CIType'},
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'icon_class': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'deployment.archiveddeployment': {
            'Meta': {'ordering': "(u'-created',)", 'object_name': 'ArchivedDeployment'},
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'device': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.Device']"}),
            'device_environment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'done_plugins': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'is_running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'mac': (u'lck.django.common.models.MACAddressField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', 'null': 'False', 'db_index': 'False'}),
            'mass_deployment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['deployment.MassDeployment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'preboot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deployment.Preboot']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'service': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'status_lastchanged': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'venture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.Venture']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'venture_role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.VentureRole']", 'null': 'True', 'on_delete': 'models.SET_NULL'})
        },
        'deployment.deployment': {
            'Meta': {'ordering': "(u'-created',)", 'object_name': 'Deployment'},
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'device': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.Device']"}),
            'device_environment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'done_plugins': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'is_running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'mac': (u'lck.django.common.models.MACAddressField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', 'null': 'False', 'db_index': 'False'}),
            'mass_deployment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['deployment.MassDeployment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'preboot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['deployment.Preboot']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'service': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'status_lastchanged': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'venture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.Venture']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'venture_role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['business.VentureRole']", 'null': 'True', 'on_delete': 'models.SET_NULL'})
        },
        'deployment.deploymentpoll': {
            'Meta': {'object_name': 'DeploymentPoll'},
            'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'deployment.massdeployment': {
            'Meta': {'ordering': "(u'-created',)", 'object_name': 'MassDeployment'},
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
            'csv': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'generated_csv': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'})
        },
        'deployment.preboot': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'Preboot'},
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'files': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['deployment.PrebootFile']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
        },
        'deployment.prebootfile': {
            'Meta': {'object_name': 'PrebootFile'},
            'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'ftype': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', u'default': '101', 'null': 'False', '_in_south': 'True', 'db_index': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
            'raw_config': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'discovery.connection': {
            'Meta': {'object_name': 'Connection'},
            'connection_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'inbound': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'inbound_connections'", 'on_delete': 'models.PROTECT', 'to': "orm['discovery.Device']"}),
            'outbound': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'outbound_connections'", 'on_delete': 'models.PROTECT', 'to': "orm['discovery.Device']"})
        },
        'discovery.datacenter': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'DataCenter'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
        },
        'discovery.deprecationkind': {
            'Meta': {'object_name': 'DeprecationKind'},
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'months': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
            'remarks': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'})
        },
        'discovery.device': {
            'Meta': {'object_name': 'Device'},
            'barcode': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'boot_firmware': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'cached_cost': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'cached_price': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'chassis_position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'connections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['discovery.Device']", 'through': "orm['discovery.Connection']", 'symmetrical': 'False'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'dc': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'deprecation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'deprecation_kind': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.DeprecationKind']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'device_environment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'diag_firmware': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'hard_firmware': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'logical_parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'logicalchild_set'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['discovery.Device']", 'blank': 'True', 'null': 'True'}),
            'management': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'managed_set'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['discovery.IPAddress']", 'blank': 'True', 'null': 'True'}),
            'margin_kind': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.MarginKind']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'max_save_priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'mgmt_firmware': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'model': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'device_set'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['discovery.DeviceModel']", 'blank': 'True', 'null': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'name2': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'child_set'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['discovery.Device']", 'blank': 'True', 'null': 'True'}),
            'position': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'price': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'purchase_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'rack': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'remarks': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'save_priorities': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'service': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cmdb.CI']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'sn': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'support_expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'support_kind': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'uptime_seconds': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'uptime_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'venture': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.Venture']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'venture_role': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.VentureRole']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'warranty_expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'discovery.devicemodel': {
            'Meta': {'object_name': 'DeviceModel'},
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'chassis_size': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_save_priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'save_priorities': ('django.db.models.fields.TextField', [], {'default': "u''"}),
            'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '401'})
        },
        'discovery.discoveryqueue': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'DiscoveryQueue'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
        },
        'discovery.environment': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'Environment'},
            'data_center': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.DataCenter']"}),
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'hosts_naming_template': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
            'next_server': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32', 'blank': 'True'}),
            'queue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.DiscoveryQueue']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'remarks': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'discovery.ipaddress': {
            'Meta': {'object_name': 'IPAddress'},
            'address': ('django.db.models.fields.IPAddressField', [], {'default': 'None', 'max_length': '15', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'dead_ping_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'device': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.Device']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'dns_info': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'http_family': ('django.db.models.fields.TextField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_buried': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_management': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_plugins': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'last_puppet': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'network': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.Network']", 'null': 'True', 'blank': 'True'}),
            'number': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'scan_summary': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scan.ScanSummary']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'snmp_community': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'snmp_name': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'snmp_version': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '5', 'null': 'True', 'blank': 'True'}),
            'venture': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['business.Venture']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
        },
        'discovery.marginkind': {
            'Meta': {'object_name': 'MarginKind'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'margin': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
            'remarks': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'})
        },
        'discovery.network': {
            'Meta': {'ordering': "(u'vlan',)", 'object_name': 'Network'},
            'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}),
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'custom_dns_servers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['dnsedit.DNSServer']", 'null': 'True', 'blank': 'True'}),
            'data_center': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.DataCenter']", 'null': 'True', 'blank': 'True'}),
            'dhcp_broadcast': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'dhcp_config': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'environment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['discovery.Environment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'gateway': ('django.db.models.fields.IPAddressField', [], {'default': 'None', 'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'gateway_as_int': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignore_addresses': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'kind': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['discovery.NetworkKind']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'last_scan': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'max_ip': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'min_ip': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
            'racks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['discovery.Device']", 'symmetrical': 'False'}),
            'remarks': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'reserved': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
            'reserved_top_margin': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'terminators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['discovery.NetworkTerminator']", 'symmetrical': 'False'}),
            'vlan': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
        },
        'discovery.networkkind': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'NetworkKind'},
            'icon': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
        },
        'discovery.networkterminator': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'NetworkTerminator'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
        },
        'dnsedit.dnsserver': {
            'Meta': {'object_name': 'DNSServer'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
            'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'})
        },
        'scan.scansummary': {
            'Meta': {'object_name': 'ScanSummary'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'false_positive_checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
            'previous_checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'})
        },
        'tags.tag': {
            'Meta': {'object_name': 'Tag'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['account.Profile']"}),
            'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'tags_tag_tags'", 'to': "orm['contenttypes.ContentType']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.PositiveIntegerField', [], {'default': '39'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'official': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'stem': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'related_tags'", 'null': 'True', 'to': "orm['tags.TagStem']"})
        },
        'tags.tagstem': {
            'Meta': {'object_name': 'TagStem'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.PositiveIntegerField', [], {'default': '39'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
            'tag_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['deployment']
| 94.996101
| 239
| 0.572487
| 4,936
| 48,733
| 5.547812
| 0.066653
| 0.101957
| 0.177914
| 0.254163
| 0.859224
| 0.835561
| 0.799408
| 0.761503
| 0.72546
| 0.626059
| 0
| 0.006246
| 0.175364
| 48,733
| 513
| 240
| 94.996101
| 0.675169
| 0.007736
| 0
| 0.34898
| 0
| 0
| 0.60558
| 0.337973
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004082
| false
| 0.002041
| 0.008163
| 0
| 0.018367
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4a7c8678af28d04fe1e6fb14eef66f905c9017b0
| 164
|
py
|
Python
|
__init__.py
|
m3sserschmitt/basic-http
|
bc09a888b44a11154e2cc9bfaf46fc9fd3a79b82
|
[
"MIT"
] | null | null | null |
__init__.py
|
m3sserschmitt/basic-http
|
bc09a888b44a11154e2cc9bfaf46fc9fd3a79b82
|
[
"MIT"
] | null | null | null |
__init__.py
|
m3sserschmitt/basic-http
|
bc09a888b44a11154e2cc9bfaf46fc9fd3a79b82
|
[
"MIT"
] | null | null | null |
import basic_http.session
basic_http.session.LIB_VERSION = 'v0.0.4-beta'
basic_http.session.DEFAULT_AGENT = 'basic-http version ' + basic_http.session.LIB_VERSION
| 32.8
| 89
| 0.810976
| 26
| 164
| 4.846154
| 0.461538
| 0.357143
| 0.507937
| 0.301587
| 0.412698
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019868
| 0.079268
| 164
| 4
| 90
| 41
| 0.81457
| 0
| 0
| 0
| 0
| 0
| 0.182927
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
4a9ad45bc6d5f8001c81f4145b812d1bf0d096f9
| 100
|
py
|
Python
|
HPOBenchExperimentUtils/resource_manager/__init__.py
|
PhMueller/TrajectoryParser
|
9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f
|
[
"Apache-2.0"
] | null | null | null |
HPOBenchExperimentUtils/resource_manager/__init__.py
|
PhMueller/TrajectoryParser
|
9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f
|
[
"Apache-2.0"
] | 1
|
2021-09-01T16:35:21.000Z
|
2021-11-05T19:53:25.000Z
|
HPOBenchExperimentUtils/resource_manager/__init__.py
|
automl/HPOBenchExperimentUtils
|
9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f
|
[
"Apache-2.0"
] | null | null | null |
from HPOBenchExperimentUtils.resource_manager.file_resource_manager import FileBasedResourceManager
| 50
| 99
| 0.94
| 9
| 100
| 10.111111
| 0.777778
| 0.32967
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 100
| 1
| 100
| 100
| 0.947917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
435b8874fd825cd72ac9feb3e9f96907066c1541
| 141
|
py
|
Python
|
src/sunstruck/schemas/__init__.py
|
la-mar/sunstruck-api
|
90074a55d3b243f7f0eee6e897a98699d2cebc43
|
[
"MIT"
] | 3
|
2021-04-04T07:48:48.000Z
|
2022-02-19T17:42:12.000Z
|
src/sunstruck/schemas/__init__.py
|
la-mar/sunstruck-api
|
90074a55d3b243f7f0eee6e897a98699d2cebc43
|
[
"MIT"
] | null | null | null |
src/sunstruck/schemas/__init__.py
|
la-mar/sunstruck-api
|
90074a55d3b243f7f0eee6e897a98699d2cebc43
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from schemas.client_credentials import *
from schemas.message import *
from schemas.token import *
from schemas.user import *
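The wildcard imports above re-export every schema at the package root; the `# flake8: noqa` silences the unused-import warnings that pattern triggers. A sketch of the stricter variant, with hypothetical schema names, that keeps the same flat namespace while leaving linting enabled:

# schemas/__init__.py -- explicit re-exports; UserSchema and TokenSchema are
# hypothetical names standing in for whatever the sub-modules actually define.
from schemas.user import UserSchema
from schemas.token import TokenSchema

__all__ = ['UserSchema', 'TokenSchema']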
| 23.5
| 40
| 0.794326
| 19
| 141
| 5.842105
| 0.526316
| 0.396396
| 0.459459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008197
| 0.134752
| 141
| 5
| 41
| 28.2
| 0.901639
| 0.085106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
600132a2e2c79c041002d7861851e7ef109318b7
| 14,276
|
py
|
Python
|
tests/test_api_network.py
|
devicehive/devicehive-plugin-python-template
|
ad532a57ebf9ae52f12afc98eeb867380707d47d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_api_network.py
|
devicehive/devicehive-plugin-python-template
|
ad532a57ebf9ae52f12afc98eeb867380707d47d
|
[
"Apache-2.0"
] | 1
|
2018-03-07T07:36:44.000Z
|
2018-03-07T07:36:44.000Z
|
tests/test_api_network.py
|
devicehive/devicehive-plugin-python-template
|
ad532a57ebf9ae52f12afc98eeb867380707d47d
|
[
"Apache-2.0"
] | 4
|
2018-03-10T20:59:37.000Z
|
2021-10-18T23:25:30.000Z
|
# Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from six.moves import range


def test_subscribe_events(test):
    test.only_admin_implementation()
    plugin_api = test.plugin_api()
    device_hive_api = test.device_hive_api()

    def init_data():
        net_name = test.generate_id('n-s-e', test.NETWORK_ENTITY)
        net_description = '%s-description' % net_name
        network = device_hive_api.create_network(net_name, net_description)
        device_id = test.generate_id('n-s-e', test.DEVICE_ENTITY)
        device = device_hive_api.put_device(device_id, network_id=network.id)
        command_name = '%s-command' % device_id
        notification_name = '%s-notification' % device_id
        return {'device': device,
                'network': network,
                'command_name': command_name,
                'notification_name': notification_name}

    def send_data(device, command_name, notification_name):
        command = device.send_command(command_name)
        command.status = 'status'
        command.save()
        notification = device.send_notification(notification_name)
        return command.id, command.id, notification.id

    def handle_connect(handler):
        event_ids = send_data(handler.data['device'],
                              handler.data['command_name'],
                              handler.data['notification_name'])
        command_insert_id, command_update_id, notification_id = event_ids
        handler.data['event_ids'] = [('command/insert', command_insert_id),
                                     ('command/update', command_update_id),
                                     ('notification/insert', notification_id)]

    def handle_event(handler, event):
        action_id_pair = (event.action, event.data.id)
        assert action_id_pair in handler.data['event_ids']
        handler.data['event_ids'].remove(action_id_pair)
        if handler.data['event_ids']:
            return
        handler.data['device'].remove()
        handler.disconnect()

    data = init_data()
    name = test.generate_id('n-s-e', test.PLUGIN_ENTITY)
    description = '%s-description' % name
    plugin = plugin_api.create_plugin(name, description,
                                      network_ids=[data['network'].id])
    test.run(plugin, handle_connect, handle_event, data=data)
    plugin_api.remove_plugin(plugin['topicName'])

    # =========================================================================

    def handle_connect(handler):
        event_ids = send_data(handler.data['device'],
                              handler.data['command_name'],
                              handler.data['notification_name'])
        command_insert_id, command_update_id, notification_id = event_ids
        handler.data['event_ids'] = [('command/insert', command_insert_id),
                                     ('command/update', command_update_id)]

    data = init_data()
    name = test.generate_id('n-s-e', test.PLUGIN_ENTITY)
    description = '%s-description' % name
    plugin = plugin_api.create_plugin(name, description,
                                      network_ids=[data['network'].id],
                                      subscribe_notifications=False)
    test.run(plugin, handle_connect, handle_event, data=data)
    plugin_api.remove_plugin(plugin['topicName'])

    # =========================================================================

    def handle_connect(handler):
        event_ids = send_data(handler.data['device'],
                              handler.data['command_name'],
                              handler.data['notification_name'])
        command_insert_id, command_update_id, notification_id = event_ids
        handler.data['event_ids'] = [('command/insert', command_insert_id),
                                     ('notification/insert', notification_id)]

    data = init_data()
    name = test.generate_id('n-s-e', test.PLUGIN_ENTITY)
    description = '%s-description' % name
    plugin = plugin_api.create_plugin(name, description,
                                      network_ids=[data['network'].id],
                                      subscribe_update_commands=False)
    test.run(plugin, handle_connect, handle_event, data=data)
    plugin_api.remove_plugin(plugin['topicName'])

    # =========================================================================

    def handle_connect(handler):
        event_ids = send_data(handler.data['device'],
                              handler.data['command_name'],
                              handler.data['notification_name'])
        command_insert_id, command_update_id, notification_id = event_ids
        handler.data['event_ids'] = [('command/update', command_update_id),
                                     ('notification/insert', notification_id)]

    data = init_data()
    name = test.generate_id('n-s-e', test.PLUGIN_ENTITY)
    description = '%s-description' % name
    plugin = plugin_api.create_plugin(name, description,
                                      network_ids=[data['network'].id],
                                      subscribe_insert_commands=False)
    test.run(plugin, handle_connect, handle_event, data=data)
    plugin_api.remove_plugin(plugin['topicName'])


def test_subscribe_insert_commands(test):
    test.only_admin_implementation()
    plugin_api = test.plugin_api()
    device_hive_api = test.device_hive_api()

    def init_data():
        net_name = test.generate_id('n-s-i-c', test.NETWORK_ENTITY)
        net_description = '%s-description' % net_name
        network = device_hive_api.create_network(net_name, net_description)
        device_id = test.generate_id('n-s-i-c', test.DEVICE_ENTITY)
        device = device_hive_api.put_device(device_id, network_id=network.id)
        command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
        return {'device': device,
                'network': network,
                'command_names': command_names}

    def send_data(device, command_names):
        return [device.send_command(name).id for name in command_names]

    def handle_connect(handler):
        handler.data['command_ids'] = send_data(handler.data['device'],
                                                handler.data['command_names'])

    def handle_command_insert(handler, command):
        assert command.id in handler.data['command_ids']
        handler.data['command_ids'].remove(command.id)
        if handler.data['command_ids']:
            return
        handler.data['device'].remove()
        handler.disconnect()

    data = init_data()
    name = test.generate_id('n-s-i-c', test.PLUGIN_ENTITY)
    description = '%s-description' % name
    plugin = plugin_api.create_plugin(name, description,
                                      network_ids=[data['network'].id],
                                      subscribe_update_commands=False,
                                      subscribe_notifications=False)
    test.run(plugin, handle_connect,
             handle_command_insert=handle_command_insert, data=data)
    plugin_api.remove_plugin(plugin['topicName'])

    # =========================================================================

    def handle_connect(handler):
        handler.data['command_ids'] = send_data(
            handler.data['device'], handler.data['command_names'])[-1:]

    data = init_data()
    name = test.generate_id('n-s-i-c', test.PLUGIN_ENTITY)
    description = '%s-description' % name
    plugin = plugin_api.create_plugin(name, description,
                                      network_ids=[data['network'].id],
                                      names=data['command_names'][-1:],
                                      subscribe_update_commands=False,
                                      subscribe_notifications=False)
    test.run(plugin, handle_connect,
             handle_command_insert=handle_command_insert, data=data)
    plugin_api.remove_plugin(plugin['topicName'])


def test_subscribe_update_commands(test):
    test.only_admin_implementation()
    plugin_api = test.plugin_api()
    device_hive_api = test.device_hive_api()

    def init_data():
        net_name = test.generate_id('n-s-u-c', test.NETWORK_ENTITY)
        net_description = '%s-description' % net_name
        network = device_hive_api.create_network(net_name, net_description)
        device_id = test.generate_id('n-s-u-c', test.DEVICE_ENTITY)
        device = device_hive_api.put_device(device_id, network_id=network.id)
        command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
        return {'device': device,
                'network': network,
                'command_names': command_names}

    def send_data(device, command_names):
        command_ids = []
        for name in command_names:
            command = device.send_command(name)
            command.status = 'status'
            command.save()
            command_ids.append(command.id)
        return command_ids

    def handle_connect(handler):
        handler.data['command_ids'] = send_data(handler.data['device'],
                                                handler.data['command_names'])

    def handle_command_update(handler, command):
        assert command.id in handler.data['command_ids']
        assert command.status == 'status'
        handler.data['command_ids'].remove(command.id)
        if handler.data['command_ids']:
            return
        handler.data['device'].remove()
        handler.disconnect()

    data = init_data()
    name = test.generate_id('n-s-u-c', test.PLUGIN_ENTITY)
    description = '%s-description' % name
    plugin = plugin_api.create_plugin(name, description,
                                      network_ids=[data['network'].id],
                                      subscribe_insert_commands=False,
                                      subscribe_notifications=False)
    test.run(plugin, handle_connect,
             handle_command_update=handle_command_update, data=data)
    plugin_api.remove_plugin(plugin['topicName'])

    # =========================================================================

    def handle_connect(handler):
        handler.data['command_ids'] = send_data(
            handler.data['device'], handler.data['command_names'])[-1:]

    data = init_data()
    name = test.generate_id('n-s-u-c', test.PLUGIN_ENTITY)
    description = '%s-description' % name
    plugin = plugin_api.create_plugin(name, description,
                                      network_ids=[data['network'].id],
                                      names=data['command_names'][-1:],
                                      subscribe_insert_commands=False,
                                      subscribe_notifications=False)
    test.run(plugin, handle_connect,
             handle_command_update=handle_command_update, data=data)
    plugin_api.remove_plugin(plugin['topicName'])


def test_subscribe_notifications(test):
    test.only_admin_implementation()
    plugin_api = test.plugin_api()
    device_hive_api = test.device_hive_api()

    def init_data():
        net_name = test.generate_id('n-s-n', test.NETWORK_ENTITY)
        net_description = '%s-description' % net_name
        network = device_hive_api.create_network(net_name, net_description)
        device_id = test.generate_id('n-s-n', test.DEVICE_ENTITY)
        device = device_hive_api.put_device(device_id, network_id=network.id)
        notification_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
        return {'device': device,
                'network': network,
                'notification_names': notification_names}

    def send_data(device, notification_names):
        return [device.send_notification(name).id for name in
                notification_names]

    def handle_connect(handler):
        handler.data['notification_ids'] = send_data(
            handler.data['device'], handler.data['notification_names'])

    def handle_notification(handler, notification):
        assert notification.id in handler.data['notification_ids']
        handler.data['notification_ids'].remove(notification.id)
        if handler.data['notification_ids']:
            return
        handler.data['device'].remove()
        handler.disconnect()

    data = init_data()
    name = test.generate_id('n-s-n', test.PLUGIN_ENTITY)
    description = '%s-description' % name
    plugin = plugin_api.create_plugin(name, description,
                                      network_ids=[data['network'].id],
                                      subscribe_insert_commands=False,
                                      subscribe_update_commands=False)
    test.run(plugin, handle_connect,
             handle_notification=handle_notification, data=data)
    plugin_api.remove_plugin(plugin['topicName'])

    # =========================================================================

    def handle_connect(handler):
        handler.data['notification_ids'] = send_data(
            handler.data['device'], handler.data['notification_names'])[-1:]

    data = init_data()
    name = test.generate_id('n-s-n', test.PLUGIN_ENTITY)
    description = '%s-description' % name
    plugin = plugin_api.create_plugin(name, description,
                                      network_ids=[data['network'].id],
                                      names=data['notification_names'][-1:],
                                      subscribe_insert_commands=False,
                                      subscribe_update_commands=False)
    test.run(plugin, handle_connect,
             handle_notification=handle_notification, data=data)
    plugin_api.remove_plugin(plugin['topicName'])
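Each scenario above rebinds handle_connect and re-runs test.run(...) with a narrower subscription; the event handlers all follow one expect-then-drain shape. A minimal sketch of that bookkeeping pattern, independent of the DeviceHive API (all names here are illustrative):

# Generic form of the expect-then-drain pattern used in the tests above:
# the expected set is seeded on connect, the event handler removes entries,
# and the session ends once everything expected has been observed.
class Drain:
    def __init__(self, expected):
        self.expected = list(expected)
        self.done = False

    def on_event(self, event):
        assert event in self.expected
        self.expected.remove(event)
        if not self.expected:
            self.done = True  # in the real tests: handler.disconnect()

drain = Drain([('command/insert', 1), ('notification/insert', 2)])
drain.on_event(('command/insert', 1))
drain.on_event(('notification/insert', 2))
assert drain.done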
| 44.061728
| 79
| 0.591202
| 1,542
| 14,276
| 5.205577
| 0.079118
| 0.068519
| 0.031394
| 0.033636
| 0.842158
| 0.829201
| 0.820979
| 0.808521
| 0.808023
| 0.799801
| 0
| 0.001628
| 0.268703
| 14,276
| 323
| 80
| 44.198142
| 0.767241
| 0.074951
| 0
| 0.791165
| 0
| 0
| 0.105119
| 0
| 0
| 0
| 0
| 0
| 0.02008
| 1
| 0.104418
| false
| 0
| 0.004016
| 0.008032
| 0.156627
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
60634a727fe7a278b36493fb58ad20aeb22882f6
| 2,151
|
py
|
Python
|
tests/webapp/test_webapp_actions.py
|
proofdock/chaos-azure
|
85302f8be18153862656c587988eafb5dd37ddf7
|
[
"Apache-2.0"
] | 1
|
2021-04-24T20:01:54.000Z
|
2021-04-24T20:01:54.000Z
|
tests/webapp/test_webapp_actions.py
|
proofdock/chaos-azure
|
85302f8be18153862656c587988eafb5dd37ddf7
|
[
"Apache-2.0"
] | 23
|
2020-05-22T06:43:14.000Z
|
2021-02-25T21:02:28.000Z
|
tests/webapp/test_webapp_actions.py
|
proofdock/chaos-azure
|
85302f8be18153862656c587988eafb5dd37ddf7
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import patch, MagicMock

from pdchaosazure.webapp.actions import stop, restart, delete
from tests.data import config_provider, secrets_provider, webapp_provider


@patch('pdchaosazure.webapp.actions.fetch_webapps', autospec=True)
@patch('pdchaosazure.webapp.actions.client.init', autospec=True)
def test_happily_stop_webapp(init, fetch):
    config = config_provider.provide_default_config()
    secrets = secrets_provider.provide_secrets_public()
    webapp = webapp_provider.default()
    client = MagicMock()
    init.return_value = client
    resource_list = [webapp]
    fetch.return_value = resource_list

    f = "where resourceGroup=~'rg'"
    stop(f, config, secrets)

    fetch.assert_called_with(f, config, secrets)
    client.web_apps.stop.assert_called_with(webapp['resourceGroup'], webapp['name'])


@patch('pdchaosazure.webapp.actions.fetch_webapps', autospec=True)
@patch('pdchaosazure.webapp.actions.client.init', autospec=True)
def test_happily_restart_webapp(init, fetch):
    config = config_provider.provide_default_config()
    secrets = secrets_provider.provide_secrets_public()
    webapp = webapp_provider.default()
    client = MagicMock()
    init.return_value = client
    resource_list = [webapp]
    fetch.return_value = resource_list

    f = "where resourceGroup=~'rg'"
    restart(f, config, secrets)

    fetch.assert_called_with(f, config, secrets)
    client.web_apps.restart.assert_called_with(webapp['resourceGroup'], webapp['name'])


@patch('pdchaosazure.webapp.actions.fetch_webapps', autospec=True)
@patch('pdchaosazure.webapp.actions.client.init', autospec=True)
def test_happily_delete_webapp(init, fetch):
    webapp = webapp_provider.default()
    config = config_provider.provide_default_config()
    secrets = secrets_provider.provide_secrets_public()
    client = MagicMock()
    init.return_value = client
    resource_list = [webapp]
    fetch.return_value = resource_list

    f = "where resourceGroup=~'rg'"
    delete(f, config, secrets)

    fetch.assert_called_with(f, config, secrets)
    client.web_apps.delete.assert_called_with(webapp['resourceGroup'], webapp['name'])
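Note the argument order in the tests above: stacked @patch decorators apply bottom-up, so the innermost (bottom) decorator supplies the first mock argument, which is why `init` precedes `fetch` in each signature. A self-contained sketch of that rule using only the standard library:

from unittest.mock import patch

# os.getcwd is patched by the bottom decorator, so its mock arrives first.
@patch('os.path.exists', autospec=True)
@patch('os.getcwd', autospec=True)
def demo(getcwd, exists):
    getcwd.return_value = '/tmp'
    exists.return_value = True
    import os
    assert os.getcwd() == '/tmp'
    assert os.path.exists('/anything')

demo()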
| 34.693548
| 87
| 0.755927
| 261
| 2,151
| 5.988506
| 0.164751
| 0.074856
| 0.111964
| 0.115163
| 0.84453
| 0.84453
| 0.84453
| 0.815739
| 0.815739
| 0.815739
| 0
| 0
| 0.132497
| 2,151
| 61
| 88
| 35.262295
| 0.837621
| 0
| 0
| 0.733333
| 0
| 0
| 0.170153
| 0.111576
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
606629e6c71087f04da2a0bec8e5f2d2e0b13de3
| 3,218
|
py
|
Python
|
tests/test_is_valid_php_version_file_version.py
|
gerardroche/sublime-phpunit
|
73e96ec5e4ac573c5d5247cf87c38e8243da906b
|
[
"BSD-3-Clause"
] | 85
|
2015-02-18T00:05:54.000Z
|
2022-01-01T12:20:22.000Z
|
tests/test_is_valid_php_version_file_version.py
|
gerardroche/sublime-phpunit
|
73e96ec5e4ac573c5d5247cf87c38e8243da906b
|
[
"BSD-3-Clause"
] | 98
|
2015-01-07T22:23:48.000Z
|
2021-06-03T19:37:50.000Z
|
tests/test_is_valid_php_version_file_version.py
|
gerardroche/sublime-phpunit
|
73e96ec5e4ac573c5d5247cf87c38e8243da906b
|
[
"BSD-3-Clause"
] | 21
|
2015-08-12T01:02:17.000Z
|
2021-09-12T09:16:39.000Z
|
from PHPUnitKit.tests import unittest

from PHPUnitKit.plugin import is_valid_php_version_file_version


class TestIsValidPhpVersionFileVersion(unittest.TestCase):

    def test_invalid_values(self):
        self.assertFalse(is_valid_php_version_file_version(''))
        self.assertFalse(is_valid_php_version_file_version(' '))
        self.assertFalse(is_valid_php_version_file_version('foobar'))
        self.assertFalse(is_valid_php_version_file_version('masterfoo'))
        self.assertFalse(is_valid_php_version_file_version('.'))
        self.assertFalse(is_valid_php_version_file_version('x'))
        self.assertFalse(is_valid_php_version_file_version('x.x'))
        self.assertFalse(is_valid_php_version_file_version('x.x.x'))
        self.assertFalse(is_valid_php_version_file_version('x'))
        self.assertFalse(is_valid_php_version_file_version('snapshot'))

    def test_master_branch_version(self):
        self.assertTrue(is_valid_php_version_file_version('master'))

    def test_specific_semver_versions(self):
        self.assertTrue(is_valid_php_version_file_version('5.0.0'))
        self.assertTrue(is_valid_php_version_file_version('5.0.1'))
        self.assertTrue(is_valid_php_version_file_version('5.0.7'))
        self.assertTrue(is_valid_php_version_file_version('5.0.30'))
        self.assertTrue(is_valid_php_version_file_version('5.0.32'))
        self.assertTrue(is_valid_php_version_file_version('5.1.0'))
        self.assertTrue(is_valid_php_version_file_version('5.1.1'))
        self.assertTrue(is_valid_php_version_file_version('5.1.3'))
        self.assertTrue(is_valid_php_version_file_version('5.1.27'))
        self.assertTrue(is_valid_php_version_file_version('7.0.0'))
        self.assertTrue(is_valid_php_version_file_version('7.1.19'))

    def test_minor_versions(self):
        self.assertTrue(is_valid_php_version_file_version('5.6'))
        self.assertTrue(is_valid_php_version_file_version('7.1'))
        self.assertTrue(is_valid_php_version_file_version('7.2'))

    def test_major_dot_x_versions(self):
        self.assertTrue(is_valid_php_version_file_version('5.x'))
        self.assertTrue(is_valid_php_version_file_version('6.x'))
        self.assertTrue(is_valid_php_version_file_version('7.x'))
        self.assertTrue(is_valid_php_version_file_version('8.x'))

    def test_major_dot_minor_dot_x_versions(self):
        self.assertTrue(is_valid_php_version_file_version('7.0.x'))
        self.assertTrue(is_valid_php_version_file_version('7.1.x'))
        self.assertTrue(is_valid_php_version_file_version('7.2.x'))

    def test_snapshot_versions(self):
        self.assertTrue(is_valid_php_version_file_version('5.4snapshot'))
        self.assertTrue(is_valid_php_version_file_version('5.5snapshot'))
        self.assertTrue(is_valid_php_version_file_version('5.6snapshot'))
        self.assertTrue(is_valid_php_version_file_version('7.0snapshot'))
        self.assertTrue(is_valid_php_version_file_version('7.1snapshot'))
        self.assertTrue(is_valid_php_version_file_version('7.0.0snapshot'))
        self.assertTrue(is_valid_php_version_file_version('7.1.0snapshot'))
        self.assertTrue(is_valid_php_version_file_version('7.1.1snapshot'))
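The checks above enumerate one assertion per literal, so a run stops at the first failure inside a method. A sketch of a parametrized alternative using the standard library's subTest, which reports each value independently (the values below just mirror a few cases from the test above):

import unittest

from PHPUnitKit.plugin import is_valid_php_version_file_version


class TestVersionsParametrized(unittest.TestCase):

    def test_valid_values(self):
        # Each value gets its own pass/fail record instead of aborting
        # the whole method at the first failed assertion.
        for version in ('master', '5.6', '7.0.x', '7.1snapshot'):
            with self.subTest(version=version):
                self.assertTrue(is_valid_php_version_file_version(version))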
| 52.754098
| 75
| 0.757613
| 468
| 3,218
| 4.724359
| 0.106838
| 0.129806
| 0.185436
| 0.315242
| 0.850294
| 0.850294
| 0.83763
| 0.83763
| 0.818182
| 0.700136
| 0
| 0.02576
| 0.131448
| 3,218
| 60
| 76
| 53.633333
| 0.765295
| 0
| 0
| 0.08
| 0
| 0
| 0.071473
| 0
| 0
| 0
| 0
| 0
| 0.8
| 1
| 0.14
| false
| 0
| 0.04
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
609c675a647587e5e1ba2913f0c1ade0647fff7d
| 17,264
|
py
|
Python
|
src/oci/service_catalog/service_catalog_client_composite_operations.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/service_catalog/service_catalog_client_composite_operations.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/service_catalog/service_catalog_client_composite_operations.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
import oci  # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND  # noqa: F401


class ServiceCatalogClientCompositeOperations(object):
    """
    This class provides a wrapper around :py:class:`~oci.service_catalog.ServiceCatalogClient` and offers convenience methods
    for operations that would otherwise need to be chained together. For example, instead of performing an action
    on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource
    to enter a given state, you can call a single method in this class to accomplish the same functionality.
    """

    def __init__(self, client, **kwargs):
        """
        Creates a new ServiceCatalogClientCompositeOperations object

        :param ServiceCatalogClient client:
            The service client which will be wrapped by this object
        """
        self.client = client

    def change_private_application_compartment_and_wait_for_state(self, private_application_id, change_private_application_compartment_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.change_private_application_compartment` and waits for the :py:class:`~oci.service_catalog.models.WorkRequest`
        to enter the given state(s).

        :param str private_application_id: (required)
            The unique identifier for the private application.

        :param oci.service_catalog.models.ChangePrivateApplicationCompartmentDetails change_private_application_compartment_details: (required)
            The details of the request to change the compartment of a given private application.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.WorkRequest.status`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.change_private_application_compartment`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.change_private_application_compartment(private_application_id, change_private_application_compartment_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result
        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.headers['opc-work-request-id']

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_work_request(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def create_private_application_and_wait_for_state(self, create_private_application_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.create_private_application` and waits for the :py:class:`~oci.service_catalog.models.WorkRequest`
        to enter the given state(s).

        :param oci.service_catalog.models.CreatePrivateApplicationDetails create_private_application_details: (required)
            Private application creation details.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.WorkRequest.status`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.create_private_application`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.create_private_application(create_private_application_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result
        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.headers['opc-work-request-id']

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_work_request(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def create_service_catalog_and_wait_for_state(self, create_service_catalog_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.create_service_catalog` and waits for the :py:class:`~oci.service_catalog.models.ServiceCatalog` acted upon
        to enter the given state(s).

        :param oci.service_catalog.models.CreateServiceCatalogDetails create_service_catalog_details: (required)
            The details for creating a service catalog.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.ServiceCatalog.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.create_service_catalog`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.create_service_catalog(create_service_catalog_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result
        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_service_catalog(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def delete_private_application_and_wait_for_state(self, private_application_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.delete_private_application` and waits for the :py:class:`~oci.service_catalog.models.WorkRequest`
        to enter the given state(s).

        :param str private_application_id: (required)
            The unique identifier for the private application.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.WorkRequest.status`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.delete_private_application`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = None
        try:
            operation_result = self.client.delete_private_application(private_application_id, **operation_kwargs)
        except oci.exceptions.ServiceError as e:
            if e.status == 404:
                return WAIT_RESOURCE_NOT_FOUND
            else:
                raise e

        if not wait_for_states:
            return operation_result
        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.headers['opc-work-request-id']

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_work_request(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def delete_service_catalog_and_wait_for_state(self, service_catalog_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.delete_service_catalog` and waits for the :py:class:`~oci.service_catalog.models.ServiceCatalog` acted upon
        to enter the given state(s).

        :param str service_catalog_id: (required)
            The unique identifier for the service catalog.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.ServiceCatalog.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.delete_service_catalog`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        initial_get_result = self.client.get_service_catalog(service_catalog_id)
        operation_result = None
        try:
            operation_result = self.client.delete_service_catalog(service_catalog_id, **operation_kwargs)
        except oci.exceptions.ServiceError as e:
            if e.status == 404:
                return WAIT_RESOURCE_NOT_FOUND
            else:
                raise e

        if not wait_for_states:
            return operation_result
        lowered_wait_for_states = [w.lower() for w in wait_for_states]

        try:
            waiter_result = oci.wait_until(
                self.client,
                initial_get_result,
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                succeed_on_not_found=True,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_private_application_and_wait_for_state(self, private_application_id, update_private_application_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.update_private_application` and waits for the :py:class:`~oci.service_catalog.models.WorkRequest`
        to enter the given state(s).

        :param str private_application_id: (required)
            The unique identifier for the private application.

        :param oci.service_catalog.models.UpdatePrivateApplicationDetails update_private_application_details: (required)
            The details for updating the private application.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.WorkRequest.status`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.update_private_application`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_private_application(private_application_id, update_private_application_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result
        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.headers['opc-work-request-id']

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_work_request(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_service_catalog_and_wait_for_state(self, service_catalog_id, update_service_catalog_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.service_catalog.ServiceCatalogClient.update_service_catalog` and waits for the :py:class:`~oci.service_catalog.models.ServiceCatalog` acted upon
        to enter the given state(s).

        :param str service_catalog_id: (required)
            The unique identifier for the service catalog.

        :param oci.service_catalog.models.UpdateServiceCatalogDetails update_service_catalog_details: (required)
            Details to update for a service catalog.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.service_catalog.models.ServiceCatalog.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.service_catalog.ServiceCatalogClient.update_service_catalog`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_service_catalog(service_catalog_id, update_service_catalog_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result
        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_service_catalog(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
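Every composite method above follows the same shape: run the operation, and if wait_for_states was given, poll a get call until the resource's state (lowercased) lands in the requested set. A minimal generic sketch of that wait loop, independent of the OCI SDK (the fetch callable and its lifecycle_state attribute are hypothetical stand-ins for the SDK's get_* calls):

import time

def wait_for_state(fetch, wait_for_states, max_wait_seconds=120, interval_seconds=2):
    # fetch() is a hypothetical callable returning an object with a
    # `lifecycle_state` attribute; states are compared case-insensitively,
    # mirroring the lowered_wait_for_states logic above.
    deadline = time.monotonic() + max_wait_seconds
    wanted = {s.lower() for s in wait_for_states}
    while time.monotonic() < deadline:
        resource = fetch()
        if resource.lifecycle_state and resource.lifecycle_state.lower() in wanted:
            return resource
        time.sleep(interval_seconds)
    raise TimeoutError('resource did not reach %s in time' % sorted(wanted))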
| 54.460568
| 245
| 0.703313
| 2,187
| 17,264
| 5.301326
| 0.096936
| 0.079696
| 0.047093
| 0.037692
| 0.876919
| 0.854407
| 0.84837
| 0.837157
| 0.822839
| 0.814991
| 0
| 0.002019
| 0.225266
| 17,264
| 316
| 246
| 54.632911
| 0.864822
| 0.477815
| 0
| 0.791367
| 0
| 0
| 0.026177
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057554
| false
| 0
| 0.014388
| 0
| 0.194245
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
60e8913b47be138bc8536280cabcc4db4221cdf3
| 1,179
|
py
|
Python
|
challenges/binary_search/test_binary_search.py
|
asakatida/data-structures-and-algorithms.py
|
587d1a66a6c15a3c7d7786275608f065687e1810
|
[
"MIT"
] | null | null | null |
challenges/binary_search/test_binary_search.py
|
asakatida/data-structures-and-algorithms.py
|
587d1a66a6c15a3c7d7786275608f065687e1810
|
[
"MIT"
] | 2
|
2020-09-24T13:13:49.000Z
|
2021-06-25T15:15:35.000Z
|
challenges/binary_search/test_binary_search.py
|
grandquista/data-structures-and-algorithms.py
|
587d1a66a6c15a3c7d7786275608f065687e1810
|
[
"MIT"
] | null | null | null |
from .binary_search import binary_search


def test_binary_search_empty_array():
    assert binary_search([], 0) == -1


def test_binary_search_find_single_array():
    assert binary_search([3], 3) == 0


def test_binary_search_not_found_single_array():
    assert binary_search([1], 0) == -1


def test_binary_search_not_found_in_short_array():
    assert binary_search([1, 2, 3], 0) == -1


def test_binary_search_found_at_begining():
    assert binary_search([0, 1, 2, 3, 4, 5], 0) == 0


def test_binary_search_found_at_end():
    assert binary_search([0, 1, 3, 4, 5], 5) == 4


def test_binary_search_found_at_middle_even():
    assert binary_search([0, 1, 3, 5], 3) == 2


def test_binary_search_found_at_middle_odd():
    assert binary_search([1, 3, 5], 3) == 1


def test_binary_search_high_value():
    assert binary_search([1, 3, 5], 3) == 1


def test_binary_search_large_array_low():
    assert binary_search(list(range(0xFFFFFF)), 0xFF) == 0xFF


def test_binary_search_large_array_high():
    assert binary_search(list(range(0xFFFFFF)), 0xFFFFF) == 0xFFFFF


def test_binary_search_large_array_not_found():
    assert binary_search(list(range(0xFFFFFF)), -4) == -1
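A reference implementation consistent with every assertion above (index of the target in a sorted list, or -1), sketched with the standard library's bisect module; the actual challenges/binary_search module may of course differ:

from bisect import bisect_left

def binary_search(sorted_list, target):
    # bisect_left finds the leftmost insertion point in O(log n);
    # the target is present only if that slot actually holds it.
    index = bisect_left(sorted_list, target)
    if index < len(sorted_list) and sorted_list[index] == target:
        return index
    return -1

assert binary_search([], 0) == -1
assert binary_search([0, 1, 3, 5], 3) == 2
assert binary_search(list(range(0xFFFFFF)), -4) == -1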
| 23.58
| 67
| 0.721798
| 189
| 1,179
| 4.089947
| 0.190476
| 0.403622
| 0.201811
| 0.294955
| 0.798189
| 0.639069
| 0.191462
| 0.108668
| 0.108668
| 0.108668
| 0
| 0.052947
| 0.150975
| 1,179
| 49
| 68
| 24.061224
| 0.719281
| 0
| 0
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039016
| 0
| 0.48
| 1
| 0.48
| true
| 0
| 0.04
| 0
| 0.52
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
60f7e54acc60354d75596811ff04f18911fc24eb
| 6,362
|
py
|
Python
|
tests/integration/insights/v1/call/test_metric.py
|
pazzy-stack/twilio
|
d3b9b9f1b17b9de89b2528e8d2ffd33edf9676e0
|
[
"MIT"
] | null | null | null |
tests/integration/insights/v1/call/test_metric.py
|
pazzy-stack/twilio
|
d3b9b9f1b17b9de89b2528e8d2ffd33edf9676e0
|
[
"MIT"
] | null | null | null |
tests/integration/insights/v1/call/test_metric.py
|
pazzy-stack/twilio
|
d3b9b9f1b17b9de89b2528e8d2ffd33edf9676e0
|
[
"MIT"
] | null | null | null |
# coding=utf-8
r"""
This code was generated by
\ / _    _  _|   _  _
 | (_)\/(_)(_|\/| |(/_  v1.0.0
       /       /
"""

from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response


class MetricTestCase(IntegrationTestCase):

    def test_list_request(self):
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                   .metrics.list()

        self.holodeck.assert_has_request(Request(
            'get',
            'https://insights.twilio.com/v1/Voice/CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Metrics',
        ))

    def test_read_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "next_page_url": null,
                    "key": "metrics",
                    "url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0"
                },
                "metrics": [
                    {
                        "timestamp": "2019-10-07T22:32:06Z",
                        "call_sid": "CA7569efe0253644fa4a88aa97beca3310",
                        "account_sid": "AC998c10b68cbfda9f67277f7d8f4439c9",
                        "edge": "sdk_edge",
                        "direction": "both",
                        "sdk_edge": {
                            "interval": {
                                "packets_received": 50,
                                "packets_lost": 0,
                                "audio_in": {
                                    "value": 81.0
                                },
                                "audio_out": {
                                    "value": 5237.0
                                },
                                "jitter": {
                                    "value": 9
                                },
                                "mos": {
                                    "value": 4.39
                                },
                                "rtt": {
                                    "value": 81
                                }
                            },
                            "cumulative": {
                                "bytes_received": 547788,
                                "bytes_sent": 329425,
                                "packets_received": 3900,
                                "packets_lost": 0,
                                "packets_sent": 3934
                            }
                        },
                        "client_edge": null,
                        "carrier_edge": null,
                        "sip_edge": null,
                        "gateway": null,
                        "client": null
                    }
                ]
            }
            '''
        ))

        actual = self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                        .metrics.list()

        self.assertIsNotNone(actual)

    def test_read_full_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "page": 10,
                    "page_size": 5,
                    "first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=0",
                    "previous_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=9&PageToken=DP10",
                    "next_page_url": null,
                    "key": "metrics",
                    "url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=10"
                },
                "metrics": [
                    {
                        "timestamp": "2019-10-07T22:32:06Z",
                        "call_sid": "CA7569efe0253644fa4a88aa97beca3310",
                        "account_sid": "AC998c10b68cbfda9f67277f7d8f4439c9",
                        "edge": "sdk_edge",
                        "direction": "both",
                        "sdk_edge": {
                            "interval": {
                                "packets_received": 50,
                                "packets_lost": 0,
                                "audio_in": {
                                    "value": 81.0
                                },
                                "audio_out": {
                                    "value": 5237.0
                                },
                                "jitter": {
                                    "value": 9
                                },
                                "mos": {
                                    "value": 4.39
                                },
                                "rtt": {
                                    "value": 81
                                }
                            },
                            "cumulative": {
                                "bytes_received": 547788,
                                "bytes_sent": 329425,
                                "packets_received": 3900,
                                "packets_lost": 0,
                                "packets_sent": 3934
                            }
                        },
                        "client_edge": null,
                        "carrier_edge": null,
                        "sip_edge": null,
                        "gateway": null,
                        "client": null
                    }
                ]
            }
            '''
        ))

        actual = self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                        .metrics.list()

        self.assertIsNotNone(actual)
| 40.265823
| 185
| 0.365923
| 389
| 6,362
| 5.812339
| 0.269923
| 0.021672
| 0.05042
| 0.058381
| 0.814684
| 0.791685
| 0.778859
| 0.778859
| 0.778859
| 0.737284
| 0
| 0.076923
| 0.536152
| 6,362
| 157
| 186
| 40.522293
| 0.689258
| 0.017133
| 0
| 0.482759
| 1
| 0
| 0.145916
| 0.080888
| 0
| 0
| 0
| 0
| 0.137931
| 1
| 0.103448
| false
| 0
| 0.137931
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
716fc75d575164c084b19d0f3c008a98785ed3a6
| 20,287
|
py
|
Python
|
OSAnalysisHelper.py
|
nassermarafi/SRCSWArchetypes
|
105a5e40ef0ba1951108dc52b382ae0c5457057a
|
[
"MIT"
] | 7
|
2020-04-29T08:44:12.000Z
|
2022-03-05T04:00:11.000Z
|
OSAnalysisHelper.py
|
nassermarafi/SRCSWArchetypes
|
105a5e40ef0ba1951108dc52b382ae0c5457057a
|
[
"MIT"
] | null | null | null |
OSAnalysisHelper.py
|
nassermarafi/SRCSWArchetypes
|
105a5e40ef0ba1951108dc52b382ae0c5457057a
|
[
"MIT"
] | 4
|
2019-12-20T04:38:11.000Z
|
2021-11-21T18:25:34.000Z
|
from __future__ import absolute_import

__author__ = 'marafi'


def SolutionAlgorithim(OData, Dt, Tol, Steps):
    # Insert within the While loop, make sure parameter "ok" is defined
    import OpenSeesAPI
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %f and Tol: %f ... "'%(Dt,Tol)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))

    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton with Initial Tangent ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Newton(Initial=True))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))

    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Broyden ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Broyden(8))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))

    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
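# Note on the pattern above: OpenSees leaves $ok at 0 when a step converges
# and non-zero otherwise, so each emitted 'if {$ok != 0}' block only
# re-attempts the failed step with the next algorithm (Newton line search,
# Newton with initial tangent, Broyden, KrylovNewton); once any attempt
# succeeds, the remaining fallback blocks become no-ops.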
def SolutionAlgorithimV2(OData, Dt, Tol, Steps):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %f and Tol: %f ... "'%(Dt,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Krylov... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton(MaxDim = 6))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch Bisection... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch Secant... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch RegulaFalsi... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def SolutionAlgorithimKrylovOnly(OData, Dt, Tol, Steps, MaxDim = 6):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %e and Tol: %e ... "'%(Dt,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Krylov... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, 1000, 2))
# OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton(MaxDim = MaxDim))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %e ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def SenSolutionAlgorithim(OData, Dt, Steps, Tol = 1e-12, KrylovMaxDim = 12, MinDt = 1e-12, NoOfIterations=3000):
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set conv_tol %e'%Tol))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set max_iter %d;'%NoOfIterations))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, 3000, 0))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('test EnergyIncr $conv_tol $max_iter;'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('algorithm Newton;'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('integrator Newmark 0.5 0.25;'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('analysis Transient;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set dt %e;'%Dt))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set min_dt %e;'%MinDt))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set n_steps %d;'%Steps))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set cur_step 1;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set div 10.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set tol 1.0e-12;'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('set eigenvalue [eigen 9];'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('modalDamping 0.02;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('while {$cur_step < $n_steps} {'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, NoOfIterations, 0))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript(' test EnergyIncr $conv_tol $max_iter;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' algorithm Newton;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set ok [analyze 1 $dt];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set dt_temp [expr $dt];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "> analysis failed to converge at step $cur_step";'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "> trying KrylovNewton";'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' algorithm KrylovNewton -maxDim %d;'%KrylovMaxDim))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set ok [analyze 1 $dt];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set t 0.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_t 0.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set dt_temp [expr round($dt/$div/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_dt_temp 0.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' while {$t < $dt} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$dt_temp < $min_dt} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "<< model did not converge (reason: time step less than $min_dt)";'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "<< exiting safely";'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' wipe;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' exit;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$dt_temp < [expr $dt/pow($div, 2)]} {'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol*10, NoOfIterations, 0))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript(' test EnergyIncr [expr $conv_tol*10.0] $max_iter;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set ok [analyze 1 $dt_temp];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$ok == 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set t [expr round(($t + $dt_temp)/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_t [expr round(($mini_t + $dt_temp)/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$mini_t >= $mini_dt_temp} {set dt_temp [expr round($dt_temp*$div/$tol)*$tol]};'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' } else {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_t 0.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_dt_temp [expr round($dt_temp/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set dt_temp [expr round($dt_temp/$div/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$cur_step % 1 == 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "Running Tim History Step: $cur_step out of %d (Sen Algo.)";'%Steps))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' incr cur_step;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('};'))
def PushOverSolutionAlgorithim(OData, StepSize, Tol, ControlNode):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton with Initial Tangent ... "'))
# OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
# OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Newton(Initial=True))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
#
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Broyden ... "'))
# OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
# OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Broyden(8))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimDispIncr(OData, StepSize, Tol, ControlNode):
# Insert within the while loop; the Tcl variable "ok" must already be defined.
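# Same fallback cascade as PushOverSolutionAlgorithim above, but convergence
# is checked with a NormDispIncr test instead of EnergyIncr.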
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
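# The "ConstantAlgorithm" variants below are trimmed versions of the cascade:
# they shrink the step and retry with KrylovNewton only, instead of walking
# through the full list of line-search algorithms.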
def PushOverSolutionAlgorithimConstantAlgorithm(OData, StepSize, Tol, ControlNode, Iter=1000):
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimConstantAlgorithmDispIncr(OData, StepSize, Tol, ControlNode, NoOfIterations=1000):
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,NoOfIterations,2))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimConstantTol(OData, Tol, Iter=1000):
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
| 63.199377
| 137
| 0.724257
| 2,353
| 20,287
| 6.222694
| 0.056524
| 0.242863
| 0.433684
| 0.359514
| 0.938328
| 0.928152
| 0.907663
| 0.8757
| 0.856645
| 0.843259
| 0
| 0.014839
| 0.113077
| 20,287
| 321
| 138
| 63.199377
| 0.798922
| 0.087642
| 0
| 0.762846
| 0
| 0.003953
| 0.202727
| 0.008819
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035573
| false
| 0
| 0.039526
| 0
| 0.075099
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
71a38554040095f344a4dbd4dbed0540a3d29b06
| 505
|
py
|
Python
|
terrascript/dns/r.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 4
|
2022-02-07T21:08:14.000Z
|
2022-03-03T04:41:28.000Z
|
terrascript/dns/r.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/dns/r.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 2
|
2022-02-06T01:49:42.000Z
|
2022-02-08T14:15:00.000Z
|
# terrascript/dns/r.py
import terrascript
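# Thin generated stubs: each class below exposes one Terraform "dns" provider
# resource as a terrascript.Resource subclass. Usage sketch (an assumption,
# following the 0.9-style python-terrascript API; label and arguments are
# illustrative):
#   ts = terrascript.Terrascript()
#   ts += dns_a_record_set('www', zone='example.com.', addresses=['10.0.0.1'])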
class dns_a_record_set(terrascript.Resource):
pass
class dns_aaaa_record_set(terrascript.Resource):
pass
class dns_cname_record(terrascript.Resource):
pass
class dns_mx_record_set(terrascript.Resource):
pass
class dns_ns_record_set(terrascript.Resource):
pass
class dns_ptr_record(terrascript.Resource):
pass
class dns_srv_record_set(terrascript.Resource):
pass
class dns_txt_record_set(terrascript.Resource):
pass
| 14.428571
| 48
| 0.778218
| 68
| 505
| 5.455882
| 0.264706
| 0.172507
| 0.495957
| 0.528302
| 0.824798
| 0.738544
| 0.539084
| 0
| 0
| 0
| 0
| 0
| 0.150495
| 505
| 34
| 49
| 14.852941
| 0.864802
| 0.039604
| 0
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.470588
| 0.058824
| 0
| 0.529412
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
e0c1de96552a87c4acd6be415b90d60425c9c9cb
| 64,469
|
py
|
Python
|
nuage_tempest_plugin/tests/api/test_nuage_ports.py
|
nuagenetworks/nuage-tempest-plugin
|
ac1bfb0709c7bbaf04017af3050fb3ed1ad1324a
|
[
"Apache-1.1"
] | 1
|
2021-01-03T01:47:51.000Z
|
2021-01-03T01:47:51.000Z
|
nuage_tempest_plugin/tests/api/test_nuage_ports.py
|
nuagenetworks/nuage-tempest-plugin
|
ac1bfb0709c7bbaf04017af3050fb3ed1ad1324a
|
[
"Apache-1.1"
] | null | null | null |
nuage_tempest_plugin/tests/api/test_nuage_ports.py
|
nuagenetworks/nuage-tempest-plugin
|
ac1bfb0709c7bbaf04017af3050fb3ed1ad1324a
|
[
"Apache-1.1"
] | 1
|
2020-10-16T12:04:39.000Z
|
2020-10-16T12:04:39.000Z
|
# Copyright 2017 NOKIA
# All Rights Reserved.
from netaddr import IPNetwork
import testtools
from tempest.common import waiters
from tempest.lib import exceptions
from tempest.scenario import manager
from tempest.test import decorators
from nuage_tempest_plugin.lib.test.nuage_test import NuageAdminNetworksTest
from nuage_tempest_plugin.lib.test.nuage_test import NuageBaseTest
from nuage_tempest_plugin.lib.topology import Topology
from nuage_tempest_plugin.lib.utils import constants
from nuage_tempest_plugin.services.nuage_client import NuageRestClient
CONF = Topology.get_conf()
LOG = Topology.get_logger(__name__)
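# Port CRUD tests that validate Neutron-side state against the corresponding
# Nuage VSD entities (vports, virtual IPs, address-spoofing flags).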
class PortsTest(NuageBaseTest, NuageAdminNetworksTest,
manager.NetworkScenarioTest):
@classmethod
def setup_clients(cls):
super(PortsTest, cls).setup_clients()
cls.vsd_client = NuageRestClient()
def show_port(self, port_id):
"""Wrapper utility that shows a given port."""
body = self.ports_client.show_port(port_id)
return body['port']
def _create_server(self, name, network, port_id=None):
keypair = self.create_keypair()
network = {'uuid': network['id']}
if port_id is not None:
network['port'] = port_id
return self.create_server(
name=name,
networks=[network],
key_name=keypair['name'],
wait_until='ACTIVE')
def _delete_server(self, server_id, clients=None):
if clients is None:
clients = self.os_primary
clients.servers_client.delete_server(server_id)
waiters.wait_for_server_termination(clients.servers_client, server_id)
@decorators.attr(type='smoke')
def test_nuage_dhcp_port_create_check_status(self):
network = self.create_network()
self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
filters = {
'device_owner': 'network:dhcp:nuage',
'network_id': network['id']
}
dhcp_port = self.ports_client.list_ports(**filters)['ports'][0]
self.assertEqual('ACTIVE', dhcp_port['status'])
@decorators.attr(type='smoke')
def test_nuage_dhcp_port_with_router_detach_check_status(self):
network = self.create_network()
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"],
cleanup=False)
self.routers_client.remove_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
filters = {
'device_owner': 'network:dhcp:nuage',
'network_id': network['id']
}
dhcp_port = self.ports_client.list_ports(**filters)['ports'][0]
self.assertEqual('ACTIVE', dhcp_port['status'])
@decorators.attr(type='smoke')
def test_nuage_port_create_show_check_status(self):
network = self.create_network()
self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
port = self.create_port(network)
self.assertEqual('DOWN', port['status'])
port = self.show_port(port['id'])
# The status has to remain DOWN as long as the port is unbound.
self.assertEqual('DOWN', port['status'])
@decorators.attr(type='smoke')
def test_nuage_port_create_server_create_delete_check_status(self):
network = self.create_network()
self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=24)
port = self.create_port(network)
server = self._create_server('s1', network, port['id'])
port = self.show_port(port['id'])
self.assertEqual('ACTIVE', port['status'])
self._delete_server(server['id'])
port = self.show_port(port['id'])
self.assertEqual('DOWN', port['status'])
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_negative(self):
# Set up resources
# Base resources
if self.is_dhcp_agent_present():
raise self.skipException(
'Cannot run this test case when DHCP agent is enabled')
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
subnet2 = self.create_subnet(network, cidr=IPNetwork("20.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet2, "Unable to create second subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "20.0.0.4",
"subnet_id": subnet2["id"]
}
]
# Fail
msg = "Port can't have multiple IPv4 IPs of different subnets"
self.assertRaisesRegex(exceptions.BadRequest,
msg,
self.create_port,
network=network, fixed_ips=fixed_ips)
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_nuage_os_managed_subnet_port_create_with_nuage_policy_negative(
self):
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
msg = ("Cannot use VSP policy groups on OS managed subnets,"
" use neutron security groups instead.")
self.assertRaisesRegex(exceptions.BadRequest,
msg,
self.create_port,
network=network,
nuage_policy_groups=['Random_value'])
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_nuage_os_managed_subnet_port_update_with_nuage_policy_negative(
self):
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
port = self.create_port(network=network)
self.assertIsNotNone(port, "Unable to create port")
msg = ("Cannot use VSP policy groups on OS managed subnets,"
" use neutron security groups instead.")
self.assertRaisesRegex(exceptions.BadRequest,
msg,
self.update_port,
port=port,
nuage_policy_groups=['Random_value'])
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_negative(self):
if self.is_dhcp_agent_present():
raise self.skipException(
'Multiple subnets in a network not supported when DHCP agent '
'is enabled.')
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
subnet2 = self.create_subnet(network, cidr=IPNetwork("20.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet2, "Unable to create second subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
self.create_router_interface(router_id=router["id"],
subnet_id=subnet2["id"])
# Create port
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
self.assertEqual(port["fixed_ips"][0]["ip_address"], "10.0.0.5",
message="The port did not update properly.")
# Update to subnet2 should fail
fixed_ips = [
{
"ip_address": "20.0.0.4",
"subnet_id": subnet2["id"]
}
]
try:
self.update_port(port=port, fixed_ips=fixed_ips)
self.fail("Exception expected when updating to"
" a different subnet!")
except exceptions.BadRequest as e:
if "Updating fixed ip of port" in e._error_string:
pass
else:
# Differentiate between VSD failure and update failure
LOG.debug(e._error_string)
self.fail("A different NuageBadRequest exception"
" was expected for this operation.")
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l2(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l2(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l3(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
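# Recurring VSD verification pattern used throughout these tests: fetch the
# vport's virtual IPs and flag any VIP outside the expected set (and, where
# checked, any VIP whose MAC differs from the port's MAC).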
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l3_no_security(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips,
port_security_enabled=False)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l3_no_security(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.5',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network,
fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=[],
security_groups=[],
port_security_enabled=False)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l3(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l2_with_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.50',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l2_with_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.50',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port,
fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l3_with_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_l3_with_aap_outside_cidr(
self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '1.1.1.5',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4']
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l3_with_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_subnet_l3_with_aap_with_vm(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.10',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
self._create_server(name='vm-' + network['name'],
network=network, port_id=port['id'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.7',
'mac_address': 'fe:a0:36:4b:c8:70'},
{'ip_address': '10.0.0.10',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address'],
allowed_address_pairs[1]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_app_to_fixed_ips_l3_with_vm(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
self._create_server(name='vm-' + network['name'],
network=network, port_id=port['id'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.7',
'mac_address': 'fe:a0:36:4b:c8:70'},
{'ip_address': '10.0.0.10',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address'],
allowed_address_pairs[1]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ip_with_vm_and_conflict_with_aap_neg(
self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.10',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
self._create_server(name='vm-' + network['name'],
network=network, port_id=port['id'])
fixed_ips = [
{
"ip_address": "10.0.0.8",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
# The update below must fail and roll back cleanly
try:
self.update_port(port=port, fixed_ips=fixed_ips)
self.fail("Exception expected when updating to"
" a different subnet!")
except exceptions.BadRequest as e:
if ('Bad request: The IP Address 10.0.0.6 is'
' currently in use by subnet' in e._error_string):
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
pass
else:
# Differentiate between VSD failure and update failure
LOG.debug(e._error_string)
self.fail("A different NuageBadRequest exception"
" was expected for this operation.")
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ip_same_as_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_update_fixed_ips_same_as_aap(self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
fixed_ips = [
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.6",
"subnet_id": subnet["id"]
}
]
port = self.update_port(port=port, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = [fixed_ips[0]["ip_address"],
allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
mac_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
if nuage_vport_vip['MAC'] != port['mac_address']:
mac_mismatch = True
self.assertEqual(vip_mismatch, False)
self.assertEqual(mac_mismatch, False)
@decorators.attr(type='smoke')
def test_nuage_port_create_fixed_ips_same_subnet_with_aap_router_attach(
self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.create_port(network=network, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED,
nuage_vport[0]['addressSpoofing'])
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_nuage_port_update_fixed_ips_same_subnet_with_aap_router_detach(
self):
# Set up resources
# Base resources
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
# Attach subnet
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"], cleanup=False)
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
}
]
port = self.create_port(network=network, fixed_ips=fixed_ips)
self.assertIsNotNone(port, "Unable to create port on network")
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.SUBNETWORK,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
# update within subnet should succeed
fixed_ips = [
{
"ip_address": "10.0.0.4",
"subnet_id": subnet["id"]
},
{
"ip_address": "10.0.0.5",
"subnet_id": subnet["id"]
}
]
allowed_address_pairs = [{'ip_address': '10.0.0.6',
'mac_address': 'fe:a0:36:4b:c8:70'}]
port = self.update_port(port=port, fixed_ips=fixed_ips,
allowed_address_pairs=allowed_address_pairs)
self.assertIsNotNone(port, "Unable to update port")
nuage_vport = self.vsd_client.get_vport(
constants.SUBNETWORK,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
valid_vips = ['10.0.0.4', allowed_address_pairs[0]['ip_address']]
nuage_vport_vips = self.vsd_client.get_virtual_ip(
constants.VPORT,
nuage_vport[0]['ID'])
vip_mismatch = False
if valid_vips and not nuage_vport_vips:
vip_mismatch = True
for nuage_vport_vip in nuage_vport_vips:
if nuage_vport_vip['virtualIP'] not in valid_vips:
vip_mismatch = True
self.assertEqual(vip_mismatch, False)
self.admin_routers_client.remove_router_interface(
router['id'],
subnet_id=subnet['id'])
vsd_vport_parent = self.vsd_client.get_global_resource(
constants.L2_DOMAIN,
filters='externalID',
filter_value=subnet['id'])[0]
nuage_vport = self.vsd_client.get_vport(
constants.L2_DOMAIN,
vsd_vport_parent['ID'],
filters='externalID',
filter_value=port['id'])
self.assertEqual(constants.ENABLED if Topology.from_nuage('5.4')
else constants.INHERITED,
nuage_vport[0]['addressSpoofing'])
@decorators.attr(type='smoke')
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_delete_unbound_port_with_hanging_vminterface(self):
# OPENSTACK-2797
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network, cidr=IPNetwork("10.0.0.0/24"),
mask_bits=28)
self.assertIsNotNone(subnet, "Unable to create subnet")
port = self.create_port(network=network, cleanup=False)
self.addCleanup(self._try_delete,
self.manager.ports_client.delete_port,
port['id'])
# Find vport
l2domain = self.vsd.get_l2domain(by_subnet_id=subnet['id'])
vport = self.vsd.get_vport(l2domain=l2domain, by_port_id=port['id'])
# Create "Fake" VM interface to simulate following behavior:
# -> Port is being bound -> VM created -> port deleted ->
# Port not bound but leftover VM on VSD
vminterface = self.vsd.vspk.NUVMInterface(
name='test-fip-vm', vport_id=vport.id,
external_id=self.vsd.external_id(port['id']),
mac='E6:04:AA:7A:AA:86', ip_address='10.0.0.10')
vm = self.vsd.vspk.NUVM(name='test-port-delete-vm',
uuid='1339f7f4-f7a0-445f-b257-8dbfaf0d6fc8',
external_id=self.vsd.external_id(
'1339f7f4-f7a0-445f-b257-8dbfaf0d6fc8'),
interfaces=[vminterface])
# Impersonate tenant user for appropriate permissions on VM
self.vsd.session().impersonate(port['tenant_id'],
self.default_netpartition_name)
self.vsd.session().user.create_child(vm)
self.vsd.session().stop_impersonate()
# Delete port, VM should be deleted in this request
self.delete_port(port)
# Verify that vport is deleted
vport = self.vsd.get_vport(l2domain=l2domain, by_port_id=port['id'])
self.assertIsNone(vport, 'Vport not deleted by Port delete statement')
| 41.220588
| 79
| 0.560114
| 7,069
| 64,469
| 4.845664
| 0.041732
| 0.050797
| 0.012845
| 0.025924
| 0.92941
| 0.921527
| 0.917148
| 0.912886
| 0.912273
| 0.901004
| 0
| 0.024036
| 0.33658
| 64,469
| 1,563
| 80
| 41.246961
| 0.776853
| 0.028867
| 0
| 0.805535
| 0
| 0
| 0.13242
| 0.001152
| 0
| 0
| 0
| 0
| 0.115805
| 1
| 0.022578
| false
| 0.001457
| 0.008012
| 0
| 0.032775
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e0f8ef5b2d2b11ceb48d819c7022ba608e70f8fd
| 17,241
|
py
|
Python
|
komodo2_rl/src/environments/Spawner.py
|
osheraz/komodo
|
d53759100ced7439dd501620f955f347087e4f63
|
[
"MIT"
] | 5
|
2020-08-11T08:47:25.000Z
|
2022-02-15T06:19:18.000Z
|
komodo2_rl/src/environments/Spawner.py
|
osheraz/komodo
|
d53759100ced7439dd501620f955f347087e4f63
|
[
"MIT"
] | null | null | null |
komodo2_rl/src/environments/Spawner.py
|
osheraz/komodo
|
d53759100ced7439dd501620f955f347087e4f63
|
[
"MIT"
] | 1
|
2021-05-06T14:25:17.000Z
|
2021-05-06T14:25:17.000Z
|
#!/usr/bin/env python
import rospy
import numpy as np
from gazebo_msgs.srv import SpawnModel, SpawnModelRequest, SpawnModelResponse
from copy import deepcopy
from tf.transformations import quaternion_from_euler
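# The SDF templates below use MODELNAME and SIZEXYZ as plain-text
# placeholders; callers substitute them before spawning, e.g. (a sketch,
# assuming a cube named "cube_0"):
#   sdf = sdf_cube.replace('MODELNAME', 'cube_0').replace('SIZEXYZ', '0.1 0.1 0.1')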
sdf_cube = """<?xml version="1.0" ?>
<sdf version="1.4">
<model name="MODELNAME">
<static>0</static>
<link name="link">
<inertial>
<mass>1.0</mass>
<inertia>
<ixx>0.01</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.01</iyy>
<iyz>0.0</iyz>
<izz>0.01</izz>
</inertia>
</inertial>
<collision name="stairs_collision0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<surface>
<bounce />
<friction>
<ode>
<mu>1.0</mu>
<mu2>1.0</mu2>
</ode>
</friction>
<contact>
<ode>
<kp>10000000.0</kp>
<kd>1.0</kd>
<min_depth>0.0</min_depth>
<max_vel>0.0</max_vel>
</ode>
</contact>
</surface>
</collision>
<visual name="stairs_visual0">
<pose>0 0 0 0 0 0</pose>
<geometry>
<box>
<size>SIZEXYZ</size>
</box>
</geometry>
<material>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Wood</name>
</script>
</material>
</visual>
<velocity_decay>
<linear>0.000000</linear>
<angular>0.000000</angular>
</velocity_decay>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<gravity>1</gravity>
</link>
</model>
</sdf>
"""
sdf_sand = """<?xml version='1.0'?>
<sdf version='1.6'>
<model name="MODELNAME">
<link name='link'>
<pose frame=''>0 0 0.01 0 0 0 </pose>
<inertial>
<mass>1</mass>
<inertia>
<ixx>0.1</ixx>
<ixy>0</ixy>
<ixz>0</ixz>
<iyy>0.1</iyy>
<iyz>0</iyz>
<izz>0.1</izz>
</inertia>
</inertial>
<visual name='visual'>
<pose frame=''>0 0 0 0 -0 0</pose>
<geometry>
<mesh>
<scale>SIZEXYZ</scale>
<uri>model://sand/sand_particle.stl</uri>
</mesh>
</geometry>
<material>
<lighting>1</lighting>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Yellow</name>
</script>
<ambient>0.3 0.25 0.1 1</ambient>
<diffuse>0.7 0.6 0.4 1</diffuse>
<specular>0.01 0.005 0.001 1</specular>
<emissive>0 0 0 1</emissive>
</material>
<transparency>0</transparency>
<cast_shadows>1</cast_shadows>
</visual>
<collision name='collision'>
<laser_retro>0</laser_retro>
<max_contacts>10</max_contacts>
<pose frame=''>0 0 0 0 -0 0</pose>
<geometry>
<mesh>
<scale>SIZEXYZ</scale>
<uri>model://sand/sand_particle.stl</uri>
</mesh>
</geometry>
<surface>
<friction>
<ode>
<mu>1</mu>
<mu2>1</mu2>
<fdir1>0 0 0</fdir1>
<slip1>0</slip1>
<slip2>0</slip2>
</ode>
<torsional>
<coefficient>1</coefficient>
<patch_radius>0</patch_radius>
<surface_radius>0</surface_radius>
<use_patch_radius>1</use_patch_radius>
<ode>
<slip>0</slip>
</ode>
</torsional>
</friction>
<bounce>
<restitution_coefficient>0.2</restitution_coefficient>
<threshold>1.01</threshold>
</bounce>
<contact>
<collide_without_contact>0</collide_without_contact>
<collide_without_contact_bitmask>1</collide_without_contact_bitmask>
<collide_bitmask>1</collide_bitmask>
<ode>
<soft_cfm>0</soft_cfm>
<soft_erp>0.2</soft_erp>
<kp>1e+13</kp>
<kd>1</kd>
<max_vel>0.01</max_vel>
<min_depth>0</min_depth>
</ode>
<bullet>
<split_impulse>1</split_impulse>
<split_impulse_penetration_threshold>-0.01</split_impulse_penetration_threshold>
<soft_cfm>0</soft_cfm>
<soft_erp>0.2</soft_erp>
<kp>1e+13</kp>
<kd>1</kd>
</bullet>
</contact>
</surface>
</collision>
</link>
<static>0</static>
<allow_auto_disable>1</allow_auto_disable>
</model>
</sdf>
"""
sdf_sand_box = """<sdf version='1.6'>
<model name='sand_box_osher'>
<link name='sand_box_osher'>
<pose frame=''>0 0 0 0 -0 0</pose>
<inertial>
<pose frame=''>-0.35285 -0.305 0.11027 0 -0 0</pose>
<mass>2000.892</mass>
<inertia>
<ixx>130.2204</ixx>
<ixy>-220.5538e-15</ixy>
<ixz>-4.85191</ixz>
<iyy>276.363</iyy>
<iyz>-77.9029e-15</iyz>
<izz>135.62</izz>
</inertia>
</inertial>
<collision name='sand_box_osher_collision'>
<pose frame=''>0 0 0 1.5708 -0 0</pose>
<geometry>
<mesh>
<scale>1 0.8 1</scale>
<uri>model://sand_box_osher/meshes/sand_box_osher.STL</uri>
</mesh>
</geometry>
</collision>
<visual name='sand_box_osher_visual'>
<pose frame=''>0 0 0 1.5708 -0 0</pose>
<geometry>
<mesh>
<scale>1 0.8 1</scale>
<uri>model://sand_box_osher/meshes/sand_box_osher.STL</uri>
</mesh>
</geometry>
<material>
<ambient>0.3 0.25 0.1 1</ambient>
<diffuse>0.7 0.6 0.4 1</diffuse>
<specular>0.01 0.005 0.001 1</specular>
<emissive>0 0 0 1</emissive>
</material>
<transparency>0.5</transparency>
</visual>
</link>
</model>
</sdf>
"""
sdf_unit_sphere = """<?xml version='1.0'?>
<sdf version='1.6'>
<model name="MODELNAME">
<link name='link'>
<pose frame=''>0 0 0 0 -0 0</pose>
<inertial>
<mass>0.1</mass>
<inertia>
<ixx>0.0000490147</ixx>
<ixy>0</ixy>
<ixz>0</ixz>
<iyy>0.000049147</iyy>
<iyz>0</iyz>
<izz>0.000049147</izz>
</inertia>
<pose frame=''>0 0 0 0 -0 0</pose>
</inertial>
<self_collide>0</self_collide>
<kinematic>0</kinematic>
<visual name='visual'>
<geometry>
<sphere>
<radius>RADIUS</radius>
</sphere>
</geometry>
<material>
<lighting>1</lighting>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Yellow</name>
</script>
<ambient>0.3 0.25 0.1 1</ambient>
<diffuse>0.7 0.6 0.4 1</diffuse>
<specular>0.01 0.005 0.001 1</specular>
<emissive>0 0 0 1</emissive>
</material>
<pose frame=''>0 0 0 0 -0 0</pose>
<transparency>0</transparency>
<cast_shadows>1</cast_shadows>
</visual>
<collision name='collision'>
<laser_retro>0</laser_retro>
<max_contacts>10</max_contacts>
<pose frame=''>0 0 0 0 -0 0</pose>
<geometry>
<sphere>
<radius>RADIUS</radius>
</sphere>
</geometry>
<surface>
<friction>
<ode>
<mu>1</mu>
<mu2>1</mu2>
<fdir1>0 0 0</fdir1>
<slip1>0</slip1>
<slip2>0</slip2>
</ode>
<torsional>
<coefficient>1</coefficient>
<patch_radius>0</patch_radius>
<surface_radius>0</surface_radius>
<use_patch_radius>1</use_patch_radius>
<ode>
<slip>0</slip>
</ode>
</torsional>
</friction>
<bounce>
<restitution_coefficient>0</restitution_coefficient>
<threshold>1e+06</threshold>
</bounce>
<contact>
<collide_without_contact>0</collide_without_contact>
<collide_without_contact_bitmask>1</collide_without_contact_bitmask>
<collide_bitmask>1</collide_bitmask>
<ode>
<soft_cfm>0</soft_cfm>
<soft_erp>0.2</soft_erp>
<kp>1e+13</kp>
<kd>1</kd>
<max_vel>0.01</max_vel>
<min_depth>0</min_depth>
</ode>
<bullet>
<split_impulse>1</split_impulse>
<split_impulse_penetration_threshold>-0.01</split_impulse_penetration_threshold>
<soft_cfm>0</soft_cfm>
<soft_erp>0.2</soft_erp>
<kp>1e+13</kp>
<kd>1</kd>
</bullet>
</contact>
</surface>
</collision>
</link>
<static>0</static>
<allow_auto_disable>1</allow_auto_disable>
</model>
</sdf>
"""
sdf_sand2 = """<?xml version='1.0'?>
<sdf version='1.6'>
<model name="MODELNAME">
<link name='link'>
<pose frame=''>0 0 0.01 0 0 0 </pose>
<inertial>
<mass>1</mass>
<inertia>
<ixx>0.1</ixx>
<ixy>0</ixy>
<ixz>0</ixz>
<iyy>0.1</iyy>
<iyz>0</iyz>
<izz>0.1</izz>
</inertia>
</inertial>
<visual name='visual'>
<pose frame=''>0 0 0 0 -0 0</pose>
<geometry>
<mesh>
<scale>SIZEXYZ</scale>
<uri>model://sand/sand_particle.stl</uri>
</mesh>
</geometry>
<material>
<lighting>1</lighting>
<script>
<uri>file://media/materials/scripts/gazebo.material</uri>
<name>Gazebo/Yellow</name>
</script>
<ambient>0.3 0.25 0.1 1</ambient>
<diffuse>0.7 0.6 0.4 1</diffuse>
<specular>0.01 0.005 0.001 1</specular>
<emissive>0 0 0 1</emissive>
</material>
<transparency>0</transparency>
<cast_shadows>1</cast_shadows>
</visual>
<collision name='collision'>
<laser_retro>0</laser_retro>
<max_contacts>10</max_contacts>
<pose frame=''>0 0 0 0 -0 0</pose>
<geometry>
<mesh>
<scale>SIZEXYZ</scale>
<uri>model://sand/sand_particle.stl</uri>
</mesh>
</geometry>
<surface>
<friction>
<ode>
<mu>1</mu>
<mu2>1</mu2>
<fdir1>0 0 0</fdir1>
<slip1>0</slip1>
<slip2>0</slip2>
</ode>
<torsional>
<coefficient>1</coefficient>
<patch_radius>0</patch_radius>
<surface_radius>0</surface_radius>
<use_patch_radius>1</use_patch_radius>
<ode>
<slip>0</slip>
</ode>
</torsional>
</friction>
<bounce>
<restitution_coefficient>0</restitution_coefficient>
<threshold>1e+06</threshold>
</bounce>
<contact>
<collide_without_contact>0</collide_without_contact>
<collide_without_contact_bitmask>1</collide_without_contact_bitmask>
<collide_bitmask>1</collide_bitmask>
<ode>
<soft_cfm>0</soft_cfm>
<soft_erp>0.2</soft_erp>
<kp>1e+13</kp>
<kd>1</kd>
<max_vel>0.01</max_vel>
<min_depth>0</min_depth>
</ode>
<bullet>
<split_impulse>1</split_impulse>
<split_impulse_penetration_threshold>-0.01</split_impulse_penetration_threshold>
<soft_cfm>0</soft_cfm>
<soft_erp>0.2</soft_erp>
<kp>1e+13</kp>
<kd>1</kd>
</bullet>
</contact>
</surface>
</collision>
</link>
<static>0</static>
<allow_auto_disable>1</allow_auto_disable>
</model>
</sdf>
"""
class Spawner:
def __init__(self):
self.px = 0
self.py = 0
self.pz = 0
self.rr = 0
self.rp = 0
self.rz = 0
self.sx = 0
self.sy = 0
self.sz = 0
def create_cube_request(self, modelname, px, py, pz, rr, rp, ry, sx, sy, sz):
"""Create a SpawnModelRequest for the given model (uses the sdf_sand2 template).
modelname: name of the model for gazebo
px py pz: position of the model (and its collision geometry)
rr rp ry: rotation (roll, pitch, yaw) of the model
sx sy sz: size (scale) of the model"""
cube = deepcopy(sdf_sand2)
# Replace size of model
size_str = str(round(sx, 3)) + " " + \
str(round(sy, 3)) + " " + str(round(sz, 3))
cube = cube.replace('SIZEXYZ', size_str)
# Replace modelname
cube = cube.replace('MODELNAME', str(modelname))
req = SpawnModelRequest()
req.model_name = modelname
req.model_xml = cube
req.initial_pose.position.x = px
req.initial_pose.position.y = py
req.initial_pose.position.z = pz
q = quaternion_from_euler(rr, rp, ry)
req.initial_pose.orientation.x = q[0]
req.initial_pose.orientation.y = q[1]
req.initial_pose.orientation.z = q[2]
req.initial_pose.orientation.w = q[3]
return req
def create_sphere_request(self, modelname, px, py, pz, rr, rp, ry, r):
"""Create a SpawnModelRequest for a sphere model.
modelname: name of the model for gazebo
px py pz: position of the sphere (and its collision sphere)
rr rp ry: rotation (roll, pitch, yaw) of the model
r: radius of the sphere"""
cube = deepcopy(sdf_unit_sphere)
# Replace size of model
cube = cube.replace('RADIUS', str(r))
# Replace modelname
cube = cube.replace('MODELNAME', str(modelname))
req = SpawnModelRequest()
req.model_name = modelname
req.model_xml = cube
req.initial_pose.position.x = px
req.initial_pose.position.y = py
req.initial_pose.position.z = pz
q = quaternion_from_euler(rr, rp, ry)
req.initial_pose.orientation.x = q[0]
req.initial_pose.orientation.y = q[1]
req.initial_pose.orientation.z = q[2]
req.initial_pose.orientation.w = q[3]
return req
def create_box_request(self, modelname, px, py, pz, rr, rp, ry):
"""Create a SpawnModelRequest for the sand box model.
modelname: name of the model for gazebo
px py pz: position of the box
rr rp ry: rotation (roll, pitch, yaw) of the model"""
cube = deepcopy(sdf_sand_box)
req = SpawnModelRequest()
req.model_name = modelname
req.model_xml = cube
req.initial_pose.position.x = px
req.initial_pose.position.y = py
req.initial_pose.position.z = pz
q = quaternion_from_euler(rr, rp, ry)
req.initial_pose.orientation.x = q[0]
req.initial_pose.orientation.y = q[1]
req.initial_pose.orientation.z = q[2]
req.initial_pose.orientation.w = q[3]
return req
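A minimal usage sketch for the class above, assuming the stock gazebo_ros SDF spawn service /gazebo/spawn_sdf_model (node name and pose values are illustrative):
import rospy
from gazebo_msgs.srv import SpawnModel

if __name__ == '__main__':
    rospy.init_node('spawner_demo')
    rospy.wait_for_service('/gazebo/spawn_sdf_model')
    spawn = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
    # Spawn a 5 cm sand particle at (0.5, 0, 0.2) with no rotation.
    req = Spawner().create_cube_request('sand_0',
                                        0.5, 0.0, 0.2,
                                        0.0, 0.0, 0.0,
                                        0.05, 0.05, 0.05)
    spawn(req)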
1cdc98744b311e2367992861b764dff14f24294c | 201 | py | Python
agatecharts/charts/__init__.py | onyxfish/fever | 8aef0cd4adff7fdde1f5950ffb1d01db9137e3b7 | ["MIT"] | 4 | 2015-09-05T04:47:27.000Z | 2015-09-16T15:14:43.000Z
agatecharts/charts/__init__.py | onyxfish/fever | 8aef0cd4adff7fdde1f5950ffb1d01db9137e3b7 | ["MIT"] | 18 | 2015-09-05T01:17:30.000Z | 2015-09-23T13:08:27.000Z
agatecharts/charts/__init__.py | onyxfish/way | 8aef0cd4adff7fdde1f5950ffb1d01db9137e3b7 | ["MIT"] | null | null | null
#!/usr/bin/env python
from agatecharts.charts.bars import Bars
from agatecharts.charts.columns import Columns
from agatecharts.charts.lines import Lines
from agatecharts.charts.scatter import Scatter
1ceb3eafc161d9fd9d9f5411f96898dcc0d87036 | 8,111 | py | Python
src/compas_rhino/objects/_select.py | jf---/compas | cd878ece933013b8ac34e9d42cf6d5c62a5396ee | ["MIT"] | 2 | 2021-03-17T18:14:22.000Z | 2021-09-19T13:50:02.000Z
src/compas_rhino/objects/_select.py | jf---/compas | cd878ece933013b8ac34e9d42cf6d5c62a5396ee | ["MIT"] | null | null | null
src/compas_rhino/objects/_select.py | jf---/compas | cd878ece933013b8ac34e9d42cf6d5c62a5396ee | ["MIT"] | null | null | null
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import ast
import rhinoscriptsyntax as rs
__all__ = [
'mesh_select_vertex',
'mesh_select_vertices',
'mesh_select_face',
'mesh_select_faces',
'mesh_select_edge',
'mesh_select_edges',
'network_select_node',
'network_select_nodes',
'network_select_edge',
'network_select_edges',
]
def mesh_select_vertex(mesh, message="Select a vertex."):
"""Select a single vertex of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def mesh_select_vertices(mesh, message="Select vertices."):
"""Select multiple vertices of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
if key not in seen:
seen.add(key)
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_face(mesh, message="Select a face."):
"""Select a single face of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
key = ast.literal_eval(key)
return key
return None
def mesh_select_faces(mesh, message="Select faces."):
"""Select multiple faces of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
if key not in seen:
seen.add(key)
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_edge(mesh, message="Select an edge."):
"""Select a single edge of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
tuple of int, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def mesh_select_edges(mesh, message="Select edges."):
"""Select multiple edges of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of tuple of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if key not in seen:
seen.add(key)
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
def network_select_node(network, message="Select a node."):
"""Select a single node of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
hashable or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def network_select_nodes(network, message="Select nodes."):
"""Select multiple nodes of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
if key not in seen:
seen.add(key)
key = ast.literal_eval(key)
keys.append(key)
return keys
def network_select_edge(network, message="Select an edge."):
"""Select a single edge of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
tuple of hashable, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def network_select_edges(network, message="Select edges."):
"""Select multiple edges of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of tuple of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if key not in seen:
seen.add(key)
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
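All selectors above rely on the same dot-separated object-name convention, "<prefix>.<element>.<key>"; a small self-contained sketch of that parsing logic (illustrative only, not a compas API):
def parse_object_name(object_name, element, prefix=None):
    """Return the key encoded in a Rhino object name, or None."""
    parts = object_name.split('.')
    if element not in parts:
        return None  # object does not carry this element type
    if prefix and prefix not in parts:
        return None  # object belongs to a different datastructure
    return parts[-1]  # e.g. '12' for a vertex, '3-7' for an edge

assert parse_object_name('mymesh.vertex.12', 'vertex', 'mymesh') == '12'
assert parse_object_name('mymesh.edge.3-7', 'edge') == '3-7'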
1c35f69ad59be07090db7f3539f86ff7d6d0b4e8 | 4,203 | py | Python
server/forestgame/game/test_world.py | Nick-Pearson/forestgame | 8a37225adbe6da9df7851eba34ad06806da0ce48 | ["0BSD"] | null | null | null
server/forestgame/game/test_world.py | Nick-Pearson/forestgame | 8a37225adbe6da9df7851eba34ad06806da0ce48 | ["0BSD"] | 5 | 2021-03-10T14:18:45.000Z | 2022-03-12T00:28:29.000Z
server/forestgame/game/test_world.py | Nick-Pearson/forestgame | 8a37225adbe6da9df7851eba34ad06806da0ce48 | ["0BSD"] | null | null | null
import unittest
from forestgame.game.world import World
class WorldTest(unittest.TestCase):
def test_world_inits_to_empty_data(self):
world = World(None, "1", "0", 0, 0, [], [])
self.assertEqual(0, world.get_size_x())
self.assertEqual(0, world.get_size_y())
self.assertEqual([], world.get_tile_data())
def test_world_with_tiles_inits_with_tile_data(self):
world = World(None, "1", "0", 3, 3, [(1, 1, 0)], [])
expected_tile_data = [
[1, 1, 1],
[1, 0, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_from_zero_initialises_from_forest(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_larger_x_y_pads_with_forest(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(2, 2)
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_larger_x_pads_with_forest(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(2, 3)
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_larger_y_pads_with_forest(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 2)
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_smaller_x_y_removes_data(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
world.set_size(2, 2)
expected_tile_data = [
[1, 1],
[1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(2, world.get_size_x())
self.assertEqual(2, world.get_size_y())
def test_set_size_with_smaller_x_removes_data(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
world.set_size(2, 3)
expected_tile_data = [
[1, 1],
[1, 1],
[1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(2, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_smaller_y_removes_data(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
world.set_size(3, 2)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(2, world.get_size_y())
def test_set_size_with_same_x_y_does_nothing(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
# set tile range checks
def test_set_tile_changes_tile_data(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(5, 5)
world.set_tile_at(2, 3, 0)
self.assertEqual(0, world.get_tile_at(2, 3))
expected_tile_data = [
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 1, 1]
]
self.assertEqual(expected_tile_data, world.get_tile_data())
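The tests pin down the resize contract: growing pads new cells with forest tiles (value 1), shrinking crops rows and columns. A minimal sketch of tile-grid resizing that satisfies them (an assumption; the real logic lives in forestgame.game.world and may differ):
FOREST = 1

def resize_tile_data(tiles, new_x, new_y):
    """Crop or forest-pad a row-major tile grid to new_x columns, new_y rows."""
    resized = []
    for y in range(new_y):
        row = tiles[y] if y < len(tiles) else []
        resized.append([row[x] if x < len(row) else FOREST
                        for x in range(new_x)])
    return resized

assert resize_tile_data([[0]], 2, 2) == [[0, 1], [1, 1]]   # pad with forest
assert resize_tile_data([[1, 1], [1, 1]], 1, 1) == [[1]]   # crop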
1c43093fa85de4f6e1de23a0ecc3b43530f42260 | 126 | py | Python
sourcecode/GAN/FID/__init__.py | toufeeqahamedns/GeneratingHumanFaces | 93048bf5f6ae99424f918b0d0fea46d21abee0cb | ["MIT"] | null | null | null
sourcecode/GAN/FID/__init__.py | toufeeqahamedns/GeneratingHumanFaces | 93048bf5f6ae99424f918b0d0fea46d21abee0cb | ["MIT"] | null | null | null
sourcecode/GAN/FID/__init__.py | toufeeqahamedns/GeneratingHumanFaces | 93048bf5f6ae99424f918b0d0fea46d21abee0cb | ["MIT"] | null | null | null
""" Package has implementation for the FID score calculation
"""
from GAN.FID import fid_score
from GAN.FID import inception
1c71ba0a22523d640266f7845ef799a8f73cbe39 | 243 | py | Python
pawpyseed/compiler.py | akashkumarsingh612/pawpyseed | 6f5aa0b8ca8c28a0221e5256afeb939c3344560b | ["BSD-3-Clause"] | null | null | null
pawpyseed/compiler.py | akashkumarsingh612/pawpyseed | 6f5aa0b8ca8c28a0221e5256afeb939c3344560b | ["BSD-3-Clause"] | null | null | null
pawpyseed/compiler.py | akashkumarsingh612/pawpyseed | 6f5aa0b8ca8c28a0221e5256afeb939c3344560b | ["BSD-3-Clause"] | null | null | null
import subprocess
def compile_core(comp, scilib):
"""
ATTENTION, NOT FINISHED
Runs `make pawpy_<comp>`.
"""
subprocess.call(("make pawpy_%s" % comp).split())
def compile_hfc(comp, scilib):
"""
ATTENTION, NOT FINISHED
Runs `make hfc`. Renamed from a duplicate `compile_core` definition,
which silently shadowed the one above.
"""
subprocess.call("make hfc".split())
1c7b1135efb3bd7f94a1f1a7d47294ebfd74cbde | 10,416 | py | Python
tests/test_nanoevents_vector.py | danbarto/coffea | 2b28e28f602f8b81a1449ee85578187a7f52b602 | ["BSD-3-Clause"] | null | null | null
tests/test_nanoevents_vector.py | danbarto/coffea | 2b28e28f602f8b81a1449ee85578187a7f52b602 | ["BSD-3-Clause"] | null | null | null
tests/test_nanoevents_vector.py | danbarto/coffea | 2b28e28f602f8b81a1449ee85578187a7f52b602 | ["BSD-3-Clause"] | null | null | null
import awkward as ak
from coffea.nanoevents.methods import vector
import pytest
ATOL = 1e-8
def record_arrays_equal(a, b):
return (ak.fields(a) == ak.fields(b)) and all(ak.all(a[f] == b[f]) for f in ak.fields(a))
def test_two_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]]
},
with_name="TwoVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[11, 12], [], [13], [14]],
"y": [[15, 16], [], [17], [18]]
},
with_name="TwoVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[12, 14], [], [16], [18]],
"y": [[20, 22], [], [24], [26]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-10, -10], [], [-10], [-10]],
"y": [[-10, -10], [], [-10], [-10]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]]
}
))
assert ak.all(a.dot(b) == ak.Array([[86, 120], [], [158], [200]]))
assert ak.all(b.dot(a) == ak.Array([[86, 120], [], [158], [200]]))
assert ak.all(abs(a.unit.r - 1) < ATOL)
assert ak.all(abs(a.unit.phi - a.phi) < ATOL)
def test_polar_two_vector():
a = ak.zip(
{
"r": [[1, 2], [], [3], [4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
},
with_name="PolarTwoVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert record_arrays_equal(a * 2, ak.zip(
{
"r": [[2, 4], [], [6], [8]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]]
}
))
assert ak.all((a * (-2)).r == [[2, 4], [], [6], [8]])
assert ak.all(abs((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
])) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"r": [[0.5, 1], [], [1.5], [2]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]]
}
))
assert ak.all(abs((-a).x + a.x) < ATOL)
assert ak.all(abs((-a).y + a.y) < ATOL)
assert record_arrays_equal(a * (-1), -a)
assert ak.all(a.unit.phi == a.phi)
def test_three_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]]
},
with_name="ThreeVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[4, 1], [], [10], [11]],
"y": [[17, 7], [], [11], [6]],
"z": [[9, 11], [], [5], [16]]
},
with_name="ThreeVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]],
"z": [[-9, -10], [], [-11], [-12]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[5, 3], [], [13], [15]],
"y": [[22, 13], [], [18], [14]],
"z": [[18, 21], [], [16], [28]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-3, 1], [], [-7], [-7]],
"y": [[-12, -1], [], [-4], [2]],
"z": [[0, -1], [], [6], [-4]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]],
"z": [[18, 20], [], [22], [24]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]],
"z": [[4.5, 5], [], [5.5], [6]]
}
))
assert ak.all(a.dot(b) == ak.Array([[170, 154], [], [162], [284]]))
assert ak.all(b.dot(a) == ak.Array([[170, 154], [], [162], [284]]))
assert record_arrays_equal(a.cross(b), ak.zip(
{
"x": [[-108, -4], [], [-86], [56]],
"y": [[27, -12], [], [95], [68]],
"z": [[-3, 8], [], [-37], [-64]]
}
))
assert record_arrays_equal(b.cross(a), ak.zip(
{
"x": [[108, 4], [], [86], [-56]],
"y": [[-27, 12], [], [-95], [-68]],
"z": [[3, -8], [], [37], [64]]
}
))
assert ak.all(abs(a.unit.rho - 1) < ATOL)
assert ak.all(abs(a.unit.phi - a.phi) < ATOL)
def test_spherical_three_vector():
a = ak.zip(
{
"rho": [[1.0, 2.0], [], [3.0], [4.0]],
"theta": [[1.2, 0.7], [], [1.8], [1.9]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
},
with_name="SphericalThreeVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all(abs((-a).x + a.x) < ATOL)
assert ak.all(abs((-a).y + a.y) < ATOL)
assert ak.all(abs((-a).z + a.z) < ATOL)
assert record_arrays_equal(a * (-1), -a)
def test_lorentz_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]],
"t": [[50, 51], [], [52], [53]]
},
with_name="LorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[4, 1], [], [10], [11]],
"y": [[17, 7], [], [11], [6]],
"z": [[9, 11], [], [5], [16]],
"t": [[60, 61], [], [62], [63]]
},
with_name="LorentzVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]],
"z": [[-9, -10], [], [-11], [-12]],
"t": [[-50, -51], [], [-52], [-53]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[5, 3], [], [13], [15]],
"y": [[22, 13], [], [18], [14]],
"z": [[18, 21], [], [16], [28]],
"t": [[110, 112], [], [114], [116]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-3, 1], [], [-7], [-7]],
"y": [[-12, -1], [], [-4], [2]],
"z": [[0, -1], [], [6], [-4]],
"t": [[-10, -10], [], [-10], [-10]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]],
"z": [[18, 20], [], [22], [24]],
"t": [[100, 102], [], [104], [106]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]],
"z": [[4.5, 5], [], [5.5], [6]],
"t": [[25, 25.5], [], [26], [26.5]]
}
))
assert record_arrays_equal(a.pvec, ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]],
}
))
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
def test_pt_eta_phi_m_lorentz_vector():
a = ak.zip(
{
"pt": [[1, 2], [], [3], [4]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"mass": [[0.5, 0.9], [], [1.3], [4.5]]
},
with_name="PtEtaPhiMLorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all((a * (-2)).pt == ak.Array([[2, 4], [], [6], [8]]))
assert ak.all(abs((a * (-2)).theta - ak.Array([
[2.556488570968, 2.65804615357],
[],
[2.74315571762],
[3.07487087733]
])) < ATOL)
assert ak.all(abs((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
])) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"pt": [[0.5, 1], [], [1.5], [2]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"mass": [[0.25, 0.45], [], [0.65], [2.25]]
}
))
assert record_arrays_equal(a * (-1), -a)
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
def test_pt_eta_phi_e_lorentz_vector():
a = ak.zip(
{
"pt": [[1, 2], [], [3], [4]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"energy": [[50, 51], [], [52], [60]]
},
with_name="PtEtaPhiELorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all((a * (-2)).pt == ak.Array([[2, 4], [], [6], [8]]))
assert ak.all(abs((a * (-2)).theta - ak.Array([
[2.556488570968, 2.65804615357],
[],
[2.74315571762],
[3.07487087733]
])) < ATOL)
assert ak.all(abs((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
])) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"pt": [[0.5, 1], [], [1.5], [2]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"energy": [[25, 25.5], [], [26], [30]]
}
))
assert record_arrays_equal(a * (-1), -a)
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
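One caveat with record_arrays_equal: for plain numeric arrays ak.fields returns an empty list, so the field-wise check passes vacuously. A stricter sketch (an alternative helper, not the coffea test code) adds an element-wise fallback:
def arrays_equal(a, b):
    """Like record_arrays_equal, but also meaningful for non-record arrays."""
    fields_a, fields_b = ak.fields(a), ak.fields(b)
    if fields_a or fields_b:
        # Record arrays: compare field names, then each field's values.
        return fields_a == fields_b and all(
            ak.all(a[f] == b[f]) for f in fields_a)
    # Plain numeric arrays: element-wise comparison.
    return bool(ak.all(a == b))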
c70bc413822aaad70486fa31ce67b5a7d9e44d76 | 49,568 | py | Python
cave/com.raytheon.viz.gfe/python/autotest/VTEC_GHG_FFA_TestScript.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | ["Apache-2.0"] | null | null | null
cave/com.raytheon.viz.gfe/python/autotest/VTEC_GHG_FFA_TestScript.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | ["Apache-2.0"] | null | null | null
cave/com.raytheon.viz.gfe/python/autotest/VTEC_GHG_FFA_TestScript.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | ["Apache-2.0"] | 1 | 2021-10-30T00:03:05.000Z | 2021-10-30T00:03:05.000Z
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# Headlines Timing
#
# Author:
# ----------------------------------------------------------------------------
# Set up to test area names and parts of states
# without locationName defined
areaT1 = """
AreaDictionary['FLZ050']['fullStateName'] = 'Florida'
AreaDictionary['FLZ050']['partOfState'] = 'western'
AreaDictionary['FLZ057']['fullStateName'] = 'Florida'
AreaDictionary['FLZ057']['partOfState'] = 'western'
AreaDictionary['FLZ160']['fullStateName'] = 'Florida'
AreaDictionary['FLZ160']['partOfState'] = 'central'
AreaDictionary['FLZ151']['fullStateName'] = 'Florida'
AreaDictionary['FLZ151']['partOfState'] = 'central'
AreaDictionary['FLZ043']['fullStateName'] = 'Florida'
AreaDictionary['FLZ043']['partOfState'] = 'central'
AreaDictionary['FLZ162']['fullStateName'] = 'Florida'
AreaDictionary['FLZ162']['partOfState'] = 'central'
AreaDictionary['FLZ165']['fullStateName'] = 'Florida'
AreaDictionary['FLZ165']['partOfState'] = 'central'
AreaDictionary['FLZ056']['fullStateName'] = 'Florida'
AreaDictionary['FLZ056']['partOfState'] = 'southern'
AreaDictionary['FLZ052']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ052']['partOfState'] = 'western'
AreaDictionary['FLZ155']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ155']['partOfState'] = 'western'
AreaDictionary['FLZ061']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ061']['partOfState'] = 'southern'
AreaDictionary['FLZ148']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ148']['partOfState'] = 'southern'
AreaDictionary['FLZ142']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ142']['partOfState'] = 'western'
AreaDictionary['FLZ043']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ043']['partOfState'] = 'western'
"""
# With locationName defined
areaT2= """
AreaDictionary['FLZ050']['fullStateName'] = 'Florida'
AreaDictionary['FLZ050']['partOfState'] = 'western'
AreaDictionary['FLZ050']['locationName'] = 'Clearfield'
AreaDictionary['FLZ057']['fullStateName'] = 'Florida'
AreaDictionary['FLZ057']['partOfState'] = 'western'
AreaDictionary['FLZ057']['locationName'] = 'Clearfield'
AreaDictionary['FLZ160']['fullStateName'] = 'Florida'
AreaDictionary['FLZ160']['partOfState'] = 'central'
AreaDictionary['FLZ160']['locationName'] = 'Aunt Ruby'
AreaDictionary['FLZ151']['fullStateName'] = 'Florida'
AreaDictionary['FLZ151']['partOfState'] = 'central'
AreaDictionary['FLZ151']['locationName'] = 'Aunt Ruby'
AreaDictionary['FLZ043']['fullStateName'] = 'Florida'
AreaDictionary['FLZ043']['partOfState'] = 'central'
AreaDictionary['FLZ043']['locationName'] = 'Adams'
AreaDictionary['FLZ162']['fullStateName'] = 'Florida'
AreaDictionary['FLZ162']['partOfState'] = 'central'
AreaDictionary['FLZ162']['locationName'] = 'Adams'
AreaDictionary['FLZ165']['fullStateName'] = 'Florida'
AreaDictionary['FLZ165']['partOfState'] = 'central'
#AreaDictionary['FLZ165']['locationName'] = 'western'
AreaDictionary['FLZ056']['fullStateName'] = 'Florida'
AreaDictionary['FLZ056']['partOfState'] = 'southern'
AreaDictionary['FLZ056']['locationName'] = 'Tampa'
AreaDictionary['FLZ052']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ052']['partOfState'] = 'western'
AreaDictionary['FLZ052']['locationName'] = 'Tampa'
AreaDictionary['FLZ155']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ155']['partOfState'] = 'western'
AreaDictionary['FLZ155']['locationName'] = 'Atlanta'
AreaDictionary['FLZ061']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ061']['partOfState'] = 'southern'
AreaDictionary['FLZ061']['locationName'] = 'Beach'
AreaDictionary['FLZ148']['fullStateName'] = 'Georgia'
AreaDictionary['FLZ148']['partOfState'] = 'southern'
AreaDictionary['FLZ148']['locationName'] = 'Beach'
AreaDictionary['FLZ142']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ142']['partOfState'] = 'western'
AreaDictionary['FLZ142']['locationName'] = 'South Park'
AreaDictionary['FLZ043']['fullStateName'] = 'South Carolina'
AreaDictionary['FLZ043']['partOfState'] = 'western'
AreaDictionary['FLZ043']['locationName'] = 'South Park'
"""
# For testing parishes, counties, and areas
areaT3 = """
AreaDictionary['FLC017']['fullStateName'] = 'Louisiana'
AreaDictionary['FLC017']['partOfState'] = 'western'
AreaDictionary['FLC017']['independentCity'] = 1
AreaDictionary['FLC105']['fullStateName'] = 'Louisiana'
AreaDictionary['FLC105']['partOfState'] = 'western'
AreaDictionary['FLC027']['fullStateName'] = 'Louisiana'
AreaDictionary['FLC027']['partOfState'] = 'western'
AreaDictionary['FLC053']['fullStateName'] = 'Florida'
AreaDictionary['FLC053']['partOfState'] = 'western'
"""
areaT3FIPS0= '#Definition["areaType"] = "FIPS"'
areaT3FIPS1= 'Definition["areaType"] = "FIPS"'
scripts = [
{
"commentary": "Clear out all Hazards Table and Grids.",
"name": "Hazard_FFA_0",
"productType": None,
"clearHazardsTable": 1,
"checkStrings": [],
},
{
"commentary": "NEW FFA",
"name": "Hazard_FFA_1",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ149"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ149-",
"/X.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Coastal Pasco-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 3 AM EST early this morning",
],
},
{
"commentary": "CON FFA",
"name": "Hazard_FFA_2",
"drtTime": "20100101_0530",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'SM '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ149-",
"/X.CON.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.SM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 3 AM EST early this morning",
],
},
{
"commentary": "EXA FFA",
"name": "Hazard_FFA_3",
"drtTime": "20100101_0700",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'DM '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ149","FLZ057"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.EXA.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.DM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has expanded the",
"* Flood Watch to include a portion of south central Florida, including the following area, Highlands.",
"* Until 3 AM EST early this morning",
"FLZ149-",
"/X.CON.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.DM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 3 AM EST early this morning",
],
},
{
"commentary": "CAN FFA, NEW FFA",
"name": "Hazard_FFA_4",
"drtTime": "20100101_0720",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'IJ '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 8, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CAN.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/X.NEW.KTBW.FF.A.0001.100101T0720Z-100101T1300Z/",
"/X.NEW.KTBW.FF.A.0002.100102T0500Z-100102T1300Z/",
"/00000.0.IJ.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH IN EFFECT UNTIL 8 AM EST THIS MORNING...",
"...FLASH FLOOD WATCH IN EFFECT FROM LATE TONIGHT THROUGH SATURDAY MORNING...",
"...FLOOD WATCH IS CANCELLED...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flash Flood Watch for a portion of south central Florida, including the following area, Highlands.",
"* Until 8 AM EST this morning",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flash Flood Watch for a portion of south central Florida, including the following area, Highlands.",
"* From late tonight through Saturday morning",
"The Flood Watch for a portion of south central Florida has been cancelled.",
"FLZ149-",
"/X.CAN.KTBW.FA.A.0001.000000T0000Z-100101T0800Z/",
"/00000.0.IJ.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IS CANCELLED...",
"The Flood Watch for a portion of west central Florida has been cancelled."
],
},
{
"commentary": "EXP FFA, 2 NEW FFA",
"name": "Hazard_FFA_5",
"drtTime": "20100101_1300",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'FS '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 46, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.EXP.KTBW.FF.A.0001.000000T0000Z-100101T1300Z/",
"/X.NEW.KTBW.FF.A.0003.100103T0300Z-100103T1900Z/",
"/X.CON.KTBW.FF.A.0002.100102T0500Z-100102T1300Z/",
"/00000.0.FS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT FROM LATE TONIGHT THROUGH SATURDAY MORNING...",
"...FLASH FLOOD WATCH IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY AFTERNOON...",
"...FLASH FLOOD WATCH HAS EXPIRED...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* From late tonight through Saturday morning",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flash Flood Watch for a portion of south central Florida, including the following area, Highlands.",
"* From Saturday evening through Sunday afternoon",
"The Flash Flood Watch for a portion of south central Florida has expired.",
"FLZ149-",
"/X.NEW.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.FS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY EVENING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of west central Florida, including the following area, Coastal Pasco.",
"* From Saturday evening through Sunday evening",
],
},
{
"commentary": "CON test of multiple events",
"name": "Hazard_FFA_6",
"drtTime": "20100102_0300",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'RS '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 46, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CON.KTBW.FF.A.0002.100102T0500Z-100102T1300Z/",
"/X.CON.KTBW.FF.A.0003.100103T0300Z-100103T1900Z/",
"/00000.0.RS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT UNTIL 8 AM EST SATURDAY...",
"...FLASH FLOOD WATCH REMAINS IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY AFTERNOON...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Until 8 AM EST Saturday",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* From Saturday evening through Sunday afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.RS.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT FROM SATURDAY EVENING THROUGH SUNDAY EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* From Saturday evening through Sunday evening",
],
},
{
"commentary": "middle of 1st event",
"name": "Hazard_FFA_7",
"drtTime": "20100102_0700",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 32, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 46, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 46, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CON.KTBW.FF.A.0002.000000T0000Z-100102T1300Z/",
"/X.CON.KTBW.FF.A.0003.100103T0300Z-100103T1900Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT UNTIL 8 AM EST THIS MORNING...",
"...FLASH FLOOD WATCH REMAINS IN EFFECT FROM THIS EVENING THROUGH SUNDAY AFTERNOON...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Until 8 AM EST this morning",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* From this evening through Sunday afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT FROM THIS EVENING THROUGH SUNDAY EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* From this evening through Sunday evening",
],
},
{
"commentary": "joining two events",
"name": "Hazard_FFA_8",
"drtTime": "20100102_1200",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'IC '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 45, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CAN.KTBW.FF.A.0002.000000T0000Z-100102T1300Z/",
"/X.EXT.KTBW.FF.A.0003.100102T1200Z-100103T1900Z/",
"/00000.0.IC.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH NOW IN EFFECT THROUGH SUNDAY AFTERNOON...",
"The Flash Flood Watch is now in effect for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Through Sunday afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.100103T0200Z-100104T0100Z/",
"/00000.0.IC.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT FROM THIS EVENING THROUGH SUNDAY EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* From this evening through Sunday evening",
],
},
{
"commentary": "into the tail end of the events",
"name": "Hazard_FFA_9",
"drtTime": "20100103_1100",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'SM '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 45, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.CON.KTBW.FF.A.0003.000000T0000Z-100103T1900Z/",
"/00000.0.SM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH REMAINS IN EFFECT UNTIL 2 PM EST THIS AFTERNOON...",
"The Flash Flood Watch continues for",
"* A portion of south central Florida, including the following area, Highlands.",
"* Until 2 PM EST this afternoon",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.000000T0000Z-100104T0100Z/",
"/00000.0.SM.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT THROUGH THIS EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Through this evening",
],
},
{
"commentary": "exp 1st event, continue 2nd event",
"name": "Hazard_FFA_10",
"drtTime": "20100103_1855",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'DR '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 24, 45, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FF.A", ["FLZ057"]),
("Fcst", "Hazards", "DISCRETE", 45, 62, "FA.A", ["FLZ149"]),
("Fcst", "Hazards", "DISCRETE", 62, 68, "FA.A", ["FLZ149"]),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ057-",
"/X.EXP.KTBW.FF.A.0003.000000T0000Z-100103T1900Z/",
"/00000.0.DR.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLASH FLOOD WATCH WILL EXPIRE AT 2 PM EST THIS AFTERNOON...",
"The Flash Flood Watch for a portion of south central Florida will expire at 2 PM EST this afternoon.",
"FLZ149-",
"/X.CON.KTBW.FA.A.0002.000000T0000Z-100104T0100Z/",
"/00000.0.DR.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH REMAINS IN EFFECT UNTIL 8 PM EST THIS EVENING...",
"The Flood Watch continues for",
"* A portion of west central Florida, including the following area, Coastal Pasco.",
"* Until 8 PM EST this evening",
],
},
{
"commentary": "cancel 2nd event",
"name": "Hazard_FFA_11",
"drtTime": "20100104_0000",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'GO '}",
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
],
"checkStrings": ["Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"FLZ149-",
"/X.CAN.KTBW.FA.A.0002.000000T0000Z-100104T0100Z/",
"/00000.0.GO.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"...FLOOD WATCH IS CANCELLED...",
"The Flood Watch for a portion of west central Florida has been cancelled.",
],
},
{
"commentary": "Deleting hazard grids.",
"name": "Hazard_FFA_12",
"productType": None,
"checkStrings": [],
"clearHazardsTable": 1,
},
# Begin detailed phrasing of location tests
{
"commentary": "one state, single area, w/o location",
"name": "Hazard_FFA_13a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of western Florida, including the following area, Pinellas.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "one state, single area, w location",
"name": "Hazard_FFA_13b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for a portion of western Florida, including the following area, Clearfield.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area, w/o location",
"name": "Hazard_FFA_14a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ052","FLZ155"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-Coastal Manatee-",
# "Including the cities of St. Petersburg, Clearwater, Largo, ",
# "Lakeland, Winter Haven, Bradenton, Bayshore Gardens, ",
# "Palmetto, Sebring, Avon Park, Placid Lakes",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Georgia, including the following areas, in western Florida, Highlands and Pinellas. In western Georgia, Coastal Manatee and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area, w location",
"name": "Hazard_FFA_14b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ052","FLZ155"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-Coastal Manatee-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Georgia, including the following areas, in western Florida, Clearfield. In western Georgia, Atlanta and Tampa.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "one state, multiple areas, w/o location",
"name": "Hazard_FFA_15a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ160",
"FLZ057","FLZ151","FLZ056"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-056-057-151-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Hardee-Highlands-Coastal Hillsborough-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of central Florida, southern Florida, and western Florida, including the following areas, in central Florida, Coastal Hillsborough and Coastal Sarasota. In southern Florida, Hardee. In western Florida, Highlands and Pinellas.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "one state, multiple areas, w location",
"name": "Hazard_FFA_15b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ160",
"FLZ057","FLZ151","FLZ056"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-056-057-151-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Hardee-Highlands-Coastal Hillsborough-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of central Florida, southern Florida, and western Florida, including the following areas, in central Florida, Aunt Ruby. In southern Florida, Tampa. In western Florida, Clearfield.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area 1st, mulitple area 2nd, w/o location",
"name": "Hazard_FFA_16a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ052",
"FLZ155","FLZ061"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-061-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-DeSoto-Coastal Manatee-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and Georgia, including the following areas, in western Florida, Pinellas. In Georgia, Coastal Manatee, DeSoto, and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, single area 1st, mulitple area 2nd, w location",
"name": "Hazard_FFA_16b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ052",
"FLZ155","FLZ061"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-061-155-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-DeSoto-Coastal Manatee-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and Georgia, including the following areas, in western Florida, Clearfield. In Georgia, Atlanta, Beach, and Tampa.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, multiple areas, w/o location",
"name": "Hazard_FFA_17a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT1, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ160","FLZ151","FLZ052","FLZ155","FLZ061","FLZ148"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-061-148-151-155-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-DeSoto-Coastal Hernando-",
"Coastal Hillsborough-Coastal Manatee-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of Florida and Georgia, including the following areas, in Florida, Coastal Hillsborough, Coastal Sarasota, Highlands, and Pinellas. In Georgia, Coastal Hernando, Coastal Manatee, DeSoto, and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "two states, multiple areas, w location",
"name": "Hazard_FFA_17b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [("AreaDictionary", "TextUtility", "add", areaT2, "delete"),],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLZ050","FLZ057",
"FLZ160","FLZ151","FLZ052","FLZ155","FLZ061","FLZ148"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLZ050-052-057-061-148-151-155-160-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Pinellas-Polk-Highlands-DeSoto-Coastal Hernando-",
"Coastal Hillsborough-Coastal Manatee-Coastal Sarasota-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of Florida and Georgia, including the following areas, in Florida, Aunt Ruby and Clearfield. In Georgia, Atlanta, Beach, and Tampa.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "parishes 1, independent 1, counties 1",
"name": "Hazard_FFA_18a",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [
("AreaDictionary", "TextUtility", "add", areaT3, "delete"),
("Hazard_FFA_Local", "TextProduct", "replace",
(areaT3FIPS0, areaT3FIPS1), "delete"),
],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLC017","FLC027",
"FLC053"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLC017-027-053-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Citrus-DeSoto-Hernando-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Louisiana, including the following county, independent city, and parish, in western Florida, Hernando. In western Louisiana, Citrus and DeSoto.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
{
"commentary": "parishes 2, independent 1, counties 1",
"name": "Hazard_FFA_18b",
"drtTime": "20100101_0510",
"productType": "Hazard_FFA_Local",
"cmdLineVars": "{('Flood Reason', 'floodReason'): 'ER '}",
"decodeVTEC": 0,
"vtecMode": "O",
"fileChanges": [
("AreaDictionary", "TextUtility", "add", areaT3, "delete"),
("Hazard_FFA_Local", "TextProduct", "replace",
(areaT3FIPS0, areaT3FIPS1), "delete"),
],
"createGrids": [
("Fcst", "Hazards", "DISCRETE", -100, 100, "<None>", "all"),
("Fcst", "Hazards", "DISCRETE", 0, 3, "FA.A", ["FLC017","FLC027",
"FLC053","FLC105"]),
],
"checkStrings": [
"WGUS62 KTBW 010510",
"FFATBW",
"URGENT - IMMEDIATE BROADCAST REQUESTED",
"Flood Watch",
"National Weather Service Tampa Bay Ruskin FL",
"1210 AM EST Fri Jan 1 2010",
"...|*Overview headline (must edit)*|...",
".|*Overview (must edit)*|.",
"FLC017-027-053-105-010800-",
"/O.NEW.KTBW.FA.A.0001.100101T0510Z-100101T0800Z/",
"/00000.0.ER.000000T0000Z.000000T0000Z.000000T0000Z.OO/",
"Citrus-DeSoto-Hernando-Polk-",
"1210 AM EST Fri Jan 1 2010",
"...FLOOD WATCH IN EFFECT UNTIL 3 AM EST EARLY THIS MORNING...",
"The National Weather Service in Tampa Bay Ruskin has issued a",
"* Flood Watch for portions of western Florida and western Louisiana, including the following county, independent city, and parishes, in western Florida, Hernando. In western Louisiana, Citrus, DeSoto, and Polk.",
"* Until 3 AM EST early this morning",
"* |* Basis for the watch *|",
"* |* (optional) potential impacts of flooding *|",
"PRECAUTIONARY/PREPAREDNESS ACTIONS...",
"A Flood Watch means there is a potential for flooding based on current forecasts.",
"You should monitor later forecasts and be alert for possible Flood Warnings. Those living in areas prone to flooding should be prepared to take action should flooding develop.",
"&&",
"$$",
],
},
]
import TestScript
def testScript(self, dataMgr):
defaults = {
"database": "<site>_GRID__Fcst_00000000_0000",
"publishGrids": 0,
"decodeVTEC": 1,
"gridsStartTime": "20100101_0500",
"orderStrings": 1,
"vtecMode": "X",
"deleteGrids": [("Fcst", "Hazards", "SFC", "all", "all")],
}
return TestScript.generalTestScript(self, dataMgr, scripts, defaults)
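
# Illustration only: TestScript.generalTestScript (defined elsewhere) is
# assumed to overlay each per-case dict in `scripts` onto `defaults`, with
# per-case keys such as "decodeVTEC" and "vtecMode" taking precedence.
# _merge_with_defaults below is a hypothetical sketch of that presumed
# behavior; it is not part of the test framework itself.
def _merge_with_defaults(entry, defaults):
    merged = dict(defaults)  # start from the shared default settings
    merged.update(entry)     # per-case settings override the defaults
    return merged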
[next file: benchmarks/SimResults/combinations_spec_mylocality/oldstuff/cmp_soplexmcfcalculixgcc/power.py — repo TugberkArkose/MLScheduler @ e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061, Python, 68,625 bytes, hexsha c71c00b730b4e3cf508cdefb7968765436ad7ce3, license: Unlicense]
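# The dict below looks like auto-generated per-component power/area output
# (McPAT-style output is an assumption; the file itself does not say).
# Each entry maps component paths such as 'Execution Unit/Complex ALUs' to
# floats: 'Area' presumably in mm^2, 'Peak Dynamic'/'Runtime Dynamic' and
# the leakage terms presumably in watts, with 'Core' holding one nested
# dict per simulated core.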
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.181181,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.344996,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.977935,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.486054,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.841669,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.482721,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.81044,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.330514,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.28395,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.184753,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0176198,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.195265,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.130309,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.380018,
'Execution Unit/Register Files/Runtime Dynamic': 0.147929,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.521478,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.08927,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.79801,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00272158,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00272158,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0023766,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000923356,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00187191,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00969166,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0258763,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.12527,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.372767,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.425473,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 0.959077,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.090727,
'L2/Runtime Dynamic': 0.0127692,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.08122,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.38167,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0920133,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0920133,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.51749,
'Load Store Unit/Runtime Dynamic': 1.92746,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.226889,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.453778,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0805237,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0817258,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.061585,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.697703,
'Memory Management Unit/Runtime Dynamic': 0.143311,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 26.1203,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.644561,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0326103,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.237087,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.914258,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 7.75489,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.11996,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.29691,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.64733,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.234954,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.378972,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.191292,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.805218,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.169475,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.2954,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.122295,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00985502,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.116195,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0728839,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.23849,
'Execution Unit/Register Files/Runtime Dynamic': 0.0827389,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.274787,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.565173,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.15542,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00133282,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00133282,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00118494,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000471861,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00104698,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00489756,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0119197,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0700652,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.45674,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.197355,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.237973,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.89155,
'Instruction Fetch Unit/Runtime Dynamic': 0.522211,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0504299,
'L2/Runtime Dynamic': 0.0069462,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.70196,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.713329,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0473909,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0473909,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.92575,
'Load Store Unit/Runtime Dynamic': 0.994436,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.116858,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.233716,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0414733,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0421754,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.277104,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0325171,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.504457,
'Memory Management Unit/Runtime Dynamic': 0.0746925,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 19.2571,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.321701,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0145155,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.111753,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.44797,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.20167,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0065108,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.207803,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0335685,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.102536,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.165386,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0834813,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.351403,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.112125,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.10223,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00634181,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0043008,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0336025,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0318071,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0399443,
'Execution Unit/Register Files/Runtime Dynamic': 0.0361079,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0724192,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.179703,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.18039,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00112696,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00112696,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000995662,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000393137,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000456911,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0037065,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0103022,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0305769,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.94496,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0958958,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.103853,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.25787,
'Instruction Fetch Unit/Runtime Dynamic': 0.244335,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0538499,
'L2/Runtime Dynamic': 0.0148173,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.02873,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.40237,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0256105,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0256104,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.14967,
'Load Store Unit/Runtime Dynamic': 0.554282,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.063151,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.126302,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0224125,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0232096,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.12093,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0157552,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.31554,
'Memory Management Unit/Runtime Dynamic': 0.0389648,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.4686,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0166828,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00482915,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0520126,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0735245,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.10632,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.00682822,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.208052,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0364806,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.106185,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.171272,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0864526,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.36391,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.115853,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.11398,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00689197,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00445387,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0347798,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0329391,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0416718,
'Execution Unit/Register Files/Runtime Dynamic': 0.037393,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0749788,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.202833,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.21756,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000625326,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000625326,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000550159,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000215984,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000473173,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00227399,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00579905,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0316652,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.01418,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0689457,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.107549,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.33045,
'Instruction Fetch Unit/Runtime Dynamic': 0.216233,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0418086,
'L2/Runtime Dynamic': 0.00989266,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.36015,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.554162,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0363327,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0363327,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.53172,
'Load Store Unit/Runtime Dynamic': 0.769675,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0895903,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.17918,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0317959,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0324228,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.125234,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0113054,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.335963,
'Memory Management Unit/Runtime Dynamic': 0.0437282,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.9434,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0181291,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0050114,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0551057,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0782462,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.33534,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 3.868411224021876,
'Runtime Dynamic': 3.868411224021876,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.371973,
'Runtime Dynamic': 0.183113,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 75.1614,
'Peak Power': 108.274,
'Runtime Dynamic': 16.5813,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 74.7894,
'Total Cores/Runtime Dynamic': 16.3982,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.371973,
'Total L3s/Runtime Dynamic': 0.183113,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
| 75.082057
| 124
| 0.682157
| 8,082
| 68,625
| 5.786315
| 0.067681
| 0.123511
| 0.112905
| 0.093403
| 0.939185
| 0.931316
| 0.91793
| 0.885833
| 0.862611
| 0.842019
| 0
| 0.132209
| 0.224277
| 68,625
| 914
| 125
| 75.082057
| 0.746271
| 0
| 0
| 0.642232
| 0
| 0
| 0.657258
| 0.048087
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c7226ff1219f925df17003fe42d233729469035d
| 4,187
|
py
|
Python
|
tests/test_models/test_backbones/test_sr_backbones/test_edvr_net.py
|
wangruohui/mmediting
|
6577d307caf9edfb34c6e46547994e6314fffc37
|
[
"Apache-2.0"
] | 45
|
2022-03-05T06:54:34.000Z
|
2022-03-30T02:15:42.000Z
|
tests/test_models/test_backbones/test_sr_backbones/test_edvr_net.py
|
wangruohui/mmediting
|
6577d307caf9edfb34c6e46547994e6314fffc37
|
[
"Apache-2.0"
] | 1
|
2022-03-25T14:04:39.000Z
|
2022-03-31T04:48:38.000Z
|
tests/test_models/test_backbones/test_sr_backbones/test_edvr_net.py
|
wangruohui/mmediting
|
6577d307caf9edfb34c6e46547994e6314fffc37
|
[
"Apache-2.0"
] | 1
|
2022-03-10T01:00:24.000Z
|
2022-03-10T01:00:24.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.backbones.sr_backbones.edvr_net import (EDVRNet,
PCDAlignment,
TSAFusion)
def test_pcd_alignment():
"""Test PCDAlignment."""
# cpu
pcd_alignment = PCDAlignment(mid_channels=4, deform_groups=2)
input_list = []
for i in range(3, 0, -1):
input_list.append(torch.rand(1, 4, 2**i, 2**i))
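    # These two assignments are no-ops on CPU; they mirror the GPU branch
    # below, where the model and inputs are moved onto the device instead.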
pcd_alignment = pcd_alignment
input_list = [v for v in input_list]
output = pcd_alignment(input_list, input_list)
assert output.shape == (1, 4, 8, 8)
with pytest.raises(AssertionError):
pcd_alignment(input_list[0:2], input_list)
# gpu
if torch.cuda.is_available():
pcd_alignment = PCDAlignment(mid_channels=4, deform_groups=2)
input_list = []
for i in range(3, 0, -1):
input_list.append(torch.rand(1, 4, 2**i, 2**i))
pcd_alignment = pcd_alignment.cuda()
input_list = [v.cuda() for v in input_list]
output = pcd_alignment(input_list, input_list)
assert output.shape == (1, 4, 8, 8)
with pytest.raises(AssertionError):
pcd_alignment(input_list[0:2], input_list)
def test_tsa_fusion():
"""Test TSAFusion."""
# cpu
tsa_fusion = TSAFusion(mid_channels=4, num_frames=5, center_frame_idx=2)
input_tensor = torch.rand(1, 5, 4, 8, 8)
output = tsa_fusion(input_tensor)
assert output.shape == (1, 4, 8, 8)
# gpu
if torch.cuda.is_available():
tsa_fusion = tsa_fusion.cuda()
input_tensor = input_tensor.cuda()
output = tsa_fusion(input_tensor)
assert output.shape == (1, 4, 8, 8)
def test_edvrnet():
"""Test EDVRNet."""
# cpu
# with tsa
edvrnet = EDVRNet(
3,
3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=True)
input_tensor = torch.rand(1, 5, 3, 8, 8)
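    # EDVRNet upsamples height and width by 4x: (1, 5, 3, 8, 8) -> (1, 3, 32, 32)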
edvrnet.init_weights(pretrained=None)
output = edvrnet(input_tensor)
assert output.shape == (1, 3, 32, 32)
# without tsa
edvrnet = EDVRNet(
3,
3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=False)
output = edvrnet(input_tensor)
assert output.shape == (1, 3, 32, 32)
with pytest.raises(AssertionError):
# The height and width of inputs should be a multiple of 4
input_tensor = torch.rand(1, 5, 3, 3, 3)
edvrnet(input_tensor)
with pytest.raises(TypeError):
# pretrained should be str or None
edvrnet.init_weights(pretrained=[1])
# gpu
if torch.cuda.is_available():
# with tsa
edvrnet = EDVRNet(
3,
3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=True).cuda()
input_tensor = torch.rand(1, 5, 3, 8, 8).cuda()
edvrnet.init_weights(pretrained=None)
output = edvrnet(input_tensor)
assert output.shape == (1, 3, 32, 32)
# without tsa
edvrnet = EDVRNet(
3,
3,
mid_channels=8,
num_frames=5,
deform_groups=2,
num_blocks_extraction=1,
num_blocks_reconstruction=1,
center_frame_idx=2,
with_tsa=False).cuda()
output = edvrnet(input_tensor)
assert output.shape == (1, 3, 32, 32)
with pytest.raises(AssertionError):
# The height and width of inputs should be a multiple of 4
input_tensor = torch.rand(1, 5, 3, 3, 3).cuda()
edvrnet(input_tensor)
with pytest.raises(TypeError):
# pretrained should be str or None
edvrnet.init_weights(pretrained=[1])
| 28.482993
| 76
| 0.578696
| 534
| 4,187
| 4.335206
| 0.162921
| 0.062203
| 0.058747
| 0.062203
| 0.834125
| 0.834125
| 0.792225
| 0.792225
| 0.792225
| 0.770626
| 0
| 0.044133
| 0.318128
| 4,187
| 146
| 77
| 28.678082
| 0.766725
| 0.081443
| 0
| 0.762376
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118812
| 1
| 0.029703
| false
| 0
| 0.029703
| 0
| 0.059406
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c7235d9e02846d039085054a4375d4bc687a9231
| 12,229
|
py
|
Python
|
enjoliver-api/tests/test_generate_groups.py
|
netturpin/enjoliver
|
9700470939da40ff84304af6e8c7210a5fd693a4
|
[
"MIT"
] | 11
|
2017-11-06T08:42:55.000Z
|
2021-01-08T11:01:02.000Z
|
enjoliver-api/tests/test_generate_groups.py
|
netturpin/enjoliver
|
9700470939da40ff84304af6e8c7210a5fd693a4
|
[
"MIT"
] | 7
|
2017-12-28T12:05:50.000Z
|
2021-04-02T15:04:46.000Z
|
enjoliver-api/tests/test_generate_groups.py
|
netturpin/enjoliver
|
9700470939da40ff84304af6e8c7210a5fd693a4
|
[
"MIT"
] | 4
|
2017-11-08T10:03:31.000Z
|
2018-06-03T17:59:43.000Z
|
import os
from shutil import rmtree
from tempfile import mkdtemp
from unittest import TestCase
from enjoliver import generator
class GenerateGroupTestCase(TestCase):
api_uri = None
test_matchbox_path = None
test_resources_path = None
tests_path = None
@classmethod
def setUpClass(cls):
cls.tests_path = mkdtemp(dir='/tmp')
cls.test_matchbox_path = os.path.join(cls.tests_path, 'test_matchbox')
cls.test_resources_path = os.path.join(cls.tests_path, 'test_resources')
os.mkdir(cls.test_matchbox_path)
os.mkdir(cls.test_resources_path)
os.mkdir(os.path.join(cls.test_matchbox_path, 'groups'))
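        # GenerateGroup dumps each group as a JSON file under <matchbox_path>/groups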
cls.api_uri = "http://127.0.0.1:5000"
@classmethod
def tearDownClass(cls):
rmtree(cls.tests_path)
class TestGenerateGroups(GenerateGroupTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.gen = generator.GenerateGroup(
api_uri=cls.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="TestGenerateProfiles",
matchbox_path=cls.test_matchbox_path
)
cls.gen.profiles_path = cls.test_resources_path
def test_instantiate_generate_group_with_incorrect_parameters(self):
with self.assertRaises(TypeError):
generator.GenerateGroup()
def test_instantiate_generate_group_with_non_existing_matchbox_path(self):
with self.assertRaises(OSError):
generator.GenerateGroup(
api_uri='foobar',
_id='foo',
name='foo-bar',
profile='foo-bar-baz',
matchbox_path='/foo/bar'
)
def test_instantiate_generate_group(self):
sandbox = mkdtemp(dir='/tmp')
os.mkdir(os.path.join(sandbox, 'groups'))
generator.GenerateGroup(
api_uri='foobar',
_id='foo',
name='foo-bar',
profile='foo-bar-baz',
matchbox_path=sandbox
)
rmtree(sandbox)
def test_00_uri(self):
ip = self.gen.api_uri
self.assertIsNotNone(ip)
def test_01_metadata(self):
expect = {'etcd_initial_cluster': '',
'api_uri': '%s' % self.gen.api_uri,
'ssh_authorized_keys': []}
self.gen._metadata()
self.assertEqual(expect['api_uri'], self.gen._target_data["metadata"]["api_uri"])
def test_990_generate(self):
expect = {
'profile': 'etcd-proxy.yaml',
'metadata': {
'api_uri': '%s' % self.gen.api_uri,
'ssh_authorized_keys': []
},
'id': 'etcd-proxy',
'name': 'etcd-proxy'
}
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="etcd-proxy.yaml",
matchbox_path=self.test_matchbox_path
)
result = new.generate()
self.assertEqual(expect["profile"], result["profile"])
self.assertEqual(expect["id"], result["id"])
self.assertEqual(expect["name"], result["name"])
self.assertEqual(expect["metadata"]["api_uri"], result["metadata"]["api_uri"])
def test_991_dump(self):
_id = "etcd-test-%s" % self.test_991_dump.__name__
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id=_id,
name="etcd-test",
profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path
)
self.assertTrue(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
self.assertFalse(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id=_id,
name="etcd-test",
profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path,
selector={"one": "selector"}
)
self.assertTrue(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
os.remove("%s/groups/%s.json" % (self.test_matchbox_path, _id))
class TestGenerateGroupsSelectorLower(GenerateGroupTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
os.environ["MATCHBOX_URI"] = "http://127.0.0.1:8080"
os.environ["API_URI"] = "http://127.0.0.1:5000"
cls.gen = generator.GenerateGroup(
api_uri=cls.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="TestGenerateProfiles",
selector={"mac": "08:00:27:37:28:2e"},
matchbox_path=cls.test_matchbox_path
)
def test_00_api_uri(self):
ip = self.gen.api_uri
self.assertIsNotNone(ip)
def test_01_metadata(self):
expect = {
'api_uri': "%s" % self.gen.api_uri,
'ssh_authorized_keys': []
}
self.gen._metadata()
self.gen._target_data["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, self.gen._target_data["metadata"])
def test_02_selector(self):
expect = {'mac': '08:00:27:37:28:2e'}
self.gen._selector()
self.assertEqual(expect, self.gen._target_data["selector"])
def test_990_generate(self):
expect = {
'profile': 'etcd-proxy.yaml',
'metadata': {
'api_uri': self.gen.api_uri,
'selector': {'mac': '08:00:27:37:28:2e'},
'ssh_authorized_keys': []
},
'id': 'etcd-proxy',
'name': 'etcd-proxy',
'selector': {'mac': '08:00:27:37:28:2e'}
}
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="etcd-proxy", name="etcd-proxy", profile="etcd-proxy.yaml",
selector={"mac": "08:00:27:37:28:2e"},
matchbox_path=self.test_matchbox_path)
result = new.generate()
result["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, result)
def test_991_dump(self):
_id = "etcd-test-%s" % self.test_991_dump.__name__
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="%s" % _id, name="etcd-test", profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path,
selector={"mac": "08:00:27:37:28:2e"}
)
self.assertTrue(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
os.remove("%s/groups/%s.json" % (self.test_matchbox_path, _id))
class TestGenerateGroupsSelectorUpper(GenerateGroupTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
os.environ["MATCHBOX_URI"] = "http://127.0.0.1:8080"
os.environ["API_URI"] = "http://127.0.0.1:5000"
cls.gen = generator.GenerateGroup(
api_uri=cls.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="TestGenerateProfiles",
selector={"mac": "08:00:27:37:28:2E"},
matchbox_path=cls.test_matchbox_path
)
def test_00_ip_address(self):
ip = self.gen.api_uri
self.assertIsNotNone(ip)
def test_01_metadata(self):
expect = {
'api_uri': "%s" % self.gen.api_uri,
'ssh_authorized_keys': []
}
self.gen._metadata()
self.gen._target_data["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, self.gen._target_data["metadata"])
def test_02_selector(self):
expect = {'mac': '08:00:27:37:28:2e'}
self.gen._selector()
self.assertEqual(expect, self.gen._target_data["selector"])
def test_990_generate(self):
expect = {
'profile': 'etcd-proxy.yaml',
'metadata': {
'api_uri': "%s" % self.gen.api_uri,
'selector': {'mac': '08:00:27:37:28:2e'},
'ssh_authorized_keys': []
},
'id': 'etcd-proxy',
'name': 'etcd-proxy',
'selector': {'mac': '08:00:27:37:28:2e'}
}
new = generator.GenerateGroup(
api_uri=self.api_uri, _id="etcd-proxy",
name="etcd-proxy",
profile="etcd-proxy.yaml",
selector={"mac": "08:00:27:37:28:2e"},
matchbox_path=self.test_matchbox_path
)
result = new.generate()
result["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, result)
def test_991_dump(self):
_id = "etcd-test-%s" % self.test_991_dump.__name__
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="%s" % _id, name="etcd-test", profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path,
selector={"mac": "08:00:27:37:28:2e"}
)
new.dump()
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
os.remove("%s/groups/%s.json" % (self.test_matchbox_path, _id))
class TestGenerateGroupsExtraMetadata(GenerateGroupTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
os.environ["MATCHBOX_URI"] = "http://127.0.0.1:8080"
os.environ["API_URI"] = "http://127.0.0.1:5000"
cls.gen = generator.GenerateGroup(
api_uri=cls.api_uri,
_id="etcd-proxy",
name="etcd-proxy",
profile="TestGenerateProfiles",
selector={"mac": "08:00:27:37:28:2E"},
metadata={"etcd_initial_cluster": "static0=http://192.168.1.1:2379",
"api_seed": "http://192.168.1.2:5000"},
matchbox_path=cls.test_matchbox_path
)
def test_00_api_uri(self):
ip = self.gen.api_uri
self.assertIsNotNone(ip)
def test_01_metadata(self):
expect = {'etcd_initial_cluster': 'static0=http://192.168.1.1:2379',
'api_uri': "%s" % self.gen.api_uri,
'api_seed': 'http://192.168.1.2:5000',
'ssh_authorized_keys': []}
self.gen._metadata()
self.gen._target_data["metadata"]['ssh_authorized_keys'] = []
self.assertEqual(expect, self.gen._target_data["metadata"])
def test_02_selector(self):
expect = {'mac': '08:00:27:37:28:2e'}
self.gen._selector()
self.assertEqual(expect, self.gen._target_data["selector"])
def test_990_generate(self):
expect = {
'profile': 'etcd-proxy.yaml',
'metadata': {
'api_uri': "%s" % self.gen.api_uri,
'selector': {'mac': '08:00:27:37:28:2e'},
'ssh_authorized_keys': []
},
'id': 'etcd-proxy',
'name': 'etcd-proxy',
'selector': {'mac': '08:00:27:37:28:2e'}
}
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="etcd-proxy", name="etcd-proxy", profile="etcd-proxy.yaml",
selector={"mac": "08:00:27:37:28:2e"},
matchbox_path=self.test_matchbox_path
)
result = new.generate()
result["metadata"]["ssh_authorized_keys"] = []
self.assertEqual(expect, result)
def test_991_dump(self):
_id = "etcd-test-%s" % self.test_991_dump.__name__
new = generator.GenerateGroup(
api_uri=self.api_uri,
_id="%s" % _id, name="etcd-test", profile="etcd-test.yaml",
matchbox_path=self.test_matchbox_path,
selector={"mac": "08:00:27:37:28:2e"}
)
self.assertTrue(new.dump())
self.assertTrue(os.path.isfile("%s/groups/%s.json" % (self.test_matchbox_path, _id)))
os.remove("%s/groups/%s.json" % (self.test_matchbox_path, _id))
self.assertTrue(new.dump())
for i in range(10):
self.assertFalse(new.dump())
new.api_uri = "http://google.com"
self.assertTrue(new.dump())
self.assertFalse(new.dump())
| 35.446377
| 93
| 0.568485
| 1,435
| 12,229
| 4.618118
| 0.08223
| 0.054323
| 0.065188
| 0.057341
| 0.846084
| 0.814848
| 0.799608
| 0.787536
| 0.768975
| 0.75796
| 0
| 0.043057
| 0.282116
| 12,229
| 344
| 94
| 35.549419
| 0.711812
| 0
| 0
| 0.735974
| 1
| 0
| 0.188568
| 0
| 0
| 0
| 0
| 0
| 0.115512
| 1
| 0.092409
| false
| 0
| 0.016502
| 0
| 0.138614
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c7551a216f55773fcf2668fcef4ad367660f3169
| 21,599
|
py
|
Python
|
aispace/layers/callbacks/qa_evaluators.py
|
SmileGoat/AiSpace
|
35fc120667e4263c99b300815e0bf018f5064a40
|
[
"Apache-2.0"
] | 32
|
2020-01-16T07:59:03.000Z
|
2022-03-31T09:24:00.000Z
|
aispace/layers/callbacks/qa_evaluators.py
|
SmileGoat/AiSpace
|
35fc120667e4263c99b300815e0bf018f5064a40
|
[
"Apache-2.0"
] | 9
|
2020-06-05T03:27:06.000Z
|
2022-03-12T01:00:17.000Z
|
aispace/layers/callbacks/qa_evaluators.py
|
SmileGoat/AiSpace
|
35fc120667e4263c99b300815e0bf018f5064a40
|
[
"Apache-2.0"
] | 3
|
2020-06-09T02:22:50.000Z
|
2021-07-19T06:07:07.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2020-07-30 15:06
# @Author : yingyuankai
# @Email : [email protected]
# @File : qa_evaluators.py
import os
import logging
import numpy as np
import tensorflow as tf
import json
from pprint import pprint
from collections import defaultdict
from aispace.utils.eval_utils import calc_em_score, calc_f1_score
from aispace.utils.io_utils import save_json
from aispace.utils.print_utils import print_boxed
from aispace.utils.metrics_utils import ConfusionMatrix
__all__ = [
'EvaluatorForQaSimple',
'EvaluatorForQaWithImpossible'
]
logger = logging.getLogger(__name__)
class EvaluatorForQaSimple(tf.keras.callbacks.Callback):
"""
start_top_log_prob and end_top_log_prob's shape is [batch, k]
ref: https://keras.io/examples/nlp/text_extraction_with_bert/
"""
def __init__(self, validation_dataset, validation_steps, test_dataset, test_steps, report_dir, max_answer_length=64, n_best_size=5):
self.validation_dataset = validation_dataset
self.validation_steps = validation_steps
self.test_dataset = test_dataset
self.test_steps = test_steps
self.max_answer_length = max_answer_length
self.n_best_size = n_best_size
self.report_dir = report_dir
def on_epoch_end(self, epoch, logs=None):
new_logs = self.eval_process(self.validation_dataset, self.validation_steps)
logs = logs or {}
logs.update(new_logs)
print(f"Epoch: {epoch + 1}, val_f1_score: {logs['val_f1_score']:.4f}, val_em_score: {logs['val_em_score']:.4f}, "
f"val_f1_em_avg_score: {logs['val_f1_em_avg_score']:.4f}")
def on_train_end(self, logs=None):
logger.info("Start Evaluate.")
if not os.path.exists(self.report_dir):
os.makedirs(self.report_dir)
new_logs = self.eval_process(self.test_dataset, self.test_steps)
save_json(os.path.join(self.report_dir, 'performance.json'), new_logs)
print_boxed(f"Question Answer Evaluation")
pprint(new_logs)
logger.info(f"Save question answer reports in {self.report_dir}")
def eval_process(self, dataset, n_steps=None):
f1 = 0
em = 0
total_count = 0
skip_count = 0
start_top_res, end_top_res, unique_id_res = self.model.predict(dataset, steps=n_steps)
        start_top_log_prob, start_top_index = start_top_res[:, :, 0], start_top_res[:, :, 1].astype(int)  # [b, k]
        end_top_log_prob, end_top_index = end_top_res[:, :, 0], end_top_res[:, :, 1].astype(int)  # [b, k]
        unique_id_res = unique_id_res.astype(int)
# predict results
results = {}
for i in range(end_top_index.shape[0]):
unique_id = unique_id_res[i][0]
itm = {
'unique_id': unique_id,
'start_top_log_prob': start_top_log_prob[i],
'start_top_index': start_top_index[i],
'end_top_log_prob': end_top_log_prob[i],
'end_top_index': end_top_index[i],
}
results[unique_id] = itm
# raw inputs
start_n_top, end_n_top = start_top_index.shape[-1], end_top_index.shape[-1]
qas_id_to_examples = defaultdict(list)
unique_id_to_examples = {}
for idx, (inputs, outputs) in enumerate(dataset):
if n_steps is not None and idx >= n_steps:
break
            unique_ids = inputs['unique_id'].numpy().astype(int).tolist()
            offsets = inputs['offset'].numpy().astype(int).tolist()
qas_ids = inputs['qas_id'].numpy().astype(str).tolist()
doc_token2char_raw_start_indexs = inputs['doc_token2char_raw_start_index'].numpy().astype(str).tolist()
doc_token2char_raw_end_indexs = inputs['doc_token2char_raw_end_index'].numpy().astype(str).tolist()
doc_token2doc_indexs = inputs['doc_token2doc_index'].numpy().astype(str).tolist()
all_answers = inputs['all_answers'].numpy().astype(str).tolist()
answer_texts = inputs['answer_text'].numpy().tolist()
context_texts = inputs['context_text'].numpy().tolist()
question_texts = inputs['question_text'].numpy().tolist()
is_impossibles = inputs['is_impossible'].numpy().tolist()
            p_masks = inputs['p_mask'].numpy().astype(int).tolist()
for t in range(len(unique_ids)):
itm = {
'unique_id': unique_ids[t],
'qas_id': qas_ids[t],
'question_text': question_texts[t].decode("utf8"),
'context_text': context_texts[t].decode("utf8"),
'answer_text': answer_texts[t].decode("utf8"),
'all_answers': json.loads(all_answers[t]),
'doc_token2char_raw_start_index': json.loads(doc_token2char_raw_start_indexs[t]),
'doc_token2char_raw_end_index': json.loads(doc_token2char_raw_end_indexs[t]),
'doc_token2doc_index': json.loads(doc_token2doc_indexs[t]),
'is_impossible': is_impossibles[t],
'p_mask': p_masks[t],
'offset': offsets[t]
}
unique_id_to_examples[unique_ids[t]] = itm
qas_id_to_examples[qas_ids[t]].append(itm)
for qas_id, examples in qas_id_to_examples.items():
example_all_predicts = []
answers = set()
for example in examples:
cur_unique_id = example['unique_id']
if cur_unique_id not in results:
continue
if example['is_impossible'] == 1:
continue
# if example['answer_text'] not in answers:
# answers.append(example['answer_text'])
answers |= set(example['all_answers'])
cur_result = results.get(cur_unique_id)
cur_start_top_log_prob = cur_result['start_top_log_prob']
cur_start_top_index = cur_result['start_top_index']
cur_end_top_log_prob = cur_result['end_top_log_prob']
cur_end_top_index = cur_result['end_top_index']
cur_p_mask = example['p_mask']
for i in range(start_n_top):
start_prob = cur_start_top_log_prob[i]
start_index = cur_start_top_index[i]
if not cur_p_mask[start_index]:
continue
for j in range(end_n_top):
end_prob = cur_end_top_log_prob[j]
end_index = cur_end_top_index[j]
if not cur_p_mask[end_index]:
continue
answer_length = end_index - start_index + 1
if end_index < start_index or answer_length > self.max_answer_length:
continue
itm = {
'unique_id': cur_unique_id,
'start_prob': start_prob,
'start_index': start_index,
'end_prob': end_prob,
'end_index': end_index,
'predict_score': np.log(start_prob) + np.log(end_prob)
}
example_all_predicts.append(itm)
if len(answers) != 0:
total_count += 1
else:
skip_count += 1
continue
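            # rank candidate spans by joint log-probability and keep the
            # top n_best_size unique answer texts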
example_all_predicts.sort(key=lambda s: s['predict_score'], reverse=True)
example_top_predicts = []
is_visited = set()
for example_predict in example_all_predicts:
if len(example_top_predicts) >= self.n_best_size:
break
example_feature = unique_id_to_examples[example_predict['unique_id']]
if example_predict['start_index'] - example_feature['offset'] < 0 or example_predict['end_index'] - example_feature['offset'] < 0:
predict_text = ""
else:
predict_start = example_feature['doc_token2char_raw_start_index'][
example_predict['start_index'] - example_feature['offset']]
predict_end = example_feature['doc_token2char_raw_end_index'][
example_predict['end_index'] - example_feature['offset']]
predict_text = example_feature['context_text'][predict_start: predict_end + 1].strip()
if predict_text in is_visited:
continue
is_visited.add(predict_text)
itm = {
'predict_text': predict_text,
'start_prob': example_predict['start_prob'],
'end_prob': example_predict['end_prob'],
'predict_score': example_predict['predict_score']
}
example_top_predicts.append(itm)
if len(example_top_predicts) == 0:
example_top_predicts.append(
{
'predict_text': "",
'start_prob': 0.,
'end_prob': 0.,
'predict_score': 0.
}
)
example_best_predict = example_top_predicts[0]
cur_f1 = calc_f1_score(list(answers), example_best_predict['predict_text'])
cur_em = calc_em_score(list(answers), example_best_predict['predict_text'])
f1 += cur_f1
em += cur_em
# debug
if cur_f1 == 0 or cur_em == 0:
example_output = {}
example_output.update(example_best_predict)
example_output['question'] = examples[0]['question_text']
example_output['answer'] = answers
example_output['f1'] = cur_f1
example_output['em'] = cur_em
print(example_output)
# total_count = len(qas_id_to_examples)
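        # F1/EM are averaged only over examples that had reference answers;
        # the rest were tallied in skip_count above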
f1_score = f1 / total_count
em_score = em / total_count
logs = {}
logs['skip_count'] = skip_count
logs['total'] = total_count
logs['val_f1_score'] = f1_score
logs['val_em_score'] = em_score
logs['val_f1_em_avg_score'] = (em_score + f1_score) / 2.
return logs
class EvaluatorForQaWithImpossible(tf.keras.callbacks.Callback):
"""
start_top_log_prob and end_top_log_prob's shape is [batch, k, k]
ref: https://keras.io/examples/nlp/text_extraction_with_bert/
"""
def __init__(self, validation_dataset, validation_steps, test_dataset, test_steps,
report_dir, max_answer_length=64, n_best_size=5, is_impossible_threshold=0.5, weights=[1., 1., 1.]):
self.validation_dataset = validation_dataset
self.validation_steps = validation_steps
self.test_dataset = test_dataset
self.test_steps = test_steps
self.max_answer_length = max_answer_length
self.n_best_size = n_best_size
self.report_dir = report_dir
self.is_impossible_threshold = is_impossible_threshold
self.weights = weights
def on_epoch_end(self, epoch, logs=None):
new_logs = self.eval_process(self.validation_dataset, self.validation_steps)
logs = logs or {}
logs.update(new_logs)
print(f"\nEpoch: {epoch + 1}, val_f1_score: {logs['val_f1_score']:.4f}, "
f"val_em_score: {logs['val_em_score']:.4f}, "
f"val_f1_em_avg_score: {logs['val_f1_em_avg_score']:.4f},"
f" val_f1_for_impossible: {logs['val_f1_for_impossible']:.4f},"
f" val_f1_avg_score: {logs['val_f1_avg_score']:.4f},")
def on_train_end(self, logs=None):
logger.info("Start Evaluate.")
if not os.path.exists(self.report_dir):
os.makedirs(self.report_dir)
new_logs = self.eval_process(self.test_dataset, self.test_steps)
save_json(os.path.join(self.report_dir, 'performance.json'), new_logs)
print_boxed(f"Question Answer Evaluation")
pprint(new_logs)
logger.info(f"Save question answer reports in {self.report_dir}")
def eval_process(self, dataset, n_steps=None):
f1 = 0
em = 0
total_count = 0
skip_count = 0
start_top_res, end_top_res, answer_prob, unique_id_res = self.model.predict(dataset, steps=n_steps)
        start_top_log_prob, start_top_index = start_top_res[:, :, 0], start_top_res[:, :, 1].astype(int)  # [b, k]
        end_top_log_prob, end_top_index = end_top_res[:, :, :, 0], end_top_res[:, :, :, 1].astype(int)  # [b, k, k]
        unique_id_res = unique_id_res.astype(int)
# predict results
results = {}
for i in range(end_top_index.shape[0]):
unique_id = unique_id_res[i][0]
itm = {
'unique_id': unique_id,
'start_top_log_prob': start_top_log_prob[i],
'start_top_index': start_top_index[i],
'end_top_log_prob': end_top_log_prob[i],
'end_top_index': end_top_index[i],
'is_impossible_prob': answer_prob[i][0]
}
results[unique_id] = itm
# raw inputs
start_n_top, end_n_top = end_top_index.shape[1:]
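        # end_top_index is [b, k, k] here: each start candidate i carries its
        # own ranked list of end candidates, indexed as [i, j] in the loops below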
qas_id_to_examples = defaultdict(list)
unique_id_to_examples = {}
for idx, (inputs, outputs) in enumerate(dataset):
if n_steps is not None and idx >= n_steps:
break
            unique_ids = inputs['unique_id'].numpy().astype(int).tolist()
            offsets = inputs['offset'].numpy().astype(int).tolist()
qas_ids = inputs['qas_id'].numpy().astype(str).tolist()
doc_token2char_raw_start_indexs = inputs['doc_token2char_raw_start_index'].numpy().astype(str).tolist()
doc_token2char_raw_end_indexs = inputs['doc_token2char_raw_end_index'].numpy().astype(str).tolist()
doc_token2doc_indexs = inputs['doc_token2doc_index'].numpy().astype(str).tolist()
all_answers = inputs['all_answers'].numpy().astype(str).tolist()
answer_texts = inputs['answer_text'].numpy().tolist()
context_texts = inputs['context_text'].numpy().tolist()
question_texts = inputs['question_text'].numpy().tolist()
is_impossibles = inputs['is_impossible'].numpy().tolist()
            p_masks = inputs['p_mask'].numpy().astype(int).tolist()
for t in range(len(unique_ids)):
itm = {
'unique_id': unique_ids[t],
'qas_id': qas_ids[t],
'question_text': question_texts[t].decode("utf8"),
'context_text': context_texts[t].decode("utf8"),
'answer_text': answer_texts[t].decode("utf8"),
'all_answers': json.loads(all_answers[t]),
'doc_token2char_raw_start_index': json.loads(doc_token2char_raw_start_indexs[t]),
'doc_token2char_raw_end_index': json.loads(doc_token2char_raw_end_indexs[t]),
'doc_token2doc_index': json.loads(doc_token2doc_indexs[t]),
'is_impossible': is_impossibles[t],
'p_mask': p_masks[t],
'offset': offsets[t]
}
unique_id_to_examples[unique_ids[t]] = itm
qas_id_to_examples[qas_ids[t]].append(itm)
ground_truth_for_impossible, predictions_for_impossible = [], []
for qas_id, examples in qas_id_to_examples.items():
example_all_predicts = []
answers = set()
for example in examples:
cur_unique_id = example['unique_id']
if cur_unique_id not in results:
continue
# if example['answer_text'] not in answers:
# answers.append(example['answer_text'])
answers |= set(example['all_answers'])
cur_result = results.get(cur_unique_id)
cur_start_top_log_prob = cur_result['start_top_log_prob']
cur_start_top_index = cur_result['start_top_index']
cur_end_top_log_prob = cur_result['end_top_log_prob']
cur_end_top_index = cur_result['end_top_index']
ground_truth_for_impossible.append(example['is_impossible'])
predictions_for_impossible.append(int(cur_result['is_impossible_prob'] >= self.is_impossible_threshold))
if example['is_impossible'] == 1:
continue
cur_p_mask = example['p_mask']
for i in range(start_n_top):
start_prob = cur_start_top_log_prob[i]
start_index = cur_start_top_index[i]
if not cur_p_mask[start_index]:
continue
for j in range(end_n_top):
end_prob = cur_end_top_log_prob[i, j]
end_index = cur_end_top_index[i, j]
if not cur_p_mask[end_index]:
continue
answer_length = end_index - start_index + 1
if end_index < start_index or answer_length > self.max_answer_length:
continue
itm = {
'unique_id': cur_unique_id,
'start_prob': start_prob,
'start_index': start_index,
'end_prob': end_prob,
'end_index': end_index,
'predict_score': np.log(end_prob)
}
example_all_predicts.append(itm)
if len(answers) != 0 and "" not in answers:
total_count += 1
else:
skip_count += 1
continue
example_all_predicts.sort(key=lambda s: s['predict_score'], reverse=True)
example_top_predicts = []
is_visited = set()
for example_predict in example_all_predicts:
if len(example_top_predicts) >= self.n_best_size:
break
example_feature = unique_id_to_examples[example_predict['unique_id']]
if example_predict['start_index'] - example_feature['offset'] < 0 or example_predict['end_index'] - example_feature['offset'] < 0:
predict_text = ""
else:
predict_start = example_feature['doc_token2char_raw_start_index'][
example_predict['start_index'] - example_feature['offset']]
predict_end = example_feature['doc_token2char_raw_end_index'][
example_predict['end_index'] - example_feature['offset']]
predict_text = example_feature['context_text'][predict_start: predict_end + 1].strip()
if predict_text in is_visited:
continue
is_visited.add(predict_text)
itm = {
'predict_text': predict_text,
'start_prob': example_predict['start_prob'],
'end_prob': example_predict['end_prob'],
'predict_score': example_predict['predict_score']
}
example_top_predicts.append(itm)
if len(example_top_predicts) == 0:
example_top_predicts.append(
{
'predict_text': "",
'start_prob': 0.,
'end_prob': 0.,
'predict_score': 0.
}
)
example_best_predict = example_top_predicts[0]
cur_f1 = calc_f1_score(list(answers), example_best_predict['predict_text'])
cur_em = calc_em_score(list(answers), example_best_predict['predict_text'])
f1 += cur_f1
em += cur_em
# debug
if cur_f1 == 0 or cur_em == 0:
example_output = {}
example_output.update(example_best_predict)
example_output['question'] = examples[0]['question_text']
example_output['answer'] = answers
example_output['f1'] = cur_f1
example_output['em'] = cur_em
print(example_output)
# total_count = len(qas_id_to_examples)
f1_score = f1 / total_count
em_score = em / total_count
cm = ConfusionMatrix(ground_truth_for_impossible, predictions_for_impossible)
logs = {}
logs['skip_count'] = skip_count
logs['total'] = total_count
logs['val_f1_score'] = f1_score
logs['val_em_score'] = em_score
logs['val_f1_em_avg_score'] = (em_score * self.weights[0] + f1_score * self.weights[1]) / sum(self.weights[:2])
logs['val_f1_for_impossible'] = cm.avg_f1_score(average='macro')
logs['val_accuracy_for_impossible'] = cm.overall_accuracy()
logs['val_f1_avg_score'] = (em_score * self.weights[0] + f1_score * self.weights[1] +
logs['val_f1_for_impossible'] * self.weights[2]) / sum(self.weights)
return logs
| 44.997917
| 146
| 0.570582
| 2,567
| 21,599
| 4.40748
| 0.078691
| 0.028284
| 0.024748
| 0.018561
| 0.909758
| 0.901008
| 0.897207
| 0.884656
| 0.884656
| 0.884656
| 0
| 0.012167
| 0.32645
| 21,599
| 479
| 147
| 45.091858
| 0.765535
| 0.033752
| 0
| 0.821705
| 0
| 0.002584
| 0.13186
| 0.03407
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020672
| false
| 0
| 0.028424
| 0
| 0.059432
| 0.02584
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c780e591cbad3129663e73ce7d7f50fa3fb44f8f
| 3,675
|
py
|
Python
|
cms/migrations/0006_auto_20170122_1545.py
|
josemlp91/django-landingcms
|
9d9270204369e9663ff15eb0bd4c4093b3727c6c
|
[
"Apache-2.0"
] | null | null | null |
cms/migrations/0006_auto_20170122_1545.py
|
josemlp91/django-landingcms
|
9d9270204369e9663ff15eb0bd4c4093b3727c6c
|
[
"Apache-2.0"
] | null | null | null |
cms/migrations/0006_auto_20170122_1545.py
|
josemlp91/django-landingcms
|
9d9270204369e9663ff15eb0bd4c4093b3727c6c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-22 15:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('content', '0002_auto_20170122_1509'),
('cms', '0005_auto_20170122_1534'),
]
operations = [
migrations.AddField(
model_name='paginahome',
name='posts1_imagen',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts1_imagen', to='content.ImageContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts1_texto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts1_texto', to='content.TextContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts1_titulo',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts1_titulo', to='content.TitleContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts2_imagen',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts2_imagen', to='content.ImageContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts2_texto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts2_texto', to='content.TextContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts2_titulo',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts2_titulo', to='content.TitleContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts3_imagen',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts3_imagen', to='content.ImageContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts3_texto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts3_texto', to='content.TextContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts3_titulo',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts3_titulo', to='content.TitleContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts4_imagen',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts4_imagen', to='content.ImageContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts4_texto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts4_texto', to='content.TextContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts4_titulo',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts4_titulo', to='content.TitleContent'),
),
]
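For context, a hedged sketch of the model fields this auto-generated migration adds; the real PaginaHome model lives in cms/models.py and may declare them differently:
from django.db import models

class PaginaHome(models.Model):
    # One titulo/texto/imagen triple per post slot; posts2_* through
    # posts4_* repeat the same pattern.
    posts1_titulo = models.OneToOneField(
        'content.TitleContent', blank=True, null=True,
        on_delete=models.CASCADE, related_name='posts1_titulo')
    posts1_texto = models.OneToOneField(
        'content.TextContent', blank=True, null=True,
        on_delete=models.CASCADE, related_name='posts1_texto')
    posts1_imagen = models.OneToOneField(
        'content.ImageContent', blank=True, null=True,
        on_delete=models.CASCADE, related_name='posts1_imagen')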
| 47.115385
| 164
| 0.653605
| 395
| 3,675
| 5.901266
| 0.149367
| 0.048048
| 0.078078
| 0.122694
| 0.88417
| 0.88417
| 0.88417
| 0.835693
| 0.835693
| 0.835693
| 0
| 0.025453
| 0.219592
| 3,675
| 77
| 165
| 47.727273
| 0.787308
| 0.018503
| 0
| 0.514286
| 1
| 0
| 0.198668
| 0.012764
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.042857
| 0
| 0.085714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c7a995a9727073409d096c9586ccf8c67b8e8dc3
| 7,320
|
py
|
Python
|
sketchduino/template.py
|
rodrigopmatias/sketchduino
|
567023d69cd21bf1f573d2a26fc855183abdef7e
|
[
"Apache-2.0"
] | null | null | null |
sketchduino/template.py
|
rodrigopmatias/sketchduino
|
567023d69cd21bf1f573d2a26fc855183abdef7e
|
[
"Apache-2.0"
] | 3
|
2015-01-09T20:31:22.000Z
|
2015-01-09T20:31:22.000Z
|
sketchduino/template.py
|
rodrigopmatias/sketchduino
|
567023d69cd21bf1f573d2a26fc855183abdef7e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Copyright 2012 Rodrigo Pinheiro Matias <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
templates = {
'static_link': '''
\t@$(AR) rcs %(lib)s %(obj)s
\t@echo " [\033[33m\033[1mAR\033[0m] - \033[37m\033[1m%(obj)s\033[0m to \033[37m\033[1m%(lib)s\033[0m"''',
'c_obj_ruler': '''%(obj)s: %(source)s
\t@$(CC) $(CFLAGS) $(INCLUDE) -c %(source)s -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCC\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'asm_obj_ruler': '''%(obj)s: %(source)s
\t@$(AS) $(ASFLAGS) -o %(obj)s %(source)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mAS\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'c_asm_ruler': '''%(obj)s: %(source)s
\t@$(CC) $(CFLAGS) $(INCLUDE) -c %(source)s -S -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCC\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'cxx_obj_ruler': '''%(obj)s: %(source)s
\t@$(CXX) $(CXXFLAGS) $(INCLUDE) -c %(source)s -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCXX\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'cxx_asm_ruler': '''%(obj)s: %(source)s
\t@$(CXX) $(CXXFLAGS) $(INCLUDE) -c %(source)s -S -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCXX\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'avr-main.cc': '''/**
* Generated with sketch %(version)s
**/
#include <avr/sleep.h>
int main(void) {
for(;;)
sleep_mode();
return 0;
}''',
'main.cc': '''/**
* Generated with sketch %(version)s
**/
#include <Arduino.h>
/**
* Setup of the firmware
**/
void setup() {
}
/**
* Schedule events for firmware program
**/
void loop() {
delay(250);
}''',
'Makefile': '''##########################################
# Makefile generated with sketch %(version)s
##########################################
# Defines of Arduino
ARDUINO_HOME=%(sdk_home)s
ARDUINO_CORE=$(ARDUINO_HOME)/hardware/arduino/cores
ARDUINO_VARIANT=$(ARDUINO_HOME)/hardware/arduino/variants/%(variant)s
# Define toolchain
CC=%(cc)s
CXX=%(cxx)s
AS=%(asm)s
LD=%(ld)s
AR=%(ar)s
OBJCOPY=%(objcopy)s
SIZE=%(size)s
AVRDUDE=%(avrdude)s
PROGRAMER=%(programer)s
LIB=
INCLUDE=-I$(ARDUINO_CORE)/arduino -I$(ARDUINO_VARIANT) -I$(ARDUINO_CORE) -I lib/
#Define of MCU
MCU=%(mcu)s
CLOCK=%(clock_hz)sUL
ARDUINO=%(sdk_version)s
# Define compiler flags
_CFLAGS=-Os -Wall -fno-exceptions -ffunction-sections -fdata-sections -mmcu=$(MCU) \\
-DF_CPU=$(CLOCK) -MMD -DARDUINO=$(ARDUINO) \\
-fpermissive -lm -Wl,-u,vfprintf -lprintf_min
CFLAGS=$(_CFLAGS) -std=c99
CXXFLAGS=$(_CFLAGS) -std=c++98
ASFLAGS=-mmcu $(MCU)
# Define compiler rulers
OBJ=%(obj_dep)s
CORE_OBJ=%(core_obj_dep)s
AOUT=binary/%(project_name)s-%(mcu)s.elf
HEX=binary/%(project_name)s-%(mcu)s.hex
EPP=binary/%(project_name)s-%(mcu)s.epp
CORE_LIB=binary/core.a
LIB_DEPS=%(lib_deps)s
LD_FLAGS=-Os -Wl,--gc-sections -mmcu=$(MCU) -lm
AVRDUDE_OPTIONS = -p$(MCU) -c$(PROGRAMER) %(pgrextra)s -Uflash:w:$(HEX):i
SIZE_OPTS=-C --mcu=$(MCU)
CONFIG_EXISTS=$(shell [ -e "Makefile.config" ] && echo 1 || echo 0)
ifeq ($(CONFIG_EXISTS), 1)
include Makefile.config
endif
all: $(HEX) $(EPP)
rebuild: clean all
deploy: $(HEX)
\t$(AVRDUDE) $(AVRDUDE_OPTIONS)
$(HEX): $(EPP)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mFirmware\033[0m"
\t@$(OBJCOPY) -O ihex -R .eeprom $(AOUT) $(HEX)
$(EPP): $(AOUT)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mMemory of EEPROM\033[0m"
\t@$(OBJCOPY) -O ihex -j .eeprom --set-section-flags=.eeprom=alloc,load --no-change-warnings --change-section-lma .eeprom=0 $(AOUT) $(EPP)
size: $(AOUT)
\t@$(SIZE) $(SIZE_OPTS) $(AOUT)
$(AOUT): clear-compiler $(OBJ) $(CORE_LIB) $(LIB_DEPS)
\t@echo " [\033[33m\033[1mLD\033[0m] - \033[37m\033[1m$(AOUT)\033[0m"
\t@$(CXX) $(LD_FLAGS) $(LIB) $(OBJ) $(CORE_LIB) $(LIB_DEPS) -o $(AOUT)
$(CORE_LIB): $(CORE_OBJ)%(core_ruler)s
%(asm_rulers)s
%(obj_rulers)s
%(libs_rulers)s
%(core_asm_rulers)s
%(core_obj_rulers)s
clear-compiler:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear compiler logs"
\trm -f compile.*
clean-tmp:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
clean-bin:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
clean:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
''',
'avr-Makefile': '''##########################################
# Makefile generated with sketch %(version)s
##########################################
# Define toolchain
CC=%(cc)s
CXX=%(cxx)s
AS=%(asm)s
LD=%(ld)s
AR=%(ar)s
OBJCOPY=%(objcopy)s
SIZE=%(size)s
AVRDUDE=%(avrdude)s
PROGRAMER=%(programer)s
LIB=
INCLUDE=-I lib/
#Define of MCU
MCU=%(mcu)s
CLOCK=%(clock_hz)sUL
# Define compiler flags
_CFLAGS=-Os -Wall -fno-exceptions -ffunction-sections -fdata-sections -mmcu=$(MCU) \\
-DF_CPU=$(CLOCK) -fpermissive -lm -Wl,-u,vfprintf -lprintf_min
CFLAGS=$(_CFLAGS) -std=c99
CXXFLAGS=$(_CFLAGS) -std=c++98
ASFLAGS=-mmcu $(MCU)
# Define compiler rulers
ASM=%(asm_dep)s
OBJ=%(obj_dep)s
LIB_DEPS=%(lib_deps)s
AOUT=binary/%(project_name)s-%(mcu)s.elf
HEX=binary/%(project_name)s-%(mcu)s.hex
EPP=binary/%(project_name)s-%(mcu)s.epp
LD_FLAGS=-Os -Wl,--gc-sections -mmcu=$(MCU) -lm
AVRDUDE_OPTIONS = -p$(MCU) -c$(PROGRAMER) %(pgrextra)s -Uflash:w:$(HEX):i
SIZE_OPTS=-A
CONFIG_EXISTS=$(shell [ -e "Makefile.config" ] && echo 1 || echo 0)
ifeq ($(CONFIG_EXISTS), 1)
include Makefile.config
endif
all: $(HEX) $(EPP)
rebuild: clean all
deploy: $(HEX)
\t$(AVRDUDE) $(AVRDUDE_OPTIONS)
$(HEX): $(EPP)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mFirmware\033[0m"
\t@$(OBJCOPY) -O ihex -R .eeprom $(AOUT) $(HEX)
$(EPP): $(AOUT)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mMemory of EEPROM\033[0m"
\t@$(OBJCOPY) -O ihex -j .eeprom --set-section-flags=.eeprom=alloc,load --no-change-warnings --change-section-lma .eeprom=0 $(AOUT) $(EPP)
size: $(AOUT)
\t@$(SIZE) $(SIZE_OPTS) $(AOUT)
$(AOUT): clear-compiler $(OBJ) $(LIB_DEPS)
\t@echo " [\033[33m\033[1mLD\033[0m] - \033[37m\033[1m$(AOUT)\033[0m"
\t@$(CXX) $(LD_FLAGS) $(LIB) $(OBJ) $(LIB_DEPS) -o $(AOUT)
%(asm_rulers)s
%(obj_rulers)s
%(libs_rulers)s
clear-compiler:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear compiler logs"
\t@rm -f compile.*
clean-tmp:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
clean-bin:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
clean:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
'''
}
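These templates are plain %-format strings, so rendering a Makefile rule is a single dict substitution; a minimal usage sketch (the paths are made up):
rule = templates['c_obj_ruler'] % {
    'obj': 'tmp/main.o',
    'source': 'src/main.c',
}
print(rule)
# tmp/main.o: src/main.c
#     @$(CC) $(CFLAGS) $(INCLUDE) -c src/main.c -o tmp/main.o 1>> compile.log 2>> compile.err
#     @echo "  [CC] - src/main.c"   (with ANSI color escapes)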
| 27.518797
| 138
| 0.630464
| 1,187
| 7,320
| 3.815501
| 0.187026
| 0.03864
| 0.038861
| 0.053433
| 0.765732
| 0.751601
| 0.748068
| 0.722234
| 0.704571
| 0.692427
| 0
| 0.077551
| 0.129781
| 7,320
| 265
| 139
| 27.622642
| 0.633438
| 0.083607
| 0
| 0.704082
| 0
| 0.183673
| 0.973433
| 0.278358
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0.005102
| 0.010204
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c7b0f4e12943a98dbd413a45f48a80cdcaf7bcf6
| 6,517
|
py
|
Python
|
testData/devSeedData.py
|
bgporter/wastebook
|
79885a8d503452e1fbeb8ff445cedd2daafff2a0
|
[
"MIT"
] | null | null | null |
testData/devSeedData.py
|
bgporter/wastebook
|
79885a8d503452e1fbeb8ff445cedd2daafff2a0
|
[
"MIT"
] | null | null | null |
testData/devSeedData.py
|
bgporter/wastebook
|
79885a8d503452e1fbeb8ff445cedd2daafff2a0
|
[
"MIT"
] | null | null | null |
'''
fake posts to bootstrap a development database. Put any interesting cases
useful for development in here.
'''
from datetime import datetime
POST_DATA_1 = [
{
"created" : datetime(2015, 10, 1),
"published": datetime(2015, 10, 1),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "First Post",
"slug": "",
"text": "a bunch of words #foo #bar",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": False,
"status": "published",
"title": "Second Post",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": False,
"status": "draft",
"title": "Third Post",
"slug": "",
"text": "This is a #draft #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "draft",
"title": "Fourth Post",
"slug": "",
"text": "This is a #draft #post",
"tags": [],
"type": "Post"
},
]
POST_DATA_2 = [
{
"created" : datetime(2015, 3, 2),
"published": datetime(2015, 3, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 1",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 4, 2),
"published": datetime(2015, 4, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 2",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 5, 2),
"published": datetime(2015, 5, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 3",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 5, 2),
"published": datetime(2015, 5, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 4",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 6, 2),
"published": datetime(2015, 6, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 5",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 6, 2),
"published": datetime(2015, 6, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 6",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 6, 2),
"published": datetime(2015, 6, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 7",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 7, 2),
"published": datetime(2015, 7, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 8",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 8, 2),
"published": datetime(2015, 8, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 9",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 9, 2),
"published": datetime(2015, 9, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 10",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 11",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
]
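A hypothetical seeding loop for this data; wastebook's actual bootstrap code is not shown here, and the MongoDB database and collection names are assumptions:
from pymongo import MongoClient

def seed(uri='mongodb://localhost:27017', db_name='wastebook_dev'):
    # Insert both fixture lists and report how many posts now exist.
    posts = MongoClient(uri)[db_name]['posts']
    posts.insert_many(POST_DATA_1 + POST_DATA_2)
    return posts.count_documents({})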
| 29.224215
| 77
| 0.399724
| 567
| 6,517
| 4.587302
| 0.107584
| 0.207612
| 0.134564
| 0.098039
| 0.839677
| 0.839677
| 0.838139
| 0.838139
| 0.838139
| 0.838139
| 0
| 0.081816
| 0.418598
| 6,517
| 223
| 78
| 29.224215
| 0.604645
| 0.016265
| 0
| 0.706977
| 0
| 0
| 0.281069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004651
| 0
| 0.004651
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c7b88fe5b2537ef40175e1a577b998fdb2d3a5c9
| 1,233
|
py
|
Python
|
SummaryExternalClient.py
|
Hackillinois2k18/Main-Repo
|
e998cc3283e0469b98a842220a30a72c5b105dad
|
[
"MIT"
] | 5
|
2020-03-10T03:23:18.000Z
|
2021-11-12T17:06:51.000Z
|
SummaryExternalClient.py
|
Hackillinois2k18/FyveBot
|
e998cc3283e0469b98a842220a30a72c5b105dad
|
[
"MIT"
] | 3
|
2018-02-24T05:25:28.000Z
|
2018-02-24T05:43:49.000Z
|
SummaryExternalClient.py
|
Hackillinois2k18/Main-Repo
|
e998cc3283e0469b98a842220a30a72c5b105dad
|
[
"MIT"
] | 3
|
2019-01-20T14:50:11.000Z
|
2021-11-12T17:06:55.000Z
|
import requests
import credentials
class SummaryExternalClient:
def pullSummaryForUrl(self, artUrl, title):
url = "https://api.aylien.com/api/v1/summarize"
headers = {"X-AYLIEN-TextAPI-Application-Key": credentials.AYLIEN_APP_KEY,
"X-AYLIEN-TextAPI-Application-ID" : credentials.AYLIEN_APP_ID}
params = {"url" : artUrl,
"title" : title,
"sentences_number": 7}
summary = requests.get(url=url, headers=headers, params=params)
try:
sentences = summary.json()['sentences']
except (ValueError, KeyError):  # malformed JSON body or missing 'sentences' key
sentences = []
return sentences
def pullSummaryForText(self, text, title):
url = "https://api.aylien.com/api/v1/summarize"
headers = {"X-AYLIEN-TextAPI-Application-Key": credentials.AYLIEN_APP_KEY,
"X-AYLIEN-TextAPI-Application-ID" : credentials.AYLIEN_APP_ID}
params = {"text": text,
"title": title,
"sentences_number": 7}
summary = requests.get(url=url, headers=headers, params=params)
try:
sentences = summary.json()['sentences']
except (ValueError, KeyError):  # malformed JSON body or missing 'sentences' key
sentences = []
return sentences
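Usage sketch; this assumes a credentials module providing AYLIEN_APP_KEY and AYLIEN_APP_ID, and that the AYLIEN endpoint is reachable:
client = SummaryExternalClient()
for sentence in client.pullSummaryForUrl('https://example.com/article',
                                         'Example title'):
    print(sentence)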
| 35.228571
| 82
| 0.586375
| 122
| 1,233
| 5.844262
| 0.295082
| 0.039271
| 0.078541
| 0.140252
| 0.813464
| 0.813464
| 0.813464
| 0.813464
| 0.813464
| 0.813464
| 0
| 0.004608
| 0.296026
| 1,233
| 34
| 83
| 36.264706
| 0.81682
| 0
| 0
| 0.758621
| 0
| 0
| 0.219968
| 0.102273
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.068966
| 0
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c7cb2a8553964cb9e86d2c3de96decefdde5eb6c
| 89
|
py
|
Python
|
tf2stats/__init__.py
|
TheAntecedent/Quintessence
|
f32dc1b11ded212121ebc0f925d15c845cb6ea4b
|
[
"MIT"
] | 1
|
2019-10-08T04:38:08.000Z
|
2019-10-08T04:38:08.000Z
|
tf2stats/__init__.py
|
TheAntecedent/Quintessence
|
f32dc1b11ded212121ebc0f925d15c845cb6ea4b
|
[
"MIT"
] | 1
|
2021-04-30T20:51:05.000Z
|
2021-04-30T20:51:05.000Z
|
tf2stats/__init__.py
|
TheAntecedent/Quintessence
|
f32dc1b11ded212121ebc0f925d15c845cb6ea4b
|
[
"MIT"
] | null | null | null |
from .aggregated_stats import *
from .game_stats import *
from .stat_definitions import *
| 29.666667
| 31
| 0.808989
| 12
| 89
| 5.75
| 0.583333
| 0.318841
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123596
| 89
| 3
| 32
| 29.666667
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c7d37af76275d31df153580818ea0db96b86762e
| 1,210
|
py
|
Python
|
supermario/supermario 1117/start_state.py
|
Kimmiryeong/2DGP_GameProject
|
ad3fb197aab27227fc92fd404b2c310f8d0827ca
|
[
"MIT"
] | null | null | null |
supermario/supermario 1117/start_state.py
|
Kimmiryeong/2DGP_GameProject
|
ad3fb197aab27227fc92fd404b2c310f8d0827ca
|
[
"MIT"
] | null | null | null |
supermario/supermario 1117/start_state.py
|
Kimmiryeong/2DGP_GameProject
|
ad3fb197aab27227fc92fd404b2c310f8d0827ca
|
[
"MIT"
] | null | null | null |
import game_framework
from pico2d import *
import title_state
name = "StartState"
image = None
logo_time = 0.0
def enter():
global image
image = load_image('kpu_credit.png')
def exit():
global image
del(image)
def update():
global logo_time
if (logo_time > 1.0):
logo_time = 0.8
game_framework.change_state(title_state)
delay(0.01)
logo_time += 0.05
def draw():
global image
clear_canvas()
image.draw(400,300)
update_canvas()
def handle_events():
events = get_events()
pass
def pause(): pass
def resume(): pass
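For reference, a hypothetical sketch of the state protocol this module implements; the real pico2d course game_framework differs in detail:
def run_state(state):
    # A state is any module exposing enter/exit/update/draw/handle_events.
    state.enter()
    try:
        while True:               # one iteration per frame
            state.handle_events()
            state.update()        # may switch states via change_state()
            state.draw()
    finally:
        state.exit()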
| 11.747573
| 48
| 0.634711
| 169
| 1,210
| 4.35503
| 0.254438
| 0.108696
| 0.07337
| 0.0625
| 0.978261
| 0.978261
| 0.978261
| 0.978261
| 0.978261
| 0.978261
| 0
| 0.042506
| 0.261157
| 1,210
| 102
| 49
| 11.862745
| 0.780761
| 0
| 0
| 0.947368
| 0
| 0
| 0.039801
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.105263
| 0.105263
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
c7d524f7dbf8736dbbb40f3bb15a61c60aba8191
| 22,620
|
py
|
Python
|
egs/librispeech/ASR/transducer/test_rnn.py
|
rosrad/icefall
|
6f282731286a6855658c6882c3c938437448e05e
|
[
"Apache-2.0"
] | null | null | null |
egs/librispeech/ASR/transducer/test_rnn.py
|
rosrad/icefall
|
6f282731286a6855658c6882c3c938437448e05e
|
[
"Apache-2.0"
] | null | null | null |
egs/librispeech/ASR/transducer/test_rnn.py
|
rosrad/icefall
|
6f282731286a6855658c6882c3c938437448e05e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from transducer.rnn import (
LayerNormGRU,
LayerNormGRUCell,
LayerNormGRULayer,
LayerNormLSTM,
LayerNormLSTMCell,
LayerNormLSTMLayer,
)
def get_devices():
devices = [torch.device("cpu")]
if torch.cuda.is_available():
devices.append(torch.device("cuda", 0))
return devices
def assert_allclose(a: torch.Tensor, b: torch.Tensor, atol=1e-6, **kwargs):
assert torch.allclose(
a, b, atol=atol, **kwargs
), f"{(a - b).abs().max()}, {a.numel()}"
def test_layernorm_lstm_cell_jit(device="cpu"):
input_size = 10
hidden_size = 20
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
cell = LayerNormLSTMCell(
input_size=input_size,
hidden_size=hidden_size,
bias=bias,
device=device,
)
torch.jit.script(cell)
def test_layernorm_lstm_cell_constructor(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
self_cell = LayerNormLSTMCell(
input_size,
hidden_size,
ln=nn.Identity,
device=device,
)
torch_cell = nn.LSTMCell(
input_size,
hidden_size,
).to(device)
for name, param in self_cell.named_parameters():
assert param.shape == getattr(torch_cell, name).shape
assert len(self_cell.state_dict()) == len(torch_cell.state_dict())
def test_layernorm_lstm_cell_with_projection_jit(device="cpu"):
input_size = 10
hidden_size = 20
proj_size = 5
self_cell = LayerNormLSTMCell(
input_size,
hidden_size,
proj_size=proj_size,
device=device,
)
torch.jit.script(self_cell)
def test_layernorm_lstm_cell_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_cell = LayerNormLSTMCell(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
device=device,
)
torch_cell = nn.LSTMCell(
input_size,
hidden_size,
bias=bias,
).to(device)
with torch.no_grad():
for name, torch_param in torch_cell.named_parameters():
self_param = getattr(self_cell, name)
torch_param.copy_(self_param)
N = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, input_size, device=device).requires_grad_()
h = torch.rand(N, hidden_size, device=device)
c = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_h, self_c = self_cell(x.clone(), (h, c))
torch_h, torch_c = torch_cell(x_clone, (h, c))
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
self_hc = self_h * self_c
torch_hc = torch_h * torch_c
(
self_hc.reshape(-1) * torch.arange(self_hc.numel(), device=device)
).sum().backward()
(
torch_hc.reshape(-1) * torch.arange(torch_hc.numel(), device=device)
).sum().backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-3)
def test_layernorm_lstm_cell_with_projection_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=10, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
proj_size = torch.randint(low=2, high=hidden_size, size=(1,)).item()
self_cell = LayerNormLSTMCell(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
proj_size=proj_size,
device=device,
)
torch_cell = nn.LSTM(
input_size,
hidden_size,
bias=bias,
proj_size=proj_size,
batch_first=True,
).to(device)
with torch.no_grad():
for name, self_param in self_cell.named_parameters():
getattr(torch_cell, f"{name}_l0").copy_(self_param)
N = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, input_size, device=device).requires_grad_()
h = torch.rand(N, proj_size, device=device)
c = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_h, self_c = self_cell(x.clone(), (h, c))
_, (torch_h, torch_c) = torch_cell(
x_clone.unsqueeze(1), (h.unsqueeze(0), c.unsqueeze(0))
)
torch_h = torch_h.squeeze(0)
torch_c = torch_c.squeeze(0)
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
(self_h.sum() * self_c.sum()).backward()
(torch_h.sum() * torch_c.sum()).backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-5)
def test_layernorm_lstm_layer_jit(device="cpu"):
input_size = 10
hidden_size = 20
layer = LayerNormLSTMLayer(
input_size,
hidden_size=hidden_size,
device=device,
)
torch.jit.script(layer)
def test_layernorm_lstm_layer_with_project_jit(device="cpu"):
input_size = 10
hidden_size = 20
proj_size = 5
layer = LayerNormLSTMLayer(
input_size,
hidden_size=hidden_size,
proj_size=proj_size,
device=device,
)
torch.jit.script(layer)
def test_layernorm_lstm_layer_with_projection_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=10, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
proj_size = torch.randint(low=2, high=hidden_size, size=(1,)).item()
self_layer = LayerNormLSTMLayer(
input_size,
hidden_size,
bias=bias,
proj_size=proj_size,
ln=nn.Identity,
device=device,
)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
h = torch.rand(N, proj_size, device=device)
c = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_y, (self_h, self_c) = self_layer(x, (h, c))
torch_layer = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
proj_size=proj_size,
batch_first=True,
dropout=0,
bidirectional=False,
).to(device)
with torch.no_grad():
for name, self_param in self_layer.cell.named_parameters():
getattr(torch_layer, f"{name}_l0").copy_(self_param)
torch_y, (torch_h, torch_c) = torch_layer(
x_clone, (h.unsqueeze(0), c.unsqueeze(0))
)
assert_allclose(self_y, torch_y)
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
self_y.sum().backward()
torch_y.sum().backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-5)
def test_layernorm_lstm_layer_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_layer = LayerNormLSTMLayer(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
device=device,
)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
h = torch.rand(N, hidden_size, device=device)
c = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_y, (self_h, self_c) = self_layer(x, (h, c))
torch_layer = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
batch_first=True,
dropout=0,
bidirectional=False,
).to(device)
with torch.no_grad():
for name, self_param in self_layer.cell.named_parameters():
getattr(torch_layer, f"{name}_l0").copy_(self_param)
torch_y, (torch_h, torch_c) = torch_layer(
x_clone, (h.unsqueeze(0), c.unsqueeze(0))
)
assert_allclose(self_y, torch_y)
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
self_hc = self_h * self_c
torch_hc = torch_h * torch_c
self_hc_sum = (
self_hc.reshape(-1) * torch.arange(self_hc.numel(), device=device)
).sum()
torch_hc_sum = (
torch_hc.reshape(-1) * torch.arange(torch_hc.numel(), device=device)
).sum()
self_y_sum = (
self_y.reshape(-1) * torch.arange(self_y.numel(), device=device)
).sum()
torch_y_sum = (
torch_y.reshape(-1) * torch.arange(torch_y.numel(), device=device)
).sum()
(self_hc_sum + self_y_sum).backward()
(torch_hc_sum + torch_y_sum).backward()
assert_allclose(x.grad, x_clone.grad, atol=0.1)
def test_layernorm_lstm_jit(device="cpu"):
input_size = 2
hidden_size = 3
num_layers = 4
bias = True
lstm = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
ln=nn.Identity,
device=device,
)
torch.jit.script(lstm)
def test_layernorm_lstm_with_projection_jit(device="cpu"):
input_size = 2
hidden_size = 5
proj_size = 3
num_layers = 4
bias = True
lstm = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
proj_size=proj_size,
ln=nn.Identity,
device=device,
)
torch.jit.script(lstm)
def test_layernorm_lstm_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
num_layers = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_lstm = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
ln=nn.Identity,
device=device,
)
torch_lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
bidirectional=False,
).to(device)
assert len(self_lstm.state_dict()) == len(torch_lstm.state_dict())
with torch.no_grad():
for name, param in self_lstm.named_parameters():
# name has the form layers.0.cell.weight_hh
parts = name.split(".")
layer_num = parts[1]
getattr(torch_lstm, f"{parts[-1]}_l{layer_num}").copy_(param)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
hs = [torch.rand(N, hidden_size, device=device) for _ in range(num_layers)]
cs = [torch.rand(N, hidden_size, device=device) for _ in range(num_layers)]
states = list(zip(hs, cs))
x_clone = x.detach().clone().requires_grad_()
self_y, self_states = self_lstm(x, states)
h = torch.stack(hs)
c = torch.stack(cs)
torch_y, (torch_h, torch_c) = torch_lstm(x_clone, (h, c))
assert_allclose(self_y, torch_y)
self_h = torch.stack([s[0] for s in self_states])
self_c = torch.stack([s[1] for s in self_states])
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
s = self_y.reshape(-1)
t = torch_y.reshape(-1)
s_sum = (s * torch.arange(s.numel(), device=device)).sum()
t_sum = (t * torch.arange(t.numel(), device=device)).sum()
shc_sum = s_sum + self_h.sum() + self_c.sum()
thc_sum = t_sum + torch_h.sum() + torch_c.sum()
shc_sum.backward()
thc_sum.backward()
assert_allclose(x.grad, x_clone.grad)
def test_layernorm_lstm_with_projection_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=10, high=100, size=(1,)).item()
proj_size = torch.randint(low=2, high=hidden_size, size=(1,)).item()
num_layers = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_lstm = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
proj_size=proj_size,
ln=nn.Identity,
device=device,
)
torch_lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
proj_size=proj_size,
batch_first=True,
bidirectional=False,
).to(device)
assert len(self_lstm.state_dict()) == len(torch_lstm.state_dict())
with torch.no_grad():
for name, param in self_lstm.named_parameters():
# name has the form layers.0.cell.weight_hh
parts = name.split(".")
layer_num = parts[1]
getattr(torch_lstm, f"{parts[-1]}_l{layer_num}").copy_(param)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
hs = [torch.rand(N, proj_size, device=device) for _ in range(num_layers)]
cs = [torch.rand(N, hidden_size, device=device) for _ in range(num_layers)]
states = list(zip(hs, cs))
x_clone = x.detach().clone().requires_grad_()
self_y, self_states = self_lstm(x, states)
h = torch.stack(hs)
c = torch.stack(cs)
torch_y, (torch_h, torch_c) = torch_lstm(x_clone, (h, c))
assert_allclose(self_y, torch_y)
self_h = torch.stack([s[0] for s in self_states])
self_c = torch.stack([s[1] for s in self_states])
assert_allclose(self_h, torch_h)
assert_allclose(self_c, torch_c)
s = self_y.reshape(-1)
t = torch_y.reshape(-1)
s_sum = (s * torch.arange(s.numel(), device=device)).sum()
t_sum = (t * torch.arange(t.numel(), device=device)).sum()
shc_sum = s_sum + self_h.sum() + self_c.sum()
thc_sum = t_sum + torch_h.sum() + torch_c.sum()
shc_sum.backward()
thc_sum.backward()
assert_allclose(x.grad, x_clone.grad)
def test_layernorm_gru_cell_jit(device="cpu"):
input_size = 10
hidden_size = 20
cell = LayerNormGRUCell(
input_size=input_size,
hidden_size=hidden_size,
bias=True,
device=device,
)
torch.jit.script(cell)
def test_layernorm_gru_cell_constructor(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
self_cell = LayerNormGRUCell(
input_size,
hidden_size,
ln=nn.Identity,
device=device,
)
torch_cell = nn.GRUCell(
input_size,
hidden_size,
).to(device)
for name, param in self_cell.named_parameters():
assert param.shape == getattr(torch_cell, name).shape
assert len(self_cell.state_dict()) == len(torch_cell.state_dict())
def test_layernorm_gru_cell_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_cell = LayerNormGRUCell(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
device=device,
)
torch_cell = nn.GRUCell(
input_size,
hidden_size,
bias=bias,
).to(device)
with torch.no_grad():
for name, torch_param in torch_cell.named_parameters():
self_param = getattr(self_cell, name)
torch_param.copy_(self_param)
N = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, input_size, device=device).requires_grad_()
h = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_h = self_cell(x.clone(), h)
torch_h = torch_cell(x_clone, h)
assert_allclose(self_h, torch_h, atol=1e-5)
(
self_h.reshape(-1) * torch.arange(self_h.numel(), device=device)
).sum().backward()
(
torch_h.reshape(-1) * torch.arange(torch_h.numel(), device=device)
).sum().backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-3)
def test_layernorm_gru_layer_jit(device="cpu"):
input_size = 10
hidden_size = 20
layer = LayerNormGRULayer(
input_size,
hidden_size=hidden_size,
device=device,
)
torch.jit.script(layer)
def test_layernorm_gru_layer_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_layer = LayerNormGRULayer(
input_size,
hidden_size,
bias=bias,
ln=nn.Identity,
device=device,
)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
h = torch.rand(N, hidden_size, device=device)
x_clone = x.detach().clone().requires_grad_()
self_y, self_h = self_layer(x, h.clone())
torch_layer = nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
batch_first=True,
dropout=0,
bidirectional=False,
).to(device)
with torch.no_grad():
for name, self_param in self_layer.cell.named_parameters():
getattr(torch_layer, f"{name}_l0").copy_(self_param)
torch_y, torch_h = torch_layer(x_clone, h.unsqueeze(0))
assert_allclose(self_y, torch_y)
assert_allclose(self_h, torch_h)
self_y_sum = (
self_y.reshape(-1) * torch.arange(self_y.numel(), device=device)
).sum()
torch_y_sum = (
torch_y.reshape(-1) * torch.arange(torch_y.numel(), device=device)
).sum()
self_y_sum.backward()
torch_y_sum.backward()
assert_allclose(x.grad, x_clone.grad, atol=0.1)
def test_layernorm_gru_jit(device="cpu"):
input_size = 2
hidden_size = 3
num_layers = 4
bias = True
gru = LayerNormGRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
ln=nn.Identity,
device=device,
)
torch.jit.script(gru)
def test_layernorm_gru_forward(device="cpu"):
input_size = torch.randint(low=2, high=100, size=(1,)).item()
hidden_size = torch.randint(low=2, high=100, size=(1,)).item()
num_layers = torch.randint(low=2, high=100, size=(1,)).item()
bias = torch.randint(low=0, high=1000, size=(1,)).item() & 2 == 0
self_gru = LayerNormGRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
ln=nn.Identity,
device=device,
)
torch_gru = nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
bidirectional=False,
).to(device)
assert len(self_gru.state_dict()) == len(torch_gru.state_dict())
with torch.no_grad():
for name, param in self_gru.named_parameters():
# name has the form layers.0.cell.weight_hh
parts = name.split(".")
layer_num = parts[1]
getattr(torch_gru, f"{parts[-1]}_l{layer_num}").copy_(param)
N = torch.randint(low=2, high=100, size=(1,))
T = torch.randint(low=2, high=100, size=(1,))
x = torch.rand(N, T, input_size, device=device).requires_grad_()
states = [
torch.rand(N, hidden_size, device=device) for _ in range(num_layers)
]
x_clone = x.detach().clone().requires_grad_()
self_y, self_states = self_gru(x, states)
torch_y, torch_states = torch_gru(x_clone, torch.stack(states))
assert_allclose(self_y, torch_y)
self_states = torch.stack(self_states)
assert_allclose(self_states, torch_states)
s = self_y.reshape(-1)
t = torch_y.reshape(-1)
s_sum = (s * torch.arange(s.numel(), device=device)).sum()
t_sum = (t * torch.arange(t.numel(), device=device)).sum()
s_state_sum = s_sum + self_states.sum()
t_state_sum = t_sum + torch_states.sum()
s_state_sum.backward()
t_state_sum.backward()
assert_allclose(x.grad, x_clone.grad, atol=1e-2)
def _test_lstm(device):
test_layernorm_lstm_cell_jit(device)
test_layernorm_lstm_cell_constructor(device)
test_layernorm_lstm_cell_with_projection_jit(device)
test_layernorm_lstm_cell_forward(device)
test_layernorm_lstm_cell_with_projection_forward(device)
#
test_layernorm_lstm_layer_jit(device)
test_layernorm_lstm_layer_with_project_jit(device)
test_layernorm_lstm_layer_forward(device)
test_layernorm_lstm_layer_with_projection_forward(device)
test_layernorm_lstm_jit(device)
test_layernorm_lstm_with_projection_jit(device)
test_layernorm_lstm_forward(device)
test_layernorm_lstm_with_projection_forward(device)
def _test_gru(device):
test_layernorm_gru_cell_jit(device)
test_layernorm_gru_cell_constructor(device)
test_layernorm_gru_cell_forward(device)
#
test_layernorm_gru_layer_jit(device)
test_layernorm_gru_layer_forward(device)
#
test_layernorm_gru_jit(device)
test_layernorm_gru_forward(device)
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
def main():
for device in get_devices():
print("device", device)
_test_lstm(device)
_test_gru(device)
if __name__ == "__main__":
torch.manual_seed(20211202)
main()
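The tests above repeatedly weight outputs by torch.arange before calling backward; a standalone illustration of why — each element gets a distinct gradient, so a mismatch anywhere in the tensor shows up in x.grad:
import torch

x = torch.rand(4, requires_grad=True)
y = 2 * x
# Plain y.sum() would give every element the same gradient (2.0) and
# could hide permuted or swapped elements; arange weighting does not.
(y.reshape(-1) * torch.arange(y.numel())).sum().backward()
print(x.grad)  # tensor([0., 2., 4., 6.])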
| 29.530026
| 79
| 0.642706
| 3,254
| 22,620
| 4.201291
| 0.062385
| 0.060712
| 0.058152
| 0.046814
| 0.895399
| 0.878356
| 0.837539
| 0.81384
| 0.79131
| 0.773243
| 0
| 0.023413
| 0.22206
| 22,620
| 765
| 80
| 29.568627
| 0.753481
| 0.035455
| 0
| 0.740678
| 0
| 0
| 0.010369
| 0.003303
| 0
| 0
| 0
| 0
| 0.066102
| 1
| 0.042373
| false
| 0
| 0.005085
| 0
| 0.049153
| 0.001695
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1bf02d45108f641ace7558443cc9e030c46ebd2f
| 65
|
py
|
Python
|
python/UdemyCourse/2022_Python_Bootcamp/basics/errors_exception_handling/__init__.py
|
pradyotprksh/development_learning
|
b6c5494196842f3c273965063815ad222a18b4da
|
[
"MIT"
] | 9
|
2021-09-03T06:20:48.000Z
|
2022-03-19T12:43:30.000Z
|
python/UdemyCourse/2022_Python_Bootcamp/basics/errors_exception_handling/__init__.py
|
pradyotprksh/development_learning
|
b6c5494196842f3c273965063815ad222a18b4da
|
[
"MIT"
] | null | null | null |
python/UdemyCourse/2022_Python_Bootcamp/basics/errors_exception_handling/__init__.py
|
pradyotprksh/development_learning
|
b6c5494196842f3c273965063815ad222a18b4da
|
[
"MIT"
] | 6
|
2021-08-16T01:13:36.000Z
|
2022-03-19T12:44:10.000Z
|
from .errors_exception_handling import errors_exception_handling
| 32.5
| 64
| 0.923077
| 8
| 65
| 7
| 0.625
| 0.535714
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061538
| 65
| 1
| 65
| 65
| 0.918033
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4037b4c2546a2c9d2335471a4c5869528e8d4f28
| 2,399
|
py
|
Python
|
apex/contrib/conv_bias_relu/conv_bias_relu.py
|
XL-Kong/Painter_GAN
|
23cfb57638497fdd1f2d8c09728b439b0e83efde
|
[
"BSD-3-Clause"
] | null | null | null |
apex/contrib/conv_bias_relu/conv_bias_relu.py
|
XL-Kong/Painter_GAN
|
23cfb57638497fdd1f2d8c09728b439b0e83efde
|
[
"BSD-3-Clause"
] | null | null | null |
apex/contrib/conv_bias_relu/conv_bias_relu.py
|
XL-Kong/Painter_GAN
|
23cfb57638497fdd1f2d8c09728b439b0e83efde
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import pdb
from torch.autograd import gradcheck
import fused_conv_bias_relu
class ConvBiasReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, padding, stride):
outputs = fused_conv_bias_relu.forward([x, weight, bias], padding, stride)
ctx.save_for_backward(x, weight, outputs[0])
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None
class ConvBiasMaskReLU_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, mask, padding, stride):
outputs = fused_conv_bias_relu.forward_mask([x, weight, bias, mask], padding, stride)
ctx.save_for_backward(x, weight, outputs[0])
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None, None
class ConvBias_(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, x, weight, bias, padding, stride):
outputs = fused_conv_bias_relu.forward_no_relu([x, weight, bias], padding, stride)
ctx.save_for_backward(x, weight)
ctx.padding = padding
ctx.stride = stride
return outputs[0]
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
bwd_args = [*ctx.saved_tensors, grad_output]
padding = ctx.padding
stride = ctx.stride
grads = fused_conv_bias_relu.backward_no_relu(bwd_args, padding, stride)
return grads[0], grads[1], grads[2], None, None
ConvBiasReLU = ConvBiasReLU_.apply
ConvBiasMaskReLU = ConvBiasMaskReLU_.apply
ConvBias = ConvBias_.apply
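Usage sketch; this requires building the fused_conv_bias_relu extension, and the shapes, half dtype, and channels-last layout below are guesses rather than documented requirements:
import torch

x = torch.randn(8, 64, 32, 32, device='cuda', dtype=torch.half)
x = x.to(memory_format=torch.channels_last)
w = torch.randn(128, 64, 3, 3, device='cuda', dtype=torch.half)
w = w.to(memory_format=torch.channels_last)
b = torch.randn(128, device='cuda', dtype=torch.half)
y = ConvBiasReLU(x, w, b, 1, 1)  # padding=1, stride=1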
| 31.155844
| 93
| 0.681951
| 311
| 2,399
| 5.061093
| 0.157556
| 0.099111
| 0.057814
| 0.075604
| 0.85197
| 0.85197
| 0.839898
| 0.839898
| 0.811944
| 0.811944
| 0
| 0.007435
| 0.21509
| 2,399
| 76
| 94
| 31.565789
| 0.828465
| 0
| 0
| 0.706897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.068966
| 0
| 0.327586
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
403b3bdafa5f824c48528757629f8e664b7cbcd3
| 9,018
|
py
|
Python
|
DesksReminder/Desks/accounts_desk.py
|
flopezag/fiware-management-scripts
|
3e9ccdb62a11ec0ffd0747511f5512bcdb0df729
|
[
"Apache-2.0"
] | null | null | null |
DesksReminder/Desks/accounts_desk.py
|
flopezag/fiware-management-scripts
|
3e9ccdb62a11ec0ffd0747511f5512bcdb0df729
|
[
"Apache-2.0"
] | 21
|
2017-01-17T12:19:47.000Z
|
2021-06-03T07:56:56.000Z
|
DesksReminder/Desks/accounts_desk.py
|
flopezag/fiware-management-scripts
|
3e9ccdb62a11ec0ffd0747511f5512bcdb0df729
|
[
"Apache-2.0"
] | 1
|
2017-05-03T21:42:49.000Z
|
2017-05-03T21:42:49.000Z
|
from datetime import date, datetime
from DesksReminder.Basics.dataFinder import Data
from DesksReminder.Basics.nickNames import ContactBook
from Config.settings import JIRA_URL
__author__ = 'Manuel Escriche'
class AccountsDesk:
def __init__(self):
self.contactBook = ContactBook()
def open(self):
messages = list()
for issue in Data().getAccountsDeskOpen():
created = datetime.strptime(issue.fields.created[:10], '%Y-%m-%d').date()
unanswered = (date.today() - created).days
if unanswered <= 1:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk : Open Issue'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed the issue {} is still OPEN, i.e. not replied for {} days.".format(issue, unanswered) +\
"\nLet me remind you of our rule to reply in the first 24 hours during working days." +\
"\nI would appreciate you spent a minute to reply to this request and to progress it " \
"on its workflow." +\
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def inProgress(self):
messages = list()
for issue in Data().getAccountsDeskInProgress():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 7:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: stalled Issue?'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} is In Progress but no update happened in the last {} days.".format(issue,
noupdated) +\
"\nI would appreciate you spent a minute to update it by reporting its progress in a comment" +\
"\n\tor if ready for analysing, please, evolve it" +\
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def scheduled(self):
messages = list()
for issue in Data().getAccountsDeskScheduled():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 7:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: stalled Issue?'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} is Scheduled but no update happened in the last {} days.".format(issue,
noupdated) +\
"\nI would appreciate you spent a minute to update it by reporting its progress in a comment" +\
"\n\tor if ready for Answered, please, evolve it" +\
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def answered(self):
messages = list()
for issue in Data().getAccountsDeskAnswered():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 7:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: Closed Issue?'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} has been Answered but no update happened in the " \
"last {} days.".format(issue, noupdated) +\
"\nI would appreciate you spent a minute to close it" \
"\n\tor if the exchange continues, please, update its progress in a comment" \
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
def rejected(self):
messages = list()
for issue in Data().getAccountsDeskRejected():
updated = datetime.strptime(issue.fields.updated[:10], '%Y-%m-%d').date()
noupdated = (date.today() - updated).days
if noupdated < 1:
continue
summary = issue.fields.summary
displayName = issue.fields.assignee.displayName.strip()
nickName = self.contactBook.getNickName(displayName)
emailAddress = issue.fields.assignee.emailAddress
url = 'http://{}/browse/{}'.format(JIRA_URL, issue)
subject = 'FIWARE: Accounts Desk: Close the procedure'
message = 'Dear {},'.format(nickName.encode('utf-8')) +\
"\n\nI noticed issue {} has been Rejected.".format(issue) +\
"\nI would appreciate you spent a minute to close the procedure" \
"\n\nIssue Summary: {}".format(summary.encode('utf-8')) +\
"\nYou can access it at {}".format(url) +\
"\n\nIssues in the Accounts Desk are available at\n\thttp://backlog.fiware.org/lab/upgradeAccount" +\
'\n\nThanks in advance for cooperation!!' +\
'\n\nKind Regards,' +\
'\nFernando'
messages.append(dict(issue=issue, summary=summary.encode('utf-8'),
email=emailAddress, nickname=nickName.encode('utf-8'), displayname=displayName,
subject=subject, body=message))
return messages
if __name__ == "__main__":
pass
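A hypothetical driver; the real DesksReminder entry point and mail transport live outside this file:
def preview():
    # Print who would be reminded about what, without sending anything.
    desk = AccountsDesk()
    for msg in desk.open() + desk.inProgress() + desk.scheduled():
        print('{} -> {}'.format(msg['email'], msg['subject']))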
| 49.01087
| 120
| 0.555001
| 920
| 9,018
| 5.416304
| 0.170652
| 0.04415
| 0.040136
| 0.036123
| 0.84106
| 0.839253
| 0.835039
| 0.804937
| 0.798114
| 0.789284
| 0
| 0.006051
| 0.321912
| 9,018
| 183
| 121
| 49.278689
| 0.808831
| 0
| 0
| 0.722973
| 0
| 0.040541
| 0.280217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0.006757
| 0.027027
| 0
| 0.108108
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4040a877bb3e28b9851ff90970e6bf5e768e303c
| 31,211
|
py
|
Python
|
alembic/versions/92235b77ea53_check_new.py
|
go-lab/appcomposer
|
c2468f11b8398edc9b16e1552ac8d609d8347677
|
[
"BSD-2-Clause"
] | 1
|
2018-01-20T14:56:01.000Z
|
2018-01-20T14:56:01.000Z
|
alembic/versions/92235b77ea53_check_new.py
|
go-lab/appcomposer
|
c2468f11b8398edc9b16e1552ac8d609d8347677
|
[
"BSD-2-Clause"
] | 25
|
2015-01-21T09:16:26.000Z
|
2021-12-13T20:01:21.000Z
|
alembic/versions/92235b77ea53_check_new.py
|
go-lab/appcomposer
|
c2468f11b8398edc9b16e1552ac8d609d8347677
|
[
"BSD-2-Clause"
] | 3
|
2015-07-28T18:40:05.000Z
|
2017-03-28T08:14:37.000Z
|
"""Check new
Revision ID: 92235b77ea53
Revises: 381fdb66ec27
Create Date: 2017-10-14 02:38:51.007307
"""
# revision identifiers, used by Alembic.
revision = '92235b77ea53'
down_revision = '381fdb66ec27'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_ActiveTranslationMessages_category', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_datetime', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_fmt', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_from_developer', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_key', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_namespace', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_position', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_same_tool', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_taken_from_default', table_name='ActiveTranslationMessages')
op.drop_index('ix_ActiveTranslationMessages_tool_id', table_name='ActiveTranslationMessages')
op.drop_index('ix_Apps_composer', table_name='Apps')
op.drop_index('ix_Apps_creation_date', table_name='Apps')
op.drop_index('ix_Apps_last_access_date', table_name='Apps')
op.drop_index('ix_Apps_modification_date', table_name='Apps')
op.drop_index('ix_Apps_name', table_name='Apps')
op.drop_index('ix_Apps_owner_id', table_name='Apps')
op.drop_index('ix_Apps_unique_id', table_name='Apps')
op.drop_index('ix_GoLabOAuthUsers_display_name', table_name='GoLabOAuthUsers')
op.drop_index('ix_GoLabOAuthUsers_email', table_name='GoLabOAuthUsers')
op.drop_index('ix_Languages_language', table_name='Languages')
op.drop_index('ix_RepositoryApps_adaptable', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_contents_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_downloaded_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_external_id', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_failing', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_failing_since', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_change', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_check', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_download_change', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_contents_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_downloaded_hash', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_last_processed_time', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_name', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_repository', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_translatable', table_name='RepositoryApps')
op.drop_index('ix_RepositoryApps_url', table_name='RepositoryApps')
op.drop_index('ix_TranslatedApps_url', table_name='TranslatedApps')
op.drop_index('ix_TranslationBundles_from_developer', table_name='TranslationBundles')
op.drop_index('ix_TranslationBundles_language', table_name='TranslationBundles')
op.drop_index('ix_TranslationBundles_target', table_name='TranslationBundles')
op.drop_index('ix_TranslationCurrentActiveUsers_last_check', table_name='TranslationCurrentActiveUsers')
op.drop_index('ix_TranslationExternalSuggestions_engine', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_human_key', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_human_key_hash', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_language', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationExternalSuggestions_origin_language', table_name='TranslationExternalSuggestions')
op.drop_index('ix_TranslationKeySuggestions_key', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationKeySuggestions_language', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationKeySuggestions_target', table_name='TranslationKeySuggestions')
op.drop_index('ix_TranslationMessageHistory_category', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_datetime', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_fmt', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_from_developer', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_key', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_namespace', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_parent_translation_id', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_position', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_same_tool', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_taken_from_default', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationMessageHistory_tool_id', table_name='TranslationMessageHistory')
op.drop_index('ix_TranslationNotificationRecipients_created', table_name='TranslationNotificationRecipients')
op.drop_index('ix_TranslationNotificationRecipients_email', table_name='TranslationNotificationRecipients')
op.drop_index('ix_TranslationSubscriptions_last_check', table_name='TranslationSubscriptions')
op.drop_index('ix_TranslationSubscriptions_mechanism', table_name='TranslationSubscriptions')
op.drop_index('ix_TranslationSyncLogs_end_datetime', table_name='TranslationSyncLogs')
op.drop_index('ix_TranslationSyncLogs_start_datetime', table_name='TranslationSyncLogs')
op.drop_index('ix_TranslationUrls_automatic', table_name='TranslationUrls')
op.drop_index('ix_TranslationUrls_url', table_name='TranslationUrls')
op.drop_index('ix_TranslationValueSuggestions_human_key', table_name='TranslationValueSuggestions')
op.drop_index('ix_TranslationValueSuggestions_language', table_name='TranslationValueSuggestions')
op.drop_index('ix_TranslationValueSuggestions_target', table_name='TranslationValueSuggestions')
op.drop_index('ix_Users_creation_date', table_name='Users')
op.drop_index('ix_Users_last_access_date', table_name='Users')
op.create_index(op.f('ix_ActiveTranslationMessages_category'), 'ActiveTranslationMessages', ['category'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_datetime'), 'ActiveTranslationMessages', ['datetime'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_fmt'), 'ActiveTranslationMessages', ['fmt'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_from_developer'), 'ActiveTranslationMessages', ['from_developer'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_key'), 'ActiveTranslationMessages', ['key'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_namespace'), 'ActiveTranslationMessages', ['namespace'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_position'), 'ActiveTranslationMessages', ['position'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_same_tool'), 'ActiveTranslationMessages', ['same_tool'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_taken_from_default'), 'ActiveTranslationMessages', ['taken_from_default'], unique=False)
op.create_index(op.f('ix_ActiveTranslationMessages_tool_id'), 'ActiveTranslationMessages', ['tool_id'], unique=False)
op.create_index(op.f('ix_Apps_composer'), 'Apps', ['composer'], unique=False)
op.create_index(op.f('ix_Apps_creation_date'), 'Apps', ['creation_date'], unique=False)
op.create_index(op.f('ix_Apps_last_access_date'), 'Apps', ['last_access_date'], unique=False)
op.create_index(op.f('ix_Apps_modification_date'), 'Apps', ['modification_date'], unique=False)
op.create_index(op.f('ix_Apps_name'), 'Apps', ['name'], unique=False)
op.create_index(op.f('ix_Apps_owner_id'), 'Apps', ['owner_id'], unique=False)
op.create_index(op.f('ix_Apps_unique_id'), 'Apps', ['unique_id'], unique=True)
op.create_index(op.f('ix_GoLabOAuthUsers_display_name'), 'GoLabOAuthUsers', ['display_name'], unique=False)
op.create_index(op.f('ix_GoLabOAuthUsers_email'), 'GoLabOAuthUsers', ['email'], unique=True)
op.create_index(op.f('ix_Languages_language'), 'Languages', ['language'], unique=True)
op.create_index(op.f('ix_RepositoryApps_adaptable'), 'RepositoryApps', ['adaptable'], unique=False)
op.create_index(op.f('ix_RepositoryApps_contents_hash'), 'RepositoryApps', ['contents_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_downloaded_hash'), 'RepositoryApps', ['downloaded_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_external_id'), 'RepositoryApps', ['external_id'], unique=False)
op.create_index(op.f('ix_RepositoryApps_failing_since'), 'RepositoryApps', ['failing_since'], unique=False)
op.create_index(op.f('ix_RepositoryApps_failing'), 'RepositoryApps', ['failing'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_change'), 'RepositoryApps', ['last_change'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_check'), 'RepositoryApps', ['last_check'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_download_change'), 'RepositoryApps', ['last_download_change'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_contents_hash'), 'RepositoryApps', ['last_processed_contents_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_downloaded_hash'), 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)
op.create_index(op.f('ix_RepositoryApps_last_processed_time'), 'RepositoryApps', ['last_processed_time'], unique=False)
op.create_index(op.f('ix_RepositoryApps_name'), 'RepositoryApps', ['name'], unique=False)
op.create_index(op.f('ix_RepositoryApps_repository'), 'RepositoryApps', ['repository'], unique=False)
op.create_index(op.f('ix_RepositoryApps_translatable'), 'RepositoryApps', ['translatable'], unique=False)
op.create_index(op.f('ix_RepositoryApps_url'), 'RepositoryApps', ['url'], unique=False)
op.create_index(op.f('ix_TranslatedApps_url'), 'TranslatedApps', ['url'], unique=True)
op.create_index(op.f('ix_TranslationBundles_from_developer'), 'TranslationBundles', ['from_developer'], unique=False)
op.create_index(op.f('ix_TranslationBundles_language'), 'TranslationBundles', ['language'], unique=False)
op.create_index(op.f('ix_TranslationBundles_target'), 'TranslationBundles', ['target'], unique=False)
op.create_index(op.f('ix_TranslationCurrentActiveUsers_last_check'), 'TranslationCurrentActiveUsers', ['last_check'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_engine'), 'TranslationExternalSuggestions', ['engine'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_human_key_hash'), 'TranslationExternalSuggestions', ['human_key_hash'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_human_key'), 'TranslationExternalSuggestions', ['human_key'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_language'), 'TranslationExternalSuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationExternalSuggestions_origin_language'), 'TranslationExternalSuggestions', ['origin_language'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_key'), 'TranslationKeySuggestions', ['key'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_language'), 'TranslationKeySuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationKeySuggestions_target'), 'TranslationKeySuggestions', ['target'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_category'), 'TranslationMessageHistory', ['category'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_datetime'), 'TranslationMessageHistory', ['datetime'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_fmt'), 'TranslationMessageHistory', ['fmt'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_from_developer'), 'TranslationMessageHistory', ['from_developer'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_key'), 'TranslationMessageHistory', ['key'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_namespace'), 'TranslationMessageHistory', ['namespace'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_parent_translation_id'), 'TranslationMessageHistory', ['parent_translation_id'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_position'), 'TranslationMessageHistory', ['position'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_same_tool'), 'TranslationMessageHistory', ['same_tool'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_taken_from_default'), 'TranslationMessageHistory', ['taken_from_default'], unique=False)
op.create_index(op.f('ix_TranslationMessageHistory_tool_id'), 'TranslationMessageHistory', ['tool_id'], unique=False)
op.create_index(op.f('ix_TranslationNotificationRecipients_created'), 'TranslationNotificationRecipients', ['created'], unique=False)
op.create_index(op.f('ix_TranslationNotificationRecipients_email'), 'TranslationNotificationRecipients', ['email'], unique=True)
op.create_index(op.f('ix_TranslationSubscriptions_last_check'), 'TranslationSubscriptions', ['last_check'], unique=False)
op.create_index(op.f('ix_TranslationSubscriptions_mechanism'), 'TranslationSubscriptions', ['mechanism'], unique=False)
op.create_index(op.f('ix_TranslationSyncLogs_end_datetime'), 'TranslationSyncLogs', ['end_datetime'], unique=False)
op.create_index(op.f('ix_TranslationSyncLogs_start_datetime'), 'TranslationSyncLogs', ['start_datetime'], unique=False)
op.create_index(op.f('ix_TranslationUrls_automatic'), 'TranslationUrls', ['automatic'], unique=False)
op.create_index(op.f('ix_TranslationUrls_url'), 'TranslationUrls', ['url'], unique=True)
op.create_index(op.f('ix_TranslationValueSuggestions_human_key'), 'TranslationValueSuggestions', ['human_key'], unique=False)
op.create_index(op.f('ix_TranslationValueSuggestions_language'), 'TranslationValueSuggestions', ['language'], unique=False)
op.create_index(op.f('ix_TranslationValueSuggestions_target'), 'TranslationValueSuggestions', ['target'], unique=False)
op.create_index(op.f('ix_Users_creation_date'), 'Users', ['creation_date'], unique=False)
op.create_index(op.f('ix_Users_last_access_date'), 'Users', ['last_access_date'], unique=False)
# op.create_unique_constraint(None, 'ActiveTranslationMessages', ['bundle_id', 'key'])
# op.create_unique_constraint(None, 'RepositoryApp2languages', ['repository_app_id', 'language_id'])
# op.create_unique_constraint(None, 'TranslationBundles', ['translation_url_id', 'language', 'target'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_Users_last_access_date'), table_name='Users')
op.drop_index(op.f('ix_Users_creation_date'), table_name='Users')
op.drop_index(op.f('ix_TranslationValueSuggestions_target'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationValueSuggestions_language'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationValueSuggestions_human_key'), table_name='TranslationValueSuggestions')
op.drop_index(op.f('ix_TranslationUrls_url'), table_name='TranslationUrls')
op.drop_index(op.f('ix_TranslationUrls_automatic'), table_name='TranslationUrls')
op.drop_index(op.f('ix_TranslationSyncLogs_start_datetime'), table_name='TranslationSyncLogs')
op.drop_index(op.f('ix_TranslationSyncLogs_end_datetime'), table_name='TranslationSyncLogs')
op.drop_index(op.f('ix_TranslationSubscriptions_mechanism'), table_name='TranslationSubscriptions')
op.drop_index(op.f('ix_TranslationSubscriptions_last_check'), table_name='TranslationSubscriptions')
op.drop_index(op.f('ix_TranslationNotificationRecipients_email'), table_name='TranslationNotificationRecipients')
op.drop_index(op.f('ix_TranslationNotificationRecipients_created'), table_name='TranslationNotificationRecipients')
op.drop_index(op.f('ix_TranslationMessageHistory_tool_id'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_taken_from_default'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_same_tool'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_position'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_parent_translation_id'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_namespace'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_key'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_from_developer'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_fmt'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_datetime'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationMessageHistory_category'), table_name='TranslationMessageHistory')
op.drop_index(op.f('ix_TranslationKeySuggestions_target'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationKeySuggestions_language'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationKeySuggestions_key'), table_name='TranslationKeySuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_origin_language'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_language'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_human_key'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_human_key_hash'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationExternalSuggestions_engine'), table_name='TranslationExternalSuggestions')
op.drop_index(op.f('ix_TranslationBundles_target'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationBundles_language'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationBundles_from_developer'), table_name='TranslationBundles')
op.drop_index(op.f('ix_TranslationCurrentActiveUsers_last_check'), table_name='TranslationCurrentActiveUsers')
# op.drop_constraint(None, 'TranslationBundles', type_='unique')
op.drop_index(op.f('ix_RepositoryApps_url'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_translatable'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_repository'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_name'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_time'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_downloaded_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_processed_contents_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_download_change'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_check'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_last_change'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_failing'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_failing_since'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_external_id'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_downloaded_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_contents_hash'), table_name='RepositoryApps')
op.drop_index(op.f('ix_RepositoryApps_adaptable'), table_name='RepositoryApps')
# op.drop_constraint(None, 'RepositoryApp2languages', type_='unique')
op.drop_index(op.f('ix_TranslatedApps_url'), table_name='TranslatedApps')
op.drop_index(op.f('ix_Languages_language'), table_name='Languages')
op.drop_index(op.f('ix_GoLabOAuthUsers_email'), table_name='GoLabOAuthUsers')
op.drop_index(op.f('ix_GoLabOAuthUsers_display_name'), table_name='GoLabOAuthUsers')
op.drop_index(op.f('ix_Apps_unique_id'), table_name='Apps')
op.drop_index(op.f('ix_Apps_owner_id'), table_name='Apps')
op.drop_index(op.f('ix_Apps_name'), table_name='Apps')
op.drop_index(op.f('ix_Apps_modification_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_last_access_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_creation_date'), table_name='Apps')
op.drop_index(op.f('ix_Apps_composer'), table_name='Apps')
# op.drop_constraint(None, 'ActiveTranslationMessages', type_='unique')
op.drop_index(op.f('ix_ActiveTranslationMessages_tool_id'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_taken_from_default'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_same_tool'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_position'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_namespace'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_key'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_from_developer'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_fmt'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_datetime'), table_name='ActiveTranslationMessages')
op.drop_index(op.f('ix_ActiveTranslationMessages_category'), table_name='ActiveTranslationMessages')
op.create_index('ix_Users_last_access_date', 'Users', ['last_access_date'], unique=False)
op.create_index('ix_Users_creation_date', 'Users', ['creation_date'], unique=False)
op.create_index('ix_TranslationValueSuggestions_target', 'TranslationValueSuggestions', ['target'], unique=False)
op.create_index('ix_TranslationValueSuggestions_language', 'TranslationValueSuggestions', ['language'], unique=False)
op.create_index('ix_TranslationValueSuggestions_human_key', 'TranslationValueSuggestions', ['human_key'], unique=False)
op.create_index('ix_TranslationUrls_url', 'TranslationUrls', ['url'], unique=True)
op.create_index('ix_TranslationUrls_automatic', 'TranslationUrls', ['automatic'], unique=False)
op.create_index('ix_TranslationSyncLogs_start_datetime', 'TranslationSyncLogs', ['start_datetime'], unique=False)
op.create_index('ix_TranslationSyncLogs_end_datetime', 'TranslationSyncLogs', ['end_datetime'], unique=False)
op.create_index('ix_TranslationSubscriptions_mechanism', 'TranslationSubscriptions', ['mechanism'], unique=False)
op.create_index('ix_TranslationSubscriptions_last_check', 'TranslationSubscriptions', ['last_check'], unique=False)
op.create_index('ix_TranslationNotificationRecipients_email', 'TranslationNotificationRecipients', ['email'], unique=True)
op.create_index('ix_TranslationNotificationRecipients_created', 'TranslationNotificationRecipients', ['created'], unique=False)
op.create_index('ix_TranslationMessageHistory_tool_id', 'TranslationMessageHistory', ['tool_id'], unique=False)
op.create_index('ix_TranslationMessageHistory_taken_from_default', 'TranslationMessageHistory', ['taken_from_default'], unique=False)
op.create_index('ix_TranslationMessageHistory_same_tool', 'TranslationMessageHistory', ['same_tool'], unique=False)
op.create_index('ix_TranslationMessageHistory_position', 'TranslationMessageHistory', ['position'], unique=False)
op.create_index('ix_TranslationMessageHistory_parent_translation_id', 'TranslationMessageHistory', ['parent_translation_id'], unique=False)
op.create_index('ix_TranslationMessageHistory_namespace', 'TranslationMessageHistory', ['namespace'], unique=False)
op.create_index('ix_TranslationMessageHistory_key', 'TranslationMessageHistory', ['key'], unique=False)
op.create_index('ix_TranslationMessageHistory_from_developer', 'TranslationMessageHistory', ['from_developer'], unique=False)
op.create_index('ix_TranslationMessageHistory_fmt', 'TranslationMessageHistory', ['fmt'], unique=False)
op.create_index('ix_TranslationMessageHistory_datetime', 'TranslationMessageHistory', ['datetime'], unique=False)
op.create_index('ix_TranslationMessageHistory_category', 'TranslationMessageHistory', ['category'], unique=False)
op.create_index('ix_TranslationKeySuggestions_target', 'TranslationKeySuggestions', ['target'], unique=False)
op.create_index('ix_TranslationKeySuggestions_language', 'TranslationKeySuggestions', ['language'], unique=False)
op.create_index('ix_TranslationKeySuggestions_key', 'TranslationKeySuggestions', ['key'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_origin_language', 'TranslationExternalSuggestions', ['origin_language'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_language', 'TranslationExternalSuggestions', ['language'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_human_key_hash', 'TranslationExternalSuggestions', ['human_key_hash'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_human_key', 'TranslationExternalSuggestions', ['human_key'], unique=False)
op.create_index('ix_TranslationExternalSuggestions_engine', 'TranslationExternalSuggestions', ['engine'], unique=False)
op.create_index('ix_TranslationCurrentActiveUsers_last_check', 'TranslationCurrentActiveUsers', ['last_check'], unique=False)
op.create_index('ix_TranslationBundles_target', 'TranslationBundles', ['target'], unique=False)
op.create_index('ix_TranslationBundles_language', 'TranslationBundles', ['language'], unique=False)
op.create_index('ix_TranslationBundles_from_developer', 'TranslationBundles', ['from_developer'], unique=False)
op.create_index('ix_TranslatedApps_url', 'TranslatedApps', ['url'], unique=True)
op.create_index('ix_RepositoryApps_url', 'RepositoryApps', ['url'], unique=False)
op.create_index('ix_RepositoryApps_translatable', 'RepositoryApps', ['translatable'], unique=False)
op.create_index('ix_RepositoryApps_repository', 'RepositoryApps', ['repository'], unique=False)
op.create_index('ix_RepositoryApps_name', 'RepositoryApps', ['name'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_time', 'RepositoryApps', ['last_processed_time'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_downloaded_hash', 'RepositoryApps', ['last_processed_downloaded_hash'], unique=False)
op.create_index('ix_RepositoryApps_last_processed_contents_hash', 'RepositoryApps', ['last_processed_contents_hash'], unique=False)
op.create_index('ix_RepositoryApps_last_download_change', 'RepositoryApps', ['last_download_change'], unique=False)
op.create_index('ix_RepositoryApps_last_check', 'RepositoryApps', ['last_check'], unique=False)
op.create_index('ix_RepositoryApps_last_change', 'RepositoryApps', ['last_change'], unique=False)
op.create_index('ix_RepositoryApps_failing_since', 'RepositoryApps', ['failing_since'], unique=False)
op.create_index('ix_RepositoryApps_failing', 'RepositoryApps', ['failing'], unique=False)
op.create_index('ix_RepositoryApps_external_id', 'RepositoryApps', ['external_id'], unique=False)
op.create_index('ix_RepositoryApps_downloaded_hash', 'RepositoryApps', ['downloaded_hash'], unique=False)
op.create_index('ix_RepositoryApps_contents_hash', 'RepositoryApps', ['contents_hash'], unique=False)
op.create_index('ix_RepositoryApps_adaptable', 'RepositoryApps', ['adaptable'], unique=False)
op.create_index('ix_Languages_language', 'Languages', ['language'], unique=True)
op.create_index('ix_GoLabOAuthUsers_email', 'GoLabOAuthUsers', ['email'], unique=True)
op.create_index('ix_GoLabOAuthUsers_display_name', 'GoLabOAuthUsers', ['display_name'], unique=False)
op.create_index('ix_Apps_unique_id', 'Apps', ['unique_id'], unique=True)
op.create_index('ix_Apps_owner_id', 'Apps', ['owner_id'], unique=False)
op.create_index('ix_Apps_name', 'Apps', ['name'], unique=False)
op.create_index('ix_Apps_modification_date', 'Apps', ['modification_date'], unique=False)
op.create_index('ix_Apps_last_access_date', 'Apps', ['last_access_date'], unique=False)
op.create_index('ix_Apps_creation_date', 'Apps', ['creation_date'], unique=False)
op.create_index('ix_Apps_composer', 'Apps', ['composer'], unique=False)
op.create_index('ix_ActiveTranslationMessages_tool_id', 'ActiveTranslationMessages', ['tool_id'], unique=False)
op.create_index('ix_ActiveTranslationMessages_taken_from_default', 'ActiveTranslationMessages', ['taken_from_default'], unique=False)
op.create_index('ix_ActiveTranslationMessages_same_tool', 'ActiveTranslationMessages', ['same_tool'], unique=False)
op.create_index('ix_ActiveTranslationMessages_position', 'ActiveTranslationMessages', ['position'], unique=False)
op.create_index('ix_ActiveTranslationMessages_namespace', 'ActiveTranslationMessages', ['namespace'], unique=False)
op.create_index('ix_ActiveTranslationMessages_key', 'ActiveTranslationMessages', ['key'], unique=False)
op.create_index('ix_ActiveTranslationMessages_from_developer', 'ActiveTranslationMessages', ['from_developer'], unique=False)
op.create_index('ix_ActiveTranslationMessages_fmt', 'ActiveTranslationMessages', ['fmt'], unique=False)
op.create_index('ix_ActiveTranslationMessages_datetime', 'ActiveTranslationMessages', ['datetime'], unique=False)
op.create_index('ix_ActiveTranslationMessages_category', 'ActiveTranslationMessages', ['category'], unique=False)
# ### end Alembic commands ###
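Both functions above follow one uniform pattern: drop each index created under the old naming scheme, then recreate it with its name wrapped in op.f(), which marks the name as already conventionalized so Alembic's naming convention is not applied to it a second time. A hedged sketch of a helper capturing that pattern (the helper is illustrative only and is not part of this migration):

from alembic import op

def _reindex_with_convention(table, columns, unique=()):
    # Recreate per-column indexes under the ix_<Table>_<column> convention.
    for col in columns:
        name = 'ix_{}_{}'.format(table, col)
        op.drop_index(name, table_name=table)
        op.create_index(op.f(name), table, [col], unique=col in unique)

# For example, the TranslationUrls block above is equivalent to:
# _reindex_with_convention('TranslationUrls', ['automatic', 'url'], unique={'url'})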
| 90.205202
| 149
| 0.79834
| 3,464
| 31,211
| 6.809469
| 0.032333
| 0.037901
| 0.068085
| 0.061896
| 0.979142
| 0.975454
| 0.975114
| 0.965152
| 0.952815
| 0.795447
| 0
| 0.001862
| 0.070808
| 31,211
| 345
| 150
| 90.466667
| 0.811489
| 0.0256
| 0
| 0
| 0
| 0
| 0.557345
| 0.447364
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006711
| false
| 0
| 0.006711
| 0
| 0.013423
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
40686bfbfab402b52cf133e6f6f5366a147289d1
| 14,107
|
py
|
Python
|
appengine/findit/handlers/test/completed_build_pubsub_ingestor_test.py
|
xswz8015/infra
|
f956b78ce4c39cc76acdda47601b86794ae0c1ba
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/findit/handlers/test/completed_build_pubsub_ingestor_test.py
|
xswz8015/infra
|
f956b78ce4c39cc76acdda47601b86794ae0c1ba
|
[
"BSD-3-Clause"
] | 4
|
2022-03-17T18:58:21.000Z
|
2022-03-17T18:58:22.000Z
|
appengine/findit/handlers/test/completed_build_pubsub_ingestor_test.py
|
xswz8015/infra
|
f956b78ce4c39cc76acdda47601b86794ae0c1ba
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import mock
import webapp2
from google.appengine.api import taskqueue
from go.chromium.org.luci.buildbucket.proto.build_pb2 import Build
from testing_utils.testing import AppengineTestCase
from common.findit_http_client import FinditHttpClient
from common.waterfall import buildbucket_client
from handlers import completed_build_pubsub_ingestor
from model.isolated_target import IsolatedTarget
class CompletedBuildPubsubIngestorTest(AppengineTestCase):
app_module = webapp2.WSGIApplication([
('/index-isolated-builds',
completed_build_pubsub_ingestor.CompletedBuildPubsubIngestor),
],
debug=True)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(FinditHttpClient, 'Post')
def testSuccessfulPushCIBuild(self, mock_post, mock_get_build, *_):
mock_build = Build()
mock_build.id = 8945610992972640896
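# Note: 12 is SUCCESS in the buildbucket v2 Build.Status enum imported above.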
mock_build.status = 12
mock_build.input.properties['builder_group'] = 'chromium.linux'
mock_build.output.properties['buildername'] = 'Linux Builder'
mock_build.output.properties.get_or_create_struct(
'swarm_hashes_ref/heads/mockmaster(at){#123}'
)['mock_target'] = 'mock_hash'
gitiles_commit = mock_build.input.gitiles_commit
gitiles_commit.host = 'gitiles.host'
gitiles_commit.project = 'gitiles/project'
gitiles_commit.ref = 'refs/heads/mockmaster'
mock_build.builder.project = 'mock_luci_project'
mock_build.builder.bucket = 'mock_bucket'
mock_build.builder.builder = 'Linux Builder'
mock_headers = {'X-Prpc-Grpc-Code': '0'}
binary_data = mock_build.SerializeToString()
mock_post.return_value = (200, binary_data, mock_headers)
mock_get_build.return_value = mock_build
request_body = json.dumps({
'message': {
'attributes': {
'build_id': str(mock_build.id),
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertEqual(200, response.status_int)
self.assertEqual(
123,
IsolatedTarget.get_by_id(
'8945610992972640896/mock_target').commit_position)
self.assertEqual(
8945610992972640896,
IsolatedTarget.get_by_id('8945610992972640896/mock_target').build_id)
self.assertEqual(1, len(json.loads(response.body)['created_rows']))
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(FinditHttpClient, 'Post')
def testPushNoBuild(self, mock_post, *_):
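# gRPC code '5' is NOT_FOUND: the build lookup fails, but the handler is still
# expected to return 200 so the Pub/Sub push is acked rather than retried.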
mock_headers = {'X-Prpc-Grpc-Code': '5'}
mock_post.return_value = (404, 'Build not found', mock_headers)
request_body = json.dumps({
'message': {
'attributes': {
'build_id': '123456',
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'result': 'SUCCESS',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body, status=200)
self.assertEqual(200, response.status_int)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(FinditHttpClient, 'Post')
def testPushPendingBuild(self, mock_post, *_):
request_body = json.dumps({
'message': {
'attributes': {
'build_id': '123456',
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'PENDING',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertFalse(mock_post.called)
self.assertEqual(200, response.status_int)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(FinditHttpClient, 'Post')
def testSuccessfulPushBadFormat(self, mock_post, *_):
request_body = json.dumps({
'message': {},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertFalse(mock_post.called)
self.assertEqual(200, response.status_int)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(FinditHttpClient, 'Post')
def testNonIsolateBuild(self, mock_post, mock_get_build, *_):
# This build does not isolate any targets.
mock_build = Build()
mock_build.id = 8945610992972640896
mock_build.status = 12
mock_build.input.properties['builder_group'] = 'chromium.linux'
mock_build.output.properties['buildername'] = 'Linux Tester'
gitiles_commit = mock_build.input.gitiles_commit
gitiles_commit.host = 'gitiles.host'
gitiles_commit.project = 'gitiles/project'
gitiles_commit.ref = 'refs/heads/mockmaster'
mock_build.builder.project = 'mock_luci_project'
mock_build.builder.bucket = 'mock_bucket'
mock_build.builder.builder = 'Linux Tester'
mock_headers = {'X-Prpc-Grpc-Code': '0'}
binary_data = mock_build.SerializeToString()
mock_post.return_value = (200, binary_data, mock_headers)
mock_get_build.return_value = mock_build
request_body = json.dumps({
'message': {
'attributes': {
'build_id': str(mock_build.id),
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertEqual(200, response.status_int)
self.assertNotIn('created_rows', response.body)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(FinditHttpClient, 'Post')
def testNoMasternameBuild(self, mock_post, mock_get_build, *_):
mock_build = Build()
mock_build.id = 8945610992972640896
mock_build.status = 12
mock_build.output.properties['buildername'] = 'Linux Builder'
mock_build.output.properties.get_or_create_struct(
'swarm_hashes_ref/heads/mockmaster(at){#123}'
)['mock_target'] = 'mock_hash'
gitiles_commit = mock_build.input.gitiles_commit
gitiles_commit.host = 'gitiles.host'
gitiles_commit.project = 'gitiles/project'
gitiles_commit.ref = 'refs/heads/mockmaster'
mock_build.builder.project = 'mock_luci_project'
mock_build.builder.bucket = 'mock_bucket'
mock_build.builder.builder = 'Linux Builder'
mock_headers = {'X-Prpc-Grpc-Code': '0'}
binary_data = mock_build.SerializeToString()
mock_post.return_value = (200, binary_data, mock_headers)
mock_get_build.return_value = mock_build
request_body = json.dumps({
'message': {
'attributes': {
'build_id': str(mock_build.id),
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertEqual(200, response.status_int)
self.assertNotIn('created_rows', response.body)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(FinditHttpClient, 'Post')
def testSuccessfulPushTryJob(self, mock_post, mock_get_build, *_):
mock_build = Build()
mock_build.id = 8945610992972640896
mock_build.status = 12
mock_build.input.properties['builder_group'] = 'luci.chromium.findit'
mock_build.input.properties['target_builder_group'] = 'chromium.linux'
mock_build.output.properties['buildername'] = 'findit_variable'
mock_build.output.properties['target_buildername'] = (
'linux_chromium_compile_dbg_ng')
mock_build.output.properties.get_or_create_struct(
'swarm_hashes_ref/heads/mockmaster(at){#123}_with_patch'
)['mock_target'] = 'mock_hash'
mock_build.output.properties.get_or_create_struct(
'swarm_hashes_ref/heads/mockmaster(at){#123}_without_patch'
)['mock_target'] = 'mock_hash_without'
mock_build.output.properties['repository'] = (
'https://test.googlesource.com/team/project.git')
mock_build.output.properties['gitiles_ref'] = 'refs/heads/mockmaster'
mock_change = mock_build.input.gerrit_changes.add()
mock_change.host = 'mock.gerrit.host'
mock_change.change = 12345
mock_change.patchset = 1
mock_build.builder.project = 'mock_luci_project'
mock_build.builder.bucket = 'mock_bucket'
mock_build.builder.builder = 'findit_variable'
mock_headers = {'X-Prpc-Grpc-Code': '0'}
binary_data = mock_build.SerializeToString()
mock_post.return_value = (200, binary_data, mock_headers)
mock_get_build.return_value = mock_build
request_body = json.dumps({
'message': {
'attributes': {
'build_id': str(mock_build.id),
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertEqual(200, response.status_int)
self.assertEqual(
123,
IsolatedTarget.get_by_id(
'8945610992972640896/mock_target').commit_position)
self.assertEqual(2, len(json.loads(response.body)['created_rows']))
# Ensure target values were used.
entry = IsolatedTarget.get_by_id('8945610992972640896/mock_target')
self.assertEqual('chromium.linux', entry.master_name)
self.assertEqual('linux_chromium_compile_dbg_ng', entry.builder_name)
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleFailuresInBuild')
@mock.patch.object(completed_build_pubsub_ingestor,
'_HandlePossibleCodeCoverageBuild')
@mock.patch.object(FinditHttpClient, 'Post')
def testPushIgnoreV2Push(self, mock_post, *_):
request_body = json.dumps({
'message': {
'attributes': {
'build_id': '123456',
'version': 'v2',
},
'data':
base64.b64encode(
json.dumps({
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.ci',
'status': 'COMPLETED',
'parameters_json': '{"builder_name": "builder"}',
}
})),
},
})
response = self.test_app.post(
'/index-isolated-builds?format=json', params=request_body)
self.assertFalse(mock_post.called)
self.assertEqual(200, response.status_int)
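Every test above hand-rolls the same push envelope: a JSON body whose message.data field is base64-encoded JSON carrying a 'build' dict, plus optional 'attributes'. As a reading aid, here is a minimal sketch of the inverse operation a handler would perform; the function name is an assumption, not Findit's actual code.

import base64
import json

def decode_push_payload(request_body):
    # Unpack a Pub/Sub push envelope of the shape constructed in the tests above.
    message = json.loads(request_body)['message']
    attributes = message.get('attributes', {})
    build = json.loads(base64.b64decode(message['data']))['build']
    return attributes, build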
| 40.654179
| 78
| 0.607712
| 1,352
| 14,107
| 6.075444
| 0.140533
| 0.060263
| 0.051132
| 0.061359
| 0.822255
| 0.807402
| 0.801558
| 0.780862
| 0.776114
| 0.768688
| 0
| 0.030753
| 0.278514
| 14,107
| 346
| 79
| 40.771676
| 0.776282
| 0.016162
| 0
| 0.757009
| 0
| 0
| 0.222086
| 0.090319
| 0
| 0
| 0
| 0
| 0.062305
| 1
| 0.024922
| false
| 0
| 0.034268
| 0
| 0.065421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
407e66ad31400c201f52210276cc27484a563068
| 22,314
|
py
|
Python
|
google/ads/google_ads/v5/__init__.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | 1
|
2021-04-09T04:28:47.000Z
|
2021-04-09T04:28:47.000Z
|
google/ads/google_ads/v5/__init__.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v5/__init__.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import sys
from google.ads.google_ads import util
if sys.version_info < (3, 6):
raise ImportError("This module requires Python 3.6 or later.")
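A name-to-package map like _lazy_name_to_package_map below is typically consumed by a module-level __getattr__ (PEP 562, Python 3.7+) that imports the target package's submodule on first attribute access. The following is a hedged sketch of that mechanism, not necessarily this file's actual implementation:

def __getattr__(name):  # PEP 562; the body runs lazily, so the map defined below is fine
    package = _lazy_name_to_package_map.get(name)
    if package is None:
        raise AttributeError(
            'module {!r} has no attribute {!r}'.format(__name__, name))
    return importlib.import_module('.' + name, package=package)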
_lazy_name_to_package_map = {
"account_budget_proposal_service_client": "google.ads.google_ads.v5.services",
"account_budget_service_client": "google.ads.google_ads.v5.services",
"account_link_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_asset_view_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_service_client": "google.ads.google_ads.v5.services",
"ad_group_audience_view_service_client": "google.ads.google_ads.v5.services",
"ad_group_bid_modifier_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_simulation_service_client": "google.ads.google_ads.v5.services",
"ad_group_extension_setting_service_client": "google.ads.google_ads.v5.services",
"ad_group_feed_service_client": "google.ads.google_ads.v5.services",
"ad_group_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_service_client": "google.ads.google_ads.v5.services",
"ad_group_simulation_service_client": "google.ads.google_ads.v5.services",
"ad_parameter_service_client": "google.ads.google_ads.v5.services",
"ad_schedule_view_service_client": "google.ads.google_ads.v5.services",
"ad_service_client": "google.ads.google_ads.v5.services",
"age_range_view_service_client": "google.ads.google_ads.v5.services",
"asset_service_client": "google.ads.google_ads.v5.services",
"batch_job_service_client": "google.ads.google_ads.v5.services",
"bidding_strategy_service_client": "google.ads.google_ads.v5.services",
"billing_setup_service_client": "google.ads.google_ads.v5.services",
"campaign_asset_service_client": "google.ads.google_ads.v5.services",
"campaign_audience_view_service_client": "google.ads.google_ads.v5.services",
"campaign_bid_modifier_service_client": "google.ads.google_ads.v5.services",
"campaign_budget_service_client": "google.ads.google_ads.v5.services",
"campaign_criterion_service_client": "google.ads.google_ads.v5.services",
"campaign_criterion_simulation_service_client": "google.ads.google_ads.v5.services",
"campaign_draft_service_client": "google.ads.google_ads.v5.services",
"campaign_experiment_service_client": "google.ads.google_ads.v5.services",
"campaign_extension_setting_service_client": "google.ads.google_ads.v5.services",
"campaign_feed_service_client": "google.ads.google_ads.v5.services",
"campaign_label_service_client": "google.ads.google_ads.v5.services",
"campaign_service_client": "google.ads.google_ads.v5.services",
"campaign_shared_set_service_client": "google.ads.google_ads.v5.services",
"carrier_constant_service_client": "google.ads.google_ads.v5.services",
"change_status_service_client": "google.ads.google_ads.v5.services",
"click_view_service_client": "google.ads.google_ads.v5.services",
"conversion_action_service_client": "google.ads.google_ads.v5.services",
"conversion_adjustment_upload_service_client": "google.ads.google_ads.v5.services",
"conversion_upload_service_client": "google.ads.google_ads.v5.services",
"currency_constant_service_client": "google.ads.google_ads.v5.services",
"custom_interest_service_client": "google.ads.google_ads.v5.services",
"customer_client_link_service_client": "google.ads.google_ads.v5.services",
"customer_client_service_client": "google.ads.google_ads.v5.services",
"customer_extension_setting_service_client": "google.ads.google_ads.v5.services",
"customer_feed_service_client": "google.ads.google_ads.v5.services",
"customer_label_service_client": "google.ads.google_ads.v5.services",
"customer_manager_link_service_client": "google.ads.google_ads.v5.services",
"customer_negative_criterion_service_client": "google.ads.google_ads.v5.services",
"customer_service_client": "google.ads.google_ads.v5.services",
"detail_placement_view_service_client": "google.ads.google_ads.v5.services",
"display_keyword_view_service_client": "google.ads.google_ads.v5.services",
"distance_view_service_client": "google.ads.google_ads.v5.services",
"domain_category_service_client": "google.ads.google_ads.v5.services",
"dynamic_search_ads_search_term_view_service_client": "google.ads.google_ads.v5.services",
"expanded_landing_page_view_service_client": "google.ads.google_ads.v5.services",
"extension_feed_item_service_client": "google.ads.google_ads.v5.services",
"feed_item_service_client": "google.ads.google_ads.v5.services",
"feed_item_target_service_client": "google.ads.google_ads.v5.services",
"feed_mapping_service_client": "google.ads.google_ads.v5.services",
"feed_placeholder_view_service_client": "google.ads.google_ads.v5.services",
"feed_service_client": "google.ads.google_ads.v5.services",
"gender_view_service_client": "google.ads.google_ads.v5.services",
"geo_target_constant_service_client": "google.ads.google_ads.v5.services",
"geographic_view_service_client": "google.ads.google_ads.v5.services",
"google_ads_field_service_client": "google.ads.google_ads.v5.services",
"google_ads_service_client": "google.ads.google_ads.v5.services",
"group_placement_view_service_client": "google.ads.google_ads.v5.services",
"hotel_group_view_service_client": "google.ads.google_ads.v5.services",
"hotel_performance_view_service_client": "google.ads.google_ads.v5.services",
"income_range_view_service_client": "google.ads.google_ads.v5.services",
"invoice_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_ad_group_keyword_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_ad_group_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_campaign_keyword_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_campaign_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_idea_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_service_client": "google.ads.google_ads.v5.services",
"keyword_view_service_client": "google.ads.google_ads.v5.services",
"label_service_client": "google.ads.google_ads.v5.services",
"landing_page_view_service_client": "google.ads.google_ads.v5.services",
"language_constant_service_client": "google.ads.google_ads.v5.services",
"location_view_service_client": "google.ads.google_ads.v5.services",
"managed_placement_view_service_client": "google.ads.google_ads.v5.services",
"media_file_service_client": "google.ads.google_ads.v5.services",
"merchant_center_link_service_client": "google.ads.google_ads.v5.services",
"mobile_app_category_constant_service_client": "google.ads.google_ads.v5.services",
"mobile_device_constant_service_client": "google.ads.google_ads.v5.services",
"offline_user_data_job_service_client": "google.ads.google_ads.v5.services",
"operating_system_version_constant_service_client": "google.ads.google_ads.v5.services",
"paid_organic_search_term_view_service_client": "google.ads.google_ads.v5.services",
"parental_status_view_service_client": "google.ads.google_ads.v5.services",
"payments_account_service_client": "google.ads.google_ads.v5.services",
"product_bidding_category_constant_service_client": "google.ads.google_ads.v5.services",
"product_group_view_service_client": "google.ads.google_ads.v5.services",
"reach_plan_service_client": "google.ads.google_ads.v5.services",
"recommendation_service_client": "google.ads.google_ads.v5.services",
"remarketing_action_service_client": "google.ads.google_ads.v5.services",
"search_term_view_service_client": "google.ads.google_ads.v5.services",
"shared_criterion_service_client": "google.ads.google_ads.v5.services",
"shared_set_service_client": "google.ads.google_ads.v5.services",
"shopping_performance_view_service_client": "google.ads.google_ads.v5.services",
"third_party_app_analytics_link_service_client": "google.ads.google_ads.v5.services",
"topic_constant_service_client": "google.ads.google_ads.v5.services",
"topic_view_service_client": "google.ads.google_ads.v5.services",
"user_data_service_client": "google.ads.google_ads.v5.services",
"user_interest_service_client": "google.ads.google_ads.v5.services",
"user_list_service_client": "google.ads.google_ads.v5.services",
"user_location_view_service_client": "google.ads.google_ads.v5.services",
"video_service_client": "google.ads.google_ads.v5.services",
"account_budget_proposal_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"account_budget_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"account_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_asset_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_audience_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_bid_modifier_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_parameter_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_schedule_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"age_range_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"asset_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"batch_job_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"bidding_strategy_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"billing_setup_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_asset_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_audience_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_bid_modifier_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_budget_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_criterion_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_draft_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_experiment_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_shared_set_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"carrier_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"change_status_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"click_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_action_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_adjustment_upload_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_upload_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"currency_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"custom_interest_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_client_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_client_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_manager_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_negative_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"detail_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"display_keyword_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"distance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"domain_category_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"dynamic_search_ads_search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"expanded_landing_page_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"extension_feed_item_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_item_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_item_target_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_mapping_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_placeholder_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"gender_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"geo_target_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"geographic_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"google_ads_field_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"google_ads_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"group_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"hotel_group_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"hotel_performance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"income_range_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"invoice_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_ad_group_keyword_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_ad_group_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_campaign_keyword_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_campaign_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_idea_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"landing_page_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"language_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"location_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"managed_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"media_file_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"merchant_center_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"mobile_app_category_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"mobile_device_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"offline_user_data_job_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"operating_system_version_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"paid_organic_search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"parental_status_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"payments_account_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"product_bidding_category_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"product_group_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"reach_plan_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"recommendation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"remarketing_action_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shared_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shared_set_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shopping_performance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"third_party_app_analytics_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"topic_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"topic_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_data_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_interest_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_list_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_location_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"video_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
}
# Background on how this behaves: https://www.python.org/dev/peps/pep-0562/
def __getattr__(name): # Requires Python >= 3.7
"""Lazily perform imports and class definitions on first demand."""
if name == "__all__":
converted = (
util.convert_snake_case_to_upper_case(key)
for key in _lazy_name_to_package_map
)
all_names = sorted(converted)
globals()["__all__"] = all_names
return all_names
elif name.endswith("Transport"):
module = __getattr__(util.convert_upper_case_to_snake_case(name))
sub_mod_class = getattr(module, name)
klass = type(name, (sub_mod_class,), {"__doc__": sub_mod_class.__doc__})
globals()[name] = klass
return klass
elif name.endswith("ServiceClient"):
module = __getattr__(util.convert_upper_case_to_snake_case(name))
enums = __getattr__("enums")
sub_mod_class = getattr(module, name)
klass = type(
name,
(sub_mod_class,),
{"__doc__": sub_mod_class.__doc__, "enums": enums},
)
globals()[name] = klass
return klass
elif name == "enums":
path = "google.ads.google_ads.v5.services.enums"
module = importlib.import_module(path)
globals()[name] = module
return module
elif name == "types":
path = "google.ads.google_ads.v5.types"
module = importlib.import_module(path)
globals()[name] = module
return module
elif name in _lazy_name_to_package_map:
module = importlib.import_module(
f"{_lazy_name_to_package_map[name]}.{name}"
)
globals()[name] = module
return module
else:
raise AttributeError(f"unknown sub-module {name!r}.")
def __dir__():
return globals().get("__all__") or __getattr__("__all__")
if not sys.version_info >= (3, 7):
from pep562 import Pep562
Pep562(__name__)
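The PEP 562 mechanism this loader relies on can be shown in isolation. The sketch below is a minimal, self-contained illustration, not part of the google-ads package: the lazydemo package and its json_backend attribute are hypothetical names invented for the demo. It shows a module-level __getattr__ importing and caching a sub-module on first access, the same pattern the loader above applies to service clients and transports.
# Minimal PEP 562 sketch (hypothetical module names, not google-ads code).
import importlib
import sys
import types

pkg = types.ModuleType("lazydemo")        # throwaway in-memory package
_lazy_map = {"json_backend": "json"}      # attribute name -> real module path

def _pkg_getattr(name):
    """Import the backing module on first access and cache it."""
    if name in _lazy_map:
        module = importlib.import_module(_lazy_map[name])
        setattr(pkg, name, module)        # cached: the hook is not called again
        return module
    raise AttributeError(f"unknown sub-module {name!r}.")

pkg.__getattr__ = _pkg_getattr            # PEP 562 hook (Python >= 3.7)
sys.modules["lazydemo"] = pkg

import lazydemo
print(lazydemo.json_backend.dumps({"lazy": True}))  # first access triggers the import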
| 71.519231 | 113 | 0.791118 | 2,988 | 22,314 | 5.47925 | 0.079987 | 0.25397 | 0.209809 | 0.251771 | 0.886697 | 0.875764 | 0.865013 | 0.858783 | 0.841253 | 0.696433 | 0 | 0.012659 | 0.090168 | 22,314 | 311 | 114 | 71.749196 | 0.793764 | 0.031729 | 0 | 0.056738 | 0 | 0 | 0.793431 | 0.781896 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007092 | false | 0 | 0.028369 | 0.003546 | 0.060284 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
| 40c4517b7bccc080e6b7ec11639bdde005bb213a | 739 | py | Python | tests/test_config.py | savilard/flask-ecom-api | d94ee7873b9ec80645c05422e3355e8dc045ebeb | ["MIT"] | 1 | 2021-04-17T15:25:36.000Z | 2021-04-17T15:25:36.000Z | tests/test_config.py | savilard/flask-ecom-api | d94ee7873b9ec80645c05422e3355e8dc045ebeb | ["MIT"] | null | null | null | tests/test_config.py | savilard/flask-ecom-api | d94ee7873b9ec80645c05422e3355e8dc045ebeb | ["MIT"] | 1 | 2021-04-18T15:47:02.000Z | 2021-04-18T15:47:02.000Z |
import os
def test_development_config(test_app):
test_app.config.from_object('flask_ecom_api.config.DevelopmentConfig')
assert not test_app.config['TESTING']
assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_URL')
def test_testing_config(test_app):
test_app.config.from_object('flask_ecom_api.config.TestingConfig')
assert test_app.config['TESTING']
assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_TEST_URL')
def test_production_config(test_app):
test_app.config.from_object('flask_ecom_api.config.ProductionConfig')
assert not test_app.config['TESTING']
assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_URL')
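These tests assume a test_app pytest fixture that is defined elsewhere. A minimal conftest.py sketch follows; the fixture name comes from the tests, but the application factory and its import path are assumptions, since the real fixture definition is not in this file.
# Hypothetical conftest.py sketch; create_app is an assumed factory name.
import pytest
from flask import Flask

@pytest.fixture
def test_app():
    # Stand-in for the project's real application factory, e.g.
    # from flask_ecom_api import create_app; app = create_app()
    app = Flask(__name__)
    yield app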
| 36.95 | 92 | 0.783491 | 104 | 739 | 5.211538 | 0.240385 | 0.154982 | 0.215867 | 0.140221 | 0.785978 | 0.785978 | 0.785978 | 0.785978 | 0.785978 | 0.785978 | 0 | 0 | 0.098782 | 739 | 19 | 93 | 38.894737 | 0.813814 | 0 | 0 | 0.307692 | 0 | 0 | 0.328823 | 0.244926 | 0 | 0 | 0 | 0 | 0.461538 | 1 | 0.230769 | false | 0 | 0.076923 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
| 90a4ede6bfdb471d923545a3e19b34b37a9df384 | 7,038 | py | Python | parser/fase2/team28/models/Other/funcion.py | jossiebk/tytus | de6ce433d61609d4eaa5d0dbbd2ce13aaa573544 | ["MIT"] | null | null | null | parser/fase2/team28/models/Other/funcion.py | jossiebk/tytus | de6ce433d61609d4eaa5d0dbbd2ce13aaa573544 | ["MIT"] | null | null | null | parser/fase2/team28/models/Other/funcion.py | jossiebk/tytus | de6ce433d61609d4eaa5d0dbbd2ce13aaa573544 | ["MIT"] | null | null | null |
from models.instructions.shared import Instruction
from models.Other.ambito import Ambito
from controllers.three_address_code import ThreeAddressCode
from controllers.procedures import Procedures
from models.instructions.Expression.expression import DATA_TYPE, PrimitiveData
class Parametro(Instruction):
def __init__(self, id, data_type, line, column):
self.id = id
self.data_type = data_type
self.line = line
self.column = column
self._tac = ''
def compile(self):
pass
def process(self, environment):
pass
def __repr__(self):
return str(vars(self))
class Funcion(Instruction):
def __init__(self, id, params, body, val_return, isNew, isCall, line, column):
self.id = id
self.params = params
self.body = body
self.val_return = val_return
self.isNew = isNew
self.isCall = isCall
self.environment = None
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, environment):
pass
def compile(self, environment):
params = len(self.params)
temporal = None
if self.isNew:
            self.environment = environment # TODO verify
if Procedures().saveProcedure(self.id, self, self.line, self.column):
var_array = self.print(environment)
temporal = self.setVariables(var_array, environment)
else:
var_array = Procedures().getProcedure(self.id, params, self.line, self.column)
if var_array:
temporal = self.setVariables(var_array, environment)
fun = ThreeAddressCode().searchFunction(self.id)
if fun:
temporal = self.setVariables(fun['variables'], environment)
return temporal
#temp = ThreeAddressCode().newTemp()
def print(self, environment):
if ThreeAddressCode().searchFunction(self.id):
return None
ThreeAddressCode().newFunction(self.id)
newAmbito = Ambito(environment)
pos = 0
var_array = []
for var in self.params:
pos = ThreeAddressCode().stackCounter
var_array.append(newAmbito.addVar(var.id, var.data_type, None,
pos, var.line, var.column))
ThreeAddressCode().incStackCounter()
pos = ThreeAddressCode().stackCounter
        # Generate the exit label for the function
lbl_exit = ThreeAddressCode().newLabel()
newAmbito.lbl_return = lbl_exit
        # Add the function body
self.body.compile(newAmbito)
        # Add the exit label
ThreeAddressCode().addCode(f"label .{lbl_exit}")
        # Prints the first declared variable, NOT a parameter
# ThreeAddressCode().addCode(f"print(Stack[{pos}])")
ThreeAddressCode().createFunction(self.id, self.params, var_array)
return var_array
def setVariables(self, var_array, environment):
if self.isCall:
value = 0
for index, var in enumerate(var_array):
value = self.params[index].compile(environment)
if isinstance(value, PrimitiveData):
if value.data_type == DATA_TYPE.STRING:
value.value = f"\'{value.value}\'"
ThreeAddressCode().addCode(f"Stack[{var.position}] = {value.value}")
temp = ThreeAddressCode().newTemp()
            # Call the function
ThreeAddressCode().addCode(f"{self.id}()")
            # Get the function's return value
            ThreeAddressCode().addCode("#Getting return value--------")
ThreeAddressCode().addCode(f"{temp} = Stack[P]")
return temp
return None
class DropFuncion(Instruction):
def __init__(self, id, params, line, column):
self.id = id
self.params = params
self.line = line
self.column = column
class ProcedimientoAlmacenado(Instruction):
def __init__(self, id, params, body, isNew, isCall, line, column):
self.id = id
self.params = params
self.body = body
self.isNew = isNew
self.isCall = isCall
self.environment = None
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, environment):
pass
def compile(self, environment):
params = len(self.params)
if self.isNew:
            self.environment = environment # TODO verify
if Procedures().saveProcedure(self.id, self, self.line, self.column):
var_array = self.print(environment)
self.setVariables(var_array, environment)
else:
var_array = Procedures().getProcedure(self.id, params, self.line, self.column)
if var_array:
self.setVariables(var_array, environment)
fun = ThreeAddressCode().searchFunction(self.id)
if fun:
self.setVariables(fun['variables'], environment)
#temp = ThreeAddressCode().newTemp()
def print(self, environment):
if ThreeAddressCode().searchFunction(self.id):
return None
ThreeAddressCode().newFunction(self.id)
newAmbito = Ambito(environment)
pos = 0
var_array = []
for var in self.params:
pos = ThreeAddressCode().stackCounter
var_array.append(newAmbito.addVar(var.id, var.data_type, None,
pos, var.line, var.column))
ThreeAddressCode().incStackCounter()
pos = ThreeAddressCode().stackCounter
        # Generate the exit label for the function
lbl_exit = ThreeAddressCode().newLabel()
newAmbito.lbl_return = lbl_exit
        # Add the function body
self.body.compile(newAmbito)
        # Add the exit label
ThreeAddressCode().addCode(f"label .{lbl_exit}")
        # Prints the first declared variable, NOT a parameter
ThreeAddressCode().addCode(f"print(Stack[{pos}])")
ThreeAddressCode().createFunction(self.id, self.params, var_array)
return var_array
def setVariables(self, var_array, environment):
if self.isCall:
value = 0
for index, var in enumerate(var_array):
value = self.params[index].compile(environment)
if isinstance(value, PrimitiveData):
if value.data_type == DATA_TYPE.STRING:
value.value = f"\'{value.value}\'"
ThreeAddressCode().addCode(f"Stack[{var.position}] = {value.value}")
            # Call the function
ThreeAddressCode().addCode(f"{self.id}()")
            # A stored procedure does NOT return anything
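Throughout this file, ThreeAddressCode() is constructed repeatedly while stackCounter, temporaries, and the emitted code clearly persist across calls, which only works if the class shares state, for example as a singleton. The sketch below is an assumption-labeled reconstruction of that pattern for illustration; the project's real ThreeAddressCode controller is not shown in this source.
# Hedged sketch of a singleton TAC emitter; an assumed reconstruction,
# not the project's actual controllers.three_address_code module.
class TAC:
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.code = []
            cls._instance.temp_count = 0
            cls._instance.label_count = 0
            cls._instance.stackCounter = 0
        return cls._instance

    def newTemp(self):
        self.temp_count += 1
        return f"t{self.temp_count}"

    def newLabel(self):
        self.label_count += 1
        return f"L{self.label_count}"

    def incStackCounter(self):
        self.stackCounter += 1

    def addCode(self, line):
        self.code.append(line)

# Every construction returns the same shared emitter, so counters persist.
TAC().addCode(f"label .{TAC().newLabel()}")
TAC().incStackCounter()
assert TAC() is TAC() and TAC().stackCounter == 1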
| 34.331707 | 90 | 0.596903 | 717 | 7,038 | 5.755927 | 0.153417 | 0.031984 | 0.052338 | 0.021323 | 0.848316 | 0.823601 | 0.787497 | 0.764236 | 0.764236 | 0.731766 | 0 | 0.00082 | 0.306763 | 7,038 | 205 | 91 | 34.331707 | 0.84505 | 0.08312 | 0 | 0.823129 | 0 | 0 | 0.039466 | 0.006526 | 0 | 0 | 0 | 0.004878 | 0 | 1 | 0.115646 | false | 0.027211 | 0.034014 | 0.020408 | 0.244898 | 0.034014 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
| 90d6fa60d16e379bff07b720da304a90340377ce | 1,135 | py | Python | safe_control_gym/controllers/__init__.py | gokhanalcan/safe-control-gym | e9086e102663a60a66f2cc9c8cd7610888744056 | ["MIT"] | null | null | null | safe_control_gym/controllers/__init__.py | gokhanalcan/safe-control-gym | e9086e102663a60a66f2cc9c8cd7610888744056 | ["MIT"] | null | null | null | safe_control_gym/controllers/__init__.py | gokhanalcan/safe-control-gym | e9086e102663a60a66f2cc9c8cd7610888744056 | ["MIT"] | null | null | null |
"""Register controllers.
"""
from safe_control_gym.utils.registration import register
register(id="mpc",
entry_point="safe_control_gym.controllers.mpc.mpc:MPC",
config_entry_point="safe_control_gym.controllers.mpc:mpc.yaml")
register(id="linear_mpc",
entry_point="safe_control_gym.controllers.mpc.linear_mpc:LinearMPC",
config_entry_point="safe_control_gym.controllers.mpc:linear_mpc.yaml")
register(id="gp_mpc",
entry_point="safe_control_gym.controllers.mpc.gp_mpc:GPMPC",
config_entry_point="safe_control_gym.controllers.mpc:gp_mpc.yaml")
register(id="mpsc",
entry_point="safe_control_gym.controllers.mpsc.mpsc:MPSC",
config_entry_point="safe_control_gym.controllers.mpsc:mpsc.yaml")
register(id="ppo",
entry_point="safe_control_gym.controllers.ppo.ppo:PPO",
config_entry_point="safe_control_gym.controllers.ppo:ppo.yaml")
register(id="safe_explorer_ppo",
entry_point="safe_control_gym.controllers.safe_explorer.safe_ppo:SafeExplorerPPO",
config_entry_point="safe_control_gym.controllers.safe_explorer:safe_ppo.yaml")
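Gym-style registries like this one pair register() with a make() factory that resolves an id to its entry point. Assuming safe_control_gym follows that convention (its registration module is not shown here), usage would look roughly like the sketch below; the exact signature of make() is an assumption, not confirmed by this file.
# Hedged usage sketch based on the Gym-style registry convention.
from safe_control_gym.utils.registration import make

ctrl = make("ppo")  # would resolve to safe_control_gym.controllers.ppo.ppo:PPO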
| 39.137931 | 91 | 0.757709 | 154 | 1,135 | 5.220779 | 0.149351 | 0.177861 | 0.226368 | 0.313433 | 0.732587 | 0.732587 | 0.732587 | 0.702736 | 0.358209 | 0.134328 | 0 | 0 | 0.123348 | 1,135 | 28 | 92 | 40.535714 | 0.80804 | 0.018502 | 0 | 0 | 0 | 0 | 0.546112 | 0.507233 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.052632 | 0 | 0.052632 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
| 90fa1b52a86892da98479fc272386682615fa765 | 17,478 | py | Python | Termux-pkg-apt.py | Hironotori/Termux-pkg-apt | db1c33b750e82943c8c5b2780d69654ab4afde96 | ["BSL-1.0"] | 1 | 2021-04-12T18:33:25.000Z | 2021-04-12T18:33:25.000Z | Termux-pkg-apt.py | Hironotori/Termux-pkg-apt | db1c33b750e82943c8c5b2780d69654ab4afde96 | ["BSL-1.0"] | null | null | null | Termux-pkg-apt.py | Hironotori/Termux-pkg-apt | db1c33b750e82943c8c5b2780d69654ab4afde96 | ["BSL-1.0"] | 1 | 2021-10-17T00:44:37.000Z | 2021-10-17T00:44:37.000Z |
#!/usr/bin/python3
import os
import time
import sys
os.system("clear")
print('''\033[91m
CREATED BY Hironotori
''')
def slowprint(s):
    for c in s + '\n' :
        sys.stdout.write(c)
        sys.stdout.flush()
        time.sleep(1. / 50)  # per-character delay; restores the slow-print effect implied by the otherwise unused time import
slowprint(''' \033[93m
[1] apt-pkg pip-pip3 [2] apt-pkg python
[3] apt-pkg python2 [4] apt-pkg bash
[5] apt-pkg git [6] apt-pkg perl
[7] apt-pkg nano [8] apt-pkg curl
[9] apt-pkg openssl [10] apt-pkg openssh
[11] apt-pkg wget [12] apt-pkg clang
[13] apt-pkg nmap [14] apt-pkg w3m
[15] apt-pkg ruby [16] apt-pkg dnsutils
[17] apt-pkg coreutils [18] apt-pkg fish.
[19] apt-pkg zip [20] apt-pkg figlet.
[21] apt-pkg cowsay [22] apt-pkg unzip.
[23] apt-pkg vim [24] apt-pkg wcalc.
[25] apt-pkg bmon [26] apt-pkg unrar.
[27] apt-pkg proot [28] apt-pkg golang.
[29] apt-pkg tsu [30] apt-pkg tor.
[31] apt-pkg php
[00] Install everything at once [0] Exit''')
print (" ")
choice = input("\033[93mChoose an option : ")
if choice == '0' : sys.exit()
if choice == '1' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system("python -m pip install --upgrade pip")
os.system ("pip3 install --upgrade setuptools pip")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '2' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install python -y")
os.system ("pkg upgrade python -y")
os.system ("apt install python -y")
os.system ("apt upgrade python -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '3' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install python2 -y")
os.system ("pkg upgrade python2 -y")
os.system ("apt install python2 -y")
os.system ("apt upgrade python2 -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '4' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install bash")
os.system ("apt install bash")
os.system ("pkg upgrade bash")
os.system ("apt upgrade bash")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '5' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("apt install git -y")
os.system ("pkg install git -y")
os.system ("pkg upgrade git -y")
os.system ("apt upgrade git -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '6' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install perl -y")
os.system ("apt install perl -y")
os.system ("pkg upgrade perl -y")
os.system ("apt upgrade perl -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '7' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install nano -y")
os.system ("apt install nano -y")
os.system ("pkg upgrade nano -y")
os.system ("apt upgrade nano -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '8' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install curl -y")
os.system ("apt install curl -y")
os.system ("pkg upgrade curl -y")
os.system ("apt upgrade curl -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '9' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install openssl -y")
os.system ("apt install openssl -y")
os.system ("pkg upgrade openssl -y")
os.system ("apt upgrade openssl -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '10' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install openssh -y")
os.system ("apt install openssh -y")
os.system ("pkg upgrade openssh -y")
os.system ("apt upgrade openssh -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '11' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install wget -y")
os.system ("apt install wget -y")
os.system ("pkg upgrade wget -y")
os.system ("apt upgrade wget -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '12' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install clang -y")
os.system ("apt install clang -y")
os.system ("pkg upgrade clang -y")
os.system ("apt upgrade clang -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '13' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install nmap -y")
os.system ("apt install nmap -y")
os.system ("pkg upgrade nmap -y")
os.system ("apt upgrade nmap -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '14' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install w3m -y")
os.system ("apt install w3m -y")
os.system ("pkg upgrade w3m -y")
os.system ("apt upgrade w3m -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '15' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install ruby -y")
os.system ("apt install ruby -y")
os.system ("pkg upgrade ruby -y")
os.system ("apt upgrade ruby -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '16' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install dnsutils -y")
os.system ("apt install dnsutils -y")
os.system ("pkg upgrade dnsutils -y")
os.system ("apt upgrade dnsutils -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '17' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install coreutils -y")
os.system ("apt install coreutils -y")
os.system ("pkg upgrade coreutils -y")
os.system ("apt upgrade coreutils -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '18' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install fish -y")
os.system ("apt install fish -y")
os.system ("pkg upgrade fish -y")
os.system ("apt upgrade fish -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '19' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install zip -y")
os.system ("apt install zip -y")
os.system ("pkg upgrade zip -y")
os.system ("apt upgrade zip -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '20' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install figlet -y")
os.system ("apt install figlet -y")
os.system ("pkg upgrade figlet -y")
os.system ("apt upgrade figlet -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '21' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install cowsay -y")
os.system ("apt install cowsay -y")
os.system ("pkg upgrade cowsay -y")
os.system ("apt upgrade cowsay -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '22' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install unzip -y")
os.system ("apt install unzip -y")
os.system ("pkg upgrade unzip -y")
os.system ("apt upgrade unzip -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '23' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install vim -y")
os.system ("apt install vim -y")
os.system ("pkg upgrade vim -y")
os.system ("apt upgrade vim -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '24' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install wcalc -y")
os.system ("apt install wcalc -y")
os.system ("pkg upgrade wcalc -y")
os.system ("apt upgrade wcalc -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '25' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install bmon -y")
os.system ("apt install bmon -y")
os.system ("pkg upgrade bmon -y")
os.system ("apt upgrade bmon -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '26' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install unrar -y")
os.system ("apt install unrar -y")
os.system ("pkg upgrade unrar -y")
os.system ("apt upgrade unrar -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '27' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install proot -y")
os.system ("apt install proot -y")
os.system ("pkg upgrade proot -y")
os.system ("apt upgrade proot -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '28' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install golang -y")
os.system ("apt install golang -y")
os.system ("pkg upgrade golang -y")
os.system ("apt upgrade golang -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '29' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system("pkg install tsu -y")
os.system ("apt install tsu -y")
os.system ("pkg upgrade tsu -y")
os.system ("apt upgrade tsu -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '30' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install tor")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '31' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system ("pkg install php -y")
os.system ("pkg upgrade php -y")
os.system ("apt install php -y")
os.system ("apt upgrade php -y")
os.system ("termux-setup-storage")
sys.exit ()
if choice == '00' : os.system ("apt upgrade -y")
os.system ("pkg install")
os.system ("pkg upgrade")
os.system ("apt install")
os.system ("apt upgrade")
os.system ("apt update")
os.system ("pkg update")
os.system("python -m pip install --upgrade pip")
os.system ("pip3 install --upgrade setuptools pip")
os.system ("pkg install python -y")
os.system ("pkg upgrade python -y")
os.system ("apt install python -y")
os.system ("apt upgrade python -y")
os.system ("pkg install python2 -y")
os.system ("pkg upgrade python2 -y")
os.system ("apt install python2 -y")
os.system ("apt upgrade python2 -y")
os.system ("pkg install php -y")
os.system ("pkg upgrade php -y")
os.system ("apt install php -y")
os.system ("apt upgrade php -y")
os.system ("pkg install bash")
os.system ("apt install bash")
os.system ("pkg upgrade bash")
os.system ("apt upgrade bash")
os.system ("apt install git -y")
os.system ("pkg install git -y")
os.system ("pkg upgrade git -y")
os.system ("apt upgrade git -y")
os.system ("pkg install perl -y")
os.system ("apt install perl -y")
os.system ("pkg upgrade perl -y")
os.system ("apt upgrade perl -y")
os.system ("pkg install nano -y")
os.system ("apt install nano -y")
os.system ("pkg upgrade nano -y")
os.system ("apt upgrade nano -y")
os.system ("pkg install curl -y")
os.system ("apt install curl -y")
os.system ("pkg upgrade curl -y")
os.system ("apt upgrade curl -y")
os.system ("pkg install openssl -y")
os.system ("apt install openssl -y")
os.system ("pkg upgrade openssl -y")
os.system ("apt upgrade openssl -y")
os.system ("pkg install openssh -y")
os.system ("apt install openssh -y")
os.system ("pkg upgrade openssh -y")
os.system ("apt upgrade openssh -y")
os.system ("pkg install wget -y")
os.system ("apt install wget -y")
os.system ("pkg upgrade wget -y")
os.system ("apt upgrade wget -y")
os.system ("pkg install clang -y")
os.system ("apt install clang -y")
os.system ("pkg upgrade clang -y")
os.system ("apt upgrade clang -y")
os.system ("pkg install nmap -y")
os.system ("apt install nmap -y")
os.system ("pkg upgrade nmap -y")
os.system ("apt upgrade nmap -y")
os.system ("pkg install w3m -y")
os.system ("apt install w3m -y")
os.system ("pkg upgrade w3m -y")
os.system ("apt upgrade w3m -y")
os.system ("pkg install ruby -y")
os.system ("apt install ruby -y")
os.system ("pkg upgrade ruby -y")
os.system ("apt upgrade ruby -y")
os.system ("pkg install dnsutils -y")
os.system ("apt install dnsutils -y")
os.system ("pkg upgrade dnsutils -y")
os.system ("apt upgrade dnsutils -y")
os.system ("pkg install coreutils -y")
os.system ("apt install coreutils -y")
os.system ("pkg upgrade coreutils -y")
os.system ("apt upgrade coreutils -y")
os.system ("pkg install fish -y")
os.system ("apt install fish -y")
os.system ("pkg upgrade fish -y")
os.system ("apt upgrade fish -y")
os.system ("pkg install zip -y")
os.system ("apt install zip -y")
os.system ("pkg upgrade zip -y")
os.system ("apt upgrade zip -y")
os.system ("pkg install figlet -y")
os.system ("apt install figlet -y")
os.system ("pkg upgrade figlet -y")
os.system ("apt upgrade figlet -y")
os.system ("pkg install cowsay -y")
os.system ("apt install cowsay -y")
os.system ("pkg upgrade cowsay -y")
os.system ("apt upgrade cowsay -y")
os.system ("pkg install unzip -y")
os.system ("apt install unzip -y")
os.system ("pkg upgrade unzip -y")
os.system ("apt upgrade unzip -y")
os.system ("pkg install vim -y")
os.system ("apt install vim -y")
os.system ("pkg upgrade vim -y")
os.system ("apt upgrade vim -y")
os.system ("pkg install wcalc -y")
os.system ("apt install wcalc -y")
os.system ("pkg upgrade wcalc -y")
os.system ("apt upgrade wcalc -y")
os.system ("pkg install bmon -y")
os.system ("apt install bmon -y")
os.system ("pkg upgrade bmon -y")
os.system ("apt upgrade bmon -y")
os.system ("pkg install unrar -y")
os.system ("apt install unrar -y")
os.system ("pkg upgrade unrar -y")
os.system ("apt upgrade unrar -y")
os.system ("pkg install proot -y")
os.system ("apt install proot -y")
os.system ("pkg upgrade proot -y")
os.system ("apt upgrade proot -y")
os.system ("pkg install golang -y")
os.system ("apt install golang -y")
os.system ("pkg upgrade golang -y")
os.system ("apt upgrade golang -y")
os.system("pkg install tsu -y")
os.system ("apt install tsu -y")
os.system ("pkg upgrade tsu -y")
os.system ("apt upgrade tsu -y")
os.system ("pkg install tor")
os.system ("termux-setup-storage")
sys.exit ()
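Every menu entry above repeats the same refresh-then-install block, and because the statements after each if are not indented under it, the script effectively always runs the first block and exits regardless of the choice. A table-driven rewrite fixes both problems. The sketch below is a suggested refactor, not the original author's code; the package list is taken from the script itself.
# Table-driven rewrite sketch: one mapping from menu choice to package name
# replaces the repeated os.system blocks above.
import os
import sys

PACKAGES = {
    '2': 'python', '3': 'python2', '4': 'bash', '5': 'git', '6': 'perl',
    '7': 'nano', '8': 'curl', '9': 'openssl', '10': 'openssh', '11': 'wget',
    '12': 'clang', '13': 'nmap', '14': 'w3m', '15': 'ruby', '16': 'dnsutils',
    '17': 'coreutils', '18': 'fish', '19': 'zip', '20': 'figlet',
    '21': 'cowsay', '22': 'unzip', '23': 'vim', '24': 'wcalc', '25': 'bmon',
    '26': 'unrar', '27': 'proot', '28': 'golang', '29': 'tsu', '30': 'tor',
    '31': 'php',
}

def refresh():
    # One shared update/upgrade pass instead of one copy per menu entry.
    for cmd in ('apt update', 'apt upgrade -y', 'pkg update', 'pkg upgrade'):
        os.system(cmd)

def install(pkg):
    os.system(f'pkg install {pkg} -y')
    os.system(f'apt install {pkg} -y')

choice = input('Choose an option : ')
if choice == '0':
    sys.exit()
refresh()
if choice == '1':
    os.system('python -m pip install --upgrade pip')
    os.system('pip3 install --upgrade setuptools pip')
elif choice == '00':
    for pkg in PACKAGES.values():
        install(pkg)
elif choice in PACKAGES:
    install(PACKAGES[choice])
os.system('termux-setup-storage')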
| 31.099644 | 54 | 0.66947 | 2,850 | 17,478 | 4.105614 | 0.039649 | 0.338433 | 0.196906 | 0.121015 | 0.941971 | 0.941971 | 0.941971 | 0.941971 | 0.941971 | 0.935647 | 0 | 0.009981 | 0.145898 | 17,478 | 562 | 55 | 31.099644 | 0.773848 | 0.000973 | 0 | 0.885305 | 0 | 0 | 0.507302 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.001792 | false | 0 | 0.005376 | 0 | 0.007168 | 0.007168 | 0 | 0 | 0 | null | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
| d3d3a5b087e35b140a4cca72077a3d96a9f4d93b | 42,865 | py | Python | grafana/common/dashboards/aggregated/client_subnet_statistics_detail.py | MikeAT/visualizer | 946b98d82eaf7ec508861115585afd683fc49e5c | ["MIT"] | 6 | 2021-03-03T17:52:24.000Z | 2022-02-10T11:45:22.000Z | grafana/common/dashboards/aggregated/client_subnet_statistics_detail.py | Acidburn0zzz/visualizer | 20fba91f0d26b98531f97f643c8329640d1c0d11 | ["MIT"] | 1 | 2021-04-29T12:34:04.000Z | 2021-04-29T14:50:17.000Z | grafana/common/dashboards/aggregated/client_subnet_statistics_detail.py | Acidburn0zzz/visualizer | 20fba91f0d26b98531f97f643c8329640d1c0d11 | ["MIT"] | 2 | 2021-04-27T14:02:03.000Z | 2021-11-12T10:34:32.000Z |
# Copyright 2021 Internet Corporation for Assigned Names and Numbers.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# Developed by Sinodun IT (sinodun.com)
#
# Aggregation client subnet statistics
import textwrap
import grafanalib.core as GCore
import grafanacommon as GCommon
def query_classification_chart(chart_title, yaxis_label, prefix_field, agginfo, nodesel):
return GCommon.BarChart(
title = chart_title,
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = True,
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = yaxis_label,
),
),
traces = [
GCommon.BarChartTrace(
name = 'AForA',
x = 'AForA',
y = 'AForAPrefix',
text = 'AForA',
),
GCommon.BarChartTrace(
name = 'AForRoot',
x = 'AForRoot',
y = 'AForRootPrefix',
text = 'AForRoot',
),
GCommon.BarChartTrace(
name = 'FunnyQueryClass',
x = 'FunnyQueryClass',
y = 'FunnyQueryClassPrefix',
text = 'FunnyQueryClass',
),
GCommon.BarChartTrace(
name = 'FunnyQueryType',
x = 'FunnyQueryType',
y = 'FunnyQueryTypePrefix',
text = 'FunnyQueryType',
),
GCommon.BarChartTrace(
name = 'Localhost',
x = 'Localhost',
y = 'LocalhostPrefix',
text = 'Localhost',
),
GCommon.BarChartTrace(
name = 'NonAuthTld',
x = 'NonAuthTld',
y = 'NonAuthTldPrefix',
text = 'NonAuthTld',
),
GCommon.BarChartTrace(
name = 'Ok',
x = 'Ok',
y = 'OkPrefix',
text = 'Ok',
),
GCommon.BarChartTrace(
name = 'RFC1918Ptr',
x = 'RFC1918Ptr',
y = 'RFC1918PtrPrefix',
text = 'RFC1918Ptr',
),
GCommon.BarChartTrace(
name = 'RootServersNet',
x = 'RootServersNet',
y = 'RootServersNetPrefix',
text = 'RootServersNet',
),
GCommon.BarChartTrace(
name = 'SrcPortZero',
x = 'SrcPortZero',
y = 'SrcPortZeroPrefix',
text = 'SrcPortZero',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS AForAPrefix,
AForA,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(AForACount)/($to - $from) AS AForA
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'A'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS AForRootPrefix,
AForRoot,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(AForRootCount)/($to - $from) AS AForRoot
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'B'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS FunnyQueryClassPrefix,
FunnyQueryClass,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(FunnyQueryClassCount)/($to - $from) AS FunnyQueryClass
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'C'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS FunnyQueryTypePrefix,
FunnyQueryType,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(FunnyQueryTypeCount)/($to - $from) AS FunnyQueryType
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
                    ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'D'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS LocalhostPrefix,
Localhost,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(LocalhostCount)/($to - $from) AS Localhost
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'E'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS NonAuthTldPrefix,
NonAuthTld,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(NonAuthTldCount)/($to - $from) AS NonAuthTld
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'F'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS OkPrefix,
Ok,
TotalCount
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS TotalCount,
sum(Count -
(AForACount +
AForRootCount +
FunnyQueryClassCount +
FunnyQueryTypeCount +
LocalhostCount +
NonAuthTldCount +
RFC1918PtrCount +
RootServersNetCount +
SrcPortZeroCount))/($to - $from) AS Ok
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY TotalCount DESC
LIMIT 40
)
ORDER BY TotalCount ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'G'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS RFC1918PtrPrefix,
RFC1918Ptr,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(RFC1918PtrCount)/($to - $from) AS RFC1918Ptr
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'H'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS RootServersNetPrefix,
RootServersNet,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(RootServersNetCount)/($to - $from) AS RootServersNet
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'I'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS SrcPortZeroPrefix,
SrcPortZero,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(SrcPortZeroCount)/($to - $from) AS SrcPortZero
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'J'
),
],
)
def dash(myuid, agginfo, nodesel, **kwargs):
return GCommon.Dashboard(
title = "Client subnet statistics detail",
tags = [
agginfo['graph_tag']
],
uid = myuid,
rows = [
GCore.Row(
height = GCore.Pixels(50),
panels = [
GCommon.HTMLPanel('grafana/common/dashboards/aggregated/client_subnet_statistics_header.html', transparent=True),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'Clients by fixed subnet',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'Fixed Subnet',
),
),
traces = [
GCommon.BarChartTrace(
name = 'Subnet',
color = '#A352CC',
x = 'QPS',
y = 'Subnet',
text = 'QPS',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'BusiestClientSubnets' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Subnet,
QPS
FROM
(
SELECT
Prefix AS Subnet,
sum(Count)/($to - $from) AS QPS
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY QPS DESC
LIMIT 30
)
ORDER BY QPS ASC""".format(
nodesel=nodesel)),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'RCODE by clients by ASN',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = True,
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'ASN',
),
),
autotrace = True,
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'BusiestClientSubnets' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
notEmpty(rcodeText) ? rcodeText : concat('RCODE', toString(rcode)) AS DisplayRcode,
sum(rcodeCount) / ($to - $from) AS rcodeCount,
ClientASN
FROM
(
SELECT
ClientASN,
rcode,
sum(rcodeCount) AS rcodeCount,
any(sCount) AS sCount
FROM
(
SELECT
ClientASN,
sum(RcodeMap.Count) AS sCount
FROM $table
ARRAY JOIN RcodeMap
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY
ClientASN
ORDER BY sCount DESC, ClientASN ASC
LIMIT 30
) AS ClientASNCounts
ALL LEFT JOIN
(
SELECT
ClientASN,
RcodeMap.ResponseRcode AS rcode,
sum(RcodeMap.Count) AS rcodeCount
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY
ClientASN,
rcode
UNION ALL
(
SELECT
ClientASN,
rcode,
CAST(0 AS UInt64) AS rcodeCount
FROM
(
SELECT
0 AS Zero,
ClientASN
FROM $table
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY ClientASN
) AS ZeroClientASN
ALL LEFT JOIN
(
SELECT
0 AS Zero,
RcodeMap.ResponseRcode AS rcode
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY rcode
) AS ZeroRcode USING Zero
)
) AS ClientASNRcodeCounts USING ClientASN
GROUP BY
ClientASN,
rcode
) AS ClientASNRcodeCountsTotal
ALL INNER JOIN
(
SELECT
value_name AS rcodeText,
toUInt16(value) AS rcode
FROM {nodeinfo_database}.iana_text
WHERE registry_name = 'RCODE'
) AS ClientASNNameCountsTotal USING rcode
GROUP BY
ClientASN,
rcode,
rcodeText
ORDER BY
sum(sCount) ASC,
rcodeText ASC,
ClientASN DESC""".format(
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'RCODE by clients by AS subnet',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = True,
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'AS Subnet',
),
),
autotrace = True,
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'BGPPrefix' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
notEmpty(rcodeText) ? rcodeText : concat('RCODE', toString(rcode)) AS DisplayRcode,
sum(rcodeCount) / ($to - $from) AS rcodeCount,
Prefix
FROM
(
SELECT
Prefix,
rcode,
sum(rcodeCount) AS rcodeCount,
any(sCount) AS sCount
FROM
(
SELECT
Prefix,
sum(RcodeMap.Count) AS sCount
FROM $table
ARRAY JOIN RcodeMap
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY
Prefix
ORDER BY sCount DESC, Prefix ASC
LIMIT 30
) AS PrefixCount
ALL LEFT JOIN
(
SELECT
Prefix,
RcodeMap.ResponseRcode AS rcode,
sum(RcodeMap.Count) AS rcodeCount
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY
Prefix,
rcode
UNION ALL
(
SELECT
Prefix,
rcode,
CAST(0 AS UInt64) AS rcodeCount
FROM
(
SELECT
0 AS Zero,
Prefix
FROM $table
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
                            ) AS ZeroPrefix
ALL LEFT JOIN
(
SELECT
0 AS Zero,
RcodeMap.ResponseRcode AS rcode
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY rcode
) AS ZeroRcode USING Zero
)
) AS PrefixRcodeCounts USING Prefix
GROUP BY
Prefix,
rcode
) AS PrefixRcodeCountsTotal
ALL INNER JOIN
(
SELECT
value_name AS rcodeText,
toUInt16(value) AS rcode
FROM {nodeinfo_database}.iana_text
WHERE registry_name = 'RCODE'
) AS PrefixNameCountsTotal USING rcode
GROUP BY
Prefix,
rcode,
rcodeText
ORDER BY
sum(sCount) ASC,
rcodeText ASC,
Prefix DESC""".format(
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'RCODE by clients by fixed subnet',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = True,
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'Fixed Subnet',
),
),
autotrace = True,
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'BusiestClientSubnets' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
notEmpty(rcodeText) ? rcodeText : concat('RCODE', toString(rcode)) AS DisplayRcode,
sum(rcodeCount) / ($to - $from) AS rcodeCount,
Prefix
FROM
(
SELECT
Prefix,
rcode,
sum(rcodeCount) AS rcodeCount,
any(sCount) AS sCount
FROM
(
SELECT
Prefix,
sum(RcodeMap.Count) AS sCount
FROM $table
ARRAY JOIN RcodeMap
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY
Prefix
ORDER BY sCount DESC, Prefix ASC
LIMIT 30
) AS PrefixCount
ALL LEFT JOIN
(
SELECT
Prefix,
RcodeMap.ResponseRcode AS rcode,
sum(RcodeMap.Count) AS rcodeCount
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY
Prefix,
rcode
UNION ALL
(
SELECT
Prefix,
rcode,
CAST(0 AS UInt64) AS rcodeCount
FROM
(
SELECT
0 AS Zero,
Prefix
FROM $table
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
) AS ZeroPrefix
ALL LEFT JOIN
(
SELECT
0 AS Zero,
RcodeMap.ResponseRcode AS rcode
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY rcode
) AS ZeroRcode USING Zero
)
) AS PrefixRcodeCounts USING Prefix
GROUP BY
Prefix,
rcode
) AS PrefixRcodeCountsTotal
ALL INNER JOIN
(
SELECT
value_name AS rcodeText,
toUInt16(value) AS rcode
FROM {nodeinfo_database}.iana_text
WHERE registry_name = 'RCODE'
) AS PrefixNameCountsTotal USING rcode
GROUP BY
Prefix,
rcode,
rcodeText
ORDER BY
sum(sCount) ASC,
rcodeText ASC,
Prefix DESC""".format(
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'Root abusers by fixed subnet',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'Fixed Subnet',
),
),
traces = [
GCommon.BarChartTrace(
name = 'Subnet',
color = '#A352CC',
x = 'QPS',
y = 'Subnet',
text = 'QPS',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Subnet,
QPS
FROM
(
SELECT
FixedPrefix AS Subnet,
sum(RootAbuseCount)/($to - $from) AS QPS
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY FixedPrefix
ORDER BY QPS DESC
LIMIT 40
)
ORDER BY QPS ASC""".format(
nodesel=nodesel)),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
query_classification_chart(
'Query classification by busiest fixed subnet',
'Fixed Subnet',
'FixedPrefix',
agginfo,
nodesel)
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
query_classification_chart(
'Query classification by busiest ASN',
'ASN',
'ClientASN',
agginfo,
nodesel)
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
query_classification_chart(
'Query classification by busiest AS subnet',
'AS subnet',
'ASPrefix',
agginfo,
nodesel)
],
),
]
)
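Each ClickHouse target above composes its query in two stages: str.format fills the Python-side parameters ({prefix_field}, {nodesel}) when the dashboard is generated, while the $-prefixed macros ($table, $timeFilter, $to, $from) are left verbatim for Grafana's ClickHouse datasource to expand at query time. A self-contained sketch of that pattern, using a simplified query rather than the dashboard's real ones:
# Two-stage templating sketch: Python fills {…} now, Grafana fills $… later.
import textwrap

def classification_query(prefix_field, nodesel):
    return textwrap.dedent("""\
        SELECT
            {prefix_field} AS Prefix,
            sum(Count)/($to - $from) AS QPS
        FROM $table
        WHERE $timeFilter
          AND NodeID IN {nodesel}
        GROUP BY Prefix
        ORDER BY QPS DESC
        LIMIT 40""").format(prefix_field=prefix_field, nodesel=nodesel)

print(classification_query('FixedPrefix', '(1, 2, 3)'))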
| 46.04189 | 133 | 0.30778 | 2,201 | 42,865 | 5.915039 | 0.110404 | 0.026192 | 0.033182 | 0.044243 | 0.790537 | 0.784699 | 0.784699 | 0.784699 | 0.784699 | 0.777479 | 0 | 0.008435 | 0.643205 | 42,865 | 930 | 134 | 46.091398 | 0.842814 | 0.007839 | 0 | 0.785792 | 0 | 0 | 0.620405 | 0.01811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.002186 | false | 0 | 0.003279 | 0.002186 | 0.00765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
| d3dacb32ea41d2fb0546ec04640a3b17315faa08 | 118,963 | py | Python | h1/api/insight_project_journal_api.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | ["MIT"] | null | null | null | h1/api/insight_project_journal_api.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | ["MIT"] | null | null | null | h1/api/insight_project_journal_api.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | ["MIT"] | null | null | null |
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from h1.api_client import ApiClient, Endpoint as _Endpoint
from h1.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from h1.model.event import Event
from h1.model.inline_response400 import InlineResponse400
from h1.model.insight_project_journal_create import InsightProjectJournalCreate
from h1.model.insight_project_journal_credential_patch import InsightProjectJournalCredentialPatch
from h1.model.insight_project_journal_transfer import InsightProjectJournalTransfer
from h1.model.insight_project_journal_update import InsightProjectJournalUpdate
from h1.model.journal import Journal
from h1.model.journal_credential import JournalCredential
from h1.model.resource_service import ResourceService
from h1.model.tag import Tag
from h1.model.tag_array import TagArray
class InsightProjectJournalApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __insight_project_journal_create(
self,
project_id,
location_id,
insight_project_journal_create,
**kwargs
):
"""Create insight/journal # noqa: E501
Create journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_create(project_id, location_id, insight_project_journal_create, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
insight_project_journal_create (InsightProjectJournalCreate):
Keyword Args:
x_idempotency_key (str): Idempotency key. [optional]
x_dry_run (str): Dry run. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['insight_project_journal_create'] = \
insight_project_journal_create
return self.call_with_http_info(**kwargs)
self.insight_project_journal_create = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal',
'operation_id': 'insight_project_journal_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'insight_project_journal_create',
'x_idempotency_key',
'x_dry_run',
],
'required': [
'project_id',
'location_id',
'insight_project_journal_create',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'insight_project_journal_create':
(InsightProjectJournalCreate,),
'x_idempotency_key':
(str,),
'x_dry_run':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'x_idempotency_key': 'x-idempotency-key',
'x_dry_run': 'x-dry-run',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'insight_project_journal_create': 'body',
'x_idempotency_key': 'header',
'x_dry_run': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_create
)
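# Usage sketch (assumption: this follows the standard openapi-generator
# client workflow; the Configuration class, host defaults, the token and all
# IDs below are illustrative, not taken from this module):
#
#     from h1 import ApiClient, Configuration
#     from h1.api.insight_project_journal_api import InsightProjectJournalApi
#     from h1.model.insight_project_journal_create import InsightProjectJournalCreate
#
#     configuration = Configuration(access_token="MY_TOKEN")  # hypothetical token
#     with ApiClient(configuration) as api_client:
#         api = InsightProjectJournalApi(api_client)
#         journal = api.insight_project_journal_create(
#             "my-project-id",   # project_id (path parameter)
#             "pl-waw-1",        # location_id (path parameter, illustrative)
#             InsightProjectJournalCreate(name="audit-log"),
#             x_idempotency_key="create-audit-log-1",  # optional header
#         )
#         print(journal)  # a Journal model instance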
def __insight_project_journal_credential_create(
self,
project_id,
location_id,
journal_id,
journal_credential,
**kwargs
):
"""Create insight/journal.credential # noqa: E501
Create insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_create(project_id, location_id, journal_id, journal_credential, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
journal_credential (JournalCredential):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
JournalCredential
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['journal_credential'] = \
journal_credential
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_create = _Endpoint(
settings={
'response_type': (JournalCredential,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential',
'operation_id': 'insight_project_journal_credential_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'journal_credential',
],
'required': [
'project_id',
'location_id',
'journal_id',
'journal_credential',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'journal_credential':
(JournalCredential,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'journal_credential': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_credential_create
)
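# Async sketch: every endpoint accepts async_req=True, in which case the call
# returns immediately with a thread-like result object and the response is
# retrieved via .get() (IDs and JournalCredential field values below are
# illustrative, not taken from this module):
#
#     thread = api.insight_project_journal_credential_create(
#         "my-project-id", "pl-waw-1", "my-journal-id",
#         JournalCredential(name="reader", type="ssh", value="ssh-rsa AAAA..."),
#         async_req=True,
#     )
#     credential = thread.get()  # blocks until the POST completes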
def __insight_project_journal_credential_delete(
self,
project_id,
location_id,
journal_id,
credential_id,
**kwargs
):
"""Delete insight/journal.credential # noqa: E501
Delete insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_delete(project_id, location_id, journal_id, credential_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
credential_id (str): credentialId
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['credential_id'] = \
credential_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_delete = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential/{credentialId}',
'operation_id': 'insight_project_journal_credential_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'credential_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'credential_id': 'credentialId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'credential_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_credential_delete
)
def __insight_project_journal_credential_get(
self,
project_id,
location_id,
journal_id,
credential_id,
**kwargs
):
"""Get insight/journal.credential # noqa: E501
Get insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_get(project_id, location_id, journal_id, credential_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
credential_id (str): credentialId
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
JournalCredential
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['credential_id'] = \
credential_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_get = _Endpoint(
settings={
'response_type': (JournalCredential,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential/{credentialId}',
'operation_id': 'insight_project_journal_credential_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'credential_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'credential_id': 'credentialId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'credential_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_credential_get
)
def __insight_project_journal_credential_list(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""List insight/journal.credential # noqa: E501
List insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_list(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[JournalCredential]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_list = _Endpoint(
settings={
'response_type': ([JournalCredential],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential',
'operation_id': 'insight_project_journal_credential_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_credential_list
)
def __insight_project_journal_credential_patch(
self,
project_id,
location_id,
journal_id,
credential_id,
insight_project_journal_credential_patch,
**kwargs
):
"""Update insight/journal.credential # noqa: E501
Update insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_patch(project_id, location_id, journal_id, credential_id, insight_project_journal_credential_patch, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
credential_id (str): credentialId
insight_project_journal_credential_patch (InsightProjectJournalCredentialPatch):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
JournalCredential
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['credential_id'] = \
credential_id
kwargs['insight_project_journal_credential_patch'] = \
insight_project_journal_credential_patch
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_patch = _Endpoint(
settings={
'response_type': (JournalCredential,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential/{credentialId}',
'operation_id': 'insight_project_journal_credential_patch',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'credential_id',
'insight_project_journal_credential_patch',
],
'required': [
'project_id',
'location_id',
'journal_id',
'credential_id',
'insight_project_journal_credential_patch',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'credential_id':
(str,),
'insight_project_journal_credential_patch':
(InsightProjectJournalCredentialPatch,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'credential_id': 'credentialId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'credential_id': 'path',
'insight_project_journal_credential_patch': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_credential_patch
)
def __insight_project_journal_delete(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""Delete insight/journal # noqa: E501
Delete journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_delete(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}',
'operation_id': 'insight_project_journal_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_delete
)
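# Error-handling sketch: a failed call raises the generated client exception
# (assumption: h1.exceptions.ApiException, the usual openapi-generator name);
# a successful delete returns None per the docstring above. IDs illustrative:
#
#     from h1.exceptions import ApiException
#
#     try:
#         api.insight_project_journal_delete("my-project-id", "pl-waw-1", "my-journal-id")
#     except ApiException as exc:
#         # exc.status / exc.body carry the HTTP status and the error payload
#         print("delete failed:", exc.status, exc.body)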
def __insight_project_journal_event_get(
self,
project_id,
location_id,
journal_id,
event_id,
**kwargs
):
"""Get insight/journal.event # noqa: E501
Get insight/journal.event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_event_get(project_id, location_id, journal_id, event_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
event_id (str): eventId
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Event
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['event_id'] = \
event_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_event_get = _Endpoint(
settings={
'response_type': (Event,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/event/{eventId}',
'operation_id': 'insight_project_journal_event_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'event_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'event_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'event_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'event_id': 'eventId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'event_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_event_get
)
def __insight_project_journal_event_list(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""List insight/journal.event # noqa: E501
List insight/journal.event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_event_list(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
limit (float): $limit. [optional] if omitted the server will use the default value of 100
skip (float): $skip. [optional]
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Event]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_event_list = _Endpoint(
settings={
'response_type': ([Event],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/event',
'operation_id': 'insight_project_journal_event_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'limit',
'skip',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'limit',
]
},
root_map={
'validations': {
('limit',): {
'inclusive_maximum': 1000,
'inclusive_minimum': 1,
},
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'limit':
(float,),
'skip':
(float,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'limit': '$limit',
'skip': '$skip',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'limit': 'query',
'skip': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_event_list
)
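# Paging sketch for event listing: limit maps to $limit and is validated
# client-side against the 1..1000 bounds declared above (server default 100);
# skip maps to $skip and offsets the window. IDs are illustrative:
#
#     skip = 0.0
#     while True:
#         page = api.insight_project_journal_event_list(
#             "my-project-id", "pl-waw-1", "my-journal-id",
#             limit=200.0,  # float, per the declared openapi type
#             skip=skip,
#         )
#         for event in page:
#             print(event)
#         if len(page) < 200:
#             break  # short page: no more events
#         skip += 200.0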
def __insight_project_journal_get(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""Get insight/journal # noqa: E501
Returns a single journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_get(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_get = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}',
'operation_id': 'insight_project_journal_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_get
)
def __insight_project_journal_list(
self,
project_id,
location_id,
**kwargs
):
"""List insight/journal # noqa: E501
List journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_list(project_id, location_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
Keyword Args:
name (str): Filter by name. [optional]
tag_value (str): Filter by tag.value. [optional]
tag_key (str): Filter by tag.key. [optional]
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Journal]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_list = _Endpoint(
settings={
'response_type': ([Journal],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal',
'operation_id': 'insight_project_journal_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'name',
'tag_value',
'tag_key',
],
'required': [
'project_id',
'location_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'name':
(str,),
'tag_value':
(str,),
'tag_key':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'name': 'name',
'tag_value': 'tag.value',
'tag_key': 'tag.key',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'name': 'query',
'tag_value': 'query',
'tag_key': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_list
)
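# Filtering sketch: list journals by name or tag; the snake_case kwargs map to
# the query parameters shown in attribute_map above (name, tag.value,
# tag.key). IDs and filter values are illustrative:
#
#     journals = api.insight_project_journal_list(
#         "my-project-id", "pl-waw-1",
#         tag_key="env", tag_value="production",
#     )
#     for journal in journals:
#         print(journal)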
def __insight_project_journal_log_get(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""Get insight/journal.log # noqa: E501
WebSocket access is also supported # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_log_get(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
since (datetime): since. [optional]
until (datetime): until. [optional]
follow (bool): follow. [optional] if omitted the server will use the default value of False
tail (float): tail. [optional]
tag (TagArray): tag. [optional]
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_log_get = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/log',
'operation_id': 'insight_project_journal_log_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'since',
'until',
'follow',
'tail',
'tag',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'since':
(datetime,),
'until':
(datetime,),
'follow':
(bool,),
'tail':
(float,),
'tag':
(TagArray,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'since': 'since',
'until': 'until',
'follow': 'follow',
'tail': 'tail',
'tag': 'tag',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'since': 'query',
'until': 'query',
'follow': 'query',
'tail': 'query',
'tag': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_log_get
)
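# Log-streaming sketch: with follow=True the server keeps the connection
# open, so pass _preload_content=False to receive the raw urllib3
# HTTPResponse and read it incrementally instead of buffering the whole body
# (IDs illustrative):
#
#     raw = api.insight_project_journal_log_get(
#         "my-project-id", "pl-waw-1", "my-journal-id",
#         follow=True, tail=50.0,
#         _preload_content=False,
#     )
#     for chunk in raw.stream(1024):
#         print(chunk.decode("utf-8", errors="replace"), end="")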
def __insight_project_journal_service_get(
self,
project_id,
location_id,
journal_id,
service_id,
**kwargs
):
"""Get insight/journal.service # noqa: E501
Get insight/journal.service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_service_get(project_id, location_id, journal_id, service_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
service_id (str): serviceId
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ResourceService
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['service_id'] = \
service_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_service_get = _Endpoint(
settings={
'response_type': (ResourceService,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/service/{serviceId}',
'operation_id': 'insight_project_journal_service_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'service_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'service_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'service_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'service_id': 'serviceId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'service_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_service_get
)
def __insight_project_journal_service_list(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""List insight/journal.service # noqa: E501
List insight/journal.service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_service_list(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[ResourceService]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_service_list = _Endpoint(
settings={
'response_type': ([ResourceService],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/service',
'operation_id': 'insight_project_journal_service_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_service_list
)
def __insight_project_journal_tag_create(
self,
project_id,
location_id,
journal_id,
tag,
**kwargs
):
"""Create insight/journal.tag # noqa: E501
Create insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_create(project_id, location_id, journal_id, tag, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
tag (Tag):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Tag
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['tag'] = \
tag
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_create = _Endpoint(
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag',
'operation_id': 'insight_project_journal_tag_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'tag',
],
'required': [
'project_id',
'location_id',
'journal_id',
'tag',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'tag':
(Tag,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'tag': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_tag_create
)
def __insight_project_journal_tag_delete(
self,
project_id,
location_id,
journal_id,
tag_id,
**kwargs
):
"""Delete insight/journal.tag # noqa: E501
Delete insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_delete(project_id, location_id, journal_id, tag_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
tag_id (str): tagId
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['tag_id'] = \
tag_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag/{tagId}',
'operation_id': 'insight_project_journal_tag_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'tag_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_tag_delete
)
def __insight_project_journal_tag_get(
self,
project_id,
location_id,
journal_id,
tag_id,
**kwargs
):
"""Get insight/journal.tag # noqa: E501
Get insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_get(project_id, location_id, journal_id, tag_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
tag_id (str): tagId
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Tag
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['tag_id'] = \
tag_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_get = _Endpoint(
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag/{tagId}',
'operation_id': 'insight_project_journal_tag_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'tag_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_tag_get
)
def __insight_project_journal_tag_list(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""List insight/journal.tag # noqa: E501
List insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_list(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Tag]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_list = _Endpoint(
settings={
'response_type': ([Tag],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag',
'operation_id': 'insight_project_journal_tag_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_tag_list
)
def __insight_project_journal_tag_put(
self,
project_id,
location_id,
journal_id,
tag_array,
**kwargs
):
"""Replace insight/journal.tag # noqa: E501
Replace insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_put(project_id, location_id, journal_id, tag_array, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
tag_array (TagArray):
Keyword Args:
_return_http_data_only (bool): return the response data only,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Tag]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['tag_array'] = \
tag_array
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_put = _Endpoint(
settings={
'response_type': ([Tag],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag',
'operation_id': 'insight_project_journal_tag_put',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'tag_array',
],
'required': [
'project_id',
'location_id',
'journal_id',
'tag_array',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'tag_array':
(TagArray,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'tag_array': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_tag_put
)
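# Sketch of the body-carrying PUT above; the TagArray constructor arguments
# are illustrative assumptions, not taken from this module:
#
#     body = TagArray(...)  # hypothetical payload
#     tags = api.insight_project_journal_tag_put(project_id, location_id, journal_id, body)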
def __insight_project_journal_transfer(
self,
project_id,
location_id,
journal_id,
insight_project_journal_transfer,
**kwargs
):
"""Transfer insight/journal # noqa: E501
action transfer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_transfer(project_id, location_id, journal_id, insight_project_journal_transfer, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
insight_project_journal_transfer (InsightProjectJournalTransfer):
Keyword Args:
x_idempotency_key (str): Idempotency key. [optional]
x_dry_run (str): Dry run. [optional]
_return_http_data_only (bool): return the response data only,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['insight_project_journal_transfer'] = \
insight_project_journal_transfer
return self.call_with_http_info(**kwargs)
self.insight_project_journal_transfer = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/actions/transfer',
'operation_id': 'insight_project_journal_transfer',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'insight_project_journal_transfer',
'x_idempotency_key',
'x_dry_run',
],
'required': [
'project_id',
'location_id',
'journal_id',
'insight_project_journal_transfer',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'insight_project_journal_transfer':
(InsightProjectJournalTransfer,),
'x_idempotency_key':
(str,),
'x_dry_run':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'x_idempotency_key': 'x-idempotency-key',
'x_dry_run': 'x-dry-run',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'insight_project_journal_transfer': 'body',
'x_idempotency_key': 'header',
'x_dry_run': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_transfer
)
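# The transfer action also accepts optional request headers; a hedged sketch
# (the header semantics noted below are assumptions based on their names):
#
#     journal = api.insight_project_journal_transfer(
#         project_id, location_id, journal_id, transfer_body,
#         x_idempotency_key="unique-key",  # assumed: dedupes retried POSTs
#         x_dry_run="true",                # assumed: validate without executing
#     )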
def __insight_project_journal_update(
self,
project_id,
location_id,
journal_id,
insight_project_journal_update,
**kwargs
):
"""Update insight/journal # noqa: E501
Returns modified journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_update(project_id, location_id, journal_id, insight_project_journal_update, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
insight_project_journal_update (InsightProjectJournalUpdate):
Keyword Args:
_return_http_data_only (bool): return the response data only,
without the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['insight_project_journal_update'] = \
insight_project_journal_update
return self.call_with_http_info(**kwargs)
self.insight_project_journal_update = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}',
'operation_id': 'insight_project_journal_update',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'insight_project_journal_update',
],
'required': [
'project_id',
'location_id',
'journal_id',
'insight_project_journal_update',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'insight_project_journal_update':
(InsightProjectJournalUpdate,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'insight_project_journal_update': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_update
)
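# PATCH sketch for the update endpoint above; the field set on
# InsightProjectJournalUpdate is a hypothetical example, not taken from its model:
#
#     patch = InsightProjectJournalUpdate(name="new-name")  # assumed field
#     journal = api.insight_project_journal_update(project_id, location_id, journal_id, patch)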
| 37.362751
| 179
| 0.451342
| 9,773
| 118,963
| 5.196153
| 0.02333
| 0.04094
| 0.059962
| 0.039286
| 0.953409
| 0.932122
| 0.909043
| 0.907448
| 0.890315
| 0.889035
| 0
| 0.00297
| 0.465002
| 118,963
| 3,183
| 180
| 37.374489
| 0.794925
| 0.287619
| 0
| 0.732151
| 1
| 0
| 0.239957
| 0.055432
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009756
| false
| 0
| 0.006652
| 0
| 0.026164
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3175aae311124c706359c0501c4ffc334907f123
| 24,640
|
py
|
Python
|
sarnet_td3/common/gpu_multithread.py
|
JingdiC/SARNet
|
05d668c2d1c0d3f8009ecb98ab33cd5a496cd4ea
|
[
"MIT"
] | 16
|
2020-11-04T10:12:09.000Z
|
2022-03-26T13:25:16.000Z
|
sarnet_td3/common/gpu_multithread.py
|
JingdiC/SARNet
|
05d668c2d1c0d3f8009ecb98ab33cd5a496cd4ea
|
[
"MIT"
] | 5
|
2020-11-18T13:07:11.000Z
|
2022-03-06T08:40:01.000Z
|
sarnet_td3/common/gpu_multithread.py
|
JingdiC/SARNet
|
05d668c2d1c0d3f8009ecb98ab33cd5a496cd4ea
|
[
"MIT"
] | 5
|
2020-11-26T09:17:23.000Z
|
2022-03-06T08:40:53.000Z
|
import threading, queue, time, os, pickle
# from queue import Queue
import numpy as np
import tensorflow as tf
import sarnet_td3.common.tf_util as U
from tensorflow.python.keras.backend import set_session
lock = threading.Lock()
class MultiTrainTD3(threading.Thread):
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_end_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]] for i in range(self.num_agents)] for _ in range(self.num_env)]
# self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_end_rewards = []
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
self.save_n_ep = self.num_env * 10
self.print_step = -int(self.save_n_ep / self.num_env)
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
action, p_index, data = self.input_queue.get()
if action == "None":  # the sentinel string "None" tells the thread to exit
return
elif action == "get_action":
out = self.get_action(data, p_index)
self.output_queue.put(out)
elif action == "get_qdebug":
out = self.get_qdebug(data, p_index)
self.output_queue.put(out)
elif action == "get_loss":
out = self.get_loss(data, p_index)
self.output_queue.put(out)
elif action == "write_tboard":
self.write_tboard(data)
elif action == "add_to_buffer":
self.buffer_op.collect_exp(data)
elif action == "save_rew_info":
self.save_rew_info(data)
elif action == "save_benchmark":
out = self.save_benchmark(data)
self.output_queue.put(out)
elif action == "reset_rew_info":
self.reset_rew_info()
elif action == "save_model_rew":
if not (self.args.benchmark or self.args.display):
self.save_model(data)
self.plot_rewards(data)
except queue.Empty:
continue
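# Queue protocol (inferred from the dispatch above): every item placed on
# input_queue is a 3-tuple (action, p_index, data). `action` is one of the
# string commands handled in run(), `p_index` selects the trainer for
# per-agent commands, and `data` carries the command payload. Commands that
# produce a result put it on output_queue in FIFO order.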
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t[p_index], is_train)
# print(np.shape(obs_n_t))
act_j_t, state_j_t1, mem_j_t1, attn_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model != "DDPG":  # any recurrent encoder returns a (c, h) state tuple
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t
def get_qdebug(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
obs_n_t, action_n_t, q1_h_n_t, q2_h_n_t = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
q1_j_input = agent.prep_q_input(obs_n_t, action_n_t, q1_h_n_t[p_index])
_, q1_h_j_t1 = agent.q1_debug['q_values'](*(q1_j_input))
if self.args.td3:
q2_input = agent.prep_q_input(obs_n_t, action_n_t, q2_h_n_t[p_index])
_, q2_h_j_t1 = agent.q2_debug['q_values'](*(q2_input))
else:
q2_h_j_t1 = []
return q1_h_j_t1, q2_h_j_t1
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
train_step = data
loss = agent.update(self.trainers, self.buffer_op, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, ep_step = data
# rew_n (num_env, num_agents)
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
if ep_step >= self.args.max_episode_len - 10: # Compute only last 10 episode step rewards
self.ep_end_rewards[j][-1] += rew
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
self.agent_info[j][i][-1].append(info)
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
self.ep_end_rewards[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
for i in range(self.num_agents):
self.agent_info[j][i].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
# pickle_info = [self.agent_info[j] for j in range(self.num_env)]
with open(file_name, 'wb') as fp:
# Dump files as [num_env, [# agents, [#ep, [#stps, [dim]]]]
pickle.dump(self.agent_info, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
if num_episodes % (self.save_n_ep) == 0:
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
# episode_rewards, agent_rewards, final_ep_rewards, final_ep_ag_rewards = rewards
if self.args.env_type == "mpe":
# print statement depends on whether or not there are adversaries
if self.num_adversaries == 0:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards) / 10.
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)))
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)) + "\n")
else:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards)
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
# Keep track of final episode reward
self.final_ep_rewards.append(episode_b_rewards)
self.final_ep_end_rewards.append(ep_end_b_rewards)
for rew in ep_ag_b_rewards:
self.final_ep_ag_rewards.append(rew)
self.time_prev = time.time()
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
rew_ep_end_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards_ep_end.pkl'
with open(rew_ep_end_file_name, 'wb') as fp:
pickle.dump(self.final_ep_end_rewards, fp)
agrew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_agrewards.pkl'
with open(agrew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_ag_rewards, fp)
"""
REINFORCE Threads
"""
class MultiTrainVPG(threading.Thread):
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
if self.args.env_type == "mpe":
self.print_step = -int(self.save_rate / self.num_env)
else: # print for episode end only (success rate)
self.print_step = -int(self.save_rate / (self.num_env * self.args.max_episode_len))
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
action, p_index, data = self.input_queue.get()
if action == "None":  # the sentinel string "None" tells the thread to exit
return
elif action == "get_action":
out = self.get_action(data, p_index)
self.output_queue.put(out)
elif action == "get_loss":
out = self.get_loss(data, p_index)
self.output_queue.put(out)
elif action == "write_tboard":
self.write_tboard(data)
elif action == "add_to_buffer":
self.buffer_op.collect_exp(data)
elif action == "add_to_buffer_reinforce":
self.buffer_op.collect_exp(data)
elif action == "save_rew_info":
self.save_rew_info(data)
elif action == "save_benchmark":
out = self.save_benchmark(data)
self.output_queue.put(out)
elif action == "reset_rew_info":
self.reset_rew_info()
elif action == "save_model_rew":
if not (self.args.benchmark or self.args.display):
self.save_model(data)
self.plot_rewards(data)
except queue.Empty:
continue
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2)
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, is_train)
act_j_t, act_soft_j_t, state_j_t1, mem_j_t1, attn_j_t, value_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model == "LSTM":
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, act_soft_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t, value_j_t
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
train_step, buffer_data = data
agent = self.trainers[p_index]
loss = agent.update(self.trainers, buffer_data, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, terminal = data
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
# per-env/episode indexing assumed here, mirroring MultiTrainTD3.save_rew_info
self.agent_info[j][-1].append(info)
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
self.agent_info[j].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
with open(file_name, 'wb') as fp:
pickle.dump(self.ep_success, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
episode_b_success = []
for j in range(self.num_env):
episode_b_success.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_success = np.mean(np.array(episode_b_success)) / self.args.max_episode_len
print("steps: {}, episodes: {}, mean episode success: {}, time: {}".format(
train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode success: {}, time: {}".format(
train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n")
self.final_ep_rewards.append(episode_b_success)
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
def get_gputhreads(trainers, args, buffer_op, num_env, num_agents, num_adv):
threads = []
sess = tf.compat.v1.get_default_session()
for t in range(args.num_gpu_threads):
input_q = queue.Queue()
output_q = queue.Queue()
if args.policy_grad == "maddpg":
threads.append(MultiTrainTD3(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv)))
elif args.policy_grad == "reinforce":
threads.append(
MultiTrainVPG(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv)))
threads[t].start()
time.sleep(1)
return threads
def close_gputhreads(threads):
for t in threads:
t.input_queue.put(("None", None, None))
for t in threads:
t.join()
print('GPU trainers cancelled')
return
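# A minimal driver sketch for the thread pool above; trainers/args/buffer_op
# and the observation tensors are application-specific and assumed here:
#
#     threads = get_gputhreads(trainers, args, buffer_op, num_env, num_agents, num_adv)
#     worker = threads[0]
#     worker.input_queue.put(("get_action", 0, (obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t, True)))
#     act, h, c, mem, attn = worker.output_queue.get()
#     close_gputhreads(threads)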
| 48.888889
| 159
| 0.544562
| 3,293
| 24,640
| 3.781051
| 0.077133
| 0.030921
| 0.033732
| 0.040479
| 0.890531
| 0.863384
| 0.850534
| 0.833829
| 0.830536
| 0.825556
| 0
| 0.009107
| 0.344927
| 24,640
| 503
| 160
| 48.986084
| 0.762282
| 0.060065
| 0
| 0.768496
| 0
| 0.004773
| 0.052404
| 0.000996
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054893
| false
| 0
| 0.011933
| 0
| 0.097852
| 0.047733
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
31bdffc8c81e843699509af2486f317c1a1c36b7
| 35,087
|
py
|
Python
|
gips/gistmodel/post_processing.py
|
accsc/gips
|
6b20b2b0fa76ee24b04237b1edd5c8a26738d460
|
[
"MIT"
] | 1
|
2021-04-24T10:29:39.000Z
|
2021-04-24T10:29:39.000Z
|
gips/gistmodel/post_processing.py
|
accsc/gips
|
6b20b2b0fa76ee24b04237b1edd5c8a26738d460
|
[
"MIT"
] | null | null | null |
gips/gistmodel/post_processing.py
|
accsc/gips
|
6b20b2b0fa76ee24b04237b1edd5c8a26738d460
|
[
"MIT"
] | 2
|
2021-02-16T14:18:59.000Z
|
2021-06-04T05:09:22.000Z
|
import numpy as np
import copy
from gips import FLOAT
from gips import DOUBLE
class post_processing(object):
def __init__(self, fitter, x, pairs=False, prefix=None):
self.fitter = fitter
self.x = x
self.pairs = pairs
self.case = 0
score_dict = { 4 : self.parms4,
5 : self.parms5,
6 : self.parms6
}
mode_dict = { 0 : self.mode0,
1 : self.mode1,
3 : self.mode3,
4 : self.mode4,
5 : self.mode5,
6 : self.mode6,
7 : self.mode7
}
self.score = score_dict[self.fitter.parms]
self.process = mode_dict[self.fitter.mode]
self.prefix = prefix
if self.prefix is None or self.prefix == "":
self.prefix = ""
else:
self.prefix = "%s" % self.prefix
self.set_x(self.x)
self.set_case(0)
self.process_rec = False
self.process_cplx = False
self.process_lig = False
def set_x(self, x):
self.x = copy.copy(x)
### Apply the solution to the scoring function
self.fitter.gist_functional(self.x)
self.fitter._f_process(self.x)
def set_case(self, case):
self.case = case
self.name = self.fitter.name[case]
### |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|
### |OVERVIEW OF THE DATA STRUCTURE IN THE FITTER OBJECT|
### |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|
###
### Experimental data stored with gdat_fit_lib
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### self.dg = np.zeros(self.N_case, dtype=DOUBLE)
### self.dh = np.zeros(self.N_case, dtype=DOUBLE)
### self.ds = np.zeros(self.N_case, dtype=DOUBLE)
###
###
### GIST data generated with gdat_fit_lib (receptor)
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### self.E = np.zeros((self.N_rec, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.S = np.zeros((self.N_rec, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.g = np.zeros((self.N_rec, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.w = np.zeros(self.N_pos, dtype=DOUBLE)
### self.vol = np.zeros((self.N_pos, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### Which pose belongs to which receptor/gistdata
### self.ind_rec = np.zeros(self.N_pos, dtype=np.int32)
### Which pose belongs to which case
### self.ind_case = np.zeros(self.N_pos, dtype=np.int32)
###
###
### GIST data generated with gdat_fit_lib (complex)
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### self.E_cplx = np.zeros((self.N_cplx, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.S_cplx = np.zeros((self.N_cplx, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.g_cplx = np.zeros((self.N_cplx, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.w_cplx = np.zeros(self.N_cplx, dtype=DOUBLE)
### self.vol_cplx = np.zeros((self.N_cplx, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.ind_rec_cplx = np.arange(self.N_cplx, dtype=np.int32)
### self.ind_case_cplx = np.zeros(self.N_cplx, dtype=np.int32)
###
###
### GIST data generated with gdat_fit_lib (ligand)
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### self.E_lig = np.zeros((self.N_lig, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.S_lig = np.zeros((self.N_lig, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.g_lig = np.zeros((self.N_lig, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.w_lig = np.zeros(self.N_lig, dtype=DOUBLE)
### self.vol_lig = np.zeros((self.N_lig, self.maxdim[0], self.maxdim[1], self.maxdim[2]), dtype=DOUBLE)
### self.ind_rec_lig = np.arange(self.N_lig, dtype=np.int32)
### self.ind_case_lig = np.zeros(self.N_lig, dtype=np.int32)
###
def mode0(self, callback=None):
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
self.x)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
def mode1(self, callback=None):
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
self.x)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : self.x[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
def mode2(self, callback=None):
pass
def mode3(self, callback=None):
if not self.pairs:
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
self.x)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
### The complex
### ~~~~~~~~~~~
if self.process_cplx:
valid_poses_cplx = np.where(self.fitter.ind_case_cplx==self.case)[0]
valid_recep_cplx = self.fitter.ind_rec_cplx[valid_poses_cplx]
i=0
for pose, recep in zip(valid_poses_cplx, valid_recep_cplx):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_cplx[recep],
self.fitter.S_cplx[recep],
self.fitter.g_cplx[recep],
self.fitter.vol_cplx[pose],
self.x)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_cplx[recep],
self.fitter.pdat_cplx[pose],
prefix="%s.%d.%s" %(self.name, i, "cplx"),
**kwargs)
i += 1
### The ligand
### ~~~~~~~~~~
if self.process_lig:
valid_poses_lig = np.where(self.fitter.ind_case_lig==self.case)[0]
valid_recep_lig = self.fitter.ind_rec_lig[valid_poses_lig]
i=0
for pose, recep in zip(valid_poses_lig, valid_recep_lig):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_lig[recep],
self.fitter.S_lig[recep],
self.fitter.g_lig[recep],
self.fitter.vol_lig[pose],
self.x)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_lig[recep],
self.fitter.pdat_lig[pose],
prefix="%s.%d.%s" %(self.name, i, "lig"),
**kwargs)
i += 1
def mode4(self, callback=None):
if not self.pairs:
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
self.x)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : self.x[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
### The complex
### ~~~~~~~~~~~
if self.process_cplx:
valid_poses_cplx = np.where(self.fitter.ind_case_cplx==self.case)[0]
valid_recep_cplx = self.fitter.ind_rec_cplx[valid_poses_cplx]
i=0
for pose, recep in zip(valid_poses_cplx, valid_recep_cplx):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_cplx[recep],
self.fitter.S_cplx[recep],
self.fitter.g_cplx[recep],
self.fitter.vol_cplx[pose],
self.x)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : self.x[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_cplx[recep],
self.fitter.pdat_cplx[pose],
prefix="%s.%d.%s" %(self.name, i, "cplx"),
**kwargs)
i += 1
### The ligand
### ~~~~~~~~~~
if self.process_lig:
valid_poses_lig = np.where(self.fitter.ind_case_lig==self.case)[0]
valid_recep_lig = self.fitter.ind_rec_lig[valid_poses_lig]
i=0
for pose, recep in zip(valid_poses_lig, valid_recep_lig):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_lig[recep],
self.fitter.S_lig[recep],
self.fitter.g_lig[recep],
self.fitter.vol_lig[pose],
self.x)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : self.x[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_lig[recep],
self.fitter.pdat_lig[pose],
prefix="%s.%d.%s" %(self.name, i, "lig"),
**kwargs)
i += 1
def mode5(self, callback=None):
if not self.pairs:
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
_xr = np.zeros(self.fitter.parms, dtype=DOUBLE)
_xr[:-2] = self.x[:-4]
_xr[-2] = self.x[-4]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
_xr)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
### The complex
### ~~~~~~~~~~~
if self.process_cplx:
valid_poses_cplx = np.where(self.fitter.ind_case_cplx==self.case)[0]
valid_recep_cplx = self.fitter.ind_rec_cplx[valid_poses_cplx]
_xc = np.zeros(self.fitter.parms, dtype=DOUBLE)
_xc[:-2] = self.x[:-4]
_xc[-2] = self.x[-3]
i=0
for pose, recep in zip(valid_poses_cplx, valid_recep_cplx):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_cplx[recep],
self.fitter.S_cplx[recep],
self.fitter.g_cplx[recep],
self.fitter.vol_cplx[pose],
_xc)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_cplx[recep],
self.fitter.pdat_cplx[pose],
prefix="%s.%d.%s" %(self.name, i, "cplx"),
**kwargs)
i += 1
### The ligand
### ~~~~~~~~~~
if self.process_lig:
_xl = np.zeros(self.fitter.parms, dtype=DOUBLE)
_xl[:-2] = self.x[:-4]
_xl[-2] = self.x[-2]
valid_poses_lig = np.where(self.fitter.ind_case_lig==self.case)[0]
valid_recep_lig = self.fitter.ind_rec_lig[valid_poses_lig]
i=0
for pose, recep in zip(valid_poses_lig, valid_recep_lig):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_lig[recep],
self.fitter.S_lig[recep],
self.fitter.g_lig[recep],
self.fitter.vol_lig[pose],
_xl)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : self.x[-1]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_lig[recep],
self.fitter.pdat_lig[pose],
prefix="%s.%d.%s" %(self.name, i, "lig"),
**kwargs)
i += 1
def mode6(self, callback=None):
if not self.pairs:
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
_xr = np.zeros(self.fitter.parms+1, dtype=DOUBLE)
_xr[:-3] = self.x[:-5]
_xr[-3] = self.x[-5]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
_xr)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : _xr[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
### The complex
### ~~~~~~~~~~~
if self.process_cplx:
valid_poses_cplx = np.where(self.fitter.ind_case_cplx==self.case)[0]
valid_recep_cplx = self.fitter.ind_rec_cplx[valid_poses_cplx]
_xc = np.zeros(self.fitter.parms+1, dtype=DOUBLE)
_xc[:-3] = self.x[:-5]
_xc[-3] = self.x[-4]
i=0
for pose, recep in zip(valid_poses_cplx, valid_recep_cplx):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_cplx[recep],
self.fitter.S_cplx[recep],
self.fitter.g_cplx[recep],
self.fitter.vol_cplx[pose],
_xc)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : _xc[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_cplx[recep],
self.fitter.pdat_cplx[pose],
prefix="%s.%d.%s" %(self.name, i, "cplx"),
**kwargs)
i += 1
### The ligand
### ~~~~~~~~~~
if self.process_lig:
valid_poses_lig = np.where(self.fitter.ind_case_lig==self.case)[0]
valid_recep_lig = self.fitter.ind_rec_lig[valid_poses_lig]
_xl = np.zeros(self.fitter.parms+1, dtype=DOUBLE)
_xl[:-3] = self.x[:-5]
_xl[-3] = self.x[-3]
i=0
for pose, recep in zip(valid_poses_lig, valid_recep_lig):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_lig[recep],
self.fitter.S_lig[recep],
self.fitter.g_lig[recep],
self.fitter.vol_lig[pose],
_xl)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : _xl[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_lig[recep],
self.fitter.pdat_lig[pose],
prefix="%s.%d.%s" %(self.name, i, "lig"),
**kwargs)
i += 1
def mode7(self, callback=None):
if self.process_rec and not self.pairs:
_xr = np.zeros(self.fitter.parms+1, dtype=DOUBLE)
if self.process_cplx:
_xc = np.zeros(self.fitter.parms+1, dtype=DOUBLE)
if self.process_lig:
_xl = np.zeros(self.fitter.parms+1, dtype=DOUBLE)
###
### For parms=4:
###
### with pairs:
### -----------
### x[0] = e_co (Cplx)
### x[1] = e_co (Lig)
### x[2] = s_co (Cplx)
### x[3] = s_co (Lig)
### x[4] = g_co (Cplx)
### x[5] = g_co (Lig)
### x[6] = C_E
### x[7] = C_S
###
### without pairs:
### --------------
### x[0] = e_co (Rec)
### x[1] = e_co (Cplx)
### x[2] = e_co (Lig)
### x[3] = s_co (Rec)
### x[4] = s_co (Cplx)
### x[5] = s_co (Lig)
### x[6] = g_co (Rec)
### x[7] = g_co (Cplx)
### x[8] = g_co (Lig)
### x[9] = C_E
### x[10] = C_S
if self.fitter.parms==4:
if self.pairs:
if self.process_cplx:
_xc[:-2] = self.x[[0,2,4]]
if self.process_lig:
_xl[:-2] = self.x[[1,3,5]]
else:
if self.process_rec:
_xr[:-2] = self.x[[0,3,6]]
if self.process_cplx:
_xc[:-2] = self.x[[1,4,7]]
if self.process_lig:
_xl[:-2] = self.x[[2,5,8]]
###
### For parms=5:
###
### with pairs:
### -----------
### x[0] = A
### x[1] = e_co (Cplx)
### x[2] = e_co (Lig)
### x[3] = s_co (Cplx)
### x[4] = s_co (Lig)
### x[5] = g_co (Cplx)
### x[6] = g_co (Lig)
### x[7] = C_E
### x[8] = C_S
###
### without pairs:
### --------------
### x[0] = A
### x[1] = e_co (Rec)
### x[2] = e_co (Cplx)
### x[3] = e_co (Lig)
### x[4] = s_co (Rec)
### x[5] = s_co (Cplx)
### x[6] = s_co (Lig)
### x[7] = g_co (Rec)
### x[8] = g_co (Cplx)
### x[9] = g_co (Lig)
### x[10] = C_E
### x[11] = C_S
elif self.fitter.parms==5:
if self.pairs:
if self.process_cplx:
_xc[:-2] = self.x[[0,1,3,5]]
if self.process_lig:
_xl[:-2] = self.x[[0,2,4,6]]
else:
if self.process_rec:
_xr[:-2] = self.x[[0,1,4,7]]
if self.process_cplx:
_xc[:-2] = self.x[[0,2,5,8]]
if self.process_lig:
_xl[:-2] = self.x[[0,3,6,9]]
###
### For parms=6:
###
### with pairs:
### -----------
### x[0] = E_aff
### x[1] = e_co (Cplx)
### x[2] = e_co (Lig)
### x[3] = S_aff
### x[4] = s_co (Cplx)
### x[5] = s_co (Lig)
### x[6] = g_co (Cplx)
### x[7] = g_co (Lig)
### x[8] = C_E
### x[9] = C_S
###
### without pairs:
### --------------
### x[0] = E_aff
### x[1] = e_co (Rec)
### x[2] = e_co (Cplx)
### x[3] = e_co (Lig)
### x[4] = S_aff
### x[5] = s_co (Rec)
### x[6] = s_co (Cplx)
### x[7] = s_co (Lig)
### x[8] = g_co (Rec)
### x[9] = g_co (Cplx)
### x[10] = g_co (Lig)
### x[11] = C_E
### x[12] = C_S
elif self.fitter.parms==6:
if self.pairs:
if self.process_cplx:
_xc[:-2] = self.x[[0,1,3,4,6]]
if self.process_lig:
_xl[:-2] = self.x[[0,2,3,5,7]]
else:
if self.process_rec:
_xr[:-2] = self.x[[0,1,4,5,8]]
if self.process_cplx:
_xc[:-2] = self.x[[0,2,4,6,9]]
if self.process_lig:
_xl[:-2] = self.x[[0,3,4,7,10]]
if not self.pairs:
### The receptor
### ~~~~~~~~~~~~
if self.process_rec:
valid_poses = np.where(self.fitter.ind_case==self.case)[0]
valid_recep = self.fitter.ind_rec[valid_poses]
i=0
for pose, recep in zip(valid_poses, valid_recep):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E[recep],
self.fitter.S[recep],
self.fitter.g[recep],
self.fitter.vol[pose],
_xr)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[0],
"C" : _xr[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat[recep],
self.fitter.pdat[pose],
prefix="%s.%d.%s" %(self.name, i, "rec"),
**kwargs)
i += 1
### The complex
### ~~~~~~~~~~~
if self.process_cplx:
valid_poses_cplx = np.where(self.fitter.ind_case_cplx==self.case)[0]
valid_recep_cplx = self.fitter.ind_rec_cplx[valid_poses_cplx]
i=0
for pose, recep in zip(valid_poses_cplx, valid_recep_cplx):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_cplx[recep],
self.fitter.S_cplx[recep],
self.fitter.g_cplx[recep],
self.fitter.vol_cplx[pose],
_xc)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : _xc[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_cplx[recep],
self.fitter.pdat_cplx[pose],
prefix="%s.%d.%s" %(self.name, i, "cplx"),
**kwargs)
i += 1
### The ligand
### ~~~~~~~~~~
if self.process_lig:
valid_poses_lig = np.where(self.fitter.ind_case_lig==self.case)[0]
valid_recep_lig = self.fitter.ind_rec_lig[valid_poses_lig]
i=0
for pose, recep in zip(valid_poses_lig, valid_recep_lig):
E_grid_val, S_grid_val, gv_grid_val = self.score(self.fitter.E_lig[recep],
self.fitter.S_lig[recep],
self.fitter.g_lig[recep],
self.fitter.vol_lig[pose],
_xl)
if callback is not None:
kwargs = { "pose" : pose,
"radius" : self.fitter.radiusadd[1],
"C" : _xl[-2:]
}
callback(E_grid_val,
S_grid_val,
gv_grid_val,
self.fitter.gdat_lig[recep],
self.fitter.pdat_lig[pose],
prefix="%s.%d.%s" %(self.name, i, "lig"),
**kwargs)
i += 1
def parms4(self, E_grid, S_grid, g_grid, vol_grid, x):
E = np.zeros_like(E_grid)
S = np.zeros_like(S_grid)
g = np.zeros_like(g_grid)
valids_E = np.where(E_grid>x[0])
valids_S = np.where(S_grid>x[1])
valids_g = np.where(g_grid>x[2])
E[valids_E] = np.copy(E_grid[valids_E])
S[valids_S] = np.copy(S_grid[valids_S])
g[valids_g] = np.copy(g_grid[valids_g])
E_grid_val = np.zeros_like(E)
S_grid_val = np.zeros_like(S)
gv_grid_val = np.zeros_like(g)
### This is probably wrong:
#E_grid_val[valids_g] = E[valids_g] * vol_grid[valids_g] / g[valids_g] * 0.0332
#S_grid_val[valids_g] = S[valids_g] * vol_grid[valids_g] / g[valids_g] * 0.0332 * -1.
### This is how it should be:
### Note: 0.125 is the volume of one voxel
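### 0.0332 is (approximately) the bulk-water number density in molecules/A^3,
### the usual GIST normalization constant (assumed from standard GIST usage)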
E_grid_val[valids_g] = E[valids_g] * vol_grid[valids_g] * g[valids_g] * 0.0332 * 0.125
S_grid_val[valids_g] = S[valids_g] * vol_grid[valids_g] * g[valids_g] * 0.0332 * 0.125
gv_grid_val[valids_g] = vol_grid[valids_g]*g[valids_g]
return E_grid_val, S_grid_val, gv_grid_val
def parms5(self, E_grid, S_grid, g_grid, vol_grid, x):
E = np.zeros_like(E_grid)
S = np.zeros_like(S_grid)
g = np.zeros_like(g_grid)
E[np.where(E_grid>x[1])] = 1.
S[np.where(S_grid>x[2])] = 1.
g[np.where(g_grid>x[3])] = 1.
E_grid_val = E*g*vol_grid*x[0]
S_grid_val = S*g*vol_grid*x[0]
gv_grid_val = vol_grid*g
return E_grid_val, S_grid_val, gv_grid_val
def parms6(self, E_grid, S_grid, g_grid, vol_grid, x):
E = np.zeros_like(E_grid)
S = np.zeros_like(S_grid)
g = np.zeros_like(g_grid)
E[np.where(E_grid>x[1])] = 1.
S[np.where(S_grid>x[3])] = 1.
g[np.where(g_grid>x[4])] = 1.
E_grid_val = E*g*vol_grid*x[0]
S_grid_val = S*g*vol_grid*x[2]
gv_grid_val = vol_grid*g
return E_grid_val, S_grid_val, gv_grid_val
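# A hedged usage sketch; the fitter object and the callback signature are
# assumed from how process()/score() invoke them above:
#
#     def dump_grids(E_grid_val, S_grid_val, gv_grid_val, gdat, pdat, prefix="", **kwargs):
#         np.save("%s.E.npy" % prefix, E_grid_val)  # illustrative only
#
#     pp = post_processing(fitter, x_solution, pairs=False, prefix="run1")
#     pp.process_rec = True
#     pp.set_case(0)
#     pp.process(callback=dump_grids)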
| 38.899113
| 109
| 0.378545
| 3,672
| 35,087
| 3.398965
| 0.04085
| 0.137809
| 0.090137
| 0.026681
| 0.872446
| 0.855941
| 0.840718
| 0.821409
| 0.805945
| 0.800817
| 0
| 0.022386
| 0.492006
| 35,087
| 902
| 110
| 38.899113
| 0.67785
| 0.126457
| 0
| 0.777778
| 0
| 0
| 0.012642
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025501
| false
| 0.001821
| 0.007286
| 0
| 0.040073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
31c78d6966a9d84a523a15b22e795f490c2201f9
| 44
|
py
|
Python
|
vertex-server/signals/__init__.py
|
aoswalt/greenlite-hardware
|
056ed78829519f49adab60dbcf67878243fe764e
|
[
"MIT"
] | null | null | null |
vertex-server/signals/__init__.py
|
aoswalt/greenlite-hardware
|
056ed78829519f49adab60dbcf67878243fe764e
|
[
"MIT"
] | 1
|
2016-11-01T23:55:07.000Z
|
2016-11-01T23:55:07.000Z
|
vertex-server/signals/__init__.py
|
aoswalt/greenlite-hardware
|
056ed78829519f49adab60dbcf67878243fe764e
|
[
"MIT"
] | null | null | null |
from . import lights
from . import schedule
| 14.666667
| 22
| 0.772727
| 6
| 44
| 5.666667
| 0.666667
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 44
| 2
| 23
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
31c871b146933705ca94093543636c2b4a72c392
| 22,970
|
py
|
Python
|
test_training_data.py
|
miermans/gym-2048
|
39f2cf375ef936284677a97b373aa2b97c8e45fc
|
[
"MIT"
] | null | null | null |
test_training_data.py
|
miermans/gym-2048
|
39f2cf375ef936284677a97b373aa2b97c8e45fc
|
[
"MIT"
] | 2
|
2021-05-26T20:24:09.000Z
|
2021-05-27T08:44:54.000Z
|
test_training_data.py
|
miermans/gym-2048
|
39f2cf375ef936284677a97b373aa2b97c8e45fc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import
import numpy as np
import os
import pytest
import tempfile
import training_data
class TestTrainingData():
def test_add(self):
td = training_data.training_data()
assert np.array_equal(td.get_x(), np.empty([0, 4, 4], dtype=int))
assert np.array_equal(td.get_y_digit(), np.empty([0, 1], dtype=int))
assert np.allclose(td.get_reward(), np.empty([0, 1], dtype=float))
assert np.array_equal(td.get_next_x(), np.empty([0, 4, 4], dtype=int))
assert np.array_equal(td.get_done(), np.empty([0, 1], dtype=bool))
td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4]), True)
assert np.array_equal(td.get_x(), np.ones([1, 4, 4], dtype=int))
assert np.array_equal(td.get_y_digit(), np.array([[1]], dtype=int))
assert np.allclose(td.get_reward(), np.array([[4]], dtype=float))
assert np.array_equal(td.get_next_x(), np.zeros([1, 4, 4], dtype=int))
assert np.array_equal(td.get_done(), np.array([[1]], dtype=bool))
def test_get_x_stacked(self):
td = training_data.training_data()
td.add(np.full([4, 4], 2), 0, 4, np.zeros([4, 4]))
td.add(np.full([4, 4], 8), 1, 8, np.ones([4, 4]))
td.add(np.full([4, 4], 2048), 1, 8, np.ones([4, 4]))
expected_x_stacked = np.array([
[
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
],
[
[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
],
[
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]
]
        ], dtype=int)
assert np.array_equal(td.get_x_stacked(), expected_x_stacked)
def test_get_y_one_hot(self):
td = training_data.training_data()
td.add(np.ones([4, 4]), 0, 4, np.zeros([4, 4]))
td.add(np.zeros([4, 4]), 1, 8, np.ones([4, 4]))
td.add(np.zeros([4, 4]), 3, 8, np.ones([4, 4]))
td.add(np.zeros([4, 4]), 2, 8, np.ones([4, 4]))
expected_y_one_hot = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]
        ], dtype=int)
assert np.array_equal(td.get_y_one_hot(), expected_y_one_hot)
def test_get_total_reward(self):
td = training_data.training_data()
td.add(np.ones([4, 4]), 0, 4, np.zeros([4, 4]))
td.add(np.zeros([4, 4]), 1, 8, np.ones([4, 4]))
td.add(np.zeros([4, 4]), 3, 16, np.ones([4, 4]))
td.add(np.zeros([4, 4]), 2, 32, np.ones([4, 4]))
assert td.get_total_reward() == 60
def test_get_highest_tile(self):
td = training_data.training_data()
td.add(np.full((4, 4), 1), 0, 4, np.full((4, 4), 2))
td.add(np.full((4, 4), 2), 0, 4, np.full((4, 4), 4))
assert td.get_highest_tile() == 4
def test_get_n(self):
td = training_data.training_data()
td.add(np.ones([4, 4]), 1, 4, np.zeros([4, 4]))
td.add(np.zeros([4, 4]), 2, 8, np.ones([4, 4]))
(state, action, reward, next_state, done) = td.get_n(1)
        assert np.array_equal(state, np.zeros([4, 4], dtype=int))
assert action == 2
assert reward == pytest.approx(8.)
        assert np.array_equal(next_state, np.ones([4, 4], dtype=int))
def test_hflip(self):
td = training_data.training_data()
board1 = np.array([[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
board2 = np.array([[0, 0, 0, 0],
[2, 4, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
td.add(board1, 1, 2, board2)
td.add(board2, 2, 0, board1)
td.hflip()
expected_x = np.array([
[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]]
        ], dtype=int)
expected_y_digit = np.array([
[3],
[2]
        ], dtype=int)
expected_reward = np.array([
[2],
[0],
        ], dtype=float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 4, 2], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
        ], dtype=int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.allclose(td.get_next_x(), expected_next_x)
def test_rotate(self):
td = training_data.training_data()
board1 = np.array([[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
board2 = np.array([[0, 0, 0, 0],
[2, 4, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
td.add(board1, 1, 2, board2)
td.add(board2, 2, 0, board1)
td.rotate(3)
expected_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]]
        ], dtype=int)
expected_y_digit = np.array([
[0],
[1]
        ], dtype=int)
expected_reward = np.array([
[2],
[0],
        ], dtype=float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 4, 0, 0], [0, 2, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]]
        ], dtype=int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.array_equal(td.get_next_x(), expected_next_x)
def test_augment(self):
td = training_data.training_data()
initial_board = np.array([[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
next_board = np.array([[0, 0, 0, 2],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
td.add(initial_board, 1, 4, next_board)
td.augment()
assert td.size() == 8
expected_x = np.array([
[[1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0]],
[[1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
        ], dtype=int)
expected_y_digit = np.array([
[1],
[3],
[2],
[0],
[3],
[1],
[0],
[2]
        ], dtype=int)
expected_reward = np.array([
[4],
[4],
[4],
[4],
[4],
[4],
[4],
[4]
        ], dtype=float)
expected_next_x = np.array([
[[0, 0, 0, 2], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], # Original
[[2, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0], [0, 0, 0, 0]], # Hflip'd
[[0, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0], [0, 0, 0, 2]], # Original, rotated 90 degrees
[[0, 0, 0, 2], [0, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]], # Hflip, rotated 90 degrees
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 2, 0], [2, 0, 0, 0]], # Original, rotated 180 degrees
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 2]], # Hflip, rotated 180 degrees
            [[2, 0, 0, 0], [0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0]], # Original, rotated 270 degrees
[[0, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [2, 0, 0, 0]] # Hflip, rotated 270 degrees
        ], dtype=int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.array_equal(td.get_next_x(), expected_next_x)
def test_merge(self):
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 1, 16, np.zeros([1, 4, 4]))
td2 = training_data.training_data()
td2.add(np.zeros([1, 4, 4]), 2, 0, np.ones([1, 4, 4]))
td.merge(td2)
expected_x = np.array([
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
        ], dtype=int)
expected_y_digit = np.array([
[1],
[2]
        ], dtype=int)
expected_reward = np.array([
[16],
[0]
        ], dtype=float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
        ], dtype=int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.array_equal(td.get_next_x(), expected_next_x)
def test_split(self):
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 1, 16, np.zeros([1, 4, 4]))
td2 = training_data.training_data()
td2.add(np.zeros([1, 4, 4]), 2, 0, np.ones([1, 4, 4]))
td.merge(td2)
a, b = td.split()
assert np.array_equal(a.get_x(), np.ones([1, 4, 4]))
assert np.array_equal(a.get_y_digit(), [[1]])
assert np.array_equal(a.get_reward(), [[16]])
assert np.array_equal(a.get_next_x(), np.zeros([1, 4, 4]))
assert np.array_equal(b.get_x(), np.zeros([1, 4, 4]))
assert np.array_equal(b.get_y_digit(), [[2]])
assert np.array_equal(b.get_reward(), [[0]])
assert np.array_equal(b.get_next_x(), np.ones([1, 4, 4]))
def test_sample(self):
td = training_data.training_data()
td.add(np.zeros([1, 4, 4]), 0, 0, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 1, 1, np.ones([1, 4, 4]))
sample = td.sample([1])
assert sample.size() == 1
assert sample.get_y_digit() in [[[0]], [[1]]]
if sample.get_y_digit() == 0:
assert np.array_equal(sample.get_x(), np.zeros([1, 4, 4]))
if sample.get_y_digit() == 1:
assert np.array_equal(sample.get_x(), np.ones([1, 4, 4]))
def test_size(self):
td = training_data.training_data()
assert td.size() == 0
td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]))
assert td.size() == 1
def test_log2_rewards(self):
# Set up training data
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 0, 0, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 1, 2, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 3, 16, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 0, 75, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 1, 2048, np.zeros([1, 4, 4]))
td.log2_rewards()
expected_reward = np.array([
[0], [1], [2], [4], [6.2288], [11]
        ], dtype=float)
assert np.allclose(td.get_reward(), expected_reward)
expected_action = np.array([
[0], [1], [2], [3], [0], [1]
        ], dtype=int)
assert np.allclose(td.get_y_digit(), expected_action)
def test_get_discounted_return(self):
# Set up training data
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 1, 2, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 3, 2, np.zeros([1, 4, 4]))
# Test using default gamma value of 0.9
td2 = td.copy()
discounted_return = td2.get_discounted_return()
expected_return = np.array([
[20.218], [18.02], [17.8], [2.0]
        ], dtype=float)
assert np.allclose(discounted_return, expected_return)
# Test using gamma value of 0, should have no effect on rewards
td2 = td.copy()
discounted_return = td2.get_discounted_return(gamma=0.0)
expected_return = np.array([
[4], [2], [16], [2]
        ], dtype=float)
assert np.allclose(discounted_return, expected_return)
# Test end of episode
td3 = training_data.training_data()
td3.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]), False)
td3.add(np.ones([1, 4, 4]), 1, 2, np.zeros([1, 4, 4]), True)
td3.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]), False)
td3.add(np.ones([1, 4, 4]), 3, 2, np.zeros([1, 4, 4]), True)
discounted_return = td3.get_discounted_return()
expected_return = np.array([
[5.8], [2.0], [17.8], [2.0]
        ], dtype=float)
assert np.allclose(discounted_return, expected_return)
def test_normalize_rewards(self):
# Test calculating mean and standard deviation
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 3, 8, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 0, 16, np.zeros([1, 4, 4]))
td.normalize_rewards()
expected_reward = np.array([
[-0.8165], [-0.8165], [0.], [1.633],
        ], dtype=float)
assert np.allclose(td.get_reward(), expected_reward)
# Test specifying mean and standard deviation
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 4, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 3, 8, np.zeros([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 0, 16, np.zeros([1, 4, 4]))
td.normalize_rewards(mean=8, sd=1)
expected_reward = np.array([
[-4.], [-4.], [0.], [8.],
        ], dtype=float)
assert np.allclose(td.get_reward(), expected_reward)
def test_normalize_boards(self):
# Test calculating mean and standard deviation
td = training_data.training_data()
td.add(np.full((1, 4, 4), 4), 1, 4, np.full((1, 4, 4), 8))
td.add(np.full((1, 4, 4), 8), 2, 4, np.full((1, 4, 4), 16))
td.add(np.full((1, 4, 4), 16), 3, 4, np.full((1, 4, 4), 32))
td.add(np.full((1, 4, 4), 32), 4, 4, np.full((1, 4, 4), 64))
td.normalize_boards()
mean = 15.
sd = 10.7238052947636
a = (4. - mean) / sd
b = (8. - mean) / sd
c = (16. - mean) / sd
d = (32. - mean) / sd
e = (64. - mean) / sd
expected_x = np.array([
[[a, a, a, a], [a, a, a, a], [a, a, a, a], [a, a, a, a]],
[[b, b, b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]],
[[c, c, c, c], [c, c, c, c], [c, c, c, c], [c, c, c, c]],
[[d, d, d, d], [d, d, d, d], [d, d, d, d], [d, d, d, d]]
        ], dtype=float)
assert np.allclose(td.get_x(), expected_x)
expected_next_x = np.array([
[[b, b, b, b], [b, b, b, b], [b, b, b, b], [b, b, b, b]],
[[c, c, c, c], [c, c, c, c], [c, c, c, c], [c, c, c, c]],
[[d, d, d, d], [d, d, d, d], [d, d, d, d], [d, d, d, d]],
[[e, e, e, e], [e, e, e, e], [e, e, e, e], [e, e, e, e]]
        ], dtype=float)
assert np.allclose(td.get_next_x(), expected_next_x)
def test_save_restore(self):
# Set up training data
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]))
td.add(np.zeros([1, 4, 4]), 1, 2, np.ones([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]))
td.add(np.zeros([1, 4, 4]), 3, 2, np.ones([1, 4, 4]))
temp_dir = tempfile.mkdtemp()
temp_filename = os.path.join(temp_dir, 'data.csv')
td.export_csv(temp_filename)
td2 = training_data.training_data()
td2.import_csv(temp_filename)
expected_x = np.array([
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
        ], dtype=int)
expected_y_digit = np.array([
[0],
[1],
[2],
[3]
        ], dtype=int)
expected_reward = np.array([
[4],
[2],
[16],
[2]
        ], dtype=float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
        ], dtype=int)
assert np.array_equal(td2.get_x(), expected_x)
assert np.array_equal(td2.get_y_digit(), expected_y_digit)
assert np.allclose(td2.get_reward(), expected_reward)
assert np.array_equal(td2.get_next_x(), expected_next_x)
os.remove(temp_filename)
os.rmdir(temp_dir)
def test_shuffle(self):
td = training_data.training_data()
n = 5
for i in range(n):
# Use "is odd" for done
td.add(np.full((1, 4, 4), i), i, i, np.full((1, 4, 4), i), (i % 2) == 1)
td.shuffle()
for i in range(n):
            # Find where this value has been shuffled to
index_of_val = np.where(td.get_y_digit() == i)[0].item(0)
# Check that all parts of this equal i
arrays = td.get_n(index_of_val)
for a in arrays:
                if a.dtype == np.dtype(bool):
assert((a == ((i % 2) == 1)).all())
else:
assert((a == i).all())
def test_make_boards_unique(self):
td = training_data.training_data()
td.add(np.ones([1, 4, 4]), 0, 4, np.zeros([1, 4, 4]))
td.add(np.zeros([1, 4, 4]), 1, 2, np.ones([1, 4, 4]))
td.add(np.ones([1, 4, 4]), 2, 16, np.zeros([1, 4, 4]))
td.add(np.zeros([1, 4, 4]), 3, 2, np.ones([1, 4, 4]))
td.make_boards_unique()
expected_x = np.array([
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
        ], dtype=int)
expected_y_digit = np.array([
[0],
[1]
        ], dtype=int)
expected_reward = np.array([
[4],
[2]
        ], dtype=float)
expected_next_x = np.array([
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]
        ], dtype=int)
assert np.array_equal(td.get_x(), expected_x)
assert np.array_equal(td.get_y_digit(), expected_y_digit)
assert np.allclose(td.get_reward(), expected_reward)
assert np.array_equal(td.get_next_x(), expected_next_x)
if __name__ == '__main__':
    pytest.main()
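A minimal usage sketch of the training_data API exercised by the tests above, assuming the same module layout; the CSV filename is illustrative:
import numpy as np
import training_data

td = training_data.training_data()
# (state, action, reward, next_state, done) transition, as in test_add
td.add(np.ones([1, 4, 4]), 1, 4, np.zeros([1, 4, 4]), False)
td.augment()                  # 8 symmetries: 4 rotations x optional hflip, per test_augment
assert td.size() == 8
td.export_csv('episode.csv')  # illustrative path; round-trips via import_csv per test_save_restore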
| 44.173077
| 101
| 0.428515
| 4,059
| 22,970
| 2.33358
| 0.043114
| 0.245988
| 0.331926
| 0.395693
| 0.826541
| 0.805743
| 0.75
| 0.718011
| 0.677048
| 0.649916
| 0
| 0.151582
| 0.350631
| 22,970
| 519
| 102
| 44.258189
| 0.483441
| 0.026948
| 0
| 0.612632
| 0
| 0
| 0.000717
| 0
| 0
| 0
| 0
| 0
| 0.143158
| 1
| 0.042105
| false
| 0
| 0.016842
| 0
| 0.061053
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
31d43ead09e1c7effc26eae228b072a20a8b0310
| 3,261
|
py
|
Python
|
simple_retry/decorators.py
|
nicolasmota/retry_decorator
|
65eab450e65fe8c08d07cd213628e655baa5ae55
|
[
"MIT"
] | 11
|
2018-03-06T17:09:50.000Z
|
2018-10-26T04:31:50.000Z
|
simple_retry/decorators.py
|
nicolasmota/retry_decorator
|
65eab450e65fe8c08d07cd213628e655baa5ae55
|
[
"MIT"
] | 9
|
2018-03-06T03:56:44.000Z
|
2018-10-26T04:48:42.000Z
|
simple_retry/decorators.py
|
nicolasmota/retry_decorator
|
65eab450e65fe8c08d07cd213628e655baa5ae55
|
[
"MIT"
] | 2
|
2018-03-15T03:11:14.000Z
|
2018-07-07T17:11:06.000Z
|
import time
from functools import wraps
import asyncio
from simple_retry.simple_retry.helpers import (
format_retry_message,
has_retries_to_go,
log_message
)
def retry(Except, retries=5, delay=0, logger=None, level='info', multiple=1):
    """Retry a synchronous callable on exceptions of type `Except`.

    Sleeps `delay` seconds after each failure, multiplying the wait by
    `multiple`; the final attempt is made outside the loop and not caught.
    """
    def deco_retry(function):
        @wraps(function)
        def f_retry(*args, **kwargs):
            tries = 1
            mdelay = delay
while has_retries_to_go(
tries_performed=tries,
retries_limit=retries
):
try:
return function(*args, **kwargs)
except Except as e:
log_message(
logger=logger,
level=level,
exception=e,
tries_performed=tries,
retries_limit=retries,
wait_delay_multiple=multiple
)
time.sleep(mdelay)
mdelay *= multiple
tries += 1
return function(*args, **kwargs)
return f_retry
return deco_retry
def coroutine_retry(
Except,
retries=5,
delay=0,
logger=None,
level='info',
multiple=1
):
def deco_retry(function):
        # Note: @asyncio.coroutine is deprecated since Python 3.8 and removed
        # in 3.11; the native-coroutine `async_retry` below is the modern form.
        @asyncio.coroutine
@wraps(function)
def f_retry(*args, **kwargs):
tries = 1
mdelay = delay
while has_retries_to_go(
tries_performed=tries,
retries_limit=retries
):
try:
return (yield from (function(*args, **kwargs)))
except Except as e:
log_message(
logger=logger,
level=level,
exception=e,
tries_performed=tries,
retries_limit=retries,
wait_delay_multiple=multiple
)
yield from (asyncio.sleep(mdelay))
mdelay *= multiple
tries += 1
return (yield from function(*args, **kwargs))
return f_retry
return deco_retry
def async_retry(
Except,
retries=5,
delay=0,
logger=None,
level='info',
multiple=1
):
def deco_retry(function):
@wraps(function)
async def f_retry(*args, **kwargs):
tries = 1
mdelay = delay
while has_retries_to_go(
tries_performed=tries,
retries_limit=retries
):
try:
return await (function(*args, **kwargs))
except Except as e:
log_message(
logger=logger,
level=level,
exception=e,
tries_performed=tries,
retries_limit=retries,
wait_delay_multiple=multiple
)
await (asyncio.sleep(mdelay))
mdelay *= multiple
tries += 1
return await (function(*args, **kwargs))
return f_retry
return deco_retry
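A brief usage sketch for the synchronous decorator above; the flaky function and logger name are illustrative:
import logging

logging.basicConfig(level=logging.INFO)
demo_logger = logging.getLogger('retry-demo')  # illustrative logger

@retry(ConnectionError, retries=3, delay=1, logger=demo_logger, multiple=2)
def fetch_page():
    # Stand-in for transient I/O; raises until the retries are exhausted,
    # with waits of 1s then 2s between attempts.
    raise ConnectionError('transient failure')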
| 26.512195
| 77
| 0.462435
| 287
| 3,261
| 5.073171
| 0.167247
| 0.061813
| 0.078297
| 0.107143
| 0.873626
| 0.866758
| 0.854396
| 0.828984
| 0.768544
| 0.737637
| 0
| 0.008621
| 0.466421
| 3,261
| 122
| 78
| 26.729508
| 0.828161
| 0
| 0
| 0.798077
| 0
| 0
| 0.00368
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.038462
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
31d44c5f099da57a280d3e04440215f00f79e111
| 153
|
py
|
Python
|
environment.py
|
bopopescu/cbrc-devteam-blog
|
eb4f7977d112b1ee692dad60ed46802d2ee243f4
|
[
"Apache-2.0"
] | null | null | null |
environment.py
|
bopopescu/cbrc-devteam-blog
|
eb4f7977d112b1ee692dad60ed46802d2ee243f4
|
[
"Apache-2.0"
] | null | null | null |
environment.py
|
bopopescu/cbrc-devteam-blog
|
eb4f7977d112b1ee692dad60ed46802d2ee243f4
|
[
"Apache-2.0"
] | 1
|
2020-07-24T03:59:01.000Z
|
2020-07-24T03:59:01.000Z
|
# application environment
import settings
import sys
sys.path.append(settings.app_home_dir)
sys.path.append(settings.app_settings["app_lib_dir"])
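The settings module itself is not shown; a minimal sketch of the attributes this snippet assumes (module name and paths are hypothetical):
# settings.py (hypothetical)
app_home_dir = '/srv/app'
app_settings = {'app_lib_dir': '/srv/app/lib'}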
| 21.857143
| 54
| 0.797386
| 22
| 153
| 5.318182
| 0.5
| 0.282051
| 0.222222
| 0.358974
| 0.410256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104575
| 153
| 6
| 55
| 25.5
| 0.854015
| 0.150327
| 0
| 0
| 0
| 0
| 0.090164
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
730824ac4dba3e614be06b76613a0a6b290846f5
| 46
|
py
|
Python
|
src/utils.py
|
sequoia-tree/cs370
|
47bf7f56d20bd81abbdbd0502477afcd5f62bbbe
|
[
"CC-BY-4.0"
] | 1
|
2019-01-14T08:31:45.000Z
|
2019-01-14T08:31:45.000Z
|
src/utils.py
|
sequoia-tree/teaching-cs
|
47bf7f56d20bd81abbdbd0502477afcd5f62bbbe
|
[
"CC-BY-4.0"
] | null | null | null |
src/utils.py
|
sequoia-tree/teaching-cs
|
47bf7f56d20bd81abbdbd0502477afcd5f62bbbe
|
[
"CC-BY-4.0"
] | null | null | null |
from md_utils import *
from py_utils import *
| 15.333333
| 22
| 0.782609
| 8
| 46
| 4.25
| 0.625
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 2
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7317deb1560647aa925ec2a580d6d0908f2796af
| 155
|
py
|
Python
|
GasBotty/models/utils.py
|
GreenCUBIC/GasBotty
|
158f5991201c80bf4cbbbb9deabc9954ff19bbb1
|
[
"MIT"
] | 353
|
2020-12-10T10:47:17.000Z
|
2022-03-31T23:08:29.000Z
|
GasBotty/models/utils.py
|
GreenCUBIC/GasBotty
|
158f5991201c80bf4cbbbb9deabc9954ff19bbb1
|
[
"MIT"
] | 80
|
2020-12-10T09:54:22.000Z
|
2022-03-30T22:08:45.000Z
|
GasBotty/models/utils.py
|
GreenCUBIC/GasBotty
|
158f5991201c80bf4cbbbb9deabc9954ff19bbb1
|
[
"MIT"
] | 63
|
2020-12-10T17:10:34.000Z
|
2022-03-28T16:27:07.000Z
|
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
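A hedged example of using this compatibility shim, whichever import succeeded; the URL points at PyTorch's published DenseNet-121 weights and is shown for illustration only:
# Fetch (and cache) a pretrained state dict through the shim.
url = 'https://download.pytorch.org/models/densenet121-a639ec97.pth'
state_dict = load_state_dict_from_url(url, progress=True)
# model.load_state_dict(state_dict)  # assuming a compatible model instance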
| 31
| 75
| 0.806452
| 26
| 155
| 4.423077
| 0.576923
| 0.156522
| 0.226087
| 0.295652
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 155
| 4
| 76
| 38.75
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
73425bf1b2ce90f77e267345bd3b090b0208b790
| 16,334
|
py
|
Python
|
tests/service/ai/test_not_killing_itself_ai.py
|
jonashellmann/informaticup21-team-chillow
|
f2e519af0a5d9a9368d62556703cfb1066ebb58f
|
[
"MIT"
] | 3
|
2021-01-17T23:32:07.000Z
|
2022-01-30T14:49:16.000Z
|
tests/service/ai/test_not_killing_itself_ai.py
|
jonashellmann/informaticup21-team-chillow
|
f2e519af0a5d9a9368d62556703cfb1066ebb58f
|
[
"MIT"
] | 2
|
2021-01-17T13:37:56.000Z
|
2021-04-14T12:28:49.000Z
|
tests/service/ai/test_not_killing_itself_ai.py
|
jonashellmann/informaticup21-team-chillow
|
f2e519af0a5d9a9368d62556703cfb1066ebb58f
|
[
"MIT"
] | 2
|
2021-04-02T14:53:38.000Z
|
2021-04-20T11:10:17.000Z
|
import unittest
from datetime import datetime, timezone
from typing import List
from chillow.service.ai.not_killing_itself_ai import NotKillingItselfAI
from chillow.model.action import Action
from chillow.model.cell import Cell
from chillow.model.direction import Direction
from chillow.model.game import Game
from chillow.model.player import Player
from chillow.service.game_service import GameService
class NotKillingItselfAITest(unittest.TestCase):
def test_ai_should_choose_the_own_non_killing_itself_action(self):
player1 = Player(1, 0, 0, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell([player1]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell([player2])]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself(self):
player1 = Player(1, 0, 1, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell([player2])]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.change_nothing in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself2(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell([player2]), Cell(), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.turn_left in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself_in_turn_6(self):
player1 = Player(1, 0, 4, Direction.up, 3, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
game_service.turn.turn_ctr = 6
sut = NotKillingItselfAI(player1, [], 4, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 1)
self.assertTrue(Action.slow_down in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(Action.speed_up in actions)
self.assertTrue(len(actions) == 3)
    def test_ai_should_not_choose_speed_up_if_max_speed_is_already_reached(self):
MAX_SPEED = 3
player1 = Player(1, 0, 4, Direction.up, MAX_SPEED, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], MAX_SPEED, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 1)
self.assertTrue(Action.slow_down in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
def test_ai_should_calc_action_with_max_distance(self):
player1 = Player(1, 0, 4, Direction.up, 1, True, "")
player2 = Player(2, 0, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.calc_action_with_max_distance_to_visited_cells(game_service, [Action.speed_up,
Action.change_nothing,
Action.turn_right])
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_calc_all_action_with_max_distance_with_max_worse_distance(self):
MAX_WORSE_DISTANCE = 1
player1 = Player(1, 0, 4, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player1]), Cell(), Cell(), Cell(), Cell([player2])]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, MAX_WORSE_DISTANCE, 3)
actions: List[Action] = sut.calc_action_with_max_distance_to_visited_cells(game_service, [Action.speed_up,
Action.change_nothing,
Action.turn_right])
self.assertTrue(Action.speed_up in actions)
self.assertTrue(Action.change_nothing in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 3)
def test_get_information(self):
player = Player(1, 0, 4, Direction.up, 1, True, "")
sut = NotKillingItselfAI(player, [], 3, 1, 3)
expected = "max_speed=3, max_worse_distance=1, depth=3"
result = sut.get_information()
self.assertEqual(expected, result)
def test_ai_should_choose_the_correct_list_of_actions_non_killing_itself_with_depth_greater_than_one(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell(), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell(), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 2)
actions: List[Action] = sut.find_surviving_actions(game_service, 2)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_empty_list_with_depth_greater_than_one_and_no_surviving_action(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell([player2]), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 2)
actions: List[Action] = sut.find_surviving_actions(game_service, 2)
self.assertTrue(len(actions) == 0)
def test_ai_should_choose_correct_list_with_depth_three_and_surviving_action(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell(), Cell(), Cell()],
[Cell(), Cell(), Cell(), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_empty_list_with_depth_three_and_no_surviving_action(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell([player2]), Cell(), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions: List[Action] = sut.find_surviving_actions(game_service, 3)
self.assertTrue(len(actions) == 0)
def test_ai_should_choose_best_list_of_actions_by_depth_from_lower_depth(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell([player2]), Cell(), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 5)
actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_best_list_of_actions_by_depth(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell(), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 5)
actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 1)
def test_ai_should_choose_best_list_of_actions_in_lowest_possible_depth(self):
player1 = Player(1, 1, 2, Direction.up, 1, True, "")
player2 = Player(2, 1, 1, Direction.down, 3, True, "")
players = [player1, player2]
cells = [[Cell(), Cell(), Cell(), Cell(), Cell()],
[Cell([player2]), Cell([player2]), Cell([player2]), Cell(), Cell()],
[Cell(), Cell([player1]), Cell(), Cell([player2]), Cell()],
[Cell([player2]), Cell(), Cell([player2]), Cell([player2]), Cell()],
[Cell(), Cell(), Cell([player2]), Cell(), Cell()]]
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game = Game(5, 5, cells, players, 2, True, time)
game_service = GameService(game)
sut = NotKillingItselfAI(player1, [], 3, 0, 5)
actions: List[Action] = sut.find_surviving_actions_with_best_depth(game_service)
self.assertTrue(Action.turn_left in actions)
self.assertTrue(Action.turn_right in actions)
self.assertTrue(len(actions) == 2)
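A condensed sketch of the arrange/act pattern these tests share; the board values mirror the first test case above:
player1 = Player(1, 0, 0, Direction.up, 1, True, "")
player2 = Player(2, 4, 4, Direction.down, 3, True, "")
cells = [[Cell() for _ in range(5)] for _ in range(5)]
cells[0][0], cells[4][4] = Cell([player1]), Cell([player2])
time = datetime(2020, 10, 1, 12, 5, 13, 0, timezone.utc)
game_service = GameService(Game(5, 5, cells, [player1, player2], 2, True, time))
sut = NotKillingItselfAI(player1, [], 3, 0, 3)
actions = sut.find_surviving_actions(game_service, 3)  # expect [Action.turn_right]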
| 52.185304
| 120
| 0.53355
| 1,846
| 16,334
| 4.568797
| 0.054171
| 0.258952
| 0.303059
| 0.328195
| 0.903486
| 0.887361
| 0.884515
| 0.881314
| 0.8742
| 0.864951
| 0
| 0.048528
| 0.307396
| 16,334
| 312
| 121
| 52.352564
| 0.696986
| 0
| 0
| 0.807087
| 0
| 0
| 0.002571
| 0.001286
| 0
| 0
| 0
| 0
| 0.137795
| 1
| 0.059055
| false
| 0
| 0.03937
| 0
| 0.102362
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|