| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/mirror_sourceforge_broken/package.py
|
{
"start": 309,
"end": 623
}
|
class ____(AutotoolsPackage, SourceforgePackage):
"""Simple sourceforge.net package"""
homepage = "http://www.tcl.tk"
url = "http://prdownloads.sourceforge.net/tcl/tcl8.6.5-src.tar.gz"
version("8.6.8", sha256="c43cb0c1518ce42b00e7c8f6eaddd5195c53a98f94adc717234a65cbcfd3f96a")
|
MirrorSourceforgeBroken
|
python
|
apache__airflow
|
providers/cncf/kubernetes/tests/unit/cncf/kubernetes/models/test_secret.py
|
{
"start": 1121,
"end": 5465
}
|
class ____:
def test_to_env_secret(self):
secret = Secret("env", "name", "secret", "key")
assert secret.to_env_secret() == k8s.V1EnvVar(
name="NAME",
value_from=k8s.V1EnvVarSource(secret_key_ref=k8s.V1SecretKeySelector(name="secret", key="key")),
)
def test_to_env_from_secret(self):
secret = Secret("env", None, "secret")
assert secret.to_env_from_secret() == k8s.V1EnvFromSource(
secret_ref=k8s.V1SecretEnvSource(name="secret")
)
@mock.patch("uuid.uuid4")
def test_to_volume_secret(self, mock_uuid):
mock_uuid.return_value = "0"
secret = Secret("volume", "/etc/foo", "secret_b")
assert secret.to_volume_secret() == (
k8s.V1Volume(name="secretvol0", secret=k8s.V1SecretVolumeSource(secret_name="secret_b")),
k8s.V1VolumeMount(mount_path="/etc/foo", name="secretvol0", read_only=True),
)
@mock.patch("uuid.uuid4")
def test_only_mount_sub_secret(self, mock_uuid):
mock_uuid.return_value = "0"
items = [k8s.V1KeyToPath(key="my-username", path="/extra/path")]
secret = Secret("volume", "/etc/foo", "secret_b", items=items)
assert secret.to_volume_secret() == (
k8s.V1Volume(
name="secretvol0", secret=k8s.V1SecretVolumeSource(secret_name="secret_b", items=items)
),
k8s.V1VolumeMount(mount_path="/etc/foo", name="secretvol0", read_only=True),
)
@mock.patch("uuid.uuid4")
def test_attach_to_pod(self, mock_uuid, data_file):
static_uuid = uuid.UUID("cf4a56d2-8101-4217-b027-2af6216feb48")
mock_uuid.return_value = static_uuid
template_file = data_file("pods/generator_base.yaml").as_posix()
pod = PodGenerator(pod_template_file=template_file).ud_pod
secrets = [
# This should be a secretRef
Secret("env", None, "secret_a"),
# This should be a single secret mounted in volumeMounts
Secret("volume", "/etc/foo", "secret_b"),
# This should produce a single secret mounted in env
Secret("env", "TARGET", "secret_b", "source_b"),
]
k8s_client = ApiClient()
pod = append_to_pod(pod, secrets)
result = k8s_client.sanitize_for_serialization(pod)
assert result == {
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"labels": {"app": "myapp"},
"name": "myapp-pod",
"namespace": "default",
},
"spec": {
"containers": [
{
"command": ["sh", "-c", "echo Hello Kubernetes!"],
"env": [
{"name": "ENVIRONMENT", "value": "prod"},
{"name": "LOG_LEVEL", "value": "warning"},
{
"name": "TARGET",
"valueFrom": {"secretKeyRef": {"key": "source_b", "name": "secret_b"}},
},
],
"envFrom": [
{"configMapRef": {"name": "configmap_a"}},
{"secretRef": {"name": "secret_a"}},
],
"image": "busybox",
"name": "base",
"ports": [{"containerPort": 1234, "name": "foo"}],
"resources": {"limits": {"memory": "200Mi"}, "requests": {"memory": "100Mi"}},
"volumeMounts": [
{
"mountPath": "/etc/foo",
"name": f"secretvol{static_uuid}",
"readOnly": True,
},
],
},
],
"hostNetwork": True,
"imagePullSecrets": [{"name": "pull_secret_a"}, {"name": "pull_secret_b"}],
"securityContext": {"fsGroup": 2000, "runAsUser": 1000},
"volumes": [
{"name": f"secretvol{static_uuid}", "secret": {"secretName": "secret_b"}},
],
},
}
|
TestSecret
|
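A minimal sketch of the three `Secret` deploy modes these tests exercise (`env` without a key, `env` with a key, and `volume`), assuming the Airflow `cncf.kubernetes` provider is installed and importable under the path used by newer provider versions:

```python
from airflow.providers.cncf.kubernetes.secret import Secret

# Expose every key of Kubernetes secret "secret_a" as environment variables.
env_all = Secret("env", None, "secret_a")
# Expose key "source_b" of "secret_b" as the single env var TARGET.
env_one = Secret("env", "TARGET", "secret_b", "source_b")
# Mount "secret_b" as read-only files under /etc/foo.
vol = Secret("volume", "/etc/foo", "secret_b")

print(env_one.to_env_secret())  # a k8s.V1EnvVar with a secretKeyRef source
```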
python
|
openai__openai-python
|
src/openai/_base_client.py
|
{
"start": 46702,
"end": 47023
}
|
class ____(DefaultAsyncHttpxClient):
def __del__(self) -> None:
if self.is_closed:
return
try:
# TODO(someday): support non asyncio runtimes here
asyncio.get_running_loop().create_task(self.aclose())
except Exception:
pass
|
AsyncHttpxClientWrapper
|
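The same cleanup idea in isolation: a hedged, standalone sketch (the class name `AutoClosingAsyncClient` is illustrative, not part of the SDK) that schedules `aclose()` from `__del__` whenever an asyncio loop happens to be running:

```python
import asyncio

import httpx


class AutoClosingAsyncClient(httpx.AsyncClient):
    """Illustrative stand-in for the wrapper above."""

    def __del__(self) -> None:
        if self.is_closed:
            return
        try:
            # Only possible while a loop is running; otherwise fail
            # silently, mirroring the wrapper's best-effort cleanup.
            asyncio.get_running_loop().create_task(self.aclose())
        except Exception:
            pass


async def main() -> None:
    client = AutoClosingAsyncClient()
    await client.get("https://example.com")
    del client              # __del__ schedules the close task
    await asyncio.sleep(0)  # give the scheduled task a chance to run


asyncio.run(main())
```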
python
|
facebookresearch__faiss
|
tests/test_search_params.py
|
{
"start": 387,
"end": 13308
}
|
class ____(unittest.TestCase):
"""
Test the IDSelector filtering for as many (index class, id selector class)
combinations as possible.
"""
def do_test_id_selector(
self,
index_key,
id_selector_type="batch",
mt=faiss.METRIC_L2,
k=10,
use_heap=True
):
""" Verify that the id selector returns the subset of results that are
members according to the IDSelector.
Supports id_selector_type="batch", "array", "bitmap", "range", "range_sorted", "not", "and", "or", "xor"
"""
d = 32 # make sure dimension is multiple of 8 for binary
ds = datasets.SyntheticDataset(d, 1000, 100, 20)
if index_key == "BinaryFlat":
rs = np.random.RandomState(123)
xb = rs.randint(256, size=(ds.nb, d // 8), dtype='uint8')
xq = rs.randint(256, size=(ds.nq, d // 8), dtype='uint8')
index = faiss.IndexBinaryFlat(d)
index.use_heap = use_heap
# Use smaller radius for Hamming distance
base_radius = 4
is_binary = True
else:
xb = ds.get_database()
xq = ds.get_queries()
xt = ds.get_train()
index = faiss.index_factory(d, index_key, mt)
index.train(xt)
base_radius = float('inf') # Will be set based on results
is_binary = False
# reference result
if "range" in id_selector_type:
subset = np.arange(30, 80).astype('int64')
elif id_selector_type == "or":
lhs_rs = np.random.RandomState(123)
lhs_subset = lhs_rs.choice(ds.nb, 50, replace=False).astype("int64")
rhs_rs = np.random.RandomState(456)
rhs_subset = rhs_rs.choice(ds.nb, 20, replace=False).astype("int64")
subset = np.union1d(lhs_subset, rhs_subset)
elif id_selector_type == "and":
lhs_rs = np.random.RandomState(123)
lhs_subset = lhs_rs.choice(ds.nb, 50, replace=False).astype("int64")
rhs_rs = np.random.RandomState(456)
rhs_subset = rhs_rs.choice(ds.nb, 10, replace=False).astype("int64")
subset = np.intersect1d(lhs_subset, rhs_subset)
elif id_selector_type == "xor":
lhs_rs = np.random.RandomState(123)
lhs_subset = lhs_rs.choice(ds.nb, 50, replace=False).astype("int64")
rhs_rs = np.random.RandomState(456)
rhs_subset = rhs_rs.choice(ds.nb, 40, replace=False).astype("int64")
subset = np.setxor1d(lhs_subset, rhs_subset)
else:
rs = np.random.RandomState(123)
subset = rs.choice(ds.nb, 50, replace=False).astype('int64')
index.add(xb[subset])
if "IVF" in index_key and id_selector_type == "range_sorted":
self.assertTrue(index.check_ids_sorted())
Dref, Iref0 = index.search(xq, k)
Iref = subset[Iref0]
Iref[Iref0 < 0] = -1
if base_radius == float('inf'):
radius = float(Dref[Iref > 0].max()) * 1.01
else:
radius = base_radius
try:
Rlims_ref, RDref, RIref = index.range_search(xq, radius)
except RuntimeError as e:
if "not implemented" in str(e):
have_range_search = False
else:
raise
else:
RIref = subset[RIref]
# normalize the range search results
RDref, RIref = sort_range_res_2(Rlims_ref, RDref, RIref)
have_range_search = True
# result with selector: fill full database and search with selector
index.reset()
index.add(xb)
if id_selector_type == "range":
sel = faiss.IDSelectorRange(30, 80)
elif id_selector_type == "range_sorted":
sel = faiss.IDSelectorRange(30, 80, True)
elif id_selector_type == "array":
sel = faiss.IDSelectorArray(subset)
elif id_selector_type == "bitmap":
bitmap = np.zeros(ds.nb, dtype=bool)
bitmap[subset] = True
bitmap = np.packbits(bitmap, bitorder='little')
sel = faiss.IDSelectorBitmap(bitmap)
elif id_selector_type == "not":
ssubset = set(subset)
inverse_subset = np.array([
i for i in range(ds.nb)
if i not in ssubset
]).astype('int64')
sel = faiss.IDSelectorNot(faiss.IDSelectorBatch(inverse_subset))
elif id_selector_type == "or":
sel = faiss.IDSelectorOr(
faiss.IDSelectorBatch(lhs_subset),
faiss.IDSelectorBatch(rhs_subset)
)
elif id_selector_type == "and":
sel = faiss.IDSelectorAnd(
faiss.IDSelectorBatch(lhs_subset),
faiss.IDSelectorBatch(rhs_subset)
)
elif id_selector_type == "xor":
sel = faiss.IDSelectorXOr(
faiss.IDSelectorBatch(lhs_subset),
faiss.IDSelectorBatch(rhs_subset)
)
else:
sel = faiss.IDSelectorBatch(subset)
params = (
faiss.SearchParametersIVF(sel=sel) if "IVF" in index_key else
faiss.SearchParametersPQ(sel=sel) if "PQ" in index_key else
faiss.SearchParameters(sel=sel)
)
Dnew, Inew = index.search(xq, k, params=params)
if is_binary:
# For binary indexes, we need to check:
# 1. All returned IDs are valid (in the subset or -1)
# 2. The distances match
# Check that all returned IDs are valid
valid_ids = np.ones_like(Inew, dtype=bool)
# Create a mask of valid IDs (those in subset)
subset_set = set(subset) # Convert to set for O(1) lookups
# Handle -1 values separately (they're always valid)
valid_ids = np.logical_or(
Inew == -1,
np.isin(Inew, list(subset_set))
)
self.assertTrue(np.all(valid_ids), "Some returned IDs are not in the subset")
# Check that distances match
np.testing.assert_almost_equal(Dref, Dnew, decimal=5)
else:
# For non-binary indexes, we can do exact comparison
np.testing.assert_array_equal(Iref, Inew)
np.testing.assert_almost_equal(Dref, Dnew, decimal=5)
if have_range_search:
Rlims_new, RDnew, RInew = index.range_search(xq, radius, params=params)
np.testing.assert_array_equal(Rlims_ref, Rlims_new)
RDnew, RInew = sort_range_res_2(Rlims_new, RDnew, RInew)
if is_binary:
# For binary indexes, check that all returned IDs are valid
valid_ids = np.ones(len(RInew), dtype=bool)
# Use vectorized operation instead of loop
subset_set = set(subset) # Convert to set for O(1) lookups
valid_ids = np.isin(RInew, list(subset_set))
self.assertTrue(np.all(valid_ids), "Some range search IDs are not in the subset")
# Check that distances match
np.testing.assert_almost_equal(RDref, RDnew, decimal=5)
else:
# For non-binary indexes, we can do exact comparison
np.testing.assert_array_equal(RIref, RInew)
np.testing.assert_almost_equal(RDref, RDnew, decimal=5)
def test_IVFFlat(self):
self.do_test_id_selector("IVF32,Flat")
def test_IVFFlat_range_sorted(self):
self.do_test_id_selector("IVF32,Flat", id_selector_type="range_sorted")
def test_IVFPQ(self):
self.do_test_id_selector("IVF32,PQ4x4np")
def test_IVFPQfs(self):
self.do_test_id_selector("IVF32,PQ4x4fs")
def test_IVFPQfs_k1(self):
self.do_test_id_selector("IVF32,PQ4x4fs", k=1)
def test_IVFPQfs_k40(self):
# test reservoir codepath
self.do_test_id_selector("IVF32,PQ4x4fs", k=40)
def test_IVFSQ(self):
self.do_test_id_selector("IVF32,SQ8")
def test_pretrans(self):
self.do_test_id_selector("PCA16,IVF32,Flat")
def test_SQ(self):
self.do_test_id_selector("SQ8")
def test_Flat(self):
self.do_test_id_selector("Flat")
def test_Flat_IP(self):
self.do_test_id_selector("Flat", mt=faiss.METRIC_INNER_PRODUCT)
def test_Flat_id_range(self):
self.do_test_id_selector("Flat", id_selector_type="range")
def test_Flat_IP_id_range(self):
self.do_test_id_selector(
"Flat", id_selector_type="range",
mt=faiss.METRIC_INNER_PRODUCT
)
def test_Flat_id_array(self):
self.do_test_id_selector("Flat", id_selector_type="array")
def test_Flat_IP_id_array(self):
self.do_test_id_selector(
"Flat", id_selector_type="array",
mt=faiss.METRIC_INNER_PRODUCT
)
def test_Flat_id_bitmap(self):
self.do_test_id_selector("Flat", id_selector_type="bitmap")
def test_Flat_id_not(self):
self.do_test_id_selector("Flat", id_selector_type="not")
def test_Flat_id_or(self):
self.do_test_id_selector("Flat", id_selector_type="or")
# not implemented
# def test_PQ(self):
# self.do_test_id_selector("PQ4x4np")
# def test_AQ(self):
# self.do_test_id_selector("RQ3x4")
def do_test_id_selector_weak(self, index_key):
""" verify that the selected subset is the subset in the list"""
ds = datasets.SyntheticDataset(32, 1000, 100, 20)
index = faiss.index_factory(ds.d, index_key)
index.train(ds.get_train())
index.add(ds.get_database())
k = 10
Dref, Iref = index.search(ds.get_queries(), k)
# reference result
rs = np.random.RandomState(123)
subset = rs.choice(ds.nb, 50, replace=False).astype("int64")
sel = faiss.IDSelectorBatch(subset)
params = faiss.SearchParametersHNSW()
params.sel = sel
Dnew, Inew = index.search(ds.get_queries(), k, params=params)
mask = np.zeros(ds.nb, dtype=bool)
mask[subset] = True
for q in range(len(Iref)):
mask_q, = np.where(mask[Iref[q]])
l = len(mask_q)
np.testing.assert_array_equal(Iref[q, mask_q], Inew[q, :l])
np.testing.assert_array_equal(Dref[q, mask_q], Dnew[q, :l])
def test_HNSW(self):
self.do_test_id_selector_weak("HNSW")
def test_idmap(self):
ds = datasets.SyntheticDataset(32, 100, 100, 20)
rs = np.random.RandomState(123)
ids = rs.choice(10000, size=100, replace=False)
mask = ids % 2 == 0
index = faiss.index_factory(ds.d, "IDMap,SQ8")
index.train(ds.get_train())
# ref result
index.add_with_ids(ds.get_database()[mask], ids[mask])
Dref, Iref = index.search(ds.get_queries(), 10)
# with selector
index.reset()
index.add_with_ids(ds.get_database(), ids)
valid_ids = ids[mask]
sel = faiss.IDSelectorTranslated(
index, faiss.IDSelectorBatch(valid_ids))
Dnew, Inew = index.search(
ds.get_queries(), 10,
params=faiss.SearchParameters(sel=sel)
)
np.testing.assert_array_equal(Iref, Inew)
np.testing.assert_array_almost_equal(Dref, Dnew, decimal=5)
# let the IDMap::search add the translation...
Dnew, Inew = index.search(
ds.get_queries(), 10,
params=faiss.SearchParameters(sel=faiss.IDSelectorBatch(valid_ids))
)
np.testing.assert_array_equal(Iref, Inew)
np.testing.assert_array_almost_equal(Dref, Dnew, decimal=5)
def test_bounds(self):
# https://github.com/facebookresearch/faiss/issues/3156
d = 64 # dimension
nb = 100000 # database size
xb = np.random.random((nb, d))
index_ip = faiss.IndexFlatIP(d)
index_ip.add(xb)
index_l2 = faiss.IndexFlatL2(d)
index_l2.add(xb)
out_of_bounds_id = nb + 15 # + 14 or lower will work fine
id_selector = faiss.IDSelectorArray([out_of_bounds_id])
search_params = faiss.SearchParameters(sel=id_selector)
# ignores out of bound, does not crash
distances, indices = index_ip.search(xb[:2], k=3, params=search_params)
distances, indices = index_l2.search(xb[:2], k=3, params=search_params)
def test_BinaryFlat(self):
self.do_test_id_selector("BinaryFlat")
def test_BinaryFlat_id_range(self):
self.do_test_id_selector("BinaryFlat", id_selector_type="range")
def test_BinaryFlat_id_array(self):
self.do_test_id_selector("BinaryFlat", id_selector_type="array")
def test_BinaryFlat_no_heap(self):
self.do_test_id_selector("BinaryFlat", use_heap=False)
|
TestSelector
|
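A self-contained sketch of the core pattern these tests exercise: restricting a k-NN search to a subset of ids with `IDSelectorBatch` passed through `SearchParameters` (sizes here are arbitrary):

```python
import faiss
import numpy as np

d, nb, nq, k = 32, 1000, 5, 10
rs = np.random.RandomState(0)
xb = rs.rand(nb, d).astype("float32")
xq = rs.rand(nq, d).astype("float32")

index = faiss.IndexFlat(d)  # L2 metric by default
index.add(xb)

subset = rs.choice(nb, 50, replace=False).astype("int64")
sel = faiss.IDSelectorBatch(subset)
D, I = index.search(xq, k, params=faiss.SearchParameters(sel=sel))

# Every returned id is in the subset (or -1 when fewer than k matches exist).
assert np.all(np.isin(I, np.append(subset, -1)))
```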
python
|
pytorch__pytorch
|
test/quantization/core/experimental/test_floatx.py
|
{
"start": 7330,
"end": 13956
}
|
class ____(TestCase):
@dtypes(*FLOAT8_DTYPES)
@dtypesIfCUDA(*CUDA_FLOAT8_DTYPES)
def test_creation_with_zeros(self, dtype, device):
"""Sanity test, round-trip casting of zeros."""
x8 = torch.zeros(8, dtype=dtype, device=device)
if dtype is torch.float8_e8m0fnu:
# zeros are not supported for this dtype, values get clamped
# to 2 ^ -127
x = torch.full((8,), 2**-127, dtype=torch.float, device=device)
self.assertEqual(x, x8.float(), atol=0, rtol=0)
else:
x = torch.zeros(8, dtype=torch.float, device=device)
self.assertEqual(x, x8.float(), atol=0, rtol=0)
@dtypes(*FLOAT8_DTYPES)
@dtypesIfCUDA(*CUDA_FLOAT8_DTYPES)
@parametrize("get_input", ROUND_TRIP_TEST_CASES)
def test_cast_round_trip(self, dtype, get_input, device):
"""Numerical test of float8 conversion, by performing a round-trip cast
to the float8 dtype and back to float32, comparing against simulated
lower precision."""
if dtype is torch.float8_e8m0fnu:
self.skipTest("numerics for e8m0fnu are tested elsewhere")
x = get_input(dtype, device)
x = torch.cat((x, -x))
x8 = x.to(dtype)
x8_simulated = simulate_fp8_precision(x, dtype)
self.assertEqual(x8_simulated, x8.float())
def test_float8_e8m0fnu_rne_rounding(self, device):
"""
For every possible e8m0 exponent (256 options) and for every possible
g, r, s bits of the float32 mantissa, verify that RNE rounding is
correctly applied when casting from float32 to e8m0
Note: this code is morally similar to `test_cast_round_trip`, but
IMO simpler to special case e8m0 here.
"""
for biased_exponent in range(256):
# iterate through all the possible options of guard, round, sticky bits
# for the current exponent
for grs in range(8):
# create a positive floating point number with the specified exponent
# and mantissa guard, round, sticky bits
uint32_t_start = (biased_exponent << 23) + (grs << 20)
fp32_start = _int_bits_to_float(uint32_t_start)
# create an RNE rounded version of the exponent
if biased_exponent == 255:
new_biased_exponent = biased_exponent
else:
lsb = biased_exponent > 0
g = grs >> 2
r = (grs >> 1) & 0b1
s = grs & 0b1
new_biased_exponent = _round_e8m0_rne(biased_exponent, lsb, g, r, s)
# create an RNE rounded version of the float
fp32_e8m0_fp32_emulated = _int_bits_to_float(new_biased_exponent << 23)
# now, do the same in PyTorch and see if results match
fp32_pt_start = torch.full(
(1,), fp32_start, device=device, dtype=torch.float
)
fp32_pt_e8m0 = fp32_pt_start.to(torch.float8_e8m0fnu)
fp32_pt_e8m0_fp32 = fp32_pt_e8m0.to(torch.float)
expected = fp32_e8m0_fp32_emulated
if biased_exponent == 254 and grs >= 4:
# special case rounding up from the largest representable float32 exponent, which
# saturates to nan
expected = float("nan")
elif biased_exponent == 255:
# special case inf and nan, which becomes nan
expected = float("nan")
actual = fp32_pt_e8m0_fp32.item()
self.assertEqual(
expected, actual, f"expected: {expected}, actual: {actual}"
)
@dtypes(*FLOAT8_DTYPES)
@dtypesIfCUDA(*CUDA_FLOAT8_DTYPES)
def test_special_numbers(self, dtype, device):
"""Test special numbers."""
def compare_binary_with_decimal(binary, decimal, number_name, dtype, device):
bits_int = int(binary, 2)
tensor_int = torch.tensor([bits_int], dtype=torch.uint8, device=device)
tensor_fp8 = tensor_int.view(dtype)
if number_name == "nan":
assert tensor_fp8.isnan()
else:
tensor_fp32 = tensor_fp8.float()
ref_tensor_fp32 = torch.tensor(
[decimal], dtype=torch.float, device=device
)
self.assertEqual(tensor_fp32, ref_tensor_fp32, atol=0, rtol=0)
for number in SPECIAL_NUMBERS[dtype]:
compare_binary_with_decimal(*number, dtype, device)
@dtypes(*FLOAT8_DTYPES)
@dtypesIfCUDA(*CUDA_FLOAT8_DTYPES)
def test_type_promotion_fails(self, dtype, device):
"""Test that float8 is not promoted to higher precision Float Type."""
for other_dtype in [
torch.float16,
torch.bfloat16,
torch.float32,
torch.float64,
]:
x = torch.randn(8, device=device).to(dtype)
y = torch.randn(8, device=device).to(other_dtype)
with self.assertRaisesRegex(
RuntimeError, "Promotion for Float8 Types is not supported"
):
x + y
@dtypes(*FLOAT8_DTYPES)
@dtypesIfCUDA(*CUDA_FLOAT8_DTYPES)
def test_empty(self, dtype, device):
with DeterministicGuard(torch.are_deterministic_algorithms_enabled()):
for use_deterministic in (True, False):
torch.use_deterministic_algorithms(use_deterministic)
torch.empty(4, 4, device=device, dtype=dtype)
@dtypes(*FLOAT8_DTYPES)
@dtypesIfCUDA(*CUDA_FLOAT8_DTYPES)
def test_to_string(self, dtype, device):
x = torch.empty(4, 4, device=device, dtype=dtype)
str(x)
@dtypes(*FLOAT8_DTYPES)
def test_finfo(self, dtype, device):
torch.finfo(dtype)
@dtypes(*FLOAT8_DTYPES)
@dtypesIfCUDA(*CUDA_FLOAT8_DTYPES)
def test_cat(self, dtype, device):
x1 = torch.empty(4, 4, device=device, dtype=dtype)
x2 = torch.empty(4, 4, device=device, dtype=dtype)
torch.cat([x1, x2])
@dtypes(*FLOAT8_DTYPES)
@dtypesIfCUDA(*CUDA_FLOAT8_DTYPES)
def test_save_load(self, dtype, device):
x1 = torch.randint(0, 10, (4, 4), device=device, dtype=torch.uint8).view(dtype)
with TemporaryFileName() as fname:
torch.save(x1, fname)
x1_save_load = torch.load(fname)
torch.testing.assert_close(x1, x1_save_load, atol=0, rtol=0)
|
TestFloat8Dtype
|
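A quick, hedged illustration of the round-trip cast these tests verify, using one of the float8 dtypes (requires a PyTorch build that ships them):

```python
import torch

x = torch.tensor([0.25, 1.0, 3.5, 448.0])
x8 = x.to(torch.float8_e4m3fn)  # cast down to 8-bit float
print(x8.float())               # these values are exactly representable in
                                # e4m3fn, so the round trip returns them unchanged
```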
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/channels.py
|
{
"start": 489876,
"end": 490621
}
|
class ____(ValueChannelMixin, core.PositionValueDef):
"""
Radius2Value schema wrapper.
Definition object for a constant value (primitive value or gradient definition) of an
encoding channel.
Parameters
----------
value : dict, float, :class:`ExprRef`, Literal['height', 'width']
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "radius2"
def __init__(self, value, **kwds):
super().__init__(value=value, **kwds)
@with_property_setters
|
Radius2Value
|
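For context, a small hedged example of how a constant value channel like `radius2` is set from user code via `alt.value(...)` (the DataFrame here is invented):

```python
import altair as alt
import pandas as pd

df = pd.DataFrame({"category": list("abcd"), "value": [4, 6, 10, 3]})

# A constant inner radius on an arc mark turns the pie into a donut.
chart = alt.Chart(df).mark_arc().encode(
    theta="value:Q",
    color="category:N",
    radius2=alt.value(20),
)
```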
python
|
pallets__jinja
|
tests/test_tests.py
|
{
"start": 191,
"end": 7851
}
|
class ____:
def test_defined(self, env):
tmpl = env.from_string("{{ missing is defined }}|{{ true is defined }}")
assert tmpl.render() == "False|True"
def test_even(self, env):
tmpl = env.from_string("""{{ 1 is even }}|{{ 2 is even }}""")
assert tmpl.render() == "False|True"
def test_odd(self, env):
tmpl = env.from_string("""{{ 1 is odd }}|{{ 2 is odd }}""")
assert tmpl.render() == "True|False"
def test_lower(self, env):
tmpl = env.from_string("""{{ "foo" is lower }}|{{ "FOO" is lower }}""")
assert tmpl.render() == "True|False"
# Test type checks
@pytest.mark.parametrize(
"op,expect",
(
("none is none", True),
("false is none", False),
("true is none", False),
("42 is none", False),
("none is true", False),
("false is true", False),
("true is true", True),
("0 is true", False),
("1 is true", False),
("42 is true", False),
("none is false", False),
("false is false", True),
("true is false", False),
("0 is false", False),
("1 is false", False),
("42 is false", False),
("none is boolean", False),
("false is boolean", True),
("true is boolean", True),
("0 is boolean", False),
("1 is boolean", False),
("42 is boolean", False),
("0.0 is boolean", False),
("1.0 is boolean", False),
("3.14159 is boolean", False),
("none is integer", False),
("false is integer", False),
("true is integer", False),
("42 is integer", True),
("3.14159 is integer", False),
("(10 ** 100) is integer", True),
("none is float", False),
("false is float", False),
("true is float", False),
("42 is float", False),
("4.2 is float", True),
("(10 ** 100) is float", False),
("none is number", False),
("false is number", True),
("true is number", True),
("42 is number", True),
("3.14159 is number", True),
("complex is number", True),
("(10 ** 100) is number", True),
("none is string", False),
("false is string", False),
("true is string", False),
("42 is string", False),
('"foo" is string', True),
("none is sequence", False),
("false is sequence", False),
("42 is sequence", False),
('"foo" is sequence', True),
("[] is sequence", True),
("[1, 2, 3] is sequence", True),
("{} is sequence", True),
("none is mapping", False),
("false is mapping", False),
("42 is mapping", False),
('"foo" is mapping', False),
("[] is mapping", False),
("{} is mapping", True),
("mydict is mapping", True),
("none is iterable", False),
("false is iterable", False),
("42 is iterable", False),
('"foo" is iterable', True),
("[] is iterable", True),
("{} is iterable", True),
("range(5) is iterable", True),
("none is callable", False),
("false is callable", False),
("42 is callable", False),
('"foo" is callable', False),
("[] is callable", False),
("{} is callable", False),
("range is callable", True),
),
)
def test_types(self, env, op, expect):
t = env.from_string(f"{{{{ {op} }}}}")
assert t.render(mydict=MyDict(), complex=complex(1, 2)) == str(expect)
def test_upper(self, env):
tmpl = env.from_string('{{ "FOO" is upper }}|{{ "foo" is upper }}')
assert tmpl.render() == "True|False"
def test_equalto(self, env):
tmpl = env.from_string(
"{{ foo is eq 12 }}|"
"{{ foo is eq 0 }}|"
"{{ foo is eq (3 * 4) }}|"
'{{ bar is eq "baz" }}|'
'{{ bar is eq "zab" }}|'
'{{ bar is eq ("ba" + "z") }}|'
"{{ bar is eq bar }}|"
"{{ bar is eq foo }}"
)
assert (
tmpl.render(foo=12, bar="baz")
== "True|False|True|True|False|True|True|False"
)
@pytest.mark.parametrize(
"op,expect",
(
("eq 2", True),
("eq 3", False),
("ne 3", True),
("ne 2", False),
("lt 3", True),
("lt 2", False),
("le 2", True),
("le 1", False),
("gt 1", True),
("gt 2", False),
("ge 2", True),
("ge 3", False),
),
)
def test_compare_aliases(self, env, op, expect):
t = env.from_string(f"{{{{ 2 is {op} }}}}")
assert t.render() == str(expect)
def test_sameas(self, env):
tmpl = env.from_string("{{ foo is sameas false }}|{{ 0 is sameas false }}")
assert tmpl.render(foo=False) == "True|False"
def test_no_paren_for_arg1(self, env):
tmpl = env.from_string("{{ foo is sameas none }}")
assert tmpl.render(foo=None) == "True"
def test_escaped(self, env):
env = Environment(autoescape=True)
tmpl = env.from_string("{{ x is escaped }}|{{ y is escaped }}")
assert tmpl.render(x="foo", y=Markup("foo")) == "False|True"
def test_greaterthan(self, env):
tmpl = env.from_string("{{ 1 is greaterthan 0 }}|{{ 0 is greaterthan 1 }}")
assert tmpl.render() == "True|False"
def test_lessthan(self, env):
tmpl = env.from_string("{{ 0 is lessthan 1 }}|{{ 1 is lessthan 0 }}")
assert tmpl.render() == "True|False"
def test_multiple_tests(self):
items = []
def matching(x, y):
items.append((x, y))
return False
env = Environment()
env.tests["matching"] = matching
tmpl = env.from_string(
"{{ 'us-west-1' is matching '(us-east-1|ap-northeast-1)'"
" or 'stage' is matching '(dev|stage)' }}"
)
assert tmpl.render() == "False"
assert items == [
("us-west-1", "(us-east-1|ap-northeast-1)"),
("stage", "(dev|stage)"),
]
def test_in(self, env):
tmpl = env.from_string(
'{{ "o" is in "foo" }}|'
'{{ "foo" is in "foo" }}|'
'{{ "b" is in "foo" }}|'
"{{ 1 is in ((1, 2)) }}|"
"{{ 3 is in ((1, 2)) }}|"
"{{ 1 is in [1, 2] }}|"
"{{ 3 is in [1, 2] }}|"
'{{ "foo" is in {"foo": 1}}}|'
'{{ "baz" is in {"bar": 1}}}'
)
assert tmpl.render() == "True|True|False|True|False|True|False|True|False"
def test_name_undefined(env):
with pytest.raises(TemplateAssertionError, match="No test named 'f'"):
env.from_string("{{ x is f }}")
def test_name_undefined_in_if(env):
t = env.from_string("{% if x is defined %}{{ x is f }}{% endif %}")
assert t.render() == ""
with pytest.raises(TemplateRuntimeError, match="No test named 'f'"):
t.render(x=1)
def test_is_filter(env):
assert env.call_test("filter", "title")
assert not env.call_test("filter", "bad-name")
def test_is_test(env):
assert env.call_test("test", "number")
assert not env.call_test("test", "bad-name")
|
TestTestsCase
|
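The custom-test mechanism used in `test_multiple_tests`, in isolation: register a callable under `env.tests` and invoke it with the `is` operator (the test name below is invented):

```python
from jinja2 import Environment

env = Environment()
env.tests["divisibleby3"] = lambda value: value % 3 == 0  # illustrative test

tmpl = env.from_string("{{ 9 is divisibleby3 }}|{{ 10 is divisibleby3 }}")
assert tmpl.render() == "True|False"
```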
python
|
ray-project__ray
|
python/ray/data/_internal/datasource/parquet_datasource.py
|
{
"start": 3529,
"end": 5724
}
|
class ____:
"""This wrapper class is created to avoid utilizing `ParquetFileFragment` original
serialization protocol that actually does network RPCs during serialization
(to fetch actual parquet metadata)"""
def __init__(self, f: "ParquetFileFragment", file_size: int):
self._fragment = f
self._file_size = file_size
@property
def file_size(self) -> int:
return self._file_size
@property
def original(self) -> "ParquetFileFragment":
return self._fragment
def __reduce__(self):
return _ParquetFragment.make_fragment, (
self._fragment.format,
self._fragment.path,
self._fragment.filesystem,
self._fragment.partition_expression,
self._file_size,
)
@staticmethod
def make_fragment(format, path, filesystem, partition_expression, file_size):
fragment = format.make_fragment(path, filesystem, partition_expression)
return _ParquetFragment(fragment, file_size)
def check_for_legacy_tensor_type(schema):
"""Check for the legacy tensor extension type and raise an error if found.
Ray Data uses an extension type to represent tensors in Arrow tables. Previously,
the extension type extended `PyExtensionType`. However, this base type can expose
users to arbitrary code execution. To prevent this, we don't load the type by
default.
"""
import pyarrow as pa
for name, type in zip(schema.names, schema.types):
if isinstance(type, pa.UnknownExtensionType) and isinstance(
type, pa.PyExtensionType
):
raise RuntimeError(
f"Ray Data couldn't infer the type of column '{name}'. This might mean "
"you're trying to read data written with an older version of Ray. "
"Reading data written with older versions of Ray might expose you to "
"arbitrary code execution. To try reading the data anyway, set "
"`RAY_DATA_AUTOLOAD_PYEXTENSIONTYPE=1` on *all* nodes."
"To learn more, see https://github.com/ray-project/ray/issues/41314."
)
@dataclass
|
_ParquetFragment
|
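The `__reduce__` trick above, reduced to a standalone sketch: pickle stores a `(callable, args)` recipe instead of the live object, so unpickling rebuilds it without serializing expensive state (`LazyHandle` is a hypothetical stand-in):

```python
import pickle


class LazyHandle:
    def __init__(self, path: str, size: int):
        self.path = path
        self.size = size

    def __reduce__(self):
        # Serialize a rebuild recipe, not the object's (potentially
        # expensive-to-fetch) internal state.
        return LazyHandle._rebuild, (self.path, self.size)

    @staticmethod
    def _rebuild(path: str, size: int) -> "LazyHandle":
        return LazyHandle(path, size)


h = pickle.loads(pickle.dumps(LazyHandle("s3://bucket/file.parquet", 1024)))
assert (h.path, h.size) == ("s3://bucket/file.parquet", 1024)
```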
python
|
ansible__ansible
|
test/units/modules/mount_facts_data.py
|
{
"start": 226,
"end": 416
}
|
class ____:
fstab: str
fstab_parsed: list[dict[str, str]]
mtab: str
mtab_parsed: list[dict[str, str]]
mount: str
mount_parsed: list[dict[str, str]]
@dataclass
|
LinuxData
|
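How a fixture dataclass of this shape is typically populated, as a hedged sketch pairing raw command output with its expected parsed form (field values invented):

```python
from dataclasses import dataclass


@dataclass
class FstabFixture:  # hypothetical analogue of the class above
    fstab: str
    fstab_parsed: list[dict[str, str]]


fixture = FstabFixture(
    fstab="/dev/sda1 / ext4 defaults 0 1",
    fstab_parsed=[{"device": "/dev/sda1", "mount": "/", "fstype": "ext4"}],
)
```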
python
|
Textualize__textual
|
src/textual/widgets/_rule.py
|
{
"start": 1544,
"end": 1945
}
|
class ____:
"""Renders a horizontal rule."""
def __init__(self, character: str, style: Style, width: int):
self.character = character
self.style = style
self.width = width
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> Iterable[Segment]:
yield Segment(self.width * self.character, self.style)
|
HorizontalRuleRenderable
|
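How Rich consumes a `__rich_console__` renderable like the one above, in a standalone sketch (the simplified `Rule` class here is illustrative):

```python
from rich.console import Console, ConsoleOptions
from rich.segment import Segment
from rich.style import Style


class Rule:
    def __init__(self, character: str, style: Style, width: int):
        self.character = character
        self.style = style
        self.width = width

    def __rich_console__(self, console: Console, options: ConsoleOptions):
        # Rich calls this when the object is printed to a Console.
        yield Segment(self.width * self.character, self.style)


Console().print(Rule("─", Style(color="magenta"), 40))
```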
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/graphics.py
|
{
"start": 19613,
"end": 25836
}
|
class ____(RegexLexer):
"""
For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.
.. versionadded:: 0.11
"""
name = 'POVRay'
aliases = ['pov']
filenames = ['*.pov', '*.inc']
mimetypes = ['text/x-povray']
tokens = {
'root': [
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*\n', Comment.Single),
(r'(?s)"(?:\\.|[^"\\])+"', String.Double),
(words((
'break', 'case', 'debug', 'declare', 'default', 'define', 'else',
'elseif', 'end', 'error', 'fclose', 'fopen', 'for', 'if', 'ifdef',
'ifndef', 'include', 'local', 'macro', 'range', 'read', 'render',
'statistics', 'switch', 'undef', 'version', 'warning', 'while',
'write'), prefix=r'#', suffix=r'\b'),
Comment.Preproc),
(words((
'aa_level', 'aa_threshold', 'abs', 'acos', 'acosh', 'adaptive', 'adc_bailout',
'agate', 'agate_turb', 'all', 'alpha', 'ambient', 'ambient_light', 'angle',
'aperture', 'arc_angle', 'area_light', 'asc', 'asin', 'asinh', 'assumed_gamma',
'atan', 'atan2', 'atanh', 'atmosphere', 'atmospheric_attenuation',
'attenuating', 'average', 'background', 'black_hole', 'blue', 'blur_samples',
'bounded_by', 'box_mapping', 'bozo', 'break', 'brick', 'brick_size',
'brightness', 'brilliance', 'bumps', 'bumpy1', 'bumpy2', 'bumpy3', 'bump_map',
'bump_size', 'case', 'caustics', 'ceil', 'checker', 'chr', 'clipped_by', 'clock',
'color', 'color_map', 'colour', 'colour_map', 'component', 'composite', 'concat',
'confidence', 'conic_sweep', 'constant', 'control0', 'control1', 'cos', 'cosh',
'count', 'crackle', 'crand', 'cube', 'cubic_spline', 'cylindrical_mapping',
'debug', 'declare', 'default', 'degrees', 'dents', 'diffuse', 'direction',
'distance', 'distance_maximum', 'div', 'dust', 'dust_type', 'eccentricity',
'else', 'emitting', 'end', 'error', 'error_bound', 'exp', 'exponent',
'fade_distance', 'fade_power', 'falloff', 'falloff_angle', 'false',
'file_exists', 'filter', 'finish', 'fisheye', 'flatness', 'flip', 'floor',
'focal_point', 'fog', 'fog_alt', 'fog_offset', 'fog_type', 'frequency', 'gif',
'global_settings', 'glowing', 'gradient', 'granite', 'gray_threshold',
'green', 'halo', 'hexagon', 'hf_gray_16', 'hierarchy', 'hollow', 'hypercomplex',
'if', 'ifdef', 'iff', 'image_map', 'incidence', 'include', 'int', 'interpolate',
'inverse', 'ior', 'irid', 'irid_wavelength', 'jitter', 'lambda', 'leopard',
'linear', 'linear_spline', 'linear_sweep', 'location', 'log', 'looks_like',
'look_at', 'low_error_factor', 'mandel', 'map_type', 'marble', 'material_map',
'matrix', 'max', 'max_intersections', 'max_iteration', 'max_trace_level',
'max_value', 'metallic', 'min', 'minimum_reuse', 'mod', 'mortar',
'nearest_count', 'no', 'normal', 'normal_map', 'no_shadow', 'number_of_waves',
'octaves', 'off', 'offset', 'omega', 'omnimax', 'on', 'once', 'onion', 'open',
'orthographic', 'panoramic', 'pattern1', 'pattern2', 'pattern3',
'perspective', 'pgm', 'phase', 'phong', 'phong_size', 'pi', 'pigment',
'pigment_map', 'planar_mapping', 'png', 'point_at', 'pot', 'pow', 'ppm',
'precision', 'pwr', 'quadratic_spline', 'quaternion', 'quick_color',
'quick_colour', 'quilted', 'radial', 'radians', 'radiosity', 'radius', 'rainbow',
'ramp_wave', 'rand', 'range', 'reciprocal', 'recursion_limit', 'red',
'reflection', 'refraction', 'render', 'repeat', 'rgb', 'rgbf', 'rgbft', 'rgbt',
'right', 'ripples', 'rotate', 'roughness', 'samples', 'scale', 'scallop_wave',
'scattering', 'seed', 'shadowless', 'sin', 'sine_wave', 'sinh', 'sky', 'sky_sphere',
'slice', 'slope_map', 'smooth', 'specular', 'spherical_mapping', 'spiral',
'spiral1', 'spiral2', 'spotlight', 'spotted', 'sqr', 'sqrt', 'statistics', 'str',
'strcmp', 'strength', 'strlen', 'strlwr', 'strupr', 'sturm', 'substr', 'switch', 'sys',
't', 'tan', 'tanh', 'test_camera_1', 'test_camera_2', 'test_camera_3',
'test_camera_4', 'texture', 'texture_map', 'tga', 'thickness', 'threshold',
'tightness', 'tile2', 'tiles', 'track', 'transform', 'translate', 'transmit',
'triangle_wave', 'true', 'ttf', 'turbulence', 'turb_depth', 'type',
'ultra_wide_angle', 'up', 'use_color', 'use_colour', 'use_index', 'u_steps',
'val', 'variance', 'vaxis_rotate', 'vcross', 'vdot', 'version', 'vlength',
'vnormalize', 'volume_object', 'volume_rendered', 'vol_with_light',
'vrotate', 'v_steps', 'warning', 'warp', 'water_level', 'waves', 'while', 'width',
'wood', 'wrinkles', 'yes'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'bicubic_patch', 'blob', 'box', 'camera', 'cone', 'cubic', 'cylinder', 'difference',
'disc', 'height_field', 'intersection', 'julia_fractal', 'lathe',
'light_source', 'merge', 'mesh', 'object', 'plane', 'poly', 'polygon', 'prism',
'quadric', 'quartic', 'smooth_triangle', 'sor', 'sphere', 'superellipsoid',
'text', 'torus', 'triangle', 'union'), suffix=r'\b'),
Name.Builtin),
# TODO: <=, etc
(r'[\[\](){}<>;,]', Punctuation),
(r'[-+*/=]', Operator),
(r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
(r'[a-zA-Z_]\w*', Name),
(r'[0-9]+\.[0-9]*', Number.Float),
(r'\.[0-9]+', Number.Float),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\s+', Text),
]
}
|
PovrayLexer
|
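A usage sketch for a `RegexLexer` like this one; it uses the stock pygments `PovrayLexer` rather than the vendored wandb copy:

```python
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.graphics import PovrayLexer

code = "#declare Radius = 2.5;\nsphere { <0, 0, 0>, Radius pigment { rgb <1, 0, 0> } }"
print(highlight(code, PovrayLexer(), TerminalFormatter()))
```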
python
|
python-visualization__folium
|
folium/plugins/scroll_zoom_toggler.py
|
{
"start": 80,
"end": 1761
}
|
class ____(MacroElement):
"""Creates a button for enabling/disabling scroll on the Map."""
_template = Template(
"""
{% macro header(this,kwargs) %}
<style>
#{{ this.get_name() }} {
position:absolute;
width:35px;
bottom:10px;
height:35px;
left:10px;
background-color:#fff;
text-align:center;
line-height:35px;
vertical-align: middle;
}
</style>
{% endmacro %}
{% macro html(this,kwargs) %}
<img id="{{ this.get_name() }}"
alt="scroll"
src="https://cdnjs.cloudflare.com/ajax/libs/ionicons/2.0.1/png/512/arrow-move.png"
style="z-index: 999999"
onclick="{{ this._parent.get_name() }}.toggleScroll()">
{% endmacro %}
{% macro script(this,kwargs) %}
{{ this._parent.get_name() }}.scrollEnabled = true;
{{ this._parent.get_name() }}.toggleScroll = function() {
if (this.scrollEnabled) {
this.scrollEnabled = false;
this.scrollWheelZoom.disable();
} else {
this.scrollEnabled = true;
this.scrollWheelZoom.enable();
}
};
{{ this._parent.get_name() }}.toggleScroll();
{% endmacro %}
"""
)
def __init__(self):
super().__init__()
self._name = "ScrollZoomToggler"
|
ScrollZoomToggler
|
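A short usage sketch for the plugin above, assuming a current folium install:

```python
import folium
from folium.plugins import ScrollZoomToggler

m = folium.Map(location=[45.5236, -122.6750], zoom_start=13)
m.add_child(ScrollZoomToggler())
m.save("map.html")  # open in a browser; the arrow icon toggles scroll zoom
```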
python
|
pytorch__pytorch
|
test/test_fx_experimental.py
|
{
"start": 2669,
"end": 67483
}
|
class ____(JitTestCase):
def test_find_single_partition(self):
class TestModule(torch.nn.Module):
def forward(self, a, b):
return a + b
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(1)
b = torch.rand(1)
graph_manipulation.get_size_of_all_nodes(traced, [a, b])
partitioner = Partitioner()
devices = [
Device("dev_0", 125, 0),
Device("dev_1", 150, 1),
Device("dev_2", 125, 2),
]
partitioner_config = PartitionerConfig(devices)
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
dag = ret.dag
self.assertEqual(traced(a, b), module_with_submodules(a, b))
assert dag.nodes[0].logical_device_ids == [1]
def test_lack_of_devices(self):
class TestModule(torch.nn.Module):
def forward(self, a, b):
return a + b
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
b = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a, b])
partitioner = Partitioner()
devices = [Device("dev_0", 4, 0), Device("dev_1", 4, 1)]
partitioner_config = PartitionerConfig(devices, PartitionMode.size_based)
catch_runtime_error = False
try:
ret = partitioner.partition_graph(traced, m, partitioner_config)
except RuntimeError:
catch_runtime_error = True
assert catch_runtime_error
def test_large_node_error(self):
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, a):
linear = self.linear(a)
add = linear + a
return add
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a])
partitioner = Partitioner()
devices = [
Device("dev_0", 40, 0),
Device("dev_1", 40, 0),
Device("dev_2", 40, 0),
Device("dev_3", 40, 0),
Device("dev_4", 40, 0),
]
partitioner_config = PartitionerConfig(devices, PartitionMode.size_based)
catch_runtime_error = False
try:
ret = partitioner.partition_graph(traced, m, partitioner_config)
except RuntimeError:
catch_runtime_error = True
assert catch_runtime_error
def test_partition_node_manipulation(self):
class TestModule(torch.nn.Module):
def forward(self, a, b):
add_1 = a + b
add_2 = add_1 + torch.rand(4)
add_3 = add_2 + torch.rand(4)
return add_3
m = TestModule()
traced = symbolic_trace(m)
a, b = torch.rand(4), torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a, b])
partitioner = Partitioner()
devices = [Device("dev_0", 1000, 0)]
partitioner_config = PartitionerConfig(devices)
ret = partitioner.partition_graph(traced, m, partitioner_config)
partition = partitioner.partitions[0]
assert partition.used_mem_bytes == 112
# Select add_2 node to remove
selected_node = None
for node in partition.nodes:
if node.name == "add_2":
selected_node = node
partition.remove_node(selected_node)
assert partition.used_mem_bytes == 80
def test_size_based_partition(self):
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(4, 4)
self.c = torch.rand(4)
def forward(self, a, b):
add_1 = a + b
linear = self.linear(add_1)
add_2 = linear + self.c
return add_2
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
b = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a, b])
partitioner = Partitioner()
devices = [
Device("dev_0", 125, 0),
Device("dev_1", 125, 1),
Device("dev_2", 125, 2),
]
partitioner_config = PartitionerConfig(devices, PartitionMode.size_based)
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
dag = ret.dag
self.assertEqual(traced(a, b), module_with_submodules(a, b))
for i, node in enumerate(dag.nodes):
assert node.logical_device_ids == [i]
def test_partition_device_mapping(self):
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, a):
b = torch.rand(4)
add_1 = a + b
linear_1 = self.linear(add_1)
add_2 = torch.rand(4) + a
add_3 = add_2 + linear_1
return add_3
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a])
partitioner = Partitioner()
devices = [Device("dev_0", 120, 0), Device("dev_1", 160, 1)]
partitioner_config = PartitionerConfig(devices, PartitionMode.size_based)
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
dag = ret.dag
self.assertEqual(traced(a), module_with_submodules(a))
for i, node in enumerate(dag.nodes):
if i == 1:
assert node.logical_device_ids == [1]
else:
assert node.logical_device_ids == [0]
def test_sparse_nn_partition(self):
class MyRecommendationModule(torch.nn.Module):
def create_mlp(self, num_of_layers: int, input_size: int, output_size: int):
layers = torch.nn.ModuleList()
for _ in range(num_of_layers):
ll = torch.nn.Linear(input_size, output_size)
layers.append(ll)
layers.append(torch.nn.ReLU())
return layers
def __init__(self) -> None:
super().__init__()
layers = self.create_mlp(4, 4, 4)
self.bottom_layers = torch.nn.Sequential(*layers)
layers = self.create_mlp(3, 24, 24)
self.top_layers = torch.nn.Sequential(*layers)
self.embedding_layers = torch.nn.ModuleList()
el = torch.nn.EmbeddingBag(500000, 4, mode="sum", sparse=True)
self.embedding_layers.append(el)
for _ in range(3):
el = torch.nn.EmbeddingBag(1000000, 4, mode="sum", sparse=True)
self.embedding_layers.append(el)
el = torch.nn.EmbeddingBag(500000, 4, mode="sum", sparse=True)
self.embedding_layers.append(el)
def forward(self, a, b, offset):
x = self.bottom_layers(a)
y = []
c = []
for _ in range(len(self.embedding_layers)):
temp = torch.randint(10, (8,))
c.append(temp + b)
for i in range(len(self.embedding_layers)):
if i % 2 == 0:
y.append(self.embedding_layers[i](c[i], offset))
else:
y.append(
self.embedding_layers[i](torch.randint(10, (8,)), offset)
)
z = torch.cat([x] + y, dim=1)
p = self.top_layers(z)
return p
m = MyRecommendationModule()
a = torch.rand(2, 4)
b = torch.randint(10, (8,))
offset = torch.randint(1, (2,))
traced = symbolic_trace(m)
graph_manipulation.get_size_of_all_nodes(traced, [a, b, offset])
devices = [
Device("dev_0", 33000000, 0),
Device("dev_1", 33000000, 1),
Device("dev_2", 33000000, 2),
]
partitioner_config = PartitionerConfig(devices, PartitionMode.sparse_nn)
partitioner = Partitioner()
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
dag = ret.dag
self.assertEqual(traced(a, b, offset), module_with_submodules(a, b, offset))
assert len(module_with_submodules.graph.nodes) == 24
def test_partition_latency(self):
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, a):
add_1 = a + torch.rand(4)
add_2 = add_1 + torch.rand(4)
linear_1 = self.linear(add_1)
add_3 = add_2 + linear_1
add_4 = add_2 + add_3
return add_4
def get_node_to_latency_mapping(fx_module: GraphModule):
"""Given a fx module, generate node latency for each node
based on the size of each node
"""
node_to_latency_mapping: dict[Node, NodeLatency] = {}
for node in fx_module.graph.nodes:
if node.op not in {"output", "placeholder", "get_attr"}:
if node.size_bytes.total_size == node.size_bytes.output_size:
node_to_latency_mapping[node] = NodeLatency(
node.size_bytes.total_size, 2.0 * node.size_bytes.total_size
)
else:
node_to_latency_mapping[node] = NodeLatency(
node.size_bytes.total_size, node.size_bytes.output_size
)
return node_to_latency_mapping
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a])
node_to_latency_mapping = get_node_to_latency_mapping(traced)
devices = [Device("dev_0", 200, 0), Device("dev_1", 200, 1)]
partitioner = Partitioner()
partitioner_config = PartitionerConfig(devices)
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
self.assertEqual(traced(a), module_with_submodules(a))
partitions = partitioner.partitions
partition_to_latency_mapping = get_partition_to_latency_mapping(
partitions, node_to_latency_mapping
)
for p in partition_to_latency_mapping:
if p.partition_id == 0:
assert partition_to_latency_mapping[p] == (128.0, 80.0, 160.0)
else:
assert partition_to_latency_mapping[p] == (16.0, 32.0, 32.0)
transfer_rate_bytes_per_sec = 2
critical_path_latency_sec = get_latency_of_partitioned_graph(
partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec
)
assert critical_path_latency_sec == 208.0
def test_cost_aware_partition(self):
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, a):
add_1 = a + torch.rand(4)
add_2 = add_1 + torch.rand(4)
linear_1 = self.linear(add_1)
add_3 = add_2 + torch.rand(4)
add_4 = add_2 + linear_1
add_5 = add_3 + add_4
return add_5
def get_node_to_latency_mapping(fx_module: GraphModule):
node_to_latency_mapping: dict[Node, NodeLatency] = {}
for node in fx_module.graph.nodes:
if node.op not in {"output", "placeholder", "get_attr"}:
if node.size_bytes.total_size == node.size_bytes.output_size:
node_to_latency_mapping[node] = NodeLatency(
node.size_bytes.total_size, 1
)
else:
node_to_latency_mapping[node] = NodeLatency(
node.size_bytes.total_size, node.size_bytes.output_size
)
return node_to_latency_mapping
m = MyModule()
traced = symbolic_trace(m)
a = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a])
devices = [
Device("dev_0", 125, 0),
Device("dev_1", 125, 1),
Device("dev_2", 125, 2),
Device("dev_3", 125, 3),
]
node_to_latency_mapping = get_node_to_latency_mapping(traced)
partitioner_config = PartitionerConfig(
devices,
mode=PartitionMode.cost_aware,
transfer_rate_bytes_per_sec=2,
node_to_latency_mapping=node_to_latency_mapping,
)
partitioner = Partitioner()
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
dag = ret.dag
self.assertEqual(traced(a), module_with_submodules(a))
partitions = partitioner.partitions
partition_to_latency_mapping = get_partition_to_latency_mapping(
partitions, node_to_latency_mapping
)
critical_path_latency_sec = get_latency_of_partitioned_graph(
partitions,
partition_to_latency_mapping,
partitioner_config.transfer_rate_bytes_per_sec,
)
assert critical_path_latency_sec == 160.0
def test_aot_based_partition(self):
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.b = torch.rand(4)
self.c = torch.rand(4)
def forward(self, a):
add_1 = a + self.b
add_2 = self.c + add_1
return add_2
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
node_to_partition_id = {}
partition_to_logical_devices = {}
count = 0
graph_manipulation.get_size_of_all_nodes(traced, [a])
for node in traced.graph.nodes:
if node.op not in {"placeholder", "get_attr", "output"}:
node_to_partition_id[node] = count
partition_to_logical_devices[count] = [0]
count += 1
devices = [Device("dev_0", 200, 0)]
partitioner_config = PartitionerConfig(
devices=devices,
mode=PartitionMode.aot_based,
node_to_partition_mapping=node_to_partition_id,
partition_to_logical_device_mapping=partition_to_logical_devices,
)
partitioner = Partitioner()
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
dag = ret.dag
self.assertEqual(module_with_submodules(a), traced(a))
for node in dag.nodes:
assert node.size_bytes == 48
assert node.logical_device_ids == [0]
def test_replace_target_nodes_with(self):
class testModule(torch.nn.Module):
def forward(self, a, b):
return a + b
m = testModule()
traced = symbolic_trace(m)
input1 = torch.randn(1)
input2 = torch.randn(1)
assert (input1 + input2) == traced(input1, input2)
graph_manipulation.replace_target_nodes_with(
fx_module=traced,
old_op="call_function",
old_target=operator.add,
new_op="call_function",
new_target=operator.mul,
)
assert (input1 * input2) == traced(input1, input2)
def test_saturate_host(self):
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, a):
add_1 = a + torch.rand(4)
add_2 = add_1 + torch.rand(4)
linear_1 = self.linear(add_1)
add_3 = add_2 + linear_1
add_4 = add_2 + add_3
return add_4
m = TestModule()
traced = symbolic_trace(m)
a = torch.rand(4)
graph_manipulation.get_size_of_all_nodes(traced, [a])
devices = [
Device("dev_0", 200, 0),
Device("dev_1", 200, 1),
Device("dev_2", 100, 2),
Device("dev_3", 100, 3),
Device("dev_4", 200, 4),
Device("dev_5", 100, 5),
]
partitioner = Partitioner()
# Without host saturation, the model will be split into two partitions.
# dev_0 holds partition 0 of 192 bytes and dev_1 holds partition 1 of 48 bytes.
partitioner_config = PartitionerConfig(devices, saturate_host=True)
ret = partitioner.partition_graph(traced, m, partitioner_config)
module_with_submodules = ret.module_with_submodules
self.assertEqual(traced(a), module_with_submodules(a))
partitions = partitioner.partitions
self.assertEqual(len(partitions), 2)
# With host saturation, partition 1 will be replicated to dev_4, and partition 2
# will be replicated to dev_2.
self.assertEqual(partitions[0].logical_device_ids, [0, 4])
self.assertEqual(partitions[1].logical_device_ids, [1, 2])
@skipIfNoTorchVision
def test_conv_bn_fusion(self):
rn18 = resnet18().eval()
traced = symbolic_trace(rn18)
fused = optimization.fuse(traced)
self.assertTrue(
all(not isinstance(m, torch.nn.BatchNorm2d) for m in fused.modules())
)
N, C, H, W = 20, 3, 224, 224
inp = torch.randn(N, C, H, W)
self.assertEqual(fused(inp), rn18(inp))
def test_conv_bn_fusion_not_running_state(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(32, 64, 3, stride=2)
self.bn = torch.nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
model = M().eval()
traced = symbolic_trace(model)
fused = optimization.fuse(traced)
inp = torch.randn([1, 32, 50, 50])
# bn need not be folded in conv
self.assertTrue(
any(isinstance(m, torch.nn.BatchNorm2d) for m in fused.modules())
)
self.assertEqual(fused(inp), model(inp))
def test_conv_bn_fusion_mixed_dtype(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False, dtype=torch.bfloat16)
self.bn = torch.nn.BatchNorm2d(16, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
model = M().eval()
traced = symbolic_trace(model)
fused = optimization.fuse(traced)
inp = torch.randn(1, 3, 64, 64, dtype=torch.bfloat16)
self.assertTrue(
all(not isinstance(m, torch.nn.BatchNorm2d) for m in fused.modules())
)
self.assertEqual(fused(inp), model(inp))
def test_call_to_assert_no_msg(self):
class M(torch.nn.Module):
def forward(self, a, b):
assert a == b
return a + b
m = M()
traced = symbolic_trace_with_rewrite(m)
# Make sure the graph is well-formed
traced.graph.lint()
# Check the IR to make sure there's a call_function node with target == "Assert"
self.assertTrue(
any(
node.op == "call_function" and node.target is torch._assert
for node in traced.graph.nodes
)
)
# Ensure that the assert throws when it's supposed to and doesn't throw when it's not supposed to
traced(3, 3)
with self.assertRaisesRegex(AssertionError, ""):
traced(3, 5)
# Confirm that the output is correct
self.assertEqual(traced(3, 3), m(3, 3))
def test_meta_tracer(self):
class MetaTracerTestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.emb = torch.nn.Embedding(num_embeddings=42, embedding_dim=16)
self.layernorm = torch.nn.LayerNorm(16)
def forward(self, x):
emb = self.emb(x)
emb = emb + torch.arange(emb.shape[-1], dtype=torch.float, device=emb.device)
lol = self.layernorm(emb)
return torch.relu(lol) if lol.shape[0] < 30 else torch.sigmoid(lol)
mttm = MetaTracerTestModule()
for BS in [15, 35]:
x = torch.zeros(BS, dtype=torch.long).random_(42)
meta_args = {'x' : x.to(device='meta')}
gm = torch.fx.experimental.meta_tracer.symbolic_trace(mttm, meta_args=meta_args)
torch.testing.assert_close(gm(x), mttm(x))
# Test serialization/deserialization
with tempfile.TemporaryDirectory() as tmp_dir:
with open(f'{tmp_dir}/meta_module.pkl', 'wb') as f:
pickle.dump(gm, f)
with open(f'{tmp_dir}/meta_module.pkl', 'rb') as f:
loaded = pickle.load(f)
torch.testing.assert_close(loaded(x), mttm(x))
def test_call_to_assert_with_msg(self):
class M(torch.nn.Module):
def forward(self, a, b):
assert a == b, "test message"
return a + b
m = M()
traced = symbolic_trace_with_rewrite(m)
# Make sure the graph is well-formed
traced.graph.lint()
# Check the IR to make sure there's a call_function node with target == "Assert"
self.assertTrue(
any(
node.op == "call_function" and node.target is torch._assert
for node in traced.graph.nodes
)
)
# Ensure that the assert throws when it's supposed to and doesn't throw when it's not supposed to
traced(3, 3)
with self.assertRaisesRegex(AssertionError, "test message"):
traced(3, 5)
# Confirm that the output is correct
self.assertEqual(traced(3, 3), m(3, 3))
def test_call_to_assert_with_empty_msg(self):
class M(torch.nn.Module):
def forward(self, a, b):
assert a == b, ""
return a + b
m = M()
traced = symbolic_trace_with_rewrite(m)
# Make sure the graph is well-formed
traced.graph.lint()
# Check the IR to make sure there's a call_function node with target == "Assert"
self.assertTrue(
any(
node.op == "call_function" and node.target is torch._assert
for node in traced.graph.nodes
)
)
# Ensure that the assert throws when it's supposed to and doesn't throw when it's not supposed to
traced(3, 3)
with self.assertRaisesRegex(AssertionError, ""):
traced(3, 5)
# Confirm that the output is correct
self.assertEqual(traced(3, 3), m(3, 3))
def test_call_to_assert_with_multiline_message(self):
class M(torch.nn.Module):
def forward(self, a, b):
error_msg = """
An error message with
terrible spacing
"""
assert a == b, error_msg
return a + b
m = M()
traced = symbolic_trace_with_rewrite(m)
# Make sure the graph is well-formed
traced.graph.lint()
# Check the IR to make sure there's a call_function node with target == "Assert"
self.assertTrue(
any(
node.op == "call_function" and node.target is torch._assert
for node in traced.graph.nodes
)
)
# Ensure that the assert throws when it's supposed to and doesn't throw when it's not supposed to
error_msg = """
An error message with
terrible spacing
"""
traced(3, 3)
with self.assertRaisesRegex(AssertionError, error_msg):
traced(3, 5)
# Confirm that the output is correct
self.assertEqual(traced(3, 3), m(3, 3))
def test_subgraph_creation(self):
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x, y):
z = self.linear(x + self.param).clamp(min=0.0, max=1.0)
w = self.linear(y).clamp(min=0.0, max=1.0)
return z + w
# symbolically trace model
my_module = MyModule()
my_module_traced = symbolic_trace(my_module)
# random mod partitioning
partition_counter = 0
NPARTITIONS = 3
# Add some random meta info to make sure it is kept around.
for node in my_module_traced.graph.nodes:
if node.op != "output":
node.meta["test_meta_info"] = True
def mod_partition(node: Node):
nonlocal partition_counter
partition = partition_counter % NPARTITIONS
partition_counter = (partition_counter + 1) % NPARTITIONS
return partition
# split module in module with submodules
module_with_submodules = split_module(
my_module_traced, my_module, mod_partition
)
# Check that test_meta_info was still on all nodes.
submodules = dict(module_with_submodules.named_modules())
for node in module_with_submodules.graph.nodes:
if node.op == "call_module":
submod = submodules[node.target]
self.assertTrue(isinstance(submod, torch.fx.GraphModule))
for submod_node in submod.graph.nodes:
if submod_node.op != "output":
stored_op = submod_node.meta.get("test_meta_info")
self.assertTrue(stored_op is not None and stored_op)
x = torch.rand(3, 4)
y = torch.rand(3, 4)
orig_out = my_module_traced(x, y)
submodules_out = module_with_submodules(x, y)
self.assertEqual(orig_out, submodules_out)
def test_split_module_input_names(self):
class Mod(torch.nn.Module):
def forward(self, x, a0, a1, b0, b1, c0, c1):
x = x + (a0 ** 2) + (a1 / 2)
x = x + (b0 ** 2) + (b1 / 2)
x = x + (c0 ** 2) + (c1 / 2)
return x
mod = Mod()
traced = torch.fx.symbolic_trace(mod)
seen = 0
def split(n):
nonlocal seen
result = seen // 4
seen += 1
return result
split = split_module(traced, mod, split, keep_original_input_name=False)
# All the submodules should take in the inputs in the same order.
args = [torch.tensor(2.), torch.tensor(3.), torch.tensor(4.)]
output0 = split.submod_0(*args)
output1 = split.submod_1(*args)
output2 = split.submod_2(*args)
self.assertEqual(output0, output1)
self.assertEqual(output1, output2)
# Each submodule should have normalized input names
def check_ph(gm):
nodes = list(gm.graph.nodes)
self.assertEqual(nodes[0].target, "arg_0")
self.assertEqual(nodes[1].target, "arg_1")
self.assertEqual(nodes[2].target, "arg_2")
check_ph(split.submod_0)
check_ph(split.submod_1)
check_ph(split.submod_2)
def test_split_module_dead_code(self):
class ModWithDeadCode(torch.nn.Module):
def forward(self, x):
output = x * 2 # we want this
dead_line = x + 2 # this is dead
return output
mod = ModWithDeadCode()
traced = torch.fx.symbolic_trace(mod)
# split into before (0), target (1), and after(2)
saw_mul = False
def split_callback(n):
nonlocal saw_mul
if n.target == operator.mul:
saw_mul = True
return 1
if not saw_mul:
return 0
if saw_mul:
return 2
split = split_module(traced, mod, split_callback)
x = torch.randn((5,))
torch.testing.assert_close(
split(x), traced(x)
)
def test_split_module_return_node(self):
def foo(x):
x.add_(1)
gm = make_fx(foo, tracing_mode="fake")(torch.randn(3,))
def cb(_):
return 1
sp_gm = split_module(gm, None, cb)
submod_gm = sp_gm.submod_1
for node in submod_gm.graph.nodes:
if node.op == "output":
break
else:
raise RuntimeError("Expected the subgraph to have an output node.")
def test_split_module_kwargs_expansion(self):
class ModuleWithKwargsExpansion(torch.nn.Module):
def forward(self, x, **kwargs):
return x + kwargs['foo']
mod = ModuleWithKwargsExpansion()
traced = torch.fx.symbolic_trace(mod)
seen_getitem = False
def split_callback(n):
nonlocal seen_getitem
split_idx = int(seen_getitem)
if n.target == operator.getitem:
seen_getitem = True
return split_idx
split = split_module(traced, mod, split_callback)
x = torch.randn(5, 3)
foo = torch.randn(5, 3)
torch.testing.assert_close(split(x, foo=foo), traced(x, foo=foo))
@skipIfNoTorchVision
def test_subgraph_trivial_resnet(self):
# Smoke test that trivially splitting resnet into a single partition works.
# A previous bug caused submodule names to be aliased.
m = resnet18()
traced = symbolic_trace(m)
a = torch.rand(64, 3, 7, 7)
module_with_submodules = split_module(traced, m, lambda node: 0)
module_with_submodules(a)
def test_split_module_default_arg(self):
class ModelToTrace(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin = torch.nn.Linear(512, 512)
def forward(self, x, targets=None):
x = self.lin(x)
if targets is not None:
x = x + targets
return x
mtt = ModelToTrace()
traced = torch.fx.symbolic_trace(mtt, concrete_args={'targets': None})
split = split_module(traced, mtt, lambda node: 0)
x = torch.randn(50, 512)
torch.testing.assert_close(split(x), traced(x))
def test_split_module_keep_original_order_and_noop_graph(self):
# Verify that split_module returns a similar no-op graph
# for `keep_original_order={True|False}`.
def fn(x):
return (x,)
g = make_fx(fn, tracing_mode="fake")(torch.randn(3, 3))
# g.graph.print_tabular()
# opcode name target args kwargs
# ----------- ------ -------- --------- --------
# placeholder x_1 x_1 () {}
# output output output ((x_1,),) {}
def _test_split_graph(split_gm):
# Verify that the split_gm has same structure as original
self.assertEqual(len(split_gm.graph.nodes), 2)
nodes = list(split_gm.graph.nodes)
self.assertEqual(nodes[0].op, "placeholder")
self.assertEqual(nodes[1].op, "output")
# `keep_original_order=False`
_test_split_graph(split_module(g, None, split_callback=lambda _ : 0, keep_original_order=False))
# `keep_original_order=True`
_test_split_graph(split_module(g, None, split_callback=lambda _ : 0, keep_original_order=True))
@unittest.skipIf(TEST_WITH_CROSSREF, "See https://github.com/pytorch/pytorch/issues/160077")
def test_split_module_symint_dependency_handling(self):
# Based on the code from - transformers/models/granitemoe/modeling_granitemoe.py
class GraniteMoeTopKGating(torch.nn.Module):
def __init__(self, input_size: int, num_experts: int, top_k: int):
super().__init__()
self.num_experts = num_experts
self.input_size = input_size
self.top_k = top_k
self.layer = torch.nn.Linear(input_size, num_experts, bias=False)
def forward(self, hidden_states):
# compute the top_k routing decision
logits = self.layer(hidden_states).float() # [batch_size x seq_len, num_experts]
top_k_logits, top_k_indices = logits.topk(self.top_k, dim=1) # [num_tokens, top_k]
top_k_gates = torch.softmax(top_k_logits, dim=1).type_as(hidden_states) # [num_tokens, top_k]
# compute number of input given to each expert
zeros = torch.zeros(
[top_k_gates.size(0), self.num_experts], dtype=top_k_gates.dtype, device=top_k_gates.device
) # [num_tokens, num_experts]
gates = zeros.scatter(1, top_k_indices, 1) # [num_tokens, num_experts]
expert_size = gates.long().sum(0) # [num_experts,]
expert_size = expert_size.tolist()
# sort and group input tokens according to expert assignment
top_k_experts = top_k_indices.flatten() # [num_tokens * top_k]
_, index_sorted_experts = top_k_experts.sort(0) # [num_tokens * top_k]
batch_index = index_sorted_experts.div(self.top_k, rounding_mode="trunc") # [num_tokens * top_k]
# gather the gate values for grouped input tokens
top_k_gates = top_k_gates.flatten() # [num_tokens * top_k]
batch_gates = top_k_gates[index_sorted_experts] # [num_tokens * top_k]
return index_sorted_experts, batch_index, batch_gates, expert_size, logits
class GraniteMoeMoE(torch.nn.Module):
def __init__(self):
super().__init__()
self.input_size = 32
self.num_local_experts = 4
num_experts_per_tok = 2
self.router = GraniteMoeTopKGating(
input_size=self.input_size,
num_experts=self.num_local_experts,
top_k=num_experts_per_tok,
)
def forward(self, layer_input):
_, batch_index, _, expert_size, _ = self.router(layer_input)
expert_inputs = layer_input[batch_index]
return expert_inputs.split(expert_size, dim=0)
moe = GraniteMoeMoE()
inp = torch.randn([32, 32])
expected = moe(inp)
PARTITION_ID = 0
PARTITION_OPS_CTR = 0
NODE_PARTITION_MAP = {}
# `callback` is called multiple times with the same `node` in `split_module`.
# Cache the result so that the partition id is consistent across calls.
def callback(node) -> int:
nonlocal PARTITION_ID, PARTITION_OPS_CTR, NODE_PARTITION_MAP
if node in NODE_PARTITION_MAP:
return NODE_PARTITION_MAP[node]
if PARTITION_OPS_CTR % 5 == 0:
PARTITION_ID += 1
PARTITION_OPS_CTR += 1
NODE_PARTITION_MAP[node] = PARTITION_ID
return PARTITION_ID
def backend(gm, inps):
split_gm = split_module(gm, root_m=None, split_callback=callback,
keep_original_order=True, keep_original_node_name=True)
return split_gm
actual = torch.compile(moe, backend=backend)(inp)
torch.testing.assert_close(actual, expected)
def test_normalize_binary_operators(self):
ops_to_test = {
torch.add,
torch.mul,
torch.sub,
torch.div,
torch.floor_divide,
torch.remainder,
torch.eq,
torch.ne,
torch.lt,
torch.le,
torch.gt,
torch.ge,
}
# Test Tensor/Tensor callsite
for op in ops_to_test:
class WrapperMod(torch.nn.Module):
def forward(self, x, y):
return op(x, y)
traced = symbolic_trace(WrapperMod())
normalized = NormalizeOperators(traced).transform()
x, y = torch.randn(3, 4), torch.randn(3, 4)
torch.testing.assert_close(traced(x, y), normalized(x, y))
self.assertFalse(
any(n.target in ops_to_test for n in normalized.graph.nodes)
)
# Test Tensor/scalar callsite
for op in ops_to_test:
class WrapperMod(torch.nn.Module):
def forward(self, x):
return op(x, 42)
traced = symbolic_trace(WrapperMod())
normalized = NormalizeOperators(traced).transform()
x = torch.randn(3, 4)
torch.testing.assert_close(traced(x), normalized(x))
self.assertFalse(
any(n.target in ops_to_test for n in normalized.graph.nodes)
)
@skipIfNoTorchVision
def test_normalize_args(self):
m = resnet18()
class FunctionalTracer(torch.fx.Tracer):
def is_leaf_module(
self, m: torch.nn.Module, module_qualified_name: str
) -> bool:
# `leaves` contains the set of standard `nn.Modules` that are not
# currently symbolically traceable. Ideally this set would be empty
leaves = {torch.nn.BatchNorm2d}
return type(m) in leaves
traced = torch.fx.GraphModule(m, FunctionalTracer().trace(m))
input = torch.randn(5, 3, 224, 224)
ref_outs = traced(input)
ShapeProp(traced).propagate(input)
traced = NormalizeArgs(traced).transform()
modules = dict(traced.named_modules())
for node in traced.graph.nodes:
if node.op == "call_function" and node.target != operator.add:
self.assertEqual(len(node.args), 0)
elif node.op == "call_module":
submod_class = modules[node.target].__class__
nn_class = getattr(torch.nn, submod_class.__name__)
if submod_class == nn_class:
self.assertEqual(len(node.args), 0)
traced(input)
self.assertEqual(traced(input), ref_outs)
def test_normalize_modules_exhaustive(self):
"""
Exhaustively test `Node.normalized_arguments` on all standard
torch.nn Module classes
"""
for test_params in module_tests + get_new_module_tests():
if "constructor" not in test_params:
constructor = getattr(torch.nn, test_params["module_name"])
else:
constructor = test_params["constructor"]
if "constructor_args" not in test_params:
args = ()
else:
args = test_params["constructor_args"]
mod = constructor(*args)
# Skip modules that are not standard `torch.nn`
# instances, including functionals. (functionals
# are tested in test_normalize_args)
if mod.__class__.__name__ not in dir(torch.nn):
continue
if "input_fn" not in test_params:
inputs = torch.randn(test_params["input_size"])
else:
inputs = test_params["input_fn"]()
if not isinstance(inputs, (tuple, list)):
inputs = (inputs,)
params = ", ".join(f"v{i}" for i in range(len(inputs)))
# Generate a class to wrap this standard `nn.Module` instance
test_classname = f"Test{mod.__class__.__name__}"
test_mod_code = f"""
class {test_classname}(torch.nn.Module):
def __init__(self, mod):
super().__init__()
self.mod = mod
def forward(self, {params}):
return self.mod({params})
"""
gbls = {"torch": torch}
exec(test_mod_code, gbls)
test_instance = gbls[test_classname](mod)
traced = symbolic_trace(test_instance)
# Use `Node.normalized_arguments` to get a new set of arguments
# to feed to the Module. Then, rewrite the node to only take
# in those arguments as kwargs
modules = dict(traced.named_modules())
for node in traced.graph.nodes:
if node.op == "call_module":
submod_class = modules[node.target].__class__
nn_class = getattr(torch.nn, submod_class.__name__)
if submod_class == nn_class:
normalized_args = node.normalized_arguments(traced)
normalized_args2 = normalize_module(
traced, node.target, node.args, node.kwargs
)
assert normalized_args == normalized_args2
assert normalized_args
node.args = normalized_args.args
node.kwargs = normalized_args.kwargs
traced.recompile()
# These Modules have an RNG in their forward, so testing
# correctness by comparing outputs is not correct. Skip that
# check for these
stochastic_modules = {"FractionalMaxPool2d", "FractionalMaxPool3d", "RReLU"}
if mod.__class__.__name__ not in stochastic_modules:
self.assertEqual(traced(*inputs), mod(*inputs))
traced = NormalizeArgs(symbolic_trace(test_instance)).transform()
modules = dict(traced.named_modules())
for node in traced.graph.nodes:
if node.op == "call_module":
submod_class = modules[node.target].__class__
nn_class = getattr(torch.nn, submod_class.__name__)
if submod_class == nn_class:
self.assertEqual(len(node.args), 0)
def test_normalize_args_preserve_meta(self):
class MyModule(torch.nn.Module):
def forward(self, a):
return torch.add(a, 3)
m = MyModule()
traced = symbolic_trace(m)
for node in traced.graph.nodes:
if node.op == "call_function" and node.target == torch.add:
node.meta["my_key"] = 7
break
else:
self.fail("Didn't find call_function torch.add")
input = torch.randn(2, 3)
ShapeProp(traced).propagate(input)
traced = NormalizeArgs(traced).transform()
for node in traced.graph.nodes:
if node.op == "call_function" and node.target == torch.add:
self.assertTrue("my_key" in node.meta)
self.assertEqual(node.meta["my_key"], 7)
break
else:
self.fail("Didn't find call_function torch.add")
def test_normalize_args_preserve_type(self):
class MyModule(torch.nn.Module):
def forward(self, a: list[torch.Tensor]):
return torch.add(a[0], a[1])
m = MyModule()
traced = symbolic_trace(m)
traced = NormalizeArgs(traced).transform()
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertEqual(node.type, list[torch.Tensor])
@skipIfNoTorchVision
def test_annotate_returns_with_schema(self):
m = resnet18()
traced_modules = symbolic_trace(m)
traced_modules_annotated = AnnotateTypesWithSchema(traced_modules).transform()
for node in traced_modules_annotated.graph.nodes:
if node.type is None:
check = (node.op, node.target)
self.assertIn(
check,
{
("placeholder", "x"),
("call_module", "maxpool"),
("call_function", operator.add),
("call_function", torch.flatten),
("output", "output"),
}
)
# Smoke test torchscript compilation since now we're emitting type annotations
torch.jit.script(traced_modules_annotated)
class FunctionalTracer(torch.fx.Tracer):
def is_leaf_module(
self, m: torch.nn.Module, module_qualified_name: str
) -> bool:
# `leaves` contains the set of standard `nn.Modules` that are not
# currently symbolically traceable. Ideally this set would be empty
leaves = {torch.nn.BatchNorm2d}
return type(m) in leaves
traced_functionals = torch.fx.GraphModule(m, FunctionalTracer().trace(m))
traced_functionals_annotated = AnnotateTypesWithSchema(
traced_functionals
).transform()
for node in traced_functionals_annotated.graph.nodes:
if node.type is None:
check = (node.op, node.target)
excluded_nodes = {
("placeholder", "x"),
# Return type differs based on boolean dispatch :(
("call_function", torch.nn.functional.max_pool2d),
("output", "output"),
}
# AnnotateTypesWithSchema doesn't work with bound C++ functions
if not isinstance(node.target, BuiltinFunctionType):
self.assertIn(check, excluded_nodes)
# Smoke test torchscript compilation since now we're emitting type annotations
torch.jit.script(traced_functionals_annotated)
def test_annotate_getitem_node(self):
class CustomType:
pass
class CustomNamedTuple(NamedTuple):
x: int
y: float
class MyModule(torch.nn.Module):
def forward(self, inp: tuple[CustomType, torch.Tensor], inp2: list[CustomType], inp3: CustomNamedTuple):
inp_0 = inp[0]
inp_1 = inp[1]
inp2_0 = inp2[0]
inp3_x = inp3.x
inp3_y = inp3.y
return inp_0 + inp_1 + inp2_0 + inp3_x + inp3_y
class MyModule2(torch.nn.Module):
def forward(self, inp: tuple[CustomType, torch.Tensor], inp2: list[CustomType], inp3: CustomNamedTuple):
inp_0 = inp[0]
inp_1 = inp[1]
inp2_0 = inp2[0]
inp3_x = inp3.x
inp3_y = inp3.y
return inp_0 + inp_1 + inp2_0 + inp3_x + inp3_y
my_module = MyModule()
my_module_traced = torch.fx.symbolic_trace(my_module)
# by default, fx transform loses type annotation of getitem nodes.
for node in my_module_traced.graph.nodes:
if node.target == operator.getitem:
assert node.type is None
annotate_getitem_nodes(my_module_traced.graph)
for node in my_module_traced.graph.nodes:
if node.target == operator.getitem:
self.assertIsNotNone(node.type, f"Node {node} should be annotated but is not.")
my_module = MyModule2()
my_module_traced = torch.fx.symbolic_trace(my_module)
# by default, fx transform loses type annotation of getitem nodes.
for node in my_module_traced.graph.nodes:
if node.target == operator.getitem:
assert node.type is None
annotate_getitem_nodes(my_module_traced.graph)
for node in my_module_traced.graph.nodes:
if node.target == operator.getitem:
self.assertIsNotNone(node.type, f"Node {node} should be annotated but is not.")
def test_subgraph_uniquename(self):
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(4, 4)
def forward(self, a, b, c, d):
add_1 = a + b
add_2 = add_1 + c
linear_1 = self.linear(add_1)
add_3 = add_2 + d
add_4 = add_2 + linear_1
add_5 = add_3 + add_4
return add_5
a, b, c, d = torch.ones(4), torch.ones(4), torch.ones(4), torch.ones(4)
mm = MyModule()
traced = symbolic_trace(mm)
def split_cb(node: torch.fx.Node):
if node.name == "a" or node.name == "b" or node.name == "add":
return 0
else:
return 1
module_with_submodule = split_module(traced, mm, split_cb)
self.assertEqual(module_with_submodule(a, b, c, d), traced(a, b, c, d))
def test_split_qualname_mapping(self):
d_hid = 4
class ExampleCode(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.lin = torch.nn.Linear(d_hid, d_hid)
def forward(self, x):
x = torch.mm(x, self.mm_param)
x = torch.relu(x)
x = torch.mm(x, self.mm_param)
x = self.lin(x)
x = torch.relu(x)
x = torch.mm(x, self.mm_param2)
x = self.lin(x)
return x
my_module = ExampleCode()
my_module_traced = symbolic_trace(my_module)
part_idx = 0
def split_callback(n : torch.fx.Node):
nonlocal part_idx
if (n.op, n.target) == ('call_module', 'lin'):
part_idx += 1
return part_idx
# split module in module with submodules
qualname_map : dict[str, str] = {}
module_with_submodules = split_module(
my_module_traced, my_module, split_callback, qualname_map
)
expected_qualname_map = {
'submod_1.lin': 'lin', 'submod_2.lin': 'lin'
}
self.assertEqual(qualname_map, expected_qualname_map)
def test_traceable_function_with_nonstandard_name(self):
def foo(x):
return torch.relu(x)
traced = symbolic_trace_with_rewrite(foo)
def test_to_folder(self):
class Test(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.W = torch.nn.Parameter(torch.randn(2))
self.seq = torch.nn.Sequential(torch.nn.BatchNorm1d(2, 2))
self.linear = torch.nn.Linear(2, 2)
self.attr = torch.randn(2)
self.attr2 = torch.nn.Buffer(torch.randn(2))
self.attr3 = torch.nn.Buffer(torch.ones(2, dtype=torch.int32))
def forward(self, x):
return self.linear(self.seq(self.W + self.attr + self.attr2 + self.attr3 + x))
mod = symbolic_trace(Test())
module_name = "Foo"
import tempfile
from pathlib import Path
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_dir = Path(tmp_dir)
mod.to_folder(tmp_dir, module_name)
# Recipe taken from here:
# https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
import importlib.util
spec = importlib.util.spec_from_file_location(
module_name, tmp_dir / "__init__.py"
)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
t = torch.randn(2, 2)
self.assertEqual(module.Foo()(t), mod(t))
def test_fetch(self):
attrs_for_lowering: dict[str, list[str]] = {
"torch.nn.modules.conv.Conv2d": [
"weight",
"bias",
"kernel_size",
"stride",
"padding",
"dilation",
"groups",
"padding_mode",
],
"torch.nn.modules.batchnorm.BatchNorm2d": [
"weight",
"bias",
"running_mean",
"running_var",
"eps",
],
}
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 2)
self.bn = torch.nn.BatchNorm2d(3)
def forward(self, a):
a = self.conv(a)
a += a
return self.bn(a)
mod = TestModule()
traced = symbolic_trace(mod)
lift_lowering_attrs_to_nodes(traced)
for node in traced.graph.nodes:
if node.op == "call_module":
assert hasattr(node, "attrs_for_lowering")
para_list = attrs_for_lowering[node.attrs_for_lowering["name"]]
# node.attrs_for_lowering has an additional field for the class name
assert len(para_list) + 1 == len(node.attrs_for_lowering)
for p_name in para_list:
assert p_name in node.attrs_for_lowering
def test_merge_matmuls(self):
"""
A collection of test cases for torch.fx.experimental.merge_matmul,
a graph transformation that merges matrix multiplication operations.
"""
# Utility function for counting matmuls for test assertions.
def _count_matmuls(mod):
gm = torch.fx.symbolic_trace(mod)
num_matmuls = 0
for node in gm.graph.nodes:
if node.target == torch.matmul:
num_matmuls += 1
return num_matmuls
# Simple test case in which there are two matmuls of the same size to merge.
class SimpleMergeMatmulModule(torch.nn.Module):
def __init__(self, rhs):
super().__init__()
self.rhs = rhs
def forward(self, x, y):
a = torch.matmul(x, self.rhs)
b = torch.matmul(y, self.rhs)
return a + b
# Initialize inputs.
a = torch.randn(3, 3)
b = torch.randn(3, 3)
# Initialize RHS for matmuls.
rhs = torch.randn(3, 4)
# Construct SimpleMergeMatmulModule and call merge_matmul on it.
module = SimpleMergeMatmulModule(rhs)
opt_module = merge_matmul.merge_matmul(module)
# Numerical correctness check.
before = module(a, b)
after = opt_module(a, b)
self.assertTrue(before.allclose(after))
# Basic graph structure check; original module should have 2 matmuls
# and optimized module should have 1.
self.assertEqual(_count_matmuls(module), 2)
self.assertEqual(_count_matmuls(opt_module), 1)
# Test case in which there are multiple matmuls of different sizes to merge.
class FiveMergeMatmulModule(torch.nn.Module):
def __init__(self, rhs):
super().__init__()
self.rhs = rhs
def forward(self, a, b, c, d, e):
s = torch.tensor([])
matmuls = []
# For some reason using a list comprehension or for-loop for this
# doesn't work.
matmuls.append(torch.matmul(a, self.rhs))
matmuls.append(torch.matmul(b, self.rhs))
matmuls.append(torch.matmul(c, self.rhs))
matmuls.append(torch.matmul(d, self.rhs))
matmuls.append(torch.matmul(e, self.rhs))
for m in matmuls:
s += torch.sum(m)
return s
# Initialize inputs.
inputs = [torch.randn(2 * i + 1, 5) for i in range(5)]
# Initialize RHS.
rhs = torch.randn(5, 4)
# Construct FiveMergeMatmulModule and call merge_matmul on it.
module = FiveMergeMatmulModule(rhs)
opt_module = merge_matmul.merge_matmul(module)
# Numerical correctness check.
before = module(*inputs)
after = opt_module(*inputs)
self.assertTrue(before.allclose(after))
# Basic graph structure check; original module should have len(inputs) matmuls
# and optimized module should have 1.
self.assertEqual(_count_matmuls(module), len(inputs))
self.assertEqual(_count_matmuls(opt_module), 1)
# Simple test case in which two matmuls cannot be merged due to a data dependency between
# the LHS operands.
class UnmergeableMatmulModule(torch.nn.Module):
def __init__(self, rhs):
super().__init__()
self.rhs = rhs
def forward(self, x):
a = torch.matmul(x, self.rhs)
a_abs = torch.abs(a)
b = torch.matmul(a_abs.transpose(1, 0), self.rhs)
return b
# Initialize inputs.
a = torch.randn(3, 3)
# Initialize RHS for matmuls.
rhs = torch.randn(3, 4)
# Construct UnmergeableMatmulModule and call merge_matmul on it.
module = UnmergeableMatmulModule(rhs)
opt_module = merge_matmul.merge_matmul(module)
# Numerical correctness check.
before = module(a)
after = opt_module(a)
self.assertTrue(before.allclose(after))
# Basic graph structure check; the number of matrix multiplications should not have changed.
self.assertEqual(_count_matmuls(module), 2)
self.assertEqual(_count_matmuls(opt_module), 2)
def test_type_matches(self):
should_be_equal = [
(int, int),
(numbers.Number, int),
(numbers.Number, float),
(int, type(torch.float)),
(Union[int, float], int),
(Union[int, float], float),
(list[int], int),
(list[int], create_type_hint([int, int])),
(list[int], create_type_hint((int, int))),
(list[torch.Tensor], create_type_hint([torch.Tensor, torch.Tensor])),
(
list[torch.Tensor],
create_type_hint([torch.nn.Parameter, torch.nn.Parameter]),
),
(torch.Tensor, torch.nn.Parameter),
(list[torch.Tensor], create_type_hint([torch.nn.Parameter, torch.Tensor])),
(list[torch.Tensor], create_type_hint([torch.Tensor, torch.nn.Parameter])),
(list[torch.Tensor], create_type_hint((torch.Tensor, torch.Tensor))),
(
list[torch.Tensor],
create_type_hint((torch.nn.Parameter, torch.nn.Parameter)),
),
(torch.Tensor, torch.nn.Parameter),
(list[torch.Tensor], create_type_hint((torch.nn.Parameter, torch.Tensor))),
(list[torch.Tensor], create_type_hint((torch.Tensor, torch.nn.Parameter))),
(Optional[list[torch.Tensor]], list[torch.Tensor]),
(Optional[list[int]], list[int]),
] + [
# pre-PEP585 signatures
(typing.List[int], int), # noqa: UP006
(typing.List[int], create_type_hint([int, int])), # noqa: UP006
(typing.List[int], create_type_hint((int, int))), # noqa: UP006
(typing.List[torch.Tensor], create_type_hint([torch.Tensor, torch.Tensor])), # noqa: UP006
(
typing.List[torch.Tensor], # noqa: UP006
create_type_hint([torch.nn.Parameter, torch.nn.Parameter]),
),
(typing.List[torch.Tensor], create_type_hint([torch.nn.Parameter, torch.Tensor])), # noqa: UP006
(typing.List[torch.Tensor], create_type_hint([torch.Tensor, torch.nn.Parameter])), # noqa: UP006
(typing.List[torch.Tensor], create_type_hint((torch.Tensor, torch.Tensor))), # noqa: UP006
(
typing.List[torch.Tensor], # noqa: UP006
create_type_hint((torch.nn.Parameter, torch.nn.Parameter)),
),
(typing.List[torch.Tensor], create_type_hint((torch.nn.Parameter, torch.Tensor))), # noqa: UP006
(typing.List[torch.Tensor], create_type_hint((torch.Tensor, torch.nn.Parameter))), # noqa: UP006
(Optional[typing.List[torch.Tensor]], typing.List[torch.Tensor]), # noqa: UP006
(Optional[typing.List[int]], typing.List[int]), # noqa: UP006
]
for sig_type, arg_type in should_be_equal:
self.assertTrue(type_matches(sig_type, arg_type))
should_fail = [
(int, float),
(Union[int, float], str),
(list[torch.Tensor], list[int]),
] + [
# pre-PEP585 signatures
(list[torch.Tensor], typing.List[int]),  # noqa: UP006
]
for sig_type, arg_type in should_fail:
self.assertFalse(type_matches(sig_type, arg_type))
@skipIfNoMkldnn
def test_optimize_for_inference_cpu(self):
import torch.nn as nn
class Foo(nn.Module):
def __init__(self) -> None:
super().__init__()
layers = []
layers2 = []
for _ in range(10):
layers.append(nn.Conv2d(3, 3, 1))
layers.append(nn.BatchNorm2d(3))
layers.append(nn.ReLU())
layers2.append(nn.Conv2d(3, 3, 1))
layers2.append(nn.BatchNorm2d(3))
layers2.append(nn.ReLU())
self.model = nn.Sequential(*layers)
self.model2 = nn.Sequential(*layers2)
def forward(self, x):
return self.model(x) + self.model2(x)
N, C, H, W, = (
1,
3,
224,
224,
)
inp = torch.randn(N, C, H, W)
with torch.no_grad():
model = Foo().eval()
optimized_model = optimization.optimize_for_inference(model)
torch.testing.assert_close(model(inp), optimized_model(inp))
optimized_model2 = optimization.optimize_for_inference(
model, pass_config={"remove_dropout": False}
)
torch.testing.assert_close(model(inp), optimized_model2(inp))
@skipIfNoTorchVision
@skipIfNoMkldnn
def test_optimize_for_inference_cpu_torchvision(self):
models = [
torchvision.models.resnet18,
torchvision.models.resnet50,
torchvision.models.densenet121,
torchvision.models.shufflenet_v2_x1_0,
torchvision.models.vgg16,
torchvision.models.mobilenet_v2,
torchvision.models.mnasnet1_0,
torchvision.models.resnext50_32x4d,
]
with torch.no_grad():
for model_type in models:
model = model_type()
C, H, W, = (
3,
224,
224,
)
inp = torch.randn(3, C, H, W)
model(inp)
model.eval()
inp = torch.randn(1, C, H, W)
heuristic = optimization.gen_mkl_autotuner(inp, iters=0, warmup=0)
optimized_model = optimization.optimize_for_inference(model)
orig_out = model(inp)
new_out = optimized_model(inp)
torch.testing.assert_close(orig_out, new_out)
|
TestFXExperimental
|
python
|
scikit-learn__scikit-learn
|
sklearn/feature_selection/_univariate_selection.py
|
{
"start": 31023,
"end": 34099
}
|
class ____(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate.
This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
on the expected false discovery rate.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable, default=f_classif
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See Also"). The default function only
works with classification tasks.
alpha : float, default=5e-2
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like of shape (n_features,)
Scores of features.
pvalues_ : array-like of shape (n_features,)
p-values of feature scores.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
mutual_info_classif : Mutual information for a discrete target.
chi2 : Chi-squared stats of non-negative features for classification tasks.
f_regression : F-value between label/feature for regression tasks.
mutual_info_regression : Mutual information for a continuous target.
SelectPercentile : Select features based on percentile of the highest
scores.
SelectKBest : Select features based on the k highest scores.
SelectFpr : Select features based on a false positive rate test.
SelectFwe : Select features based on family-wise error rate.
GenericUnivariateSelect : Univariate feature selector with configurable
mode.
References
----------
https://en.wikipedia.org/wiki/False_discovery_rate
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.feature_selection import SelectFdr, chi2
>>> X, y = load_breast_cancer(return_X_y=True)
>>> X.shape
(569, 30)
>>> X_new = SelectFdr(chi2, alpha=0.01).fit_transform(X, y)
>>> X_new.shape
(569, 16)
"""
_parameter_constraints: dict = {
**_BaseFilter._parameter_constraints,
"alpha": [Interval(Real, 0, 1, closed="both")],
}
def __init__(self, score_func=f_classif, *, alpha=5e-2):
super().__init__(score_func=score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self)
n_features = len(self.pvalues_)
sv = np.sort(self.pvalues_)
selected = sv[
sv <= float(self.alpha) / n_features * np.arange(1, n_features + 1)
]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
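# --- Hedged sketch (not part of scikit-learn): a worked run of the
# Benjamini-Hochberg selection implemented in `_get_support_mask` above,
# on hand-picked toy p-values with an assumed alpha of 0.05. ---
import numpy as np

pvalues = np.array([0.001, 0.008, 0.039, 0.041, 0.2])
alpha, n = 0.05, len(pvalues)
sv = np.sort(pvalues)
# BH compares the i-th smallest p-value against alpha * i / n.
thresholds = alpha / n * np.arange(1, n + 1)  # [0.01, 0.02, 0.03, 0.04, 0.05]
selected = sv[sv <= thresholds]
# Every p-value at or below the largest passing one is kept.
mask = pvalues <= selected.max() if selected.size else np.zeros(n, dtype=bool)
print(mask)  # [ True  True False False False] -> the first two features survive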
|
SelectFdr
|
python
|
walkccc__LeetCode
|
solutions/356. Line Reflection/356.py
|
{
"start": 0,
"end": 432
}
|
class ____:
def isReflected(self, points: list[list[int]]) -> bool:
minX = math.inf
maxX = -math.inf
seen = set()
for x, y in points:
minX = min(minX, x)
maxX = max(maxX, x)
seen.add((x, y))
summ = minX + maxX
# (leftX + rightX) / 2 = (minX + maxX) / 2
# leftX = minX + maxX - rightX
# rightX = minX + maxX - leftX
return all((summ - x, y) in seen for x, y in points)
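# Hedged usage sketch (not in the original solution file; assumes `import math`
# at module level, since the method relies on math.inf): the points (1, 1) and
# (-1, 1) mirror across x = 0, while an unmatched (1, 2) breaks the reflection.
import math

sol = Solution()
print(sol.isReflected([[1, 1], [-1, 1]]))          # True  (reflect over x = 0)
print(sol.isReflected([[1, 1], [-1, 1], [1, 2]]))  # False ((-1, 2) is missing)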
|
Solution
|
python
|
Textualize__textual
|
docs/examples/styles/link_style.py
|
{
"start": 64,
"end": 747
}
|
class ____(App):
CSS_PATH = "link_style.tcss"
def compose(self):
yield Label(
"Visit the [link='https://textualize.io']Textualize[/link] website.",
id="lbl1", # (1)!
)
yield Label(
"Click [@click=app.bell]here[/] for the bell sound.",
id="lbl2", # (2)!
)
yield Label(
"You can also click [@click=app.bell]here[/] for the bell sound.",
id="lbl3", # (3)!
)
yield Label(
"[@click=app.quit]Exit this application.[/]",
id="lbl4", # (4)!
)
if __name__ == "__main__":
app = LinkStyleApp()
app.run()
|
LinkStyleApp
|
python
|
getsentry__sentry
|
tests/sentry/tasks/test_delete_seer_grouping_records.py
|
{
"start": 382,
"end": 6938
}
|
class ____(TestCase):
def setUp(self) -> None:
super().setUp()
# Needed for may_schedule_task_to_delete_hashes_from_seer to allow the task to be scheduled
self.project.update_option("sentry:similarity_backfill_completed", int(time()))
def _setup_groups_and_hashes(self, number_of_groups: int = 5) -> list[str]:
expected_hashes = []
for i in range(number_of_groups):
group = self.create_group(project=self.project)
group_hash = GroupHash.objects.create(
project=self.project, hash=f"{i:032d}", group=group
)
expected_hashes.append(group_hash.hash)
return expected_hashes
@patch(
"sentry.tasks.delete_seer_grouping_records.delete_seer_grouping_records_by_hash.apply_async"
)
def test_simple(self, mock_apply_async: MagicMock) -> None:
"""
Test that it correctly collects hashes and schedules a task.
"""
expected_hashes = self._setup_groups_and_hashes(number_of_groups=5)
may_schedule_task_to_delete_hashes_from_seer(self.project.id, expected_hashes)
# Verify that the task was called with the correct parameters
mock_apply_async.assert_called_once_with(args=[self.project.id, expected_hashes, 0])
def test_chunked(self) -> None:
"""
Test that it chunks large numbers of hashes
into separate tasks with a maximum of batch_size hashes per task.
"""
batch_size = 10
with (
patch(
"sentry.tasks.delete_seer_grouping_records.delete_seer_grouping_records_by_hash.apply_async"
) as mock_apply_async,
self.options({"embeddings-grouping.seer.delete-record-batch-size": batch_size}),
):
# Create 15 group hashes to test chunking (10 + 5 with batch size of 10)
expected_hashes = self._setup_groups_and_hashes(batch_size + 5)
may_schedule_task_to_delete_hashes_from_seer(self.project.id, expected_hashes)
# Verify that the task was called 2 times (15 hashes / 10 per chunk = 2 chunks)
assert mock_apply_async.call_count == 2
# Verify the first chunk has batch_size hashes
first_call_args = mock_apply_async.call_args_list[0][1]["args"]
assert len(first_call_args[1]) == batch_size
assert first_call_args[0] == self.project.id
assert first_call_args[1] == expected_hashes[0:batch_size]
assert first_call_args[2] == 0
# Verify the second chunk has 5 hashes (remainder)
second_call_args = mock_apply_async.call_args_list[1][1]["args"]
assert len(second_call_args[1]) == 5
assert second_call_args[0] == self.project.id
assert second_call_args[1] == expected_hashes[batch_size:]
assert second_call_args[2] == 0
@patch(
"sentry.tasks.delete_seer_grouping_records.delete_seer_grouping_records_by_hash.apply_async"
)
def test_group_without_hashes(self, mock_apply_async: MagicMock) -> None:
group = self.create_group(project=self.project)
hashes = GroupHash.objects.filter(group=group).values_list("hash", flat=True).all()
may_schedule_task_to_delete_hashes_from_seer(self.project.id, list(hashes))
mock_apply_async.assert_not_called()
@patch(
"sentry.tasks.delete_seer_grouping_records.delete_seer_grouping_records_by_hash.apply_async"
)
def test_no_group_ids(self, mock_apply_async: MagicMock) -> None:
"""
Test that when no group ids are provided, the task is not scheduled.
"""
may_schedule_task_to_delete_hashes_from_seer(self.project.id, [])
mock_apply_async.assert_not_called()
@patch(
"sentry.tasks.delete_seer_grouping_records.delete_seer_grouping_records_by_hash.apply_async"
)
def test_called_task_with_too_many_hashes(self, mock_apply_async: MagicMock) -> None:
"""This tests the built-in logic of spreading hashes across multiple tasks."""
batch_size = 5
with self.options({"embeddings-grouping.seer.delete-record-batch-size": batch_size}):
# Create 11 group hashes to test chunking (5 + 5 + 1 with batch size of 5)
expected_hashes = self._setup_groups_and_hashes(batch_size + batch_size + 1)
# Call function directly rather than scheduling a task
delete_seer_grouping_records_by_hash(self.project.id, expected_hashes, 0)
# Verify the first chunk has batch_size hashes
first_call_args = mock_apply_async.call_args_list[0][1]["args"]
assert len(first_call_args[1]) == batch_size
assert first_call_args[0] == self.project.id
first_chunk = expected_hashes[0:batch_size]
assert first_call_args[1] == first_chunk
assert first_call_args[2] == 0
# Verify the second chunk has batch_size hashes
second_call_args = mock_apply_async.call_args_list[1][1]["args"]
assert len(second_call_args[1]) == batch_size
assert second_call_args[0] == self.project.id
second_chunk = expected_hashes[batch_size : (batch_size * 2)]
assert second_call_args[1] == second_chunk
assert second_call_args[2] == 0
# Verify the third chunk has 1 hash (remainder)
third_call_args = mock_apply_async.call_args_list[2][1]["args"]
assert len(third_call_args[1]) == 1
assert third_call_args[0] == self.project.id
third_chunk = expected_hashes[(batch_size * 2) :]
assert third_call_args[1] == third_chunk
assert third_call_args[2] == 0
# Make sure the hashes add up to the expected hashes
assert first_chunk + second_chunk + third_chunk == expected_hashes
@patch(
"sentry.tasks.delete_seer_grouping_records.delete_seer_grouping_records_by_hash.apply_async"
)
def test_does_not_schedule_task_if_missing_option(self, mock_apply_async: MagicMock) -> None:
"""
Test that when the project option is not set, the task is not scheduled.
"""
self.project.delete_option("sentry:similarity_backfill_completed")
expected_hashes = self._setup_groups_and_hashes(number_of_groups=5)
may_schedule_task_to_delete_hashes_from_seer(self.project.id, expected_hashes)
assert mock_apply_async.call_count == 0
|
TestDeleteSeerGroupingRecordsByHash
|
python
|
django__django
|
django/db/models/base.py
|
{
"start": 18164,
"end": 18798
}
|
class ____:
"""Store model instance state."""
db = None
# If true, uniqueness validation checks will consider this a new, unsaved
# object. Necessary for correct validation of new instances of objects with
# explicit (non-auto) PKs. This impacts validation only; it has no effect
# on the actual save.
adding = True
fields_cache = ModelStateFieldsCacheDescriptor()
fetch_mode = ModelStateFetchModeDescriptor()
peers = ()
def __getstate__(self):
state = self.__dict__.copy()
# Weak references can't be pickled.
state.pop("peers", None)
return state
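# Hedged illustration (not part of Django's source): `peers` can hold weak
# references, and the standard library refuses to pickle those, which is why
# __getstate__ above drops the attribute. A minimal demonstration:
import pickle
import weakref

class _Target:
    pass

obj = _Target()
try:
    pickle.dumps(weakref.ref(obj))
except TypeError as exc:
    print(exc)  # e.g. "cannot pickle 'weakref.ref' object"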
|
ModelState
|
python
|
PrefectHQ__prefect
|
tests/test_tasks.py
|
{
"start": 40727,
"end": 64847
}
|
class ____:
async def test_repeated_task_call_within_flow_is_cached_by_default(self):
@task(persist_result=True)
def foo(x):
return x
@flow
def bar():
return foo(1, return_state=True), foo(1, return_state=True)
first_state, second_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Cached"
assert await second_state.result() == await first_state.result()
async def test_cache_hits_within_flows_are_cached(
self,
):
@task(
cache_key_fn=lambda *_: "cache_hit-1",
persist_result=True,
)
def foo(x):
return x
@flow
def bar():
return foo(1, return_state=True), foo(2, return_state=True)
first_state, second_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Cached"
assert await second_state.result() == await first_state.result()
def test_many_repeated_cache_hits_within_flows_cached(
self,
):
@task(
cache_key_fn=lambda *_: "cache_hit-2",
persist_result=True,
)
def foo(x):
return x
@flow
def bar():
foo(1, return_state=True) # populate the cache
return [foo(i, return_state=True) for i in range(5)]
states = bar()
assert all(state.name == "Cached" for state in states), states
async def test_cache_hits_between_flows_are_cached(
self,
):
@task(
cache_key_fn=lambda *_: "cache_hit-3",
persist_result=True,
)
def foo(x):
return x
@flow
def bar(x):
return foo(x, return_state=True)
first_state = bar(1)
second_state = bar(2)
assert first_state.name == "Completed"
assert second_state.name == "Cached"
assert await second_state.result() == await first_state.result() == 1
def test_cache_misses_arent_cached(
self,
):
# this hash fn won't return the same value twice
def mutating_key(*_, tally=[]):
tally.append("x")
return "call tally:" + "".join(tally)
@task(cache_key_fn=mutating_key, persist_result=True)
def foo(x):
return x
@flow
def bar():
return foo(1, return_state=True), foo(1, return_state=True)
first_state, second_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Completed"
async def test_cache_key_fn_receives_context(self):
def get_flow_run_id(context, args):
return str(context.task_run.flow_run_id)
@task(cache_key_fn=get_flow_run_id, persist_result=True)
def foo(x):
return x
@flow
def bar():
return foo("something", return_state=True), foo(
"different", return_state=True
)
first_state, second_state = bar()
assert first_state.name == "Completed"
assert await first_state.result() == "something"
assert second_state.name == "Cached"
assert await second_state.result() == "something"
third_state, fourth_state = bar()
assert third_state.name == "Completed"
assert fourth_state.name == "Cached"
assert await third_state.result() == "something"
assert await fourth_state.result() == "something"
async def test_cache_key_fn_receives_resolved_futures(
self,
):
def check_args(context, params):
assert params["x"] == "something"
assert len(params) == 1
return params["x"]
@task
def foo(x):
return x
@task(cache_key_fn=check_args, persist_result=True)
def bar(x):
return x
@flow
def my_flow():
future = foo.submit("something")
# Mix run/submit to cover both cases
return bar(future, return_state=True), bar.submit(future, return_state=True)
first_state, second_state = my_flow()
assert first_state.name == "Completed"
assert await first_state.result() == "something"
assert second_state.name == "Cached"
assert await second_state.result() == "something"
async def test_cache_key_fn_arg_inputs_are_stable(
self,
):
def stringed_inputs(context, args):
return str(args)
@task(cache_key_fn=stringed_inputs, persist_result=True)
def foo(a, b, c=3):
return a + b + c
@flow
def bar():
return (
foo(1, 2, 3, return_state=True),
foo(1, b=2, return_state=True),
foo(c=3, a=1, b=2, return_state=True),
)
first_state, second_state, third_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Cached"
assert third_state.name == "Cached"
# same output
assert await first_state.result() == 6
assert await second_state.result() == 6
assert await third_state.result() == 6
async def test_cache_key_hits_with_future_expiration_are_cached(
self,
):
@task(
cache_key_fn=lambda *_: "cache-hit-4",
cache_expiration=datetime.timedelta(seconds=5),
persist_result=True,
)
def foo(x):
return x
@flow
def bar():
return foo(1, return_state=True), foo(2, return_state=True)
first_state, second_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Cached"
assert await second_state.result() == 1
async def test_cache_key_hits_with_past_expiration_are_not_cached(self):
@task(
cache_key_fn=lambda *_: "cache-hit-5",
cache_expiration=datetime.timedelta(seconds=-5),
persist_result=True,
)
def foo(x):
return x
@flow
def bar():
return foo(1, return_state=True), foo(2, return_state=True)
first_state, second_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Completed"
assert await second_state.result() != await first_state.result()
async def test_cache_misses_w_refresh_cache(self):
@task(
cache_key_fn=lambda *_: "cache-hit-6",
refresh_cache=True,
persist_result=True,
)
def foo(x):
return x
@flow
def bar():
return foo(1, return_state=True), foo(2, return_state=True)
first_state, second_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Completed"
assert await second_state.result() != await first_state.result()
async def test_cache_hits_wo_refresh_cache(
self,
):
@task(
cache_key_fn=lambda *_: "cache-hit-7",
refresh_cache=False,
persist_result=True,
)
def foo(x):
return x
@flow
def bar():
return foo(1, return_state=True), foo(2, return_state=True)
first_state, second_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Cached"
assert await second_state.result() == await first_state.result()
async def test_tasks_refresh_cache_setting(self):
@task(cache_key_fn=lambda *_: "cache-hit-8", persist_result=True)
def foo(x):
return x
@task(
cache_key_fn=lambda *_: "cache-hit-8",
refresh_cache=True,
persist_result=True,
)
def refresh_task(x):
return x
@task(
cache_key_fn=lambda *_: "cache-hit-8",
refresh_cache=False,
persist_result=True,
)
def not_refresh_task(x):
return x
@flow
def bar():
foo(0)
return (
foo(1, return_state=True),
refresh_task(2, return_state=True),
not_refresh_task(3, return_state=True),
)
with temporary_settings({PREFECT_TASKS_REFRESH_CACHE: True}):
first_state, second_state, third_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Completed"
assert third_state.name == "Cached"
assert await second_state.result() != await first_state.result()
assert await third_state.result() == await second_state.result()
async def test_cache_key_fn_receives_self_if_method(self):
"""
The `self` argument of a bound method is implicitly passed as a parameter to the decorated
function. This test ensures that it is passed to the cache key function by checking that
two instances of the same class do not share a cache (both instances yield COMPLETED states
the first time they run and CACHED states the second time).
"""
cache_args = []
def stringed_inputs(context, args):
cache_args.append(args)
return str(args)
class Foo:
def __init__(self, x):
self.x = x
@task(cache_key_fn=stringed_inputs, persist_result=True)
def add(self, a):
return a + self.x
# create an instance that adds 1 and another that adds 100
f1 = Foo(1)
f2 = Foo(100)
@flow
def bar():
return (
f1.add(5, return_state=True),
f1.add(5, return_state=True),
f2.add(5, return_state=True),
f2.add(5, return_state=True),
)
s1, s2, s3, s4 = bar()
# the first two calls are completed / cached
assert s1.name == "Completed"
assert s2.name == "Cached"
# the second two calls are completed / cached because it's a different instance
assert s3.name == "Completed"
assert s4.name == "Cached"
# check that the cache key function received the self arg
assert cache_args[0] == dict(self=f1, a=5)
assert cache_args[1] == dict(self=f1, a=5)
assert cache_args[2] == dict(self=f2, a=5)
assert cache_args[3] == dict(self=f2, a=5)
assert await s1.result() == 6
assert await s2.result() == 6
assert await s3.result() == 105
assert await s4.result() == 105
async def test_instance_methods_can_share_a_cache(
self,
):
"""
Test that instance methods can share a cache by using a cache key function that
ignores the bound instance argument
"""
def stringed_inputs(context, args):
# remove the self arg from the cache key
cache_args = args.copy()
cache_args.pop("self")
return str(cache_args)
class Foo:
def __init__(self, x):
self.x = x
@task(cache_key_fn=stringed_inputs, persist_result=True)
def add(self, a):
return a + self.x
# create an instance that adds 1 and another that adds 100
f1 = Foo(1)
f2 = Foo(100)
@flow
def bar():
return (
f1.add(5, return_state=True),
f1.add(5, return_state=True),
f2.add(5, return_state=True),
f2.add(5, return_state=True),
)
s1, s2, s3, s4 = bar()
# all subsequent calls are cached because the instance is not part of the cache key
assert s1.name == "Completed"
assert s2.name == "Cached"
assert s3.name == "Cached"
assert s4.name == "Cached"
assert await s1.result() == 6
assert await s2.result() == 6
assert await s3.result() == 6
assert await s4.result() == 6
async def test_cache_key_fn_takes_precedence_over_cache_policy(
self, caplog, tmpdir
):
block = LocalFileSystem(basepath=str(tmpdir))
await block.save("test-cache-key-fn-takes-precedence-over-cache-policy")
@task(
cache_key_fn=lambda *_: "cache-hit-9",
cache_policy=INPUTS,
result_storage=block,
persist_result=True,
)
def foo(x):
return x
first_state = foo(1, return_state=True)
second_state = foo(2, return_state=True)
assert first_state.name == "Completed"
assert second_state.name == "Cached"
assert await second_state.result() == await first_state.result()
assert "`cache_key_fn` will be used" in caplog.text
async def test_changing_result_storage_key_busts_cache(
self,
):
@task(
cache_key_fn=lambda *_: "cache-hit-10",
result_storage_key="before",
persist_result=True,
)
def foo(x):
return x
first_state = foo(1, return_state=True)
second_state = foo.with_options(result_storage_key="after")(
2, return_state=True
)
assert first_state.name == "Completed"
assert second_state.name == "Completed"
assert await first_state.result() == 1
assert await second_state.result() == 2
async def test_false_persist_results_sets_cache_policy_to_none(self, caplog):
@task(persist_result=False)
def foo(x):
return x
assert foo.cache_policy == NO_CACHE
assert (
"Ignoring `cache_policy` because `persist_result` is False"
not in caplog.text
)
async def test_warns_went_false_persist_result_and_cache_policy(self, caplog):
@task(persist_result=False, cache_policy=INPUTS)
def foo(x):
return x
assert foo.cache_policy == NO_CACHE
assert (
"Ignoring `cache_policy` because `persist_result` is False" in caplog.text
)
async def test_no_cache_can_be_configured_as_default(self):
with temporary_settings({PREFECT_TASKS_DEFAULT_NO_CACHE: True}):
@task
def foo(x):
return x
assert foo.cache_policy == NO_CACHE
async def test_no_cache_default_can_be_overrided(self):
with temporary_settings({PREFECT_TASKS_DEFAULT_NO_CACHE: True}):
@task(cache_policy=DEFAULT)
def foo(x):
return x
@task(cache_key_fn=lambda **kwargs: "")
def bar(x):
return x
assert foo.cache_policy == DEFAULT
assert bar.cache_policy != NO_CACHE
async def test_no_cache_default_is_respected_even_with_result_persistence(self):
with temporary_settings({PREFECT_TASKS_DEFAULT_NO_CACHE: True}):
@task(persist_result=True)
def foo(x):
return x
assert foo.cache_policy == NO_CACHE
with temporary_settings({PREFECT_TASKS_DEFAULT_NO_CACHE: True}):
@task(result_storage_key="foo-bar")
def zig(x):
return x
assert zig.cache_policy == NO_CACHE
@pytest.mark.parametrize("cache_policy", [NO_CACHE, None])
async def test_does_not_warn_went_false_persist_result_and_none_cache_policy(
self, caplog, cache_policy
):
@task(persist_result=False, cache_policy=cache_policy)
def foo(x):
return x
assert foo.cache_policy == cache_policy
assert (
"Ignoring `cache_policy` because `persist_result` is False"
not in caplog.text
)
def test_cache_policy_storage_path(self, tmp_path):
cache_policy = Inputs().configure(key_storage=tmp_path)
expected_cache_key = cache_policy.compute_key(
task_ctx=None, inputs={"x": 1}, flow_parameters=None
)
@task(cache_policy=cache_policy)
def foo(x):
return x
foo(1)
assert (tmp_path / expected_cache_key).exists()
def test_cache_policy_storage_str(self, tmp_path):
cache_policy = Inputs().configure(key_storage=str(tmp_path))
expected_cache_key = cache_policy.compute_key(
task_ctx=None, inputs={"x": 1}, flow_parameters=None
)
@task(cache_policy=cache_policy)
def foo(x):
return x
foo(1)
assert (tmp_path / expected_cache_key).exists()
def test_cache_policy_storage_storage_block(self, tmp_path):
cache_policy = Inputs().configure(
key_storage=LocalFileSystem(basepath=str(tmp_path))
)
expected_cache_key = cache_policy.compute_key(
task_ctx=None, inputs={"x": 1}, flow_parameters=None
)
@task(cache_policy=cache_policy)
def foo(x):
return x
foo(1)
# make sure cache key file and result file are both created
assert (tmp_path / expected_cache_key).exists()
assert "prefect_version" in json.loads(
(tmp_path / expected_cache_key).read_text()
)
assert (PREFECT_LOCAL_STORAGE_PATH.value() / expected_cache_key).exists()
@pytest.mark.parametrize(
"isolation_level", [IsolationLevel.SERIALIZABLE, "SERIALIZABLE"]
)
def test_cache_policy_lock_manager(self, tmp_path, isolation_level):
cache_policy = Inputs().configure(
lock_manager=FileSystemLockManager(lock_files_directory=tmp_path),
isolation_level=IsolationLevel.SERIALIZABLE,
)
expected_cache_key = cache_policy.compute_key(
task_ctx=None, inputs={"x": 1}, flow_parameters=None
)
@task(cache_policy=cache_policy)
def foo(x):
assert (tmp_path / f"{expected_cache_key}.lock").exists()
return x
assert foo(1) == 1
async def test_cache_policy_lock_manager_async(self, tmp_path):
"""Regression test for https://github.com/PrefectHQ/prefect/issues/17785"""
cache_policy = (INPUTS + TASK_SOURCE).configure(
isolation_level=IsolationLevel.SERIALIZABLE,
lock_manager=MemoryLockManager(),
)
@task(cache_policy=cache_policy)
async def my_task(x: int):
await asyncio.sleep(random.randint(1, 3))
result = x + random.randint(1, 100)
return result
tasks = [my_task(42) for _ in range(3)]
results = await asyncio.gather(*tasks)
# Assert all results are the same since the second and third calls should be cached
assert len(set(results)) == 1, (
f"Expected all results to be identical but got {results}"
)
def test_cache_policy_serializable_isolation_level_with_no_manager(self):
cache_policy = Inputs().configure(isolation_level=IsolationLevel.SERIALIZABLE)
@task(cache_policy=cache_policy)
def foo(x):
return x
with pytest.raises(
ConfigurationError, match="not supported by provided configuration"
):
foo(1)
async def test_unhashable_input_provides_helpful_error(self, caplog):
"""Test that trying to cache a task with unhashable inputs provides helpful error message"""
lock = threading.Lock()
@task(persist_result=True)
def foo(x, lock_obj):
return x
foo(42, lock_obj=lock)
error_msg = caplog.text
# First we see the cache policy's message
assert (
"This often occurs when task inputs contain objects that cannot be cached"
in error_msg
)
assert "like locks, file handles, or other system resources." in error_msg
assert "To resolve this, you can:" in error_msg
assert (
"1. Exclude these arguments by defining a custom `cache_key_fn`"
in error_msg
)
assert "2. Disable caching by passing `cache_policy=NO_CACHE`" in error_msg
# Then we see the original HashError details
assert "Unable to create hash - objects could not be serialized." in error_msg
assert (
"JSON error: Unable to serialize unknown type: <class '_thread.lock'>"
in error_msg
)
assert "Pickle error: cannot pickle '_thread.lock' object" in error_msg
async def test_unhashable_input_workarounds(self):
"""Test workarounds for handling unhashable inputs"""
lock = threading.Lock()
# Solution 1: Use cache_key_fn to exclude problematic argument
def cache_on_x_only(context, parameters):
return str(parameters.get("x"))
@task(cache_key_fn=cache_on_x_only, persist_result=True)
def foo_with_key_fn(x, lock_obj):
return x
# Solution 2: Disable caching entirely
@task(cache_policy=NO_CACHE, persist_result=True)
def foo_with_none_policy(x, lock_obj):
return x
# Solution 3: Subtract the problematic argument from the cache policy
@task(cache_policy=DEFAULT - "lock_obj", persist_result=True)
def foo_with_subtraction(x, lock_obj):
return x
@flow
def test_flow():
# Both approaches should work without errors
return (
foo_with_key_fn(42, lock_obj=lock, return_state=True),
foo_with_key_fn(42, lock_obj=lock, return_state=True),
foo_with_none_policy(42, lock_obj=lock, return_state=True),
foo_with_none_policy(42, lock_obj=lock, return_state=True),
foo_with_subtraction(42, lock_obj=lock, return_state=True),
foo_with_subtraction(42, lock_obj=lock, return_state=True),
)
s1, s2, s3, s4, s5, s6 = test_flow()
# Key fn approach should still cache based on x
assert s1.name == "Completed"
assert s2.name == "Cached"
assert await s1.result() == 42
assert await s2.result() == 42
# NO_CACHE policy approach should never cache
assert s3.name == "Completed"
assert s4.name == "Completed"
assert await s3.result() == 42
assert await s4.result() == 42
# Subtraction approach should cache based on x
assert s5.name == "Completed"
assert s6.name == "Cached"
assert await s5.result() == 42
assert await s6.result() == 42
async def test_disable_caching_setting_disables_caching_regardless_of_cache_policy(
self, caplog
):
from prefect.settings import PREFECT_TASKS_DISABLE_CACHING
with temporary_settings({PREFECT_TASKS_DISABLE_CACHING: True}):
@task(cache_policy=TASK_SOURCE)
def foo(x):
return x
assert foo.cache_policy == NO_CACHE
assert (
"Ignoring `cache_policy` because `persist_result` is False"
not in caplog.text
)
async def test_disable_caching_setting_allows_normal_caching_when_false(self):
from prefect.settings import PREFECT_TASKS_DISABLE_CACHING
with temporary_settings({PREFECT_TASKS_DISABLE_CACHING: False}):
@task(cache_policy=TASK_SOURCE)
def foo(x):
return x
assert foo.cache_policy == TASK_SOURCE
assert foo.persist_result is True
@pytest.mark.parametrize("cache_policy", [NO_CACHE, None])
async def test_does_not_warn_when_false_persist_result_and_none_cache_policy(
self, caplog, cache_policy
):
@task(persist_result=False, cache_policy=cache_policy)
def foo(x):
return x
assert foo.cache_policy == cache_policy
assert (
"Ignoring `cache_policy` because `persist_result` is False"
not in caplog.text
)
|
TestTaskCaching
|
python
|
pypa__packaging
|
src/packaging/pylock.py
|
{
"start": 7258,
"end": 8076
}
|
class ____(Exception):
"""Raised when when input data is not spec-compliant."""
context: str | None = None
message: str
def __init__(
self,
cause: str | Exception,
*,
context: str | None = None,
) -> None:
if isinstance(cause, PylockValidationError):
if cause.context:
self.context = (
f"{context}.{cause.context}" if context else cause.context
)
else:
self.context = context
self.message = cause.message
else:
self.context = context
self.message = str(cause)
def __str__(self) -> str:
if self.context:
return f"{self.message} in {self.context!r}"
return self.message
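# Hedged usage sketch (not part of packaging's source): wrapping one
# validation error in another chains the contexts, so the final message
# points at the fully qualified field.
inner = PylockValidationError("unexpected value", context="version")
outer = PylockValidationError(inner, context="packages[0]")
print(outer)  # unexpected value in 'packages[0].version'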
|
PylockValidationError
|
python
|
doocs__leetcode
|
solution/2500-2599/2517.Maximum Tastiness of Candy Basket/Solution.py
|
{
"start": 0,
"end": 528
}
|
class ____:
def maximumTastiness(self, price: List[int], k: int) -> int:
def check(x: int) -> bool:
cnt, pre = 0, -x
for cur in price:
if cur - pre >= x:
pre = cur
cnt += 1
return cnt >= k
price.sort()
l, r = 0, price[-1] - price[0]
while l < r:
mid = (l + r + 1) >> 1
if check(mid):
l = mid
else:
r = mid - 1
return l
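# Hedged usage sketch (not part of the original file; assumes
# `from typing import List` at module level): for prices [13, 5, 1, 8, 21, 2]
# and k = 3, the basket {13, 5, 21} has pairwise differences >= 8, and the
# binary search above confirms 8 is the best achievable minimum.
print(Solution().maximumTastiness([13, 5, 1, 8, 21, 2], 3))  # 8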
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/notifications/platform/registry.py
|
{
"start": 330,
"end": 1424
}
|
class ____(Registry[type[NotificationProvider[Any]]]):
"""
A registry for notification providers. Adds `get_all` and `get_available` methods to the base registry.
"""
def get_all(self) -> list[type[NotificationProvider[Any]]]:
"""
Returns every NotificationProvider that has been registered. Some providers may not be
generally available to all customers. For only the released providers, use `get_available` instead.
"""
return list(self.registrations.values())
def get_available(
self, *, organization: RpcOrganizationSummary | None = None
) -> list[type[NotificationProvider[Any]]]:
"""
Returns every registered NotificationProvider that has been released to all customers.
"""
return [
provider
for provider in self.registrations.values()
if provider.is_available(organization=organization)
]
provider_registry = NotificationProviderRegistry()
template_registry = Registry[type[NotificationTemplate[Any]]]()
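# A minimal usage sketch of the methods above (``org`` is a hypothetical
# RpcOrganizationSummary; providers are assumed to have been registered
# through the base Registry elsewhere):
#   released = provider_registry.get_available(organization=org)
#   assert set(released) <= set(provider_registry.get_all())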
|
NotificationProviderRegistry
|
python
|
ray-project__ray
|
python/ray/data/_internal/issue_detection/detectors/hanging_detector.py
|
{
"start": 871,
"end": 1004
}
|
class ____:
operator_id: str
task_idx: int
bytes_output: int
start_time_hanging: float
@dataclass
|
HangingExecutionState
|
python
|
sympy__sympy
|
sympy/solvers/ode/single.py
|
{
"start": 21968,
"end": 25071
}
|
class ____(SinglePatternODESolver):
r"""
Solves an almost-linear differential equation.
The general form of an almost linear differential equation is
.. math:: a(x) g'(f(x)) f'(x) + b(x) g(f(x)) + c(x)
Here `f(x)` is the function to be solved for (the dependent variable).
The substitution `g(f(x)) = u(x)` leads to a linear differential equation
for `u(x)` of the form `a(x) u' + b(x) u + c(x) = 0`. This can be solved
for `u(x)` by the `first_linear` hint and then `f(x)` is found by solving
`g(f(x)) = u(x)`.
See Also
========
:obj:`sympy.solvers.ode.single.FirstLinear`
Examples
========
>>> from sympy import dsolve, Function, pprint, sin, cos
>>> from sympy.abc import x
>>> f = Function('f')
>>> d = f(x).diff(x)
>>> eq = x*d + x*f(x) + 1
>>> dsolve(eq, f(x), hint='almost_linear')
Eq(f(x), (C1 - Ei(x))*exp(-x))
>>> pprint(dsolve(eq, f(x), hint='almost_linear'))
-x
f(x) = (C1 - Ei(x))*e
>>> example = cos(f(x))*f(x).diff(x) + sin(f(x)) + 1
>>> pprint(example)
d
sin(f(x)) + cos(f(x))*--(f(x)) + 1
dx
>>> pprint(dsolve(example, f(x), hint='almost_linear'))
/ -x \ / -x \
[f(x) = pi - asin\C1*e - 1/, f(x) = asin\C1*e - 1/]
References
==========
- Joel Moses, "Symbolic Integration - The Stormy Decade", Communications
of the ACM, Volume 14, Number 8, August 1971, pp. 558
"""
hint = "almost_linear"
has_integral = True
order = [1]
def _wilds(self, f, x, order):
P = Wild('P', exclude=[f(x).diff(x)])
Q = Wild('Q', exclude=[f(x).diff(x)])
return P, Q
def _equation(self, fx, x, order):
P, Q = self.wilds()
return P*fx.diff(x) + Q
def _verify(self, fx):
a, b = self.wilds_match()
c, b = b.as_independent(fx) if b.is_Add else (S.Zero, b)
        # a, b and c are the functions a(x), b(x) and c(x) respectively.
        # c(x) is obtained by separating out b into terms with and without fx, i.e., l(y)
        # The following condition checks whether the given equation is an
        # almost-linear differential equation, using the fact that
        # b.diff(fx)/a must be independent of fx
if b.diff(fx) != 0 and not simplify(b.diff(fx)/a).has(fx):
self.ly = factor_terms(b).as_independent(fx, as_Add=False)[1] # Gives the term containing fx i.e., l(y)
self.ax = a / self.ly.diff(fx)
self.cx = -c # cx is taken as -c(x) to simplify expression in the solution integral
self.bx = factor_terms(b) / self.ly
return True
return False
def _get_general_solution(self, *, simplify_flag: bool = True):
x = self.ode_problem.sym
(C1,) = self.ode_problem.get_numbered_constants(num=1)
gensol = Eq(self.ly, ((C1 + Integral((self.cx/self.ax)*exp(Integral(self.bx/self.ax, x)), x))
* exp(-Integral(self.bx/self.ax, x))))
return [gensol]
|
AlmostLinear
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/metrics_test.py
|
{
"start": 11932,
"end": 19865
}
|
class ____(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.mean_tensor(array_ops.ones([4, 3]))
_assert_metric_variables(self,
('mean/total_tensor:0', 'mean/count_tensor:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
self.evaluate(variables.local_variables_initializer())
for _ in range(4):
self.evaluate(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], self.evaluate(mean))
@test_util.run_deprecated_v1
def testMultiDimensional(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
self.evaluate(variables.local_variables_initializer())
for _ in range(2):
self.evaluate(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]],
self.evaluate(mean))
@test_util.run_deprecated_v1
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean_tensor(values)
self.evaluate(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], self.evaluate(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], self.evaluate(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], self.evaluate(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], self.evaluate(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], self.evaluate(mean), 5)
@test_util.run_deprecated_v1
def testBinaryWeighted1d(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
self.evaluate(variables.local_variables_initializer())
for _ in range(4):
self.evaluate(update_op)
self.assertAllClose([[3.25, 0.5]], self.evaluate(mean), 5)
@test_util.run_deprecated_v1
def testWeighted1d(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[0.0025]])
_enqueue_vector(sess, weights_queue, [[0.005]])
_enqueue_vector(sess, weights_queue, [[0.01]])
_enqueue_vector(sess, weights_queue, [[0.0075]])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
self.evaluate(variables.local_variables_initializer())
for _ in range(4):
self.evaluate(update_op)
self.assertAllClose([[0.8, 3.52]], self.evaluate(mean), 5)
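      # Per-column weighted mean, e.g. column 0:
      # (0*0.0025 - 4.2*0.005 + 6.5*0.01 - 3.2*0.0075) / (0.0025+0.005+0.01+0.0075)
      # = 0.02 / 0.025 = 0.8, matching the assertion above.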
@test_util.run_deprecated_v1
def testWeighted2d_1(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
self.evaluate(variables.local_variables_initializer())
for _ in range(4):
self.evaluate(update_op)
self.assertAllClose([[-2.1, 0.5]], self.evaluate(mean), 5)
@test_util.run_deprecated_v1
def testWeighted2d_2(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.mean_tensor(values, weights)
self.evaluate(variables.local_variables_initializer())
for _ in range(4):
self.evaluate(update_op)
self.assertAllClose([[0, 0.5]], self.evaluate(mean), 5)
|
MeanTensorTest
|
python
|
getsentry__sentry
|
src/sentry/apidocs/parameters.py
|
{
"start": 3686,
"end": 4179
}
|
class ____:
ENVIRONMENT = OpenApiParameter(
name="environment",
location="path",
required=True,
type=str,
description="The name of the environment.",
)
VISIBILITY = OpenApiParameter(
name="visibility",
location="query",
required=False,
type=str,
description="""The visibility of the environments to filter by. Defaults to `visible`.""",
enum=["all", "hidden", "visible"],
)
|
EnvironmentParams
|
python
|
spyder-ide__spyder
|
spyder/plugins/projects/widgets/main_widget.py
|
{
"start": 2453,
"end": 2648
}
|
class ____:
SearchInSwitcher = "search_in_switcher"
# ---- Main widget
# -----------------------------------------------------------------------------
@class_register
|
ProjectsOptionsMenuActions
|
python
|
getsentry__sentry
|
tests/sentry/integrations/vsts/test_notify_action.py
|
{
"start": 785,
"end": 8116
}
|
class ____(RuleTestCase, VstsIssueBase):
rule_cls = AzureDevopsCreateTicketAction
def setUp(self) -> None:
integration, _, _, _ = self.create_identity_integration(
user=self.user,
organization=self.organization,
integration_params={
"provider": "vsts",
"external_id": "vsts_external_id",
"name": "fabrikam-fiber-inc",
"metadata": {
"domain_name": "https://fabrikam-fiber-inc.visualstudio.com/",
"default_project": "0987654321",
},
},
identity_params={
"external_id": "vsts",
"data": {"access_token": "123456789", "expires": time() + 1234567},
},
)
self.integration = VstsIntegration(integration, self.organization.id)
@responses.activate
def test_create_issue(self) -> None:
self.mock_categories("ac7c05bb-7f8e-4880-85a6-e08f37fd4a10")
event = self.get_event()
azuredevops_rule = self.get_rule(
data={
"title": "Hello",
"description": "Fix this.",
"project": "0987654321",
"work_item_type": "Microsoft.VSTS.WorkItemTypes.Task",
"integration": self.integration.model.id,
}
)
azuredevops_rule.rule = self.create_project_rule(project=self.project)
responses.reset()
responses.add(
responses.PATCH,
"https://fabrikam-fiber-inc.visualstudio.com/0987654321/_apis/wit/workitems/$Microsoft.VSTS.WorkItemTypes.Task",
body=WORK_ITEM_RESPONSE,
content_type="application/json",
)
after_res = azuredevops_rule.after(event=event)
results = list(after_res)
assert len(results) == 1
# Trigger rule callback
rule_future = RuleFuture(rule=azuredevops_rule, kwargs=results[0].kwargs)
results[0].callback(event, futures=[rule_future])
data = orjson.loads(responses.calls[0].response.text)
assert data["fields"]["System.Title"] == "Hello"
assert data["fields"]["System.Description"] == "Fix this."
external_issue = ExternalIssue.objects.get(key="309")
assert external_issue
@responses.activate
def test_doesnt_create_issue(self) -> None:
"""Don't create an issue if one already exists on the event"""
self.mock_categories("ac7c05bb-7f8e-4880-85a6-e08f37fd4a10")
event = self.get_event()
external_issue = ExternalIssue.objects.create(
organization_id=self.organization.id,
integration_id=self.integration.model.id,
key="6",
title=event.title,
description="Fix this.",
)
GroupLink.objects.create(
group_id=event.group.id,
project_id=self.project.id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
data={"provider": self.integration.model.provider},
)
responses.add(
responses.GET,
"https://fabrikam-fiber-inc.visualstudio.com/_apis/projects?stateFilter=WellFormed&%24skip=0&%24top=100",
body=GET_PROJECTS_RESPONSE,
content_type="application/json",
)
azuredevops_rule = self.get_rule(
data={
"title": "Hello",
"description": "Fix this.",
"project": "0987654321",
"work_item_type": "Microsoft.VSTS.WorkItemTypes.Task",
"integration": self.integration.model.id,
}
)
azuredevops_rule.rule = Rule.objects.create(project=self.project, label="test rule")
results = list(azuredevops_rule.after(event=event))
assert len(results) == 1
results[0].callback(event, futures=[])
assert len(responses.calls) == 0
def test_render_label(self) -> None:
azuredevops_rule = self.get_rule(
data={
"integration": self.integration.model.id,
"work_item_type": "Microsoft.VSTS.WorkItemTypes.Task",
"project": "0987654321",
"dynamic_form_fields": {
"project": {
"name": "project",
"required": True,
"type": "choice",
"choices": [("ac7c05bb-7f8e-4880-85a6-e08f37fd4a10", "Fabrikam-Fiber-Git")],
"defaultValue": "ac7c05bb-7f8e-4880-85a6-e08f37fd4a10",
"label": "Project",
"placeholder": "ac7c05bb-7f8e-4880-85a6-e08f37fd4a10",
"updatesForm": True,
},
"work_item_type": {
"name": "work_item_type",
"required": True,
"type": "choice",
"choices": [
("Microsoft.VSTS.WorkItemTypes.Issue", "Issue"),
("Microsoft.VSTS.WorkItemTypes.Epic", "Epic"),
("Microsoft.VSTS.WorkItemTypes.TestCase", "Test Case"),
("Microsoft.VSTS.WorkItemTypes.SharedStep", "Shared Steps"),
("Microsoft.VSTS.WorkItemTypes.SharedParameter", "Shared Parameter"),
(
"Microsoft.VSTS.WorkItemTypes.CodeReviewRequest",
"Code Review Request",
),
(
"Microsoft.VSTS.WorkItemTypes.CodeReviewResponse",
"Code Review Response",
),
("Microsoft.VSTS.WorkItemTypes.FeedbackRequest", "Feedback Request"),
("Microsoft.VSTS.WorkItemTypes.FeedbackResponse", "Feedback Response"),
("Microsoft.VSTS.WorkItemTypes.TestPlan", "Test Plan"),
("Microsoft.VSTS.WorkItemTypes.TestSuite", "Test Suite"),
("Microsoft.VSTS.WorkItemTypes.Task", "Task"),
],
"defaultValue": "Microsoft.VSTS.WorkItemTypes.Issue",
"label": "Work Item Type",
"placeholder": "Bug",
},
},
}
)
assert (
azuredevops_rule.render_label()
== """Create an Azure DevOps work item in fabrikam-fiber-inc with these """
)
def test_render_label_without_integration(self) -> None:
integration = self.create_integration(
organization=self.organization,
provider="vsts",
external_id="vsts:2",
)
deleted_id = integration.id
with assume_test_silo_mode(SiloMode.CONTROL):
integration.delete()
rule = self.get_rule(data={"integration": deleted_id})
assert rule.render_label() == "Create an Azure DevOps work item in [removed] with these "
|
AzureDevopsCreateTicketActionTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/adam_test.py
|
{
"start": 1966,
"end": 18994
}
|
class ____(test.TestCase):
def doTestSparse(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = ref_variable.RefVariable(var0_np)
var1 = ref_variable.RefVariable(var1_np)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = indexed_slices.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = indexed_slices.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = adam.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSparse(self):
with ops.Graph().as_default():
self.doTestSparse(use_resource=False)
def testResourceSparse(self):
with ops.Graph().as_default():
self.doTestSparse(use_resource=True)
def testSparseDevicePlacement(self):
with ops.Graph().as_default():
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
optimizer = adam.AdamOptimizer(3.0)
minimize_op = optimizer.minimize(gathered_sum)
self.evaluate(variables.global_variables_initializer())
minimize_op.run()
def testGatherGradientWithBadIndicesPolicy(self):
with ops.Graph().as_default():
with self.cached_session(force_gpu=test.is_gpu_available()):
var = variables.Variable([1.0, 2.0])
indices = constant_op.constant([[1], [-1], [0]], dtype=dtypes.int32)
out = array_ops.gather_nd(var,
array_ops.expand_dims(indices, axis=-1),
batch_dims=0,
bad_indices_policy="IGNORE")
optimizer = adam.AdamOptimizer(2.0, 0.0, 1.0)
minimize_op = optimizer.minimize(out)
self.evaluate(variables.global_variables_initializer())
minimize_op.run()
def testSparseRepeatedIndices(self):
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = indexed_slices.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = indexed_slices.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adam.AdamOptimizer().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adam.AdamOptimizer().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
def doTestBasic(self, use_resource=False, use_callable_params=False):
if context.executing_eagerly() and not use_resource:
self.skipTest(
"Skipping test with use_resource=False and executing eagerly.")
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
else:
var0 = ref_variable.RefVariable(var0_np)
var1 = ref_variable.RefVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = adam.AdamOptimizer(learning_rate=learning_rate)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
opt_variables = opt.variables()
beta1_power, beta2_power = opt._get_beta_accumulators()
self.assertTrue(beta1_power is not None)
self.assertTrue(beta2_power is not None)
self.assertIn(beta1_power, opt_variables)
self.assertIn(beta2_power, opt_variables)
# Ensure that non-slot variables are the same type as the requested
# variables.
self.assertEqual(
use_resource,
resource_variable_ops.is_resource_variable(beta1_power))
self.assertEqual(
use_resource,
resource_variable_ops.is_resource_variable(beta2_power))
if not context.executing_eagerly():
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
self.assertEqual(0, len(opt.variables()))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
if not context.executing_eagerly():
self.evaluate(update)
elif t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta2_power))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if use_resource:
self.assertEqual("var0_%d/Adam:0" % (i,),
opt.get_slot(var=var0, name="m").name)
def testBasic(self):
with self.cached_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes
@test_util.disable_tfrt("b/168527439: invalid runtime fallback "
"resource variable reference on GPU.")
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
@test_util.disable_tfrt("b/153089059: cannot create half tensor on GPU.")
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_resource=True, use_callable_params=True)
def testTensorLearningRate(self):
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.AdamOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t,
self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSharing(self):
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.AdamOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t,
self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.disable_tfrt("b/168527439: invalid runtime fallback "
"resource variable reference on GPU.")
def testTwoSessions(self):
optimizer = adam.AdamOptimizer()
with context.eager_mode():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
g = ops.Graph()
with g.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
gg = ops.Graph()
with gg.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
        # If the optimizer saves any state not keyed by graph, the following
        # line fails.
optimizer.apply_gradients([(grads0, var0)])
@test_util.disable_tfrt("b/168527439: invalid runtime fallback "
"resource variable reference on GPU.")
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
opt = adam.AdamOptimizer(1.)
opt.minimize(lambda: v1 + v2)
# There should be two non-slot variables, and two unique slot variables
# for v1 and v2 respectively.
self.assertEqual(6, len({id(v) for v in opt.variables()}))
@test_util.deprecated_graph_mode_only
def testXlaSharding(self):
dtype = dtypes.float32
with self.session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np, name="var0")
var1 = resource_variable_ops.ResourceVariable(var1_np, name="var1")
var0, var1 = [
xla_sharding.mesh_split(
v, np.array([0, 1]), [0], use_sharding_op=False)
for v in (var0, var1)
]
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 0.001
opt = adam.AdamOptimizer(learning_rate=learning_rate)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
self.evaluate(update)
# The beta accumulators are not sharded.
beta1_power, beta2_power = opt._get_beta_accumulators()
self.assertIsNone(xla_sharding.get_tensor_sharding(beta1_power))
self.assertIsNone(xla_sharding.get_tensor_sharding(beta2_power))
# Variables and slots are sharded.
for v in (var0, var1):
self.assertIsNotNone(xla_sharding.get_tensor_sharding(v))
for slot_name in ("m", "v"):
slot = opt.get_slot(v, slot_name)
self.assertIsNotNone(xla_sharding.get_tensor_sharding(slot))
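# A minimal numpy sketch of the update rule these tests compare against; the
# real `adam_update_numpy` helper is defined earlier in this test module, and
# the defaults below mirror AdamOptimizer's (lr=0.001, beta1=0.9, beta2=0.999,
# epsilon=1e-8).
def _adam_update_numpy_sketch(param, g_t, t, m, v, alpha=0.001, beta1=0.9,
                              beta2=0.999, epsilon=1e-8):
  lr_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)  # bias-corrected step
  m_t = beta1 * m + (1 - beta1) * g_t  # first-moment (mean) estimate
  v_t = beta2 * v + (1 - beta2) * g_t * g_t  # second-moment estimate
  param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
  return param_t, m_t, v_t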
if __name__ == "__main__":
test.main()
|
AdamOptimizerTest
|
python
|
getsentry__sentry
|
src/sentry/utils/meta.py
|
{
"start": 340,
"end": 6351
}
|
class ____:
"""
    A lazy view onto detached validation and normalization meta data. It allows
    safe traversal of the meta tree and lazy creation of deep paths. Use ``enter``
    to get a view of the meta data inside a specific key.
The ``Meta`` object is a shallow view onto the top-level meta structure and
only traverses data when actually accessing attributes. Thus, constructing
Meta or calling ``enter`` is relatively cheap.
To modify data for a certain path, use ``create`` and modify the returned
dict. Alternatively, use the ``merge`` or ``add_error`` convenience methods.
"""
def __init__(self, meta=None, path=None):
self._meta = {} if meta is None else meta
self._path = path or []
def enter(self, *path):
"""
        Enters the sub meta data at the specified path. This always returns a
        new ``Meta`` object, regardless of whether the path already exists.
"""
return Meta(self._meta, path=self._path + [str(p) for p in path])
@property
def path(self):
"""
Returns the full path of this meta instance, joined with dots (".").
"""
return ".".join(self._path)
def raw(self):
"""
Returns the raw meta tree at the current path, if it exists; otherwise
an empty object. This will contain both the meta data of the key ("")
and sub meta trees.
It is not safe to mutate the return value since it might be detached
from the actual meta tree.
"""
meta = self._meta
for key in self._path:
meta = meta.get(key) or {}
return meta
def get(self):
"""
Returns meta data of the item at the current path, or an empty dict.
It is not safe to mutate the return value since it might be detached
from the actual meta tree.
"""
return self.raw().get("") or {}
def create(self):
"""
Creates an empty meta data entry corresponding to the current path. This
recursively creates the entire parent tree.
"""
meta = self._meta
for key in self._path + [""]:
if key not in meta or meta[key] is None:
meta[key] = {}
meta = meta[key]
return meta
def merge(self, other):
"""
Merges meta data of the given other ``Meta`` object into the current
path.
If no meta data entry exists for the current path, it is created, along
with the entire parent tree.
"""
other = other.get()
if not other:
return
meta = self.create()
err = meta.get("err")
meta.update(other)
if err and other.get("err"):
meta["err"] = err + other["err"]
return meta
def iter_errors(self):
"""
Iterates over meta errors of the item at the current path, if any.
        Each error is a tuple ``(type, data)``, where:
        - ``type`` is the error constant also used in EventError
        - ``data`` is a dictionary of additional error info
"""
return (([err, {}] if isinstance(err, str) else err) for err in self.get().get("err") or ())
def get_event_errors(self):
"""
Returns all errors of the item at the current path in EventError schema,
which can directly be stored to an event's "errors" list.
"""
errors = []
value = self.get().get("val")
for error, data in self.iter_errors():
eventerror = dict(data)
eventerror["type"] = error
if self._path:
eventerror["name"] = ".".join(self._path)
if value is not None:
eventerror["value"] = value
value = None
errors.append(eventerror)
return errors
def add_error(self, error, value=None, data=None):
"""
        Adds an error to the meta data at the current path. The ``error``
        argument is converted to a string. If the optional ``data`` mapping is
        specified, it is stored alongside the error.
        If an optional ``value`` is specified, it is attached as the original
        value in the meta data. Note that there is only one original value, not
        one per error.
If no meta data entry exists for the current path, it is created, along
with the entire parent tree.
"""
meta = self.create()
if "err" not in meta or meta["err"] is None:
meta["err"] = []
error = str(error)
if isinstance(data, Mapping):
error = [error, dict(data)]
meta["err"].append(error)
if value is not None:
meta["val"] = value
def add_remark(self, rem: Remark, value=None):
"""
Adds a remark to the meta data at the current path.
        If an optional ``value`` is specified, it is attached as the original
        value in the meta data. Note that there is only one original value, not
        one per remark.
`range_start` and `range_end` in `rem` are byte offsets.
If no meta data entry exists for the current path, it is created, along
with the entire parent tree.
"""
meta = self.create()
if "rem" not in meta or meta["rem"] is None:
meta["rem"] = []
rem_list: list[str | int] = [rem["rule_id"], rem["type"]]
range_start = rem.get("range_start")
if range_start is not None:
rem_list.append(range_start)
range_end = rem.get("range_end")
if range_end is not None:
rem_list.append(range_end)
meta["rem"].append(rem_list)
if value is not None:
meta["val"] = value
def __iter__(self):
"""
Iterates all child meta entries that potentially have errors set.
"""
for key in self.raw():
if key != "":
yield self.enter(key)
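# A minimal usage sketch (hypothetical payload) of the API above:
#   meta = Meta()
#   meta.enter("exception", "values", 0).add_error("invalid_data", value="boom")
#   meta.enter("exception", "values", 0).get_event_errors()
#   # -> [{"type": "invalid_data", "name": "exception.values.0", "value": "boom"}]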
|
Meta
|
python
|
ray-project__ray
|
python/ray/train/v2/api/config.py
|
{
"start": 9231,
"end": 13840
}
|
class ____:
"""Runtime configuration for training runs.
Args:
name: Name of the trial or experiment. If not provided, will be deduced
from the Trainable.
storage_path: Path where all results and checkpoints are persisted.
Can be a local directory or a destination on cloud storage.
For multi-node training/tuning runs, this must be set to a
shared storage location (e.g., S3, NFS).
This defaults to the local ``~/ray_results`` directory.
storage_filesystem: A custom filesystem to use for storage.
If this is provided, `storage_path` should be a path with its
prefix stripped (e.g., `s3://bucket/path` -> `bucket/path`).
failure_config: Failure mode configuration.
checkpoint_config: Checkpointing configuration.
callbacks: [DeveloperAPI] A list of callbacks that the Ray Train controller
will invoke during training.
worker_runtime_env: [DeveloperAPI] Runtime environment configuration
for all Ray Train worker actors.
"""
name: Optional[str] = None
storage_path: Optional[str] = None
storage_filesystem: Optional[pyarrow.fs.FileSystem] = None
failure_config: Optional[FailureConfig] = None
checkpoint_config: Optional[CheckpointConfig] = None
callbacks: Optional[List["UserCallback"]] = None
worker_runtime_env: Optional[Union[dict, RuntimeEnv]] = None
sync_config: str = _DEPRECATED
verbose: str = _DEPRECATED
stop: str = _DEPRECATED
progress_reporter: str = _DEPRECATED
log_to_file: str = _DEPRECATED
def __post_init__(self):
from ray.train.constants import DEFAULT_STORAGE_PATH
if self.storage_path is None:
self.storage_path = DEFAULT_STORAGE_PATH
if not self.failure_config:
self.failure_config = FailureConfig()
if not self.checkpoint_config:
self.checkpoint_config = CheckpointConfig()
if isinstance(self.storage_path, Path):
self.storage_path = self.storage_path.as_posix()
run_config_deprecation_message = (
"`RunConfig({})` is deprecated. This configuration was a "
"Ray Tune API that did not support Ray Train usage well, "
"so we are dropping support going forward. "
"If you heavily rely on these configurations, "
"you can run Ray Train as a single Ray Tune trial. "
"See this issue for more context: "
"https://github.com/ray-project/ray/issues/49454"
)
unsupported_params = [
"sync_config",
"verbose",
"stop",
"progress_reporter",
"log_to_file",
]
for param in unsupported_params:
if getattr(self, param) != _DEPRECATED:
raise DeprecationWarning(run_config_deprecation_message.format(param))
if not self.name:
self.name = f"ray_train_run-{date_str()}"
self.callbacks = self.callbacks or []
self.worker_runtime_env = self.worker_runtime_env or {}
from ray.train.v2.api.callback import RayTrainCallback
if not all(isinstance(cb, RayTrainCallback) for cb in self.callbacks):
raise ValueError(
"All callbacks must be instances of `ray.train.UserCallback`. "
"Passing in a Ray Tune callback is no longer supported. "
"See this issue for more context: "
"https://github.com/ray-project/ray/issues/49454"
)
if not isinstance(self.checkpoint_config, CheckpointConfig):
raise ValueError(
f"Invalid `CheckpointConfig` type: {self.checkpoint_config.__class__}. "
"Use `ray.train.CheckpointConfig` instead. "
"See this issue for more context: "
"https://github.com/ray-project/ray/issues/49454"
)
if not isinstance(self.failure_config, FailureConfig):
raise ValueError(
f"Invalid `FailureConfig` type: {self.failure_config.__class__}. "
"Use `ray.train.FailureConfig` instead. "
"See this issue for more context: "
"https://github.com/ray-project/ray/issues/49454"
)
@cached_property
def storage_context(self) -> StorageContext:
return StorageContext(
storage_path=self.storage_path,
experiment_dir_name=self.name,
storage_filesystem=self.storage_filesystem,
)
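# A minimal usage sketch (hypothetical name and path; assumes a Ray Train v2
# environment with default failure/checkpoint configs):
#   run_config = RunConfig(name="demo-run", storage_path="/tmp/ray_results")
#   run_config.storage_context  # StorageContext is built lazily and cached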
|
RunConfig
|
python
|
ray-project__ray
|
rllib/models/catalog.py
|
{
"start": 3840,
"end": 36145
}
|
class ____:
"""Registry of models, preprocessors, and action distributions for envs.
.. testcode::
:skipif: True
prep = ModelCatalog.get_preprocessor(env)
observation = prep.transform(raw_observation)
dist_class, dist_dim = ModelCatalog.get_action_dist(
env.action_space, {})
model = ModelCatalog.get_model_v2(
obs_space, action_space, num_outputs, options)
dist = dist_class(model.outputs, model)
action = dist.sample()
"""
@staticmethod
@DeveloperAPI
def get_action_dist(
action_space: gym.Space,
config: ModelConfigDict,
dist_type: Optional[Union[str, Type[ActionDistribution]]] = None,
framework: str = "tf",
**kwargs
) -> (type, int):
"""Returns a distribution class and size for the given action space.
Args:
action_space: Action space of the target gym env.
config (Optional[dict]): Optional model config.
dist_type (Optional[Union[str, Type[ActionDistribution]]]):
Identifier of the action distribution (str) interpreted as a
hint or the actual ActionDistribution class to use.
framework: One of "tf2", "tf", "torch", or "jax".
kwargs: Optional kwargs to pass on to the Distribution's
constructor.
Returns:
Tuple:
- dist_class (ActionDistribution): Python class of the
distribution.
- dist_dim (int): The size of the input vector to the
distribution.
"""
dist_cls = None
config = config or MODEL_DEFAULTS
# Custom distribution given.
if config.get("custom_action_dist"):
custom_action_config = config.copy()
action_dist_name = custom_action_config.pop("custom_action_dist")
logger.debug("Using custom action distribution {}".format(action_dist_name))
dist_cls = _global_registry.get(RLLIB_ACTION_DIST, action_dist_name)
return ModelCatalog._get_multi_action_distribution(
dist_cls, action_space, custom_action_config, framework
)
# Dist_type is given directly as a class.
elif (
type(dist_type) is type
and issubclass(dist_type, ActionDistribution)
and dist_type not in (MultiActionDistribution, TorchMultiActionDistribution)
):
dist_cls = dist_type
# Box space -> DiagGaussian OR Deterministic.
elif isinstance(action_space, Box):
if action_space.dtype.name.startswith("int"):
low_ = np.min(action_space.low)
high_ = np.max(action_space.high)
dist_cls = (
TorchMultiCategorical if framework == "torch" else MultiCategorical
)
num_cats = int(np.prod(action_space.shape))
return (
partial(
dist_cls,
input_lens=[high_ - low_ + 1 for _ in range(num_cats)],
action_space=action_space,
),
num_cats * (high_ - low_ + 1),
)
else:
if len(action_space.shape) > 1:
raise UnsupportedSpaceException(
"Action space has multiple dimensions "
"{}. ".format(action_space.shape)
+ "Consider reshaping this into a single dimension, "
"using a custom action distribution, "
"using a Tuple action space, or the multi-agent API."
)
# TODO(sven): Check for bounds and return SquashedNormal, etc..
if dist_type is None:
return (
partial(
TorchDiagGaussian if framework == "torch" else DiagGaussian,
action_space=action_space,
),
DiagGaussian.required_model_output_shape(action_space, config),
)
elif dist_type == "deterministic":
dist_cls = (
TorchDeterministic if framework == "torch" else Deterministic
)
# Discrete Space -> Categorical.
elif isinstance(action_space, Discrete):
if framework == "torch":
dist_cls = TorchCategorical
elif framework == "jax":
from ray.rllib.models.jax.jax_action_dist import JAXCategorical
dist_cls = JAXCategorical
else:
dist_cls = Categorical
# Tuple/Dict Spaces -> MultiAction.
elif dist_type in (
MultiActionDistribution,
TorchMultiActionDistribution,
) or isinstance(action_space, (Tuple, Dict)):
return ModelCatalog._get_multi_action_distribution(
(
MultiActionDistribution
if framework == "tf"
else TorchMultiActionDistribution
),
action_space,
config,
framework,
)
# Simplex -> Dirichlet.
elif isinstance(action_space, Simplex):
dist_cls = TorchDirichlet if framework == "torch" else Dirichlet
# MultiDiscrete -> MultiCategorical.
elif isinstance(action_space, MultiDiscrete):
dist_cls = (
TorchMultiCategorical if framework == "torch" else MultiCategorical
)
return partial(dist_cls, input_lens=action_space.nvec), int(
sum(action_space.nvec)
)
# Unknown type -> Error.
else:
raise NotImplementedError(
"Unsupported args: {} {}".format(action_space, dist_type)
)
return dist_cls, int(dist_cls.required_model_output_shape(action_space, config))
@staticmethod
@DeveloperAPI
def get_action_shape(
action_space: gym.Space, framework: str = "tf"
) -> (np.dtype, List[int]):
"""Returns action tensor dtype and shape for the action space.
Args:
action_space: Action space of the target gym env.
framework: The framework identifier. One of "tf" or "torch".
Returns:
(dtype, shape): Dtype and shape of the actions tensor.
"""
dl_lib = torch if framework == "torch" else tf
if isinstance(action_space, Discrete):
return action_space.dtype, (None,)
elif isinstance(action_space, (Box, Simplex)):
if np.issubdtype(action_space.dtype, np.floating):
return dl_lib.float32, (None,) + action_space.shape
elif np.issubdtype(action_space.dtype, np.integer):
return dl_lib.int32, (None,) + action_space.shape
else:
raise ValueError("RLlib doesn't support non int or float box spaces")
elif isinstance(action_space, MultiDiscrete):
return action_space.dtype, (None,) + action_space.shape
elif isinstance(action_space, (Tuple, Dict)):
flat_action_space = flatten_space(action_space)
size = 0
all_discrete = True
for i in range(len(flat_action_space)):
if isinstance(flat_action_space[i], Discrete):
size += 1
else:
all_discrete = False
size += np.prod(flat_action_space[i].shape)
size = int(size)
return dl_lib.int32 if all_discrete else dl_lib.float32, (None, size)
else:
raise NotImplementedError(
"Action space {} not supported".format(action_space)
)
@staticmethod
@DeveloperAPI
def get_action_placeholder(
action_space: gym.Space, name: str = "action"
) -> TensorType:
"""Returns an action placeholder consistent with the action space
Args:
action_space: Action space of the target gym env.
name: An optional string to name the placeholder by.
Default: "action".
Returns:
action_placeholder: A placeholder for the actions
"""
dtype, shape = ModelCatalog.get_action_shape(action_space, framework="tf")
return tf1.placeholder(dtype, shape=shape, name=name)
@staticmethod
@DeveloperAPI
def get_model_v2(
obs_space: gym.Space,
action_space: gym.Space,
num_outputs: int,
model_config: ModelConfigDict,
framework: str = "tf",
name: str = "default_model",
model_interface: type = None,
default_model: type = None,
**model_kwargs
) -> ModelV2:
"""Returns a suitable model compatible with given spaces and output.
Args:
obs_space: Observation space of the target gym env. This
may have an `original_space` attribute that specifies how to
unflatten the tensor into a ragged tensor.
action_space: Action space of the target gym env.
num_outputs: The size of the output vector of the model.
model_config: The "model" sub-config dict
within the Algorithm's config dict.
framework: One of "tf2", "tf", "torch", or "jax".
name: Name (scope) for the model.
model_interface: Interface required for the model
default_model: Override the default class for the model. This
only has an effect when not using a custom model
model_kwargs: Args to pass to the ModelV2 constructor
Returns:
model (ModelV2): Model to use for the policy.
"""
# Validate the given config dict.
ModelCatalog._validate_config(
config=model_config, action_space=action_space, framework=framework
)
if model_config.get("custom_model"):
# Allow model kwargs to be overridden / augmented by
# custom_model_config.
customized_model_kwargs = dict(
model_kwargs, **model_config.get("custom_model_config", {})
)
if isinstance(model_config["custom_model"], type):
model_cls = model_config["custom_model"]
elif (
isinstance(model_config["custom_model"], str)
and "." in model_config["custom_model"]
):
return from_config(
cls=model_config["custom_model"],
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=customized_model_kwargs,
name=name,
)
else:
model_cls = _global_registry.get(
RLLIB_MODEL, model_config["custom_model"]
)
# Only allow ModelV2 or native keras Models.
if not issubclass(model_cls, ModelV2):
if framework not in ["tf", "tf2"] or not issubclass(
model_cls, tf.keras.Model
):
raise ValueError(
"`model_cls` must be a ModelV2 sub-class, but is"
" {}!".format(model_cls)
)
logger.info("Wrapping {} as {}".format(model_cls, model_interface))
model_cls = ModelCatalog._wrap_if_needed(model_cls, model_interface)
if framework in ["tf2", "tf"]:
# Try wrapping custom model with LSTM/attention, if required.
if model_config.get("use_lstm") or model_config.get("use_attention"):
from ray.rllib.models.tf.attention_net import (
AttentionWrapper,
)
from ray.rllib.models.tf.recurrent_net import (
LSTMWrapper,
)
wrapped_cls = model_cls
forward = wrapped_cls.forward
model_cls = ModelCatalog._wrap_if_needed(
wrapped_cls,
LSTMWrapper
if model_config.get("use_lstm")
else AttentionWrapper,
)
model_cls._wrapped_forward = forward
                # Obsolete: Track and warn if vars were created but not
                # registered. Only still do this if users do register their
                # variables; if they don't (which they shouldn't), skip the
                # check here.
created = set()
def track_var_creation(next_creator, **kw):
v = next_creator(**kw)
created.add(v.ref())
return v
with tf.variable_creator_scope(track_var_creation):
if issubclass(model_cls, tf.keras.Model):
instance = model_cls(
input_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
name=name,
**customized_model_kwargs,
)
else:
# Try calling with kwargs first (custom ModelV2 should
# accept these as kwargs, not get them from
# config["custom_model_config"] anymore).
try:
instance = model_cls(
obs_space,
action_space,
num_outputs,
model_config,
name,
**customized_model_kwargs,
)
except TypeError as e:
# Keyword error: Try old way w/o kwargs.
if "__init__() got an unexpected " in e.args[0]:
instance = model_cls(
obs_space,
action_space,
num_outputs,
model_config,
name,
**model_kwargs,
)
logger.warning(
"Custom ModelV2 should accept all custom "
"options as **kwargs, instead of expecting"
" them in config['custom_model_config']!"
)
# Other error -> re-raise.
else:
raise e
# User still registered TFModelV2's variables: Check, whether
# ok.
registered = []
if not isinstance(instance, tf.keras.Model):
registered = set(instance.var_list)
if len(registered) > 0:
not_registered = set()
for var in created:
if var not in registered:
not_registered.add(var)
if not_registered:
raise ValueError(
"It looks like you are still using "
"`{}.register_variables()` to register your "
"model's weights. This is no longer required, but "
"if you are still calling this method at least "
"once, you must make sure to register all created "
"variables properly. The missing variables are {},"
" and you only registered {}. "
"Did you forget to call `register_variables()` on "
"some of the variables in question?".format(
instance, not_registered, registered
)
)
elif framework == "torch":
# Try wrapping custom model with LSTM/attention, if required.
if model_config.get("use_lstm") or model_config.get("use_attention"):
from ray.rllib.models.torch.attention_net import AttentionWrapper
from ray.rllib.models.torch.recurrent_net import LSTMWrapper
wrapped_cls = model_cls
forward = wrapped_cls.forward
model_cls = ModelCatalog._wrap_if_needed(
wrapped_cls,
LSTMWrapper
if model_config.get("use_lstm")
else AttentionWrapper,
)
model_cls._wrapped_forward = forward
# PyTorch automatically tracks nn.Modules inside the parent
# nn.Module's constructor.
# Try calling with kwargs first (custom ModelV2 should
# accept these as kwargs, not get them from
# config["custom_model_config"] anymore).
try:
instance = model_cls(
obs_space,
action_space,
num_outputs,
model_config,
name,
**customized_model_kwargs,
)
except TypeError as e:
# Keyword error: Try old way w/o kwargs.
if "__init__() got an unexpected " in e.args[0]:
instance = model_cls(
obs_space,
action_space,
num_outputs,
model_config,
name,
**model_kwargs,
)
logger.warning(
"Custom ModelV2 should accept all custom "
"options as **kwargs, instead of expecting"
" them in config['custom_model_config']!"
)
# Other error -> re-raise.
else:
raise e
else:
raise NotImplementedError(
"`framework` must be 'tf2|tf|torch', but is "
"{}!".format(framework)
)
return instance
# Find a default TFModelV2 and wrap with model_interface.
if framework in ["tf", "tf2"]:
v2_class = None
# Try to get a default v2 model.
if not model_config.get("custom_model"):
v2_class = default_model or ModelCatalog._get_v2_model_class(
obs_space, model_config, framework=framework
)
if not v2_class:
raise ValueError("ModelV2 class could not be determined!")
if model_config.get("use_lstm") or model_config.get("use_attention"):
from ray.rllib.models.tf.attention_net import (
AttentionWrapper,
)
from ray.rllib.models.tf.recurrent_net import (
LSTMWrapper,
)
wrapped_cls = v2_class
if model_config.get("use_lstm"):
v2_class = ModelCatalog._wrap_if_needed(wrapped_cls, LSTMWrapper)
v2_class._wrapped_forward = wrapped_cls.forward
else:
v2_class = ModelCatalog._wrap_if_needed(
wrapped_cls, AttentionWrapper
)
v2_class._wrapped_forward = wrapped_cls.forward
# Wrap in the requested interface.
wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)
if issubclass(wrapper, tf.keras.Model):
model = wrapper(
input_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
name=name,
**dict(model_kwargs, **model_config),
)
return model
return wrapper(
obs_space, action_space, num_outputs, model_config, name, **model_kwargs
)
# Find a default TorchModelV2 and wrap with model_interface.
elif framework == "torch":
# Try to get a default v2 model.
if not model_config.get("custom_model"):
v2_class = default_model or ModelCatalog._get_v2_model_class(
obs_space, model_config, framework=framework
)
if not v2_class:
raise ValueError("ModelV2 class could not be determined!")
if model_config.get("use_lstm") or model_config.get("use_attention"):
from ray.rllib.models.torch.attention_net import AttentionWrapper
from ray.rllib.models.torch.recurrent_net import LSTMWrapper
wrapped_cls = v2_class
forward = wrapped_cls.forward
if model_config.get("use_lstm"):
v2_class = ModelCatalog._wrap_if_needed(wrapped_cls, LSTMWrapper)
else:
v2_class = ModelCatalog._wrap_if_needed(
wrapped_cls, AttentionWrapper
)
v2_class._wrapped_forward = forward
# Wrap in the requested interface.
wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)
return wrapper(
obs_space, action_space, num_outputs, model_config, name, **model_kwargs
)
# Find a default JAXModelV2 and wrap with model_interface.
elif framework == "jax":
v2_class = default_model or ModelCatalog._get_v2_model_class(
obs_space, model_config, framework=framework
)
# Wrap in the requested interface.
wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)
return wrapper(
obs_space, action_space, num_outputs, model_config, name, **model_kwargs
)
else:
raise NotImplementedError(
"`framework` must be 'tf2|tf|torch', but is {}!".format(framework)
)
@staticmethod
@DeveloperAPI
def get_preprocessor(
env: gym.Env, options: Optional[dict] = None, include_multi_binary: bool = False
) -> Preprocessor:
"""Returns a suitable preprocessor for the given env.
This is a wrapper for get_preprocessor_for_space().
"""
return ModelCatalog.get_preprocessor_for_space(
env.observation_space, options, include_multi_binary
)
@staticmethod
@DeveloperAPI
def get_preprocessor_for_space(
observation_space: gym.Space,
options: dict = None,
include_multi_binary: bool = False,
) -> Preprocessor:
"""Returns a suitable preprocessor for the given observation space.
Args:
observation_space: The input observation space.
options: Options to pass to the preprocessor.
include_multi_binary: Whether to include the MultiBinaryPreprocessor in
the possible preprocessors returned by this method.
Returns:
preprocessor: Preprocessor for the observations.
"""
options = options or MODEL_DEFAULTS
for k in options.keys():
if k not in MODEL_DEFAULTS:
raise Exception(
"Unknown config key `{}`, all keys: {}".format(
k, list(MODEL_DEFAULTS)
)
)
cls = get_preprocessor(
observation_space, include_multi_binary=include_multi_binary
)
prep = cls(observation_space, options)
if prep is not None:
logger.debug(
"Created preprocessor {}: {} -> {}".format(
prep, observation_space, prep.shape
)
)
return prep
@staticmethod
@PublicAPI
def register_custom_model(model_name: str, model_class: type) -> None:
"""Register a custom model class by name.
The model can be later used by specifying {"custom_model": model_name}
in the model config.
Args:
model_name: Name to register the model under.
model_class: Python class of the model.
"""
if tf is not None:
if issubclass(model_class, tf.keras.Model):
deprecation_warning(old="register_custom_model", error=False)
_global_registry.register(RLLIB_MODEL, model_name, model_class)
@staticmethod
@PublicAPI
def register_custom_action_dist(
action_dist_name: str, action_dist_class: type
) -> None:
"""Register a custom action distribution class by name.
        The action distribution can later be used by specifying
        {"custom_action_dist": action_dist_name} in the model config.
        Args:
            action_dist_name: Name to register the action distribution under.
            action_dist_class: Python class of the action distribution.
"""
_global_registry.register(
RLLIB_ACTION_DIST, action_dist_name, action_dist_class
)
@staticmethod
def _wrap_if_needed(model_cls: type, model_interface: type) -> type:
if not model_interface or issubclass(model_cls, model_interface):
return model_cls
assert issubclass(model_cls, ModelV2), model_cls
class wrapper(model_interface, model_cls):
pass
name = "{}_as_{}".format(model_cls.__name__, model_interface.__name__)
wrapper.__name__ = name
wrapper.__qualname__ = name
return wrapper
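    # For example, _wrap_if_needed(MyModelV2, LSTMWrapper) returns a dynamically
    # created subclass named "MyModelV2_as_LSTMWrapper" whose MRO places the
    # interface (LSTMWrapper) ahead of the wrapped model class.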
@staticmethod
def _get_v2_model_class(
input_space: gym.Space, model_config: ModelConfigDict, framework: str = "tf"
) -> Type[ModelV2]:
VisionNet = None
ComplexNet = None
if framework in ["tf2", "tf"]:
from ray.rllib.models.tf.complex_input_net import (
ComplexInputNetwork as ComplexNet,
)
from ray.rllib.models.tf.fcnet import (
FullyConnectedNetwork as FCNet,
)
from ray.rllib.models.tf.visionnet import (
VisionNetwork as VisionNet,
)
elif framework == "torch":
from ray.rllib.models.torch.complex_input_net import (
ComplexInputNetwork as ComplexNet,
)
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as FCNet
from ray.rllib.models.torch.visionnet import VisionNetwork as VisionNet
elif framework == "jax":
from ray.rllib.models.jax.fcnet import FullyConnectedNetwork as FCNet
else:
raise ValueError(
"framework={} not supported in `ModelCatalog._get_v2_model_"
"class`!".format(framework)
)
orig_space = (
input_space
if not hasattr(input_space, "original_space")
else input_space.original_space
)
# `input_space` is 3D Box -> VisionNet.
if isinstance(input_space, Box) and len(input_space.shape) == 3:
if framework == "jax":
raise NotImplementedError("No non-FC default net for JAX yet!")
return VisionNet
# `input_space` is 1D Box -> FCNet.
elif (
isinstance(input_space, Box)
and len(input_space.shape) == 1
and (
not isinstance(orig_space, (Dict, Tuple))
or not any(
isinstance(s, Box) and len(s.shape) >= 2
for s in flatten_space(orig_space)
)
)
):
return FCNet
# Complex (Dict, Tuple, 2D Box (flatten), Discrete, MultiDiscrete).
else:
if framework == "jax":
raise NotImplementedError("No non-FC default net for JAX yet!")
return ComplexNet
@staticmethod
def _get_multi_action_distribution(dist_class, action_space, config, framework):
# In case the custom distribution is a child of MultiActionDistr.
# If users want to completely ignore the suggested child
# distributions, they should simply do so in their custom class'
# constructor.
if issubclass(
dist_class, (MultiActionDistribution, TorchMultiActionDistribution)
):
flat_action_space = flatten_space(action_space)
child_dists_and_in_lens = tree.map_structure(
lambda s: ModelCatalog.get_action_dist(s, config, framework=framework),
flat_action_space,
)
child_dists = [e[0] for e in child_dists_and_in_lens]
input_lens = [int(e[1]) for e in child_dists_and_in_lens]
return (
partial(
dist_class,
action_space=action_space,
child_distributions=child_dists,
input_lens=input_lens,
),
int(sum(input_lens)),
)
return dist_class, dist_class.required_model_output_shape(action_space, config)
@staticmethod
def _validate_config(
config: ModelConfigDict, action_space: gym.spaces.Space, framework: str
) -> None:
"""Validates a given model config dict.
Args:
config: The "model" sub-config dict
within the Algorithm's config dict.
action_space: The action space of the model, whose config are
validated.
framework: One of "jax", "tf2", "tf", or "torch".
Raises:
ValueError: If something is wrong with the given config.
"""
# Soft-deprecate custom preprocessors.
if config.get("custom_preprocessor") is not None:
deprecation_warning(
old="model.custom_preprocessor",
new="gym.ObservationWrapper around your env or handle complex "
"inputs inside your Model",
error=True,
)
if config.get("use_attention") and config.get("use_lstm"):
raise ValueError(
"Only one of `use_lstm` or `use_attention` may be set to True!"
)
        # For complex action spaces, only allow prev-action inputs to
        # LSTMs and attention nets if `_disable_action_flattening=True`.
# TODO: `_disable_action_flattening=True` will be the default in
# the future.
if (
(
config.get("lstm_use_prev_action")
or config.get("attention_use_n_prev_actions", 0) > 0
)
and not config.get("_disable_action_flattening")
and isinstance(action_space, (Tuple, Dict))
):
raise ValueError(
"For your complex action space (Tuple|Dict) and your model's "
"`prev-actions` setup of your model, you must set "
"`_disable_action_flattening=True` in your main config dict!"
)
if framework == "jax":
if config.get("use_attention"):
raise ValueError(
"`use_attention` not available for framework=jax so far!"
)
elif config.get("use_lstm"):
raise ValueError("`use_lstm` not available for framework=jax so far!")
|
ModelCatalog
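For context, a minimal sketch of how a custom model is registered through this catalog and later referenced from a model config; the class body is illustrative (a real model would also implement value_function()), but register_custom_model itself is taken directly from the code above:

import torch.nn as nn
from ray.rllib.models import ModelCatalog
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2

class MyTorchModel(TorchModelV2, nn.Module):
    """Hypothetical custom model; only the registration flow matters here."""
    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs, model_config, name)
        nn.Module.__init__(self)
        self.net = nn.Linear(int(obs_space.shape[0]), num_outputs)

    def forward(self, input_dict, state, seq_lens):
        # Return (outputs, state), as the ModelV2 contract requires.
        return self.net(input_dict["obs"]), state

# Makes the class resolvable via {"custom_model": "my_model"} in a model config.
ModelCatalog.register_custom_model("my_model", MyTorchModel)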
|
python
|
huggingface__transformers
|
src/transformers/models/mixtral/modeling_mixtral.py
|
{
"start": 30938,
"end": 31239
}
|
class ____(GenericForQuestionAnswering, MixtralPreTrainedModel):
pass
__all__ = [
"MixtralForCausalLM",
"MixtralForQuestionAnswering",
"MixtralModel",
"MixtralPreTrainedModel",
"MixtralForSequenceClassification",
"MixtralForTokenClassification",
]
|
MixtralForQuestionAnswering
|
python
|
google__jax
|
tests/pallas/mosaic_gpu_test.py
|
{
"start": 172140,
"end": 193301
}
|
class ____(PallasTest):
@parameterized.product(m=[512], n=[512], repeats=[1, 10],
manual_consumed_barriers=[False, True],
max_concurrent_steps=[2, 3])
def test_pipelined_copy(
self, m, n, repeats, manual_consumed_barriers, max_concurrent_steps
):
x = jax.random.uniform(jax.random.key(0), (m, n), dtype=jnp.float16)
blk_m = blk_n = 32
def copy_kernel(_, x_smem, o_smem, o_last_block_smem, *consumed_barriers):
wg_idx = lax.axis_index("wg")
o_smem[...] = x_smem[...]
o_last_block_smem[...] = x_smem[...]
if manual_consumed_barriers:
[x_barrier] = consumed_barriers
plgpu.barrier_arrive(x_barrier)
spec = pl.BlockSpec(
block_shape=(2 * blk_m, blk_n), index_map=lambda i, j: (i, j)
)
def body(*gmem_refs):
pipeline = mgpu_pipeline.emit_pipeline_warp_specialized(
copy_kernel,
grid=(m // (2 * blk_m), n // blk_n),
memory_registers=40,
max_concurrent_steps=max_concurrent_steps,
num_compute_wgs=1,
wg_axis="wg",
manual_consumed_barriers=manual_consumed_barriers,
in_specs=[spec],
out_specs=[
spec,
# Create an index-invariant output.
pl.BlockSpec(
block_shape=(2 * blk_m, blk_n), index_map=lambda i, j: (0, 0)
),
],
)
for _ in range(repeats):
pipeline(*gmem_refs) # Make sure we can run the pipeline multiple times
kernel = self.kernel(
body,
out_shape=(
jax.ShapeDtypeStruct((m, n), jnp.float16),
jax.ShapeDtypeStruct((2 * blk_m, blk_n), jnp.float16),
),
compiler_params=plgpu.CompilerParams(approx_math=True),
grid=(1,),
grid_names=("_",),
num_threads=2,
thread_name="wg",
)
out, out_last_block = kernel(x)
np.testing.assert_array_equal(out, x)
np.testing.assert_array_equal(out_last_block, x[-(2 * blk_m):, -blk_n:])
@parameterized.product(
m=[256, 64],
n=[256, 64],
num_compute_wgs=[1], # TODO(apaszke): Use 2WGs once we add support for outputs.
static=[False, True],
manual_consumed_barriers=[False, True],
in_tree_template=[(0, 1), ((0, (1,), None))],
)
@jtu.skip_if_mosaic_gpu_exceeds_shared_memory(device_patterns="RTX PRO 6000 Blackwell")
def test_elementwise_add(self, m, n, num_compute_wgs, static,
manual_consumed_barriers, in_tree_template):
self.skip_if_wg_semantics() # Crashes!
blk_m = blk_n = 64
if m % (num_compute_wgs * blk_m):
self.skipTest(f"{m=} must be divisible by {num_compute_wgs=} * {blk_m=}")
spec = pl.BlockSpec(
block_shape=(num_compute_wgs * blk_m, blk_n),
index_map=lambda i, j: (i, j),
)
in_treedef = jax.tree.structure(in_tree_template)
in_specs = jax.tree.unflatten(in_treedef, (spec, spec))
def tiled_add_kernel(_, *smems):
flat_smems, _ = jax.tree.flatten(smems)
x_smem, y_smem, o_smem, *consumed_barriers = flat_smems
wg_idx = lax.axis_index("wg")
m_slice = pl.ds(wg_idx * blk_m, blk_m)
o_smem[m_slice] = x_smem[m_slice] + y_smem[m_slice]
if manual_consumed_barriers:
[x_consumed_barrier, y_consumed_barrier] = consumed_barriers
plgpu.barrier_arrive(x_consumed_barrier)
plgpu.barrier_arrive(y_consumed_barrier)
def pipeline(*gmem_refs):
grid = (m // (num_compute_wgs * blk_m), n // blk_n)
if not static:
grid = jax.tree.map(jnp.asarray, grid)
return mgpu_pipeline.emit_pipeline_warp_specialized(
tiled_add_kernel,
grid=grid,
max_concurrent_steps=2,
num_compute_wgs=num_compute_wgs,
memory_registers=40,
wg_axis="wg",
in_specs=in_specs,
out_specs=[spec],
manual_consumed_barriers=manual_consumed_barriers,
)(*gmem_refs)
kernel = self.kernel(
pipeline,
out_shape=jax.ShapeDtypeStruct((m, n), jnp.float32),
compiler_params=plgpu.CompilerParams(approx_math=True),
grid=(1,),
grid_names=("_",),
num_threads=num_compute_wgs + 1,
thread_name="wg",
)
x = jax.random.uniform(jax.random.key(0), (m, n), dtype=jnp.float32)
y = jax.random.uniform(jax.random.key(1), (m, n), dtype=jnp.float32)
inputs = jax.tree.unflatten(in_treedef, (x, y))
np.testing.assert_allclose(kernel(*inputs), x + y, atol=1e-4)
def test_carry_accumulate(self, m=256, n=256, num_compute_wgs=2):
blk_m = blk_n = 64
@functools.partial(
self.kernel,
out_shape=jax.ShapeDtypeStruct((blk_m, blk_n), jnp.float32),
scratch_shapes=[
plgpu.SMEM((blk_m, blk_n), jnp.float32),
],
compiler_params=plgpu.CompilerParams(approx_math=True),
grid=(1,),
grid_names=("_",),
num_threads=num_compute_wgs + 1,
thread_name="wg",
)
def kernel(x_gmem, acc_gmem, acc_smem):
def _compute_thread(pipeline_fn):
# Cast the init value to the same layout as x_smem, so the pipeline loop
# carry has a constant signature.
o_acc = plgpu.layout_cast(
jnp.full((blk_m, blk_n,), 0, dtype=jnp.float32),
plgpu.Layout.WG_STRIDED((blk_m, blk_n), vec_size=2))
# Pass control to the pipeline emitter and return the final carry.
o_final = pipeline_fn(o_acc)
# Note that both compute WGs are doing identical work so the potential
# race condition on the store here won't affect the result.
acc_smem[...] = o_final
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(acc_smem, acc_gmem)
plgpu.wait_smem_to_gmem(0)
def tiled_acc_kernel(_, x_smem, carry):
new_carry = x_smem[...] + carry
return new_carry
pipeline = mgpu_pipeline.emit_pipeline_warp_specialized(
tiled_acc_kernel,
grid=(m // blk_m, n // blk_n),
max_concurrent_steps=2,
num_compute_wgs=num_compute_wgs,
memory_registers=40,
wg_axis="wg",
compute_context=_compute_thread,
in_specs=[
pl.BlockSpec(
block_shape=(blk_m, blk_n), index_map=lambda i, j: (i, j)
)
],
out_specs=[],
)
pipeline(x_gmem)
x = jax.random.uniform(jax.random.key(0), (m, n), dtype=jnp.float32)
ref = jnp.sum(jnp.stack(np.split(x, m // blk_m, axis=0)), axis=0)
ref = jnp.sum(jnp.stack(np.split(ref, n // blk_n, axis=1)), axis=0)
np.testing.assert_allclose(kernel(x), ref, atol=1e-4)
@parameterized.product(
num_compute_wgs=[1], # TODO(apaszke): Use 2WGs once we add support for outputs.
static=[False, True],
manual_consumed_barriers=[False, True],
small_shape=[True, False],
max_concurrent_steps=[2, 3, 4],
)
@jtu.skip_if_mosaic_gpu_exceeds_shared_memory(device_patterns="RTX PRO 6000 Blackwell")
def test_delay_release(
self, num_compute_wgs, static, manual_consumed_barriers, small_shape,
max_concurrent_steps
):
if small_shape:
m = n = 64
else:
m = n = 256
blk_m, blk_n = 32, 64
spec = plgpu.BlockSpec(
block_shape=(num_compute_wgs * blk_m, blk_n),
index_map=lambda i, j: (i, j),
delay_release=1,
)
out_spec = pl.BlockSpec(
block_shape=(num_compute_wgs * blk_m, blk_n),
index_map=lambda i, j: (i, j),
)
def tiled_add_kernel(idx, x_smem, y_smem, o_smem, *consumed_barriers):
wg_idx = lax.axis_index("wg")
m_slice = pl.ds(wg_idx * blk_m, blk_m)
o_smem[m_slice] = x_smem[m_slice] + y_smem[m_slice]
if manual_consumed_barriers:
@pl.when(jnp.logical_or(idx[0] != 0, idx[1] != 0))
def _signal_consumed():
for b in consumed_barriers:
plgpu.barrier_arrive(b)
def pipeline(*gmem_refs):
grid = (m // (num_compute_wgs * blk_m), n // blk_n)
if not static:
grid = jax.tree.map(jnp.asarray, grid)
return mgpu_pipeline.emit_pipeline_warp_specialized(
tiled_add_kernel,
grid=grid,
max_concurrent_steps=max_concurrent_steps,
manual_consumed_barriers=manual_consumed_barriers,
num_compute_wgs=num_compute_wgs,
memory_registers=40,
wg_axis="wg",
in_specs=[spec, spec],
out_specs=[out_spec],
)(*gmem_refs)
kernel = self.kernel(
pipeline,
out_shape=jax.ShapeDtypeStruct((m, n), jnp.float32),
compiler_params=plgpu.CompilerParams(approx_math=True),
grid=(1,),
grid_names=("_",),
num_threads=num_compute_wgs + 1,
thread_name="wg",
)
x = jax.random.uniform(jax.random.key(0), (m, n), dtype=jnp.float32)
y = jax.random.uniform(jax.random.key(1), (m, n), dtype=jnp.float32)
np.testing.assert_allclose(kernel(x, y), x + y, atol=1e-4)
def test_different_delay_release(self):
self.skip_if_wg_semantics() # Crashes!
m, n = 128, 64
blk_m, blk_n = 32, 32
in_specs = [
plgpu.BlockSpec(
block_shape=(blk_m, blk_n),
index_map=lambda i, j: (i, j),
delay_release=delay,
)
for delay in range(3)
]
out_spec = pl.BlockSpec(
block_shape=(blk_m, blk_n),
index_map=lambda i, j: (i, j),
)
def tiled_add_kernel(_, x_smem, y_smem, z_smem, o_smem):
o_smem[...] = x_smem[...] + y_smem[...] + z_smem[...]
def pipeline(*gmem_refs):
grid = (m // blk_m, n // blk_n)
return mgpu_pipeline.emit_pipeline(
tiled_add_kernel,
grid=grid,
max_concurrent_steps=4,
in_specs=in_specs,
out_specs=[out_spec],
)(*gmem_refs)
kernel = self.kernel(
pipeline,
out_shape=jax.ShapeDtypeStruct((m, n), jnp.float32),
grid=(1,),
grid_names=("_",)
)
x = jax.random.uniform(jax.random.key(0), (m, n), dtype=jnp.float32)
y = jax.random.uniform(jax.random.key(1), (m, n), dtype=jnp.float32)
z = jax.random.uniform(jax.random.key(3), (m, n), dtype=jnp.float32)
np.testing.assert_allclose(kernel(x, y, z), x + y + z)
@parameterized.product(
delay_release=[0, 1],
)
def test_repeat(self, delay_release):
num_steps = 4
def kernel_body(_, x_smem, o_smem):
o_smem[...] = x_smem[...] + 1.0
def kernel(x_gmem, o_gmem):
in_specs = [
plgpu.BlockSpec((64, 64), lambda i: (0, i), delay_release=delay_release)
]
out_specs = [plgpu.BlockSpec((64, 64), lambda i: (0, i))]
for _ in range(3):
plgpu.emit_pipeline_warp_specialized(
kernel_body,
in_specs=in_specs,
out_specs=out_specs,
grid=(num_steps,),
max_concurrent_steps=2,
num_compute_wgs=1,
memory_registers=40,
wg_axis="wg",
)(x_gmem, o_gmem)
x = jnp.arange(64 * num_steps * 64)
x = x.reshape(-1, num_steps * 64).astype(jnp.float32)
kernel_fn = self.kernel(
kernel,
out_shape=jax.ShapeDtypeStruct(x.shape, x.dtype),
grid=(1,),
grid_names=("_",),
num_threads=2,
thread_name="wg",
)
np.testing.assert_array_equal(kernel_fn(x), x + 1.0)
@parameterized.parameters((False,), (True,))
def test_stationary_input(self, flip):
self.skip_if_wg_semantics()
m = n = 256
blk_m = blk_n = 64
def add_kernel(_, x_smem, y_smem, o_smem):
if flip:
x_smem, y_smem = y_smem, x_smem
o_smem[...] = x_smem[...] + y_smem[...]
def body(*gmem_refs):
mgpu_pipeline.emit_pipeline_warp_specialized(
add_kernel,
grid=(m // blk_m, n // blk_n),
memory_registers=40,
max_concurrent_steps=2,
num_compute_wgs=1,
wg_axis="wg",
in_specs=[
pl.BlockSpec(
block_shape=(blk_m, blk_n), index_map=lambda i, j: (i, j)
),
pl.BlockSpec(
block_shape=(blk_m, blk_n), index_map=lambda i, j: (0, 0)
)
][::(-1 if flip else 1)],
out_specs=[
pl.BlockSpec(
block_shape=(blk_m, blk_n), index_map=lambda i, j: (i, j)
),
],
)(*gmem_refs)
kernel = self.kernel(
body,
out_shape=jax.ShapeDtypeStruct((m, n), jnp.float16),
grid=(1,),
grid_names=("_",),
num_threads=2,
thread_name="wg",
)
x = jax.random.uniform(jax.random.key(1), (m, n), dtype=jnp.float16)
y = jax.random.uniform(jax.random.key(2), (blk_m, blk_n), dtype=jnp.float16)
ref = x + np.tile(y, (m // blk_m, n // blk_n))
if flip:
x, y = y, x
# TODO(apaszke,justinfu): Fix the bug (this test freezes) and remove this restriction.
with self.assertRaisesRegex(
NotImplementedError,
"Only inputs with a dependency on the grid are supported.",
):
out = kernel(x, y)
np.testing.assert_array_equal(out, ref)
def test_no_output(self):
m = n = 256
blk_m = blk_n = 64
def body(x_ref, o_ref, o_scratch, barrier):
@pl.when(lax.axis_index("wg") == 0)
def _():
o_scratch[...] = jnp.zeros_like(o_scratch)
# Wait for scratch to be initialized
plgpu.barrier_arrive(barrier)
plgpu.barrier_wait(barrier)
# Make sure we can run the pipeline many times. This also introduces
# extra jitter into warp scheduling and has uncovered bugs in the past.
@pl.loop(0, 10)
def _pipeline_loop(_):
def add(_, x_smem):
slc = pl.ds(lax.axis_index("wg") * (blk_m // 2), blk_m // 2)
o_scratch[slc] += x_smem[slc]
mgpu_pipeline.emit_pipeline_warp_specialized(
add,
grid=(m // blk_m, n // blk_n),
memory_registers=40,
max_concurrent_steps=2,
num_compute_wgs=2,
wg_axis="wg",
in_specs=[
pl.BlockSpec(
block_shape=(blk_m, blk_n), index_map=lambda i, j: (i, j)
),
]
)(x_ref)
# Wait for both compute WGs to finish initializing the output
plgpu.barrier_arrive(barrier)
plgpu.barrier_wait(barrier)
@pl.when(lax.axis_index("wg") == 0)
def _():
plgpu.copy_smem_to_gmem(o_scratch, o_ref)
plgpu.wait_smem_to_gmem(0, wait_read_only=True)
kernel = self.kernel(
body,
out_shape=jax.ShapeDtypeStruct((blk_m, blk_n), jnp.float32),
num_threads=3,
thread_name="wg",
scratch_shapes=[
plgpu.SMEM((blk_m, blk_n), jnp.float32),
plgpu.Barrier(num_arrivals=3),
],
)
x = jax.random.uniform(jax.random.key(1234), (m, n), dtype=jnp.float32)
ref = 10 * x.reshape(m // blk_m, blk_m, n // blk_n, blk_n).sum((0, 2))
np.testing.assert_allclose(kernel(x), ref, rtol=5e-6)
@parameterized.product(manual_consumed_barriers=[False, True])
def test_pipelined_pipeline(self, manual_consumed_barriers):
m = n = 512
x = jax.random.randint(jax.random.key(0), (m, n), -10, 15, dtype=jnp.int32)
blk_m = blk_n = 64
def body(x_ref, out_gmem_ref, out_ref):
wg_idx = jax.lax.axis_index("wg")
@pl.when(wg_idx == 0)
def _zero_output():
out_ref[...] = jnp.zeros_like(out_ref)
def pipeline_body(_, x_smem, *consumed_barriers):
out_ref[...] += x_smem[...]
if manual_consumed_barriers:
[x_barrier] = consumed_barriers
plgpu.barrier_arrive(x_barrier)
spec = pl.BlockSpec(
block_shape=(blk_m, blk_n), index_map=lambda i, j: (i, j)
)
pipeline = functools.partial(
mgpu_pipeline.emit_pipeline_warp_specialized,
body=pipeline_body,
grid=(m // blk_m, n // blk_n),
memory_registers=40,
max_concurrent_steps=2,
num_compute_wgs=1,
wg_axis="wg",
manual_consumed_barriers=manual_consumed_barriers,
in_specs=[spec],
)
@functools.partial(
pl.run_scoped,
allocs=pipeline(pipeline_state=None).get_allocations(x_ref),
collective_axes="wg",
)
def _pipeline_scope(allocs):
@pl.loop(0, 2)
def _outer_loop(_):
@pl.loop(0, 4)
def _pipeline_loop(i):
state = plgpu.PipelinePipeline.START
state = jnp.where(i > 0, plgpu.PipelinePipeline.STEADY, state)
state = jnp.where(i == 3, plgpu.PipelinePipeline.STOP, state)
pipeline(pipeline_state=state)(x_ref, allocations=allocs)
# Make sure we have properly quiesced the pipeline.
pipeline(pipeline_state=None)(x_ref, allocations=allocs)
@pl.when(wg_idx == 0)
def _store_out():
out_gmem_ref[...] = out_ref[...]
kernel = self.kernel(
body,
out_shape=jax.ShapeDtypeStruct((blk_m, blk_n), jnp.int32),
compiler_params=plgpu.CompilerParams(approx_math=True),
scratch_shapes=[plgpu.SMEM((blk_m, blk_n), jnp.int32)],
grid=(1,),
grid_names=("_",),
num_threads=2,
thread_name="wg",
)
out = kernel(x)
np.testing.assert_array_equal(
out, x.reshape(m // blk_m, blk_m, n // blk_n, blk_n).sum((0, 2)) * 10
)
@parameterized.product(manual_consumed_barriers=[False, True])
def test_pipeline_with_manual_allocation(self, manual_consumed_barriers):
m = n = 512
x = jax.random.randint(jax.random.key(4), (m, n), -10, 15, dtype=jnp.int32)
y = jax.random.randint(jax.random.key(5), (m, n), -10, 15, dtype=jnp.int32)
blk_m = blk_n = 64
def body(x_ref, y_ref, out_gmem_ref, out_ref):
wg_idx = jax.lax.axis_index("wg")
@pl.when(wg_idx == 0)
def _zero_output():
out_ref[...] = jnp.zeros_like(out_ref)
def pipeline_body(_, x_smem, y_smem, *consumed_barriers):
out_ref[...] += x_smem[...] + y_smem[...]
for b in consumed_barriers:
plgpu.barrier_arrive(b)
spec = pl.BlockSpec(
block_shape=(blk_m, blk_n), index_map=lambda i, j: (i, j)
)
pipeline = mgpu_pipeline.emit_pipeline_warp_specialized(
body=pipeline_body,
grid=(m // blk_m, n // blk_n),
memory_registers=40,
max_concurrent_steps=2,
num_compute_wgs=1,
wg_axis="wg",
manual_consumed_barriers=manual_consumed_barriers,
in_specs=[spec, spec],
)
@functools.partial(
pl.run_scoped,
allocs=pipeline.get_allocations(x_ref, y_ref),
collective_axes="wg",
)
def _alloc_scope(allocs):
@pl.loop(0, 4)
def _outer_loop(_):
pipeline(x_ref, y_ref, allocations=allocs)
@pl.when(wg_idx == 0)
def _store_out():
out_gmem_ref[...] = out_ref[...]
kernel = self.kernel(
body,
out_shape=jax.ShapeDtypeStruct((blk_m, blk_n), jnp.int32),
compiler_params=plgpu.CompilerParams(approx_math=True),
scratch_shapes=[plgpu.SMEM((blk_m, blk_n), jnp.int32)],
grid=(1,),
grid_names=("_",),
num_threads=2,
thread_name="wg",
)
np.testing.assert_array_equal(
kernel(x, y),
(x + y).reshape(m // blk_m, blk_m, n // blk_n, blk_n).sum((0, 2)) * 4,
)
@jtu.thread_unsafe_test() # Modifies ``os.environ``.
def test_collective(self):
num_steps = 4
def kernel(x_gmem, o_gmem):
cluster_idx = lax.axis_index("cluster")
in_specs = [
plgpu.BlockSpec(
(64, 64), lambda i: (0, i), collective_axes=("cluster",)
)
]
out_specs = [plgpu.BlockSpec((1, 64, 64), lambda i: (cluster_idx, 0, i))]
# Run a few times to make sure we leave barriers in a good state.
for _ in range(3):
def pipeline_body(_, x_smem, o_smem):
o_smem[0, ...] = x_smem[...] + 1.0
plgpu.emit_pipeline_warp_specialized(
pipeline_body,
in_specs=in_specs,
out_specs=out_specs,
grid=(num_steps,),
max_concurrent_steps=2,
num_compute_wgs=1,
memory_registers=40,
wg_axis="wg",
)(x_gmem, o_gmem)
x = jnp.arange(64 * num_steps * 64)
x = x.reshape(-1, num_steps * 64).astype(jnp.float32)
kernel_fn = self.kernel(
kernel,
out_shape=jax.ShapeDtypeStruct((2, *x.shape), x.dtype),
num_threads=2,
thread_name="wg",
cluster=(2,),
cluster_names=("cluster",)
)
with jtu.set_env(MOSAIC_GPU_DUMP_PTX="1"), self.capture_stdout() as ptx:
y = jax.block_until_ready(kernel_fn(x))
self.assertIn(
"cp.async.bulk.tensor.2d.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster",
ptx(),
)
np.testing.assert_array_equal(y, np.stack([x + 1.0, x + 1.0]))
|
WarpSpecializedPipelineTest
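Distilled from the tests above, a minimal warp-specialized pipeline call looks roughly like the sketch below. The keyword arguments mirror the ones exercised in the tests; the kernel body and block shape are illustrative, and the pipeline must still be invoked from inside a Pallas kernel launched with num_threads=num_compute_wgs + 1 and thread_name="wg":

from jax.experimental.pallas import mosaic_gpu as plgpu

def copy_body(_, x_smem, o_smem):
    # One pipeline step: copy the current input block to the output block.
    o_smem[...] = x_smem[...]

pipeline = plgpu.emit_pipeline_warp_specialized(
    copy_body,
    grid=(4,),
    in_specs=[plgpu.BlockSpec((64, 64), lambda i: (0, i))],
    out_specs=[plgpu.BlockSpec((64, 64), lambda i: (0, i))],
    max_concurrent_steps=2,
    num_compute_wgs=1,
    memory_registers=40,
    wg_axis="wg",
)
# Inside the kernel body: pipeline(x_gmem, o_gmem)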
|
python
|
ray-project__ray
|
rllib/algorithms/ppo/default_ppo_rl_module.py
|
{
"start": 413,
"end": 2543
}
|
class ____(RLModule, InferenceOnlyAPI, ValueFunctionAPI, abc.ABC):
"""Default RLModule used by PPO, if user does not specify a custom RLModule.
Users who want to train their RLModules with PPO may implement any RLModule
(or TorchRLModule) subclass as long as the custom class also implements the
`ValueFunctionAPI` (see ray.rllib.core.rl_module.apis.value_function_api.py)
"""
@override(RLModule)
def setup(self):
# __sphinx_doc_begin__
# If we have a stateful model, states for the critic need to be collected
        # during sampling and `inference_only` needs to be `False`. Note that at
        # this point the encoder is not built yet, and therefore `is_stateful()`
        # does not work.
is_stateful = isinstance(
self.catalog.actor_critic_encoder_config.base_encoder_config,
RecurrentEncoderConfig,
)
if is_stateful:
self.inference_only = False
# If this is an `inference_only` Module, we'll have to pass this information
# to the encoder config as well.
if self.inference_only and self.framework == "torch":
self.catalog.actor_critic_encoder_config.inference_only = True
# Build models from catalog.
self.encoder = self.catalog.build_actor_critic_encoder(framework=self.framework)
self.pi = self.catalog.build_pi_head(framework=self.framework)
self.vf = self.catalog.build_vf_head(framework=self.framework)
# __sphinx_doc_end__
@override(RLModule)
def get_initial_state(self) -> dict:
if hasattr(self.encoder, "get_initial_state"):
return self.encoder.get_initial_state()
else:
return {}
@OverrideToImplementCustomLogic_CallToSuperRecommended
@override(InferenceOnlyAPI)
def get_non_inference_attributes(self) -> List[str]:
"""Return attributes, which are NOT inference-only (only used for training)."""
return ["vf"] + (
[]
if self.model_config.get("vf_share_layers")
else ["encoder.critic_encoder"]
)
|
DefaultPPORLModule
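The docstring above implies the minimal contract for a user-supplied module: subclass an RLModule and implement the ValueFunctionAPI. A rough sketch, assuming compute_values is the abstract method that API requires and a discrete action space; the layer sizes and the trunk/head split are placeholders:

import torch
from ray.rllib.core.rl_module.apis import ValueFunctionAPI
from ray.rllib.core.rl_module.torch import TorchRLModule

class MyPPOModule(TorchRLModule, ValueFunctionAPI):
    def setup(self):
        obs_dim = self.observation_space.shape[0]
        self.trunk = torch.nn.Linear(obs_dim, 64)
        self.pi_head = torch.nn.Linear(64, self.action_space.n)
        self.vf_head = torch.nn.Linear(64, 1)

    def _forward(self, batch, **kwargs):
        feats = torch.relu(self.trunk(batch["obs"]))
        return {"action_dist_inputs": self.pi_head(feats)}

    def compute_values(self, batch, embeddings=None):
        feats = torch.relu(self.trunk(batch["obs"]))
        return self.vf_head(feats).squeeze(-1)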
|
python
|
davidhalter__jedi
|
test/completion/dynamic_params.py
|
{
"start": 1294,
"end": 1368
}
|
class ____():
def __init__(self, a):
#? str()
a
A("s")
|
A
|
python
|
pytest-dev__pytest
|
testing/code/test_code.py
|
{
"start": 4690,
"end": 5562
}
|
class ____:
def test_not_raise_exception_with_mixed_encoding(self, tw_mock) -> None:
args = [("unicode_string", "São Paulo"), ("utf8_string", b"S\xc3\xa3o Paulo")]
r = ReprFuncArgs(args)
r.toterminal(tw_mock)
assert (
tw_mock.lines[0]
== r"unicode_string = São Paulo, utf8_string = b'S\xc3\xa3o Paulo'"
)
def test_ExceptionChainRepr():
"""Test ExceptionChainRepr, especially with regard to being hashable."""
try:
raise ValueError()
except ValueError:
excinfo1 = ExceptionInfo.from_current()
excinfo2 = ExceptionInfo.from_current()
repr1 = excinfo1.getrepr()
repr2 = excinfo2.getrepr()
assert repr1 != repr2
assert isinstance(repr1, ExceptionChainRepr)
assert hash(repr1) != hash(repr2)
assert repr1 is not excinfo1.getrepr()
|
TestReprFuncArgs
|
python
|
kamyu104__LeetCode-Solutions
|
Python/evaluate-valid-expressions.py
|
{
"start": 37,
"end": 932
}
|
class ____(object):
def evaluateExpression(self, expression):
"""
:type expression: str
:rtype: int
"""
        LOOKUP = {
            "add": lambda a, b: a + b,
            "sub": lambda a, b: a - b,
            "mul": lambda a, b: a * b,
            "div": lambda a, b: a // b,
        }
SYMBOLS = "(,)"
stk, curr = [[]], []
for x in expression:
if x not in SYMBOLS:
curr.append(x)
continue
if x == '(':
stk.append(["".join(curr)])
curr = []
continue
if curr:
stk[-1].append(int("".join(curr)))
curr = []
if x != ')':
continue
op, a, b = stk.pop()
stk[-1].append(LOOKUP[op](a, b))
return stk[0][0] if stk[0] else int("".join(curr))
|
Solution
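A quick sanity check of the stack machine above; nested calls reduce innermost-first, and a bare literal falls through to the final int() conversion:

solver = Solution()
print(solver.evaluateExpression("add(1,2)"))         # 3
print(solver.evaluateExpression("mul(add(1,2),3)"))  # 9
print(solver.evaluateExpression("7"))                # 7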
|
python
|
scipy__scipy
|
scipy/linalg/tests/test_solvers.py
|
{
"start": 30854,
"end": 34598
}
|
class ____:
cases = [
# empty cases
(np.empty((0, 0)),
np.empty((0, 0)),
np.empty((0, 0))),
(np.empty((0, 0)),
np.empty((2, 2)),
np.empty((0, 2))),
(np.empty((2, 2)),
np.empty((0, 0)),
np.empty((2, 0))),
# a, b, c all real.
(np.array([[1, 2], [0, 4]]),
np.array([[5, 6], [0, 8]]),
np.array([[9, 10], [11, 12]])),
# a, b, c all real, 4x4. a and b have non-trivial 2x2 blocks in their
# quasi-triangular form.
(np.array([[1.0, 0, 0, 0],
[0, 1.0, 2.0, 0.0],
[0, 0, 3.0, -4],
[0, 0, 2, 5]]),
np.array([[2.0, 0, 0, 1.0],
[0, 1.0, 0.0, 0.0],
[0, 0, 1.0, -1],
[0, 0, 1, 1]]),
np.array([[1.0, 0, 0, 0],
[0, 1.0, 0, 0],
[0, 0, 1.0, 0],
[0, 0, 0, 1.0]])),
# a, b, c all complex.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 2j], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a and b real; c complex.
(np.array([[1.0, 2.0], [3.0, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a and c complex; b real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a complex; b and c real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0, 2.0], [-1.0, 2.0]])),
# not square matrices, real
(np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]),
np.array([[2, 3], [4, 5]]),
np.array([[1, 2], [3, 4], [5, 6]])),
# not square matrices, complex
(np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]),
np.array([[2, 3], [4, 5-1j]]),
np.array([[1, 2j], [3, 4j], [5j, 6+7j]])),
]
def check_case(self, a, b, c):
x = solve_sylvester(a, b, c)
assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c)
def test_cases(self):
for case in self.cases:
self.check_case(case[0], case[1], case[2])
def test_trivial(self):
a = np.array([[1.0, 0.0], [0.0, 1.0]])
b = np.array([[1.0]])
c = np.array([2.0, 2.0]).reshape(-1, 1)
x = solve_sylvester(a, b, c)
assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1, 1))
# Feel free to adjust this to test fewer dtypes or random selections rather than
# the Cartesian product. It doesn't take very long to test all combinations,
# though, so we'll start there and trim it down as we see fit.
@pytest.mark.parametrize("dtype_a", dtypes)
@pytest.mark.parametrize("dtype_b", dtypes)
@pytest.mark.parametrize("dtype_q", dtypes)
@pytest.mark.parametrize("m", [0, 3])
@pytest.mark.parametrize("n", [0, 3])
def test_size_0(self, m, n, dtype_a, dtype_b, dtype_q):
if m == n != 0:
pytest.skip('m = n != 0 is not a case that needs to be tested here.')
rng = np.random.default_rng(598435298262546)
a = np.zeros((m, m), dtype=dtype_a)
b = np.zeros((n, n), dtype=dtype_b)
q = np.zeros((m, n), dtype=dtype_q)
res = solve_sylvester(a, b, q)
a = (rng.random((5, 5))*100).astype(dtype_a)
b = (rng.random((6, 6))*100).astype(dtype_b)
q = (rng.random((5, 6))*100).astype(dtype_q)
ref = solve_sylvester(a, b, q)
assert res.shape == (m, n)
assert res.dtype == ref.dtype
|
TestSolveSylvester
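Outside the test harness, the call pattern being verified is simply the following (reusing the first real 2x2 case from `cases`):

import numpy as np
from scipy.linalg import solve_sylvester

a = np.array([[1., 2.], [0., 4.]])
b = np.array([[5., 6.], [0., 8.]])
c = np.array([[9., 10.], [11., 12.]])

x = solve_sylvester(a, b, c)          # solves a @ x + x @ b = c
assert np.allclose(a @ x + x @ b, c)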
|
python
|
redis__redis-py
|
tests/test_asyncio/test_pipeline.py
|
{
"start": 218,
"end": 15961
}
|
class ____:
async def test_pipeline_is_true(self, r):
"""Ensure pipeline instances are not false-y"""
async with r.pipeline() as pipe:
assert pipe
async def test_pipeline(self, r):
async with r.pipeline() as pipe:
(
pipe.set("a", "a1")
.get("a")
.zadd("z", {"z1": 1})
.zadd("z", {"z2": 4})
.zincrby("z", 1, "z1")
)
assert await pipe.execute() == [
True,
b"a1",
True,
True,
2.0,
]
async def test_pipeline_memoryview(self, r):
async with r.pipeline() as pipe:
(pipe.set("a", memoryview(b"a1")).get("a"))
assert await pipe.execute() == [True, b"a1"]
async def test_pipeline_length(self, r):
async with r.pipeline() as pipe:
# Initially empty.
assert len(pipe) == 0
# Fill 'er up!
pipe.set("a", "a1").set("b", "b1").set("c", "c1")
assert len(pipe) == 3
# Execute calls reset(), so empty once again.
await pipe.execute()
assert len(pipe) == 0
async def test_pipeline_no_transaction(self, r):
async with r.pipeline(transaction=False) as pipe:
pipe.set("a", "a1").set("b", "b1").set("c", "c1")
assert await pipe.execute() == [True, True, True]
assert await r.get("a") == b"a1"
assert await r.get("b") == b"b1"
assert await r.get("c") == b"c1"
@pytest.mark.onlynoncluster
async def test_pipeline_no_transaction_watch(self, r):
await r.set("a", 0)
async with r.pipeline(transaction=False) as pipe:
await pipe.watch("a")
a = await pipe.get("a")
pipe.multi()
pipe.set("a", int(a) + 1)
assert await pipe.execute() == [True]
@pytest.mark.onlynoncluster
async def test_pipeline_no_transaction_watch_failure(self, r):
await r.set("a", 0)
async with r.pipeline(transaction=False) as pipe:
await pipe.watch("a")
a = await pipe.get("a")
await r.set("a", "bad")
pipe.multi()
pipe.set("a", int(a) + 1)
with pytest.raises(redis.WatchError):
await pipe.execute()
assert await r.get("a") == b"bad"
async def test_exec_error_in_response(self, r):
"""
        An invalid pipeline command at exec time adds the exception instance
        to the list of returned values.
"""
await r.set("c", "a")
async with r.pipeline() as pipe:
pipe.set("a", 1).set("b", 2).lpush("c", 3).set("d", 4)
result = await pipe.execute(raise_on_error=False)
assert result[0]
assert await r.get("a") == b"1"
assert result[1]
assert await r.get("b") == b"2"
# we can't lpush to a key that's a string value, so this should
# be a ResponseError exception
assert isinstance(result[2], redis.ResponseError)
assert await r.get("c") == b"a"
# since this isn't a transaction, the other commands after the
# error are still executed
assert result[3]
assert await r.get("d") == b"4"
# make sure the pipe was restored to a working state
assert await pipe.set("z", "zzz").execute() == [True]
assert await r.get("z") == b"zzz"
async def test_exec_error_raised(self, r):
await r.set("c", "a")
async with r.pipeline() as pipe:
pipe.set("a", 1).set("b", 2).lpush("c", 3).set("d", 4)
with pytest.raises(redis.ResponseError) as ex:
await pipe.execute()
assert str(ex.value).startswith(
"Command # 3 (LPUSH c 3) of pipeline caused error: "
)
# make sure the pipe was restored to a working state
assert await pipe.set("z", "zzz").execute() == [True]
assert await r.get("z") == b"zzz"
@pytest.mark.onlynoncluster
async def test_transaction_with_empty_error_command(self, r):
"""
        Commands with custom EMPTY_ERROR functionality return their default
        values in the pipeline regardless of the raise_on_error preference.
"""
for error_switch in (True, False):
async with r.pipeline() as pipe:
pipe.set("a", 1).mget([]).set("c", 3)
result = await pipe.execute(raise_on_error=error_switch)
assert result[0]
assert result[1] == []
assert result[2]
@pytest.mark.onlynoncluster
async def test_pipeline_with_empty_error_command(self, r):
"""
        Commands with custom EMPTY_ERROR functionality return their default
        values in the pipeline regardless of the raise_on_error preference.
"""
for error_switch in (True, False):
async with r.pipeline(transaction=False) as pipe:
pipe.set("a", 1).mget([]).set("c", 3)
result = await pipe.execute(raise_on_error=error_switch)
assert result[0]
assert result[1] == []
assert result[2]
async def test_parse_error_raised(self, r):
async with r.pipeline() as pipe:
# the zrem is invalid because we don't pass any keys to it
pipe.set("a", 1).zrem("b").set("b", 2)
with pytest.raises(redis.ResponseError) as ex:
await pipe.execute()
assert str(ex.value).startswith(
"Command # 2 (ZREM b) of pipeline caused error: "
)
# make sure the pipe was restored to a working state
assert await pipe.set("z", "zzz").execute() == [True]
assert await r.get("z") == b"zzz"
@pytest.mark.onlynoncluster
async def test_parse_error_raised_transaction(self, r):
async with r.pipeline() as pipe:
pipe.multi()
# the zrem is invalid because we don't pass any keys to it
pipe.set("a", 1).zrem("b").set("b", 2)
with pytest.raises(redis.ResponseError) as ex:
await pipe.execute()
assert str(ex.value).startswith(
"Command # 2 (ZREM b) of pipeline caused error: "
)
# make sure the pipe was restored to a working state
assert await pipe.set("z", "zzz").execute() == [True]
assert await r.get("z") == b"zzz"
@pytest.mark.onlynoncluster
async def test_watch_succeed(self, r):
await r.set("a", 1)
await r.set("b", 2)
async with r.pipeline() as pipe:
await pipe.watch("a", "b")
assert pipe.watching
a_value = await pipe.get("a")
b_value = await pipe.get("b")
assert a_value == b"1"
assert b_value == b"2"
pipe.multi()
pipe.set("c", 3)
assert await pipe.execute() == [True]
assert not pipe.watching
@pytest.mark.onlynoncluster
async def test_watch_failure(self, r):
await r.set("a", 1)
await r.set("b", 2)
async with r.pipeline() as pipe:
await pipe.watch("a", "b")
await r.set("b", 3)
pipe.multi()
pipe.get("a")
with pytest.raises(redis.WatchError):
await pipe.execute()
assert not pipe.watching
@pytest.mark.onlynoncluster
async def test_watch_failure_in_empty_transaction(self, r):
await r.set("a", 1)
await r.set("b", 2)
async with r.pipeline() as pipe:
await pipe.watch("a", "b")
await r.set("b", 3)
pipe.multi()
with pytest.raises(redis.WatchError):
await pipe.execute()
assert not pipe.watching
@pytest.mark.onlynoncluster
async def test_unwatch(self, r):
await r.set("a", 1)
await r.set("b", 2)
async with r.pipeline() as pipe:
await pipe.watch("a", "b")
await r.set("b", 3)
await pipe.unwatch()
assert not pipe.watching
pipe.get("a")
assert await pipe.execute() == [b"1"]
@pytest.mark.onlynoncluster
async def test_watch_exec_no_unwatch(self, r):
await r.set("a", 1)
await r.set("b", 2)
async with r.monitor() as m:
async with r.pipeline() as pipe:
await pipe.watch("a", "b")
assert pipe.watching
a_value = await pipe.get("a")
b_value = await pipe.get("b")
assert a_value == b"1"
assert b_value == b"2"
pipe.multi()
pipe.set("c", 3)
assert await pipe.execute() == [True]
assert not pipe.watching
unwatch_command = await wait_for_command(r, m, "UNWATCH")
assert unwatch_command is None, "should not send UNWATCH"
@pytest.mark.onlynoncluster
async def test_watch_reset_unwatch(self, r):
await r.set("a", 1)
async with r.monitor() as m:
async with r.pipeline() as pipe:
await pipe.watch("a")
assert pipe.watching
await pipe.reset()
assert not pipe.watching
unwatch_command = await wait_for_command(r, m, "UNWATCH")
assert unwatch_command is not None
assert unwatch_command["command"] == "UNWATCH"
@pytest.mark.onlynoncluster
async def test_aclose_is_reset(self, r):
async with r.pipeline() as pipe:
called = 0
async def mock_reset():
nonlocal called
called += 1
with mock.patch.object(pipe, "reset", mock_reset):
await pipe.aclose()
assert called == 1
@pytest.mark.onlynoncluster
async def test_aclosing(self, r):
async with aclosing(r.pipeline()):
pass
@pytest.mark.onlynoncluster
async def test_transaction_callable(self, r):
await r.set("a", 1)
await r.set("b", 2)
has_run = []
async def my_transaction(pipe):
a_value = await pipe.get("a")
assert a_value in (b"1", b"2")
b_value = await pipe.get("b")
assert b_value == b"2"
            # Run-once logic: increment "a" so a WatchError is raised,
            # forcing the whole transaction to run again. This increments
            # "a" exactly once, to "2".
if not has_run:
await r.incr("a")
has_run.append("it has")
pipe.multi()
pipe.set("c", int(a_value) + int(b_value))
result = await r.transaction(my_transaction, "a", "b")
assert result == [True]
assert await r.get("c") == b"4"
@pytest.mark.onlynoncluster
async def test_transaction_callable_returns_value_from_callable(self, r):
async def callback(pipe):
# No need to do anything here since we only want the return value
return "a"
res = await r.transaction(callback, "my-key", value_from_callable=True)
assert res == "a"
async def test_exec_error_in_no_transaction_pipeline(self, r):
await r.set("a", 1)
async with r.pipeline(transaction=False) as pipe:
pipe.llen("a")
pipe.expire("a", 100)
with pytest.raises(redis.ResponseError) as ex:
await pipe.execute()
assert str(ex.value).startswith(
"Command # 1 (LLEN a) of pipeline caused error: "
)
assert await r.get("a") == b"1"
async def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r):
key = chr(3456) + "abcd" + chr(3421)
await r.set(key, 1)
async with r.pipeline(transaction=False) as pipe:
pipe.llen(key)
pipe.expire(key, 100)
with pytest.raises(redis.ResponseError) as ex:
await pipe.execute()
expected = f"Command # 1 (LLEN {key}) of pipeline caused error: "
assert str(ex.value).startswith(expected)
assert await r.get(key) == b"1"
async def test_exec_error_in_pipeline_truncated(self, r):
key = "a" * 50
a_value = "a" * 20
b_value = "b" * 20
await r.set(key, 1)
async with r.pipeline(transaction=False) as pipe:
pipe.hset(key, mapping={"field_a": a_value, "field_b": b_value})
pipe.expire(key, 100)
with pytest.raises(redis.ResponseError) as ex:
await pipe.execute()
expected = f"Command # 1 (HSET {key} field_a {a_value} field_b...) of pipeline caused error: "
assert str(ex.value).startswith(expected)
async def test_pipeline_with_bitfield(self, r):
async with r.pipeline() as pipe:
pipe.set("a", "1")
bf = pipe.bitfield("b")
pipe2 = (
bf.set("u8", 8, 255)
.get("u8", 0)
.get("u4", 8) # 1111
.get("u4", 12) # 1111
.get("u4", 13) # 1110
.execute()
)
pipe.get("a")
response = await pipe.execute()
assert pipe == pipe2
assert response == [True, [0, 0, 15, 15, 14], b"1"]
async def test_pipeline_get(self, r):
await r.set("a", "a1")
async with r.pipeline() as pipe:
pipe.get("a")
assert await pipe.execute() == [b"a1"]
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.0.0")
async def test_pipeline_discard(self, r):
# empty pipeline should raise an error
async with r.pipeline() as pipe:
pipe.set("key", "someval")
await pipe.discard()
with pytest.raises(redis.exceptions.ResponseError):
await pipe.execute()
# setting a pipeline and discarding should do the same
async with r.pipeline() as pipe:
pipe.set("key", "someval")
pipe.set("someotherkey", "val")
response = await pipe.execute()
pipe.set("key", "another value!")
await pipe.discard()
pipe.set("key", "another vae!")
with pytest.raises(redis.exceptions.ResponseError):
await pipe.execute()
pipe.set("foo", "bar")
response = await pipe.execute()
assert response[0]
assert await r.get("foo") == b"bar"
@pytest.mark.onlynoncluster
async def test_send_set_commands_over_async_pipeline(self, r: redis.asyncio.Redis):
pipe = r.pipeline()
pipe.hset("hash:1", "foo", "bar")
pipe.hset("hash:1", "bar", "foo")
pipe.hset("hash:1", "baz", "bar")
pipe.hgetall("hash:1")
resp = await pipe.execute()
assert resp == [1, 1, 1, {b"bar": b"foo", b"baz": b"bar", b"foo": b"bar"}]
@pytest.mark.onlycluster
@skip_if_server_version_lt("8.3.224")
async def test_pipeline_with_msetex(self, r):
p = r.pipeline()
with pytest.raises(RedisClusterException):
p.msetex({"key1": "value1", "key2": "value2"}, ex=1000)
p_transaction = r.pipeline(transaction=True)
with pytest.raises(RedisClusterException):
p_transaction.msetex(
{"key1_transaction": "value1", "key2_transaction": "value2"}, ex=10
)
|
TestPipeline
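Stripped of fixtures, the core usage pattern these tests exercise is the one below; it assumes a Redis server on the default localhost port:

import asyncio
import redis.asyncio as redis

async def main():
    r = redis.Redis()
    async with r.pipeline(transaction=True) as pipe:
        pipe.set("a", "a1").get("a")   # commands buffer until execute()
        print(await pipe.execute())    # [True, b"a1"]

asyncio.run(main())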
|
python
|
huggingface__transformers
|
tests/quantization/hqq/test_hqq.py
|
{
"start": 3225,
"end": 5289
}
|
class ____(unittest.TestCase):
def tearDown(self):
cleanup()
def test_fp16_quantized_model(self):
"""
Simple LLM model testing fp16
"""
quant_config = HqqConfig(nbits=8, group_size=64)
hqq_runner = HQQLLMRunner(
model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
)
check_hqqlayer(self, hqq_runner.model.model.layers[0].self_attn.v_proj)
check_forward(self, hqq_runner.model)
def test_quantized_model_to_new_device_and_new_dtype(self):
"""
Simple LLM model testing different devices and dtypes
"""
quant_config = HqqConfig(nbits=8, group_size=64)
hqq_runner = HQQLLMRunner(
model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
)
check_hqqlayer(self, hqq_runner.model.model.layers[0].self_attn.v_proj)
check_forward(self, hqq_runner.model)
# Remove `accelerate` hooks to enable move the model to a new device
accelerate.hooks.remove_hook_from_module(hqq_runner.model, recurse=True)
hqq_runner.model.to("cpu", torch.bfloat16)
check_hqqlayer(self, hqq_runner.model.model.layers[0].self_attn.v_proj)
check_forward(self, hqq_runner.model)
hqq_runner.model.to(torch_device)
check_hqqlayer(self, hqq_runner.model.model.layers[0].self_attn.v_proj)
check_forward(self, hqq_runner.model)
def test_quantized_model_fake_weight_dtype(self):
quant_config = HqqConfig(nbits=8, group_size=64)
hqq_runner = HQQLLMRunner(
model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
)
# We use a hack to inject a fake weight to HQQLinear. Check that it works
self.assertEqual(hqq_runner.model.model.layers[0].self_attn.v_proj.weight.dtype, torch.float16)
@slow
@require_torch_accelerator
@require_torch_multi_accelerator
@require_accelerate
@require_hqq
|
HQQTest
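The HQQLLMRunner helper is project-internal, but to the best of my knowledge the transformers call it wraps is the standard quantization-config path; the model id is a placeholder:

import torch
from transformers import AutoModelForCausalLM, HqqConfig

quant_config = HqqConfig(nbits=8, group_size=64)
model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-llm",               # placeholder model id
    quantization_config=quant_config,
    torch_dtype=torch.float16,
    device_map="auto",
)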
|
python
|
django__django
|
tests/model_forms/tests.py
|
{
"start": 121345,
"end": 121428
}
|
class ____(forms.Field):
queryset = 42
|
CustomFieldWithQuerysetButNoLimitChoicesTo
|
python
|
MongoEngine__mongoengine
|
tests/document/test_instance.py
|
{
"start": 124184,
"end": 125954
}
|
class ____(MongoDBTestCase):
def test_object_key_simple_document(self):
class Book(Document):
title = StringField()
book = Book(title="Whatever")
assert book._object_key == {"pk": None}
book.pk = ObjectId()
assert book._object_key == {"pk": book.pk}
def test_object_key_with_custom_primary_key(self):
class Book(Document):
isbn = StringField(primary_key=True)
title = StringField()
book = Book(title="Sapiens")
assert book._object_key == {"pk": None}
book = Book(pk="0062316117")
assert book._object_key == {"pk": "0062316117"}
def test_object_key_in_a_sharded_collection(self):
class Book(Document):
title = StringField()
meta = {"shard_key": ("pk", "title")}
book = Book()
assert book._object_key == {"pk": None, "title": None}
book = Book(pk=ObjectId(), title="Sapiens")
assert book._object_key == {"pk": book.pk, "title": "Sapiens"}
def test_object_key_with_custom_db_field(self):
class Book(Document):
author = StringField(db_field="creator")
meta = {"shard_key": ("pk", "author")}
book = Book(pk=ObjectId(), author="Author")
assert book._object_key == {"pk": book.pk, "author": "Author"}
def test_object_key_with_nested_shard_key(self):
class Author(EmbeddedDocument):
name = StringField()
class Book(Document):
author = EmbeddedDocumentField(Author)
meta = {"shard_key": ("pk", "author.name")}
book = Book(pk=ObjectId(), author=Author(name="Author"))
assert book._object_key == {"pk": book.pk, "author__name": "Author"}
|
ObjectKeyTestCase
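The shard-key behaviour exercised above comes from the meta declaration alone; a minimal self-contained definition (assumes a local MongoDB instance for the connect call):

from bson import ObjectId
from mongoengine import Document, StringField, connect

connect("testdb")

class Book(Document):
    title = StringField()
    meta = {"shard_key": ("pk", "title")}

book = Book(pk=ObjectId(), title="Sapiens")
# _object_key, used internally to build save/delete filters, now
# includes both the primary key and the title shard key.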
|
python
|
scipy__scipy
|
scipy/sparse/tests/test_base.py
|
{
"start": 174984,
"end": 175478
}
|
class ____(_MatrixMixin, TestCSR):
@classmethod
def spcreator(cls, *args, **kwargs):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
return csr_matrix(*args, **kwargs)
def test_spmatrix_subscriptable():
result = csr_matrix[np.int8]
assert isinstance(result, GenericAlias)
assert result.__origin__ is csr_matrix
assert result.__args__ == (np.int8,)
TestCSRMatrix.init_class()
|
TestCSRMatrix
|
python
|
plotly__plotly.py
|
plotly/graph_objs/barpolar/selected/_textfont.py
|
{
"start": 233,
"end": 2425
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "barpolar.selected"
_path_str = "barpolar.selected.textfont"
_valid_props = {"color"}
@property
def color(self):
"""
Sets the text font color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the text font color of selected points.
"""
def __init__(self, arg=None, color=None, **kwargs):
"""
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.barpolar.selected.Textfont`
color
Sets the text font color of selected points.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.barpolar.selected.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.selected.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Textfont
|
python
|
milvus-io__pymilvus
|
tests/test_connections.py
|
{
"start": 12942,
"end": 18533
}
|
class ____:
def test_issue_1196(self):
"""
>>> connections.connect(alias="default11", host="xxx.com", port=19541, user="root", password="12345", secure=True)
>>> connections.add_connection(default={"host": "xxx.com", "port": 19541})
>>> connections.connect("default", user="root", password="12345", secure=True)
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/pymilvus/client/grpc_handler.py", line 114, in _wait_for_channel_ready
grpc.channel_ready_future(self._channel).result(timeout=3)
File "/usr/local/lib/python3.8/dist-packages/grpc/_utilities.py", line 139, in result
self._block(timeout)
File "/usr/local/lib/python3.8/dist-packages/grpc/_utilities.py", line 85, in _block
raise grpc.FutureTimeoutError()
grpc.FutureTimeoutError
"""
alias = self.test_issue_1196.__name__
with mock.patch(f"{mock_prefix}.__init__", return_value=None), mock.patch(f"{mock_prefix}._wait_for_channel_ready", return_value=None):
config = {"alias": alias, "host": "localhost", "port": "19531", "user": "root", "password": 12345, "secure": True}
connections.connect(**config, keep_alive=False)
config = connections.get_connection_addr(alias)
assert config == {"address": 'localhost:19531', "user": 'root', "secure": True}
connections.add_connection(default={"host": "localhost", "port": 19531})
config = connections.get_connection_addr("default")
assert config == {"address": 'localhost:19531', "user": ""}
connections.connect("default", user="root", password="12345", secure=True, keep_alive=False)
config = connections.get_connection_addr("default")
assert config == {"address": 'localhost:19531', "user": 'root', "secure": True}
@pytest.mark.parametrize("uri, db_name, expected_db_name", [
# Issue #2670: URI ending with slash should not overwrite explicit db_name
("http://localhost:19530/", "test_db", "test_db"),
("https://localhost:19530/", "production_db", "production_db"),
("tcp://localhost:19530/", "test_db", "test_db"),
# Issue #2727: db_name passed in URI path should be used when no explicit db_name
("http://localhost:19530/test_db", "", "test_db"),
("http://localhost:19530/production_db", "", "production_db"),
("https://localhost:19530/test_db", "", "test_db"),
# Mixed scenarios: explicit db_name takes precedence over URI path
("http://localhost:19530/uri_db", "explicit_db", "explicit_db"),
("http://localhost:19530", "test_db", "test_db"),
("http://localhost:19530", "", "default"),
# Multiple path segments - only first should be used as db_name
("http://localhost:19530/db1/collection1", "", "db1"),
("http://localhost:19530/db1/collection1/", "", "db1"),
# Empty path segments should be handled correctly
("http://localhost:19530//", "test_db", "test_db"),
("http://localhost:19530///", "test_db", "test_db"),
])
def test_issue_2670_2727(self, uri: str, db_name: str, expected_db_name: str):
"""
Issue 2670:
        Test for db_name being overwritten with an empty string when the uri
        ends in a slash - e.g. http://localhost:19530/
See: https://github.com/milvus-io/pymilvus/issues/2670
Actual behaviour before fix: if a uri is passed ending with a slash,
it will overwrite the db_name with an empty string.
Expected and current behaviour: if db_name is passed explicitly,
it should be used in the initialization of the GrpcHandler.
Issue 2727:
If db_name is passed as a path to the uri and not explicitly passed as an argument,
it is not overwritten with an empty string.
See: https://github.com/milvus-io/pymilvus/issues/2727
Actual behaviour before fix: if db_name is passed as a path to the uri,
it will overwrite the db_name with an empty string.
Expected and current behaviour: if db_name is passed as a path to the uri,
it should be used in the initialization of the GrpcHandler.
"""
alias = f"test_2670_2727_{uri.replace('://', '_').replace('/', '_')}_{db_name}"
with mock.patch(f"{mock_prefix}.__init__", return_value=None) as mock_init, mock.patch(
f"{mock_prefix}._wait_for_channel_ready", return_value=None):
config = {"alias": alias, "uri": uri}
# Always pass db_name parameter, even if it's an empty string
if db_name or db_name == "": # Pass both empty and non-empty strings
config["db_name"] = db_name
connections.connect(**config, keep_alive=False)
# Verify that GrpcHandler was initialized with the correct db_name
mock_init.assert_called_once()
call_args = mock_init.call_args
actual_db_name = call_args.kwargs.get("db_name", "default")
assert actual_db_name == expected_db_name, (
f"Expected db_name to be '{expected_db_name}', "
f"but got '{actual_db_name}' for uri='{uri}' and db_name='{db_name}'"
)
# Clean up - mock the close method to avoid AttributeError
with mock.patch(f"{mock_prefix}.close", return_value=None):
connections.remove_connection(alias)
|
TestIssues
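The behaviour pinned down by test_issue_2670_2727 boils down to this call pattern (it assumes a reachable Milvus server; the alias and database names are placeholders):

from pymilvus import connections

# The db_name in the URI path is used when no explicit db_name is given;
# an explicit db_name argument always takes precedence over the path.
connections.connect(alias="default", uri="http://localhost:19530/test_db")
print(connections.get_connection_addr("default"))
connections.remove_connection("default")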
|
python
|
django__django
|
tests/admin_scripts/tests.py
|
{
"start": 91270,
"end": 92228
}
|
class ____(SimpleTestCase):
def test_precedence(self):
"""
Apps listed first in INSTALLED_APPS have precedence.
"""
with self.settings(
INSTALLED_APPS=[
"admin_scripts.complex_app",
"admin_scripts.simple_app",
"django.contrib.auth",
"django.contrib.contenttypes",
]
):
out = StringIO()
call_command("duplicate", stdout=out)
self.assertEqual(out.getvalue().strip(), "complex_app")
with self.settings(
INSTALLED_APPS=[
"admin_scripts.simple_app",
"admin_scripts.complex_app",
"django.contrib.auth",
"django.contrib.contenttypes",
]
):
out = StringIO()
call_command("duplicate", stdout=out)
self.assertEqual(out.getvalue().strip(), "simple_app")
|
Discovery
|
python
|
networkx__networkx
|
networkx/algorithms/tests/test_euler.py
|
{
"start": 9603,
"end": 11209
}
|
class ____:
def test_disconnected(self):
with pytest.raises(nx.NetworkXError):
G = nx.from_edgelist([(0, 1), (2, 3)])
nx.eulerize(G)
def test_null_graph(self):
with pytest.raises(nx.NetworkXPointlessConcept):
nx.eulerize(nx.Graph())
def test_null_multigraph(self):
with pytest.raises(nx.NetworkXPointlessConcept):
nx.eulerize(nx.MultiGraph())
def test_on_empty_graph(self):
with pytest.raises(nx.NetworkXError):
nx.eulerize(nx.empty_graph(3))
def test_on_eulerian(self):
G = nx.cycle_graph(3)
H = nx.eulerize(G)
assert nx.is_isomorphic(G, H)
def test_on_eulerian_multigraph(self):
G = nx.MultiGraph(nx.cycle_graph(3))
G.add_edge(0, 1)
H = nx.eulerize(G)
assert nx.is_eulerian(H)
def test_on_complete_graph(self):
G = nx.complete_graph(4)
assert nx.is_eulerian(nx.eulerize(G))
assert nx.is_eulerian(nx.eulerize(nx.MultiGraph(G)))
def test_on_non_eulerian_graph(self):
G = nx.cycle_graph(18)
G.add_edge(0, 18)
G.add_edge(18, 19)
G.add_edge(17, 19)
G.add_edge(4, 20)
G.add_edge(20, 21)
G.add_edge(21, 22)
G.add_edge(22, 23)
G.add_edge(23, 24)
G.add_edge(24, 25)
G.add_edge(25, 26)
G.add_edge(26, 27)
G.add_edge(27, 28)
G.add_edge(28, 13)
assert not nx.is_eulerian(G)
G = nx.eulerize(G)
assert nx.is_eulerian(G)
assert nx.number_of_edges(G) == 39
|
TestEulerize
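The API under test reduces to a single call; a minimal standalone example:

import networkx as nx

G = nx.complete_graph(4)   # every node has odd degree 3, so G is not Eulerian
H = nx.eulerize(G)         # duplicates edges until all degrees are even
assert nx.is_eulerian(H)
print(list(nx.eulerian_circuit(H)))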
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/parsers.py
|
{
"start": 23668,
"end": 25866
}
|
class ____(RegexLexer):
"""
A base lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
Not for direct use; use TreetopLexer instead.
.. versionadded:: 1.6
"""
tokens = {
'root': [
include('space'),
(r'require[ \t]+[^\n\r]+[\n\r]', Other),
(r'module\b', Keyword.Namespace, 'module'),
(r'grammar\b', Keyword, 'grammar'),
],
'module': [
include('space'),
include('end'),
(r'module\b', Keyword, '#push'),
(r'grammar\b', Keyword, 'grammar'),
(r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Namespace),
],
'grammar': [
include('space'),
include('end'),
(r'rule\b', Keyword, 'rule'),
(r'include\b', Keyword, 'include'),
(r'[A-Z]\w*', Name),
],
'include': [
include('space'),
(r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Class, '#pop'),
],
'rule': [
include('space'),
include('end'),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'([A-Za-z_]\w*)(:)', bygroups(Name.Label, Punctuation)),
(r'[A-Za-z_]\w*', Name),
(r'[()]', Punctuation),
(r'[?+*/&!~]', Operator),
(r'\[(?:\\.|\[:\^?[a-z]+:\]|[^\\\]])+\]', String.Regex),
(r'([0-9]*)(\.\.)([0-9]*)',
bygroups(Number.Integer, Operator, Number.Integer)),
(r'(<)([^>]+)(>)', bygroups(Punctuation, Name.Class, Punctuation)),
(r'\{', Punctuation, 'inline_module'),
(r'\.', String.Regex),
],
'inline_module': [
(r'\{', Other, 'ruby'),
(r'\}', Punctuation, '#pop'),
(r'[^{}]+', Other),
],
'ruby': [
(r'\{', Other, '#push'),
(r'\}', Other, '#pop'),
(r'[^{}]+', Other),
],
'space': [
(r'[ \t\n\r]+', Whitespace),
(r'#[^\n]*', Comment.Single),
],
'end': [
(r'end\b', Keyword, '#pop'),
],
}
|
TreetopBaseLexer
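Since this base class is explicitly not for direct use, the intended entry point is the concrete TreetopLexer; a typical highlighting call against upstream Pygments (the grammar snippet is illustrative):

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.parsers import TreetopLexer

source = "grammar Arithmetic\n  rule number\n    [0-9]+\n  end\nend\n"
print(highlight(source, TreetopLexer(), TerminalFormatter()))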
|
python
|
jazzband__django-simple-history
|
simple_history/tests/tests/test_signals.py
|
{
"start": 345,
"end": 4113
}
|
class ____(TestCase):
def setUp(self):
self.signal_was_called = False
self.signal_instance = None
self.signal_history_instance = None
self.signal_sender = None
self.field = None
self.rows = None
def test_pre_create_historical_record_signal(self):
def handler(sender, instance, **kwargs):
self.signal_was_called = True
self.signal_instance = instance
self.signal_history_instance = kwargs["history_instance"]
self.signal_sender = sender
pre_create_historical_record.connect(handler)
p = Poll(question="what's up?", pub_date=today)
p.save()
self.assertTrue(self.signal_was_called)
self.assertEqual(self.signal_instance, p)
self.assertIsNotNone(self.signal_history_instance)
self.assertEqual(self.signal_sender, p.history.first().__class__)
def test_post_create_historical_record_signal(self):
def handler(sender, instance, history_instance, **kwargs):
self.signal_was_called = True
self.signal_instance = instance
self.signal_history_instance = history_instance
self.signal_sender = sender
post_create_historical_record.connect(handler)
p = Poll(question="what's up?", pub_date=today)
p.save()
self.assertTrue(self.signal_was_called)
self.assertEqual(self.signal_instance, p)
self.assertIsNotNone(self.signal_history_instance)
self.assertEqual(self.signal_sender, p.history.first().__class__)
def test_pre_create_historical_m2m_records_signal(self):
def handler(sender, rows, history_instance, instance, field, **kwargs):
self.signal_was_called = True
self.signal_instance = instance
self.signal_history_instance = history_instance
self.signal_sender = sender
self.rows = rows
self.field = field
pre_create_historical_m2m_records.connect(handler)
p = PollWithManyToMany(
question="what's up?",
pub_date=today,
)
p.save()
self.setUp()
p.places.add(
Place.objects.create(name="London"), Place.objects.create(name="Paris")
)
self.assertTrue(self.signal_was_called)
self.assertEqual(self.signal_instance, p)
self.assertIsNotNone(self.signal_history_instance)
self.assertEqual(self.signal_sender, p.history.first().places.model)
self.assertEqual(self.field, PollWithManyToMany._meta.many_to_many[0])
self.assertEqual(len(self.rows), 2)
def test_post_create_historical_m2m_records_signal(self):
def handler(sender, created_rows, history_instance, instance, field, **kwargs):
self.signal_was_called = True
self.signal_instance = instance
self.signal_history_instance = history_instance
self.signal_sender = sender
self.rows = created_rows
self.field = field
post_create_historical_m2m_records.connect(handler)
p = PollWithManyToMany(
question="what's up?",
pub_date=today,
)
p.save()
        # Reset the tracking attributes so that only the m2m signal is captured
        self.setUp()
p.places.add(
Place.objects.create(name="London"), Place.objects.create(name="Paris")
)
self.assertTrue(self.signal_was_called)
self.assertEqual(self.signal_instance, p)
self.assertIsNotNone(self.signal_history_instance)
self.assertEqual(self.signal_sender, p.history.first().places.model)
self.assertEqual(self.field, PollWithManyToMany._meta.many_to_many[0])
self.assertEqual(len(self.rows), 2)
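For reference, a hedged sketch of how application code typically consumes these signals; the receiver and the value it writes are hypothetical, while the signal name and its history_instance argument match the handlers exercised above.
# Hypothetical receiver: annotate every historical row as it is created.
from django.dispatch import receiver
from simple_history.signals import pre_create_historical_record

@receiver(pre_create_historical_record)
def annotate_history(sender, instance, history_instance, **kwargs):
    # Runs just before each historical record is saved.
    history_instance.history_change_reason = "autosaved"  # illustrative value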
|
PrePostCreateHistoricalRecordSignalTest
|
python
|
takluyver__flit
|
tests/test_install.py
|
{
"start": 538,
"end": 15849
}
|
class ____(TestCase):
def setUp(self):
td = tempfile.TemporaryDirectory()
self.addCleanup(td.cleanup)
self.get_dirs_patch = patch('flit.install.get_dirs',
return_value={
'scripts': os.path.join(td.name, 'scripts'),
'purelib': os.path.join(td.name, 'site-packages'),
'data': os.path.join(td.name, 'data'),
})
self.get_dirs_patch.start()
self.tmpdir = pathlib.Path(td.name)
def tearDown(self):
self.get_dirs_patch.stop()
def _assert_direct_url(self, directory, package, version, expected_editable):
direct_url_file = (
self.tmpdir
/ 'site-packages'
/ f'{package}-{version}.dist-info'
/ 'direct_url.json'
)
assert_isfile(direct_url_file)
with direct_url_file.open() as f:
direct_url = json.load(f)
assert direct_url['url'].startswith('file:///')
assert direct_url['url'] == directory.as_uri()
assert direct_url['dir_info'].get('editable') is expected_editable
def test_install_module(self):
Installer.from_ini_path(samples_dir / 'module1_toml' / 'pyproject.toml').install_directly()
assert_isfile(self.tmpdir / 'site-packages' / 'module1.py')
assert_isdir(self.tmpdir / 'site-packages' / 'module1-0.1.dist-info')
self._assert_direct_url(
samples_dir / 'module1_toml', 'module1', '0.1', expected_editable=False
)
@skipIf(not core_samples_dir.is_dir(), "Missing flit_core samples")
def test_install_module_pep621(self):
Installer.from_ini_path(
core_samples_dir / 'pep621_nodynamic' / 'pyproject.toml',
).install_directly()
assert_isfile(self.tmpdir / 'site-packages' / 'module1.py')
assert_isdir(self.tmpdir / 'site-packages' / 'module1-0.3.dist-info')
self._assert_direct_url(
core_samples_dir / 'pep621_nodynamic', 'module1', '0.3',
expected_editable=False
)
def test_install_package(self):
oldcwd = os.getcwd()
os.chdir(str(samples_dir / 'package1'))
try:
Installer.from_ini_path(pathlib.Path('pyproject.toml')).install_directly()
finally:
os.chdir(oldcwd)
assert_isdir(self.tmpdir / 'site-packages' / 'package1')
assert_isdir(self.tmpdir / 'site-packages' / 'package1-0.1.dist-info')
assert_isfile(self.tmpdir / 'scripts' / 'pkg_script')
with (self.tmpdir / 'scripts' / 'pkg_script').open() as f:
assert f.readline().strip() == "#!" + sys.executable
self._assert_direct_url(
samples_dir / 'package1', 'package1', '0.1', expected_editable=False
)
def test_install_module_in_src(self):
oldcwd = os.getcwd()
os.chdir(samples_dir / 'packageinsrc')
try:
Installer.from_ini_path(pathlib.Path('pyproject.toml')).install_directly()
finally:
os.chdir(oldcwd)
assert_isfile(self.tmpdir / 'site-packages' / 'module1.py')
assert_isdir(self.tmpdir / 'site-packages' / 'module1-0.1.dist-info')
def test_install_ns_package_native(self):
Installer.from_ini_path(samples_dir / 'ns1-pkg' / 'pyproject.toml').install_directly()
assert_isdir(self.tmpdir / 'site-packages' / 'ns1')
assert_isfile(self.tmpdir / 'site-packages' / 'ns1' / 'pkg' / '__init__.py')
assert_not_path_exists(self.tmpdir / 'site-packages' / 'ns1' / '__init__.py')
assert_isdir(self.tmpdir / 'site-packages' / 'ns1_pkg-0.1.dist-info')
def test_install_ns_package_module_native(self):
Installer.from_ini_path(samples_dir / 'ns1-pkg-mod' / 'pyproject.toml').install_directly()
assert_isfile(self.tmpdir / 'site-packages' / 'ns1' / 'module.py')
assert_not_path_exists(self.tmpdir / 'site-packages' / 'ns1' / '__init__.py')
def test_install_ns_package_native_symlink(self):
if os.name == 'nt':
raise SkipTest('symlink')
Installer.from_ini_path(
samples_dir / 'ns1-pkg' / 'pyproject.toml', symlink=True
).install_directly()
Installer.from_ini_path(
samples_dir / 'ns1-pkg2' / 'pyproject.toml', symlink=True
).install_directly()
Installer.from_ini_path(
samples_dir / 'ns1-pkg-mod' / 'pyproject.toml', symlink=True
).install_directly()
assert_isdir(self.tmpdir / 'site-packages' / 'ns1')
assert_isdir(self.tmpdir / 'site-packages' / 'ns1' / 'pkg')
assert_islink(self.tmpdir / 'site-packages' / 'ns1' / 'pkg',
to=str(samples_dir / 'ns1-pkg' / 'ns1' / 'pkg'))
assert_isdir(self.tmpdir / 'site-packages' / 'ns1_pkg-0.1.dist-info')
assert_isdir(self.tmpdir / 'site-packages' / 'ns1' / 'pkg2')
assert_islink(self.tmpdir / 'site-packages' / 'ns1' / 'pkg2',
to=str(samples_dir / 'ns1-pkg2' / 'ns1' / 'pkg2'))
assert_isdir(self.tmpdir / 'site-packages' / 'ns1_pkg2-0.1.dist-info')
assert_islink(self.tmpdir / 'site-packages' / 'ns1' / 'module.py',
to=samples_dir / 'ns1-pkg-mod' / 'ns1' / 'module.py')
assert_isdir(self.tmpdir / 'site-packages' / 'ns1_module-0.1.dist-info')
def test_install_ns_package_pth_file(self):
Installer.from_ini_path(
samples_dir / 'ns1-pkg' / 'pyproject.toml', pth=True
).install_directly()
pth_file = self.tmpdir / 'site-packages' / 'ns1.pkg.pth'
assert_isfile(pth_file)
assert pth_file.read_text('utf-8').strip() == str(samples_dir / 'ns1-pkg')
def test_symlink_package(self):
if os.name == 'nt':
raise SkipTest("symlink")
Installer.from_ini_path(samples_dir / 'package1' / 'pyproject.toml', symlink=True).install()
assert_islink(self.tmpdir / 'site-packages' / 'package1',
to=samples_dir / 'package1' / 'package1')
assert_isfile(self.tmpdir / 'scripts' / 'pkg_script')
with (self.tmpdir / 'scripts' / 'pkg_script').open() as f:
assert f.readline().strip() == "#!" + sys.executable
self._assert_direct_url(
samples_dir / 'package1', 'package1', '0.1', expected_editable=True
)
@skipIf(not core_samples_dir.is_dir(), "Missing flit_core samples")
def test_symlink_module_pep621(self):
if os.name == 'nt':
raise SkipTest("symlink")
Installer.from_ini_path(
core_samples_dir / 'pep621_nodynamic' / 'pyproject.toml', symlink=True
).install_directly()
assert_islink(self.tmpdir / 'site-packages' / 'module1.py',
to=core_samples_dir / 'pep621_nodynamic' / 'module1.py')
assert_isdir(self.tmpdir / 'site-packages' / 'module1-0.3.dist-info')
self._assert_direct_url(
core_samples_dir / 'pep621_nodynamic', 'module1', '0.3',
expected_editable=True
)
def test_symlink_module_in_src(self):
if os.name == 'nt':
raise SkipTest("symlink")
oldcwd = os.getcwd()
os.chdir(samples_dir / 'packageinsrc')
try:
Installer.from_ini_path(
pathlib.Path('pyproject.toml'), symlink=True
).install_directly()
finally:
os.chdir(oldcwd)
assert_islink(self.tmpdir / 'site-packages' / 'module1.py',
to=(samples_dir / 'packageinsrc' / 'src' / 'module1.py'))
assert_isdir(self.tmpdir / 'site-packages' / 'module1-0.1.dist-info')
def test_pth_package(self):
Installer.from_ini_path(samples_dir / 'package1' / 'pyproject.toml', pth=True).install()
assert_isfile(self.tmpdir / 'site-packages' / 'package1.pth')
with open(str(self.tmpdir / 'site-packages' / 'package1.pth')) as f:
assert f.read() == str(samples_dir / 'package1')
assert_isfile(self.tmpdir / 'scripts' / 'pkg_script')
self._assert_direct_url(
samples_dir / 'package1', 'package1', '0.1', expected_editable=True
)
def test_pth_module_in_src(self):
oldcwd = os.getcwd()
os.chdir(samples_dir / 'packageinsrc')
try:
Installer.from_ini_path(
pathlib.Path('pyproject.toml'), pth=True
).install_directly()
finally:
os.chdir(oldcwd)
pth_path = self.tmpdir / 'site-packages' / 'module1.pth'
assert_isfile(pth_path)
assert pth_path.read_text('utf-8').strip() == str(
samples_dir / 'packageinsrc' / 'src'
)
assert_isdir(self.tmpdir / 'site-packages' / 'module1-0.1.dist-info')
def test_dist_name(self):
Installer.from_ini_path(samples_dir / 'altdistname' / 'pyproject.toml').install_directly()
assert_isdir(self.tmpdir / 'site-packages' / 'package1')
assert_isdir(self.tmpdir / 'site-packages' / 'package_dist1-0.1.dist-info')
def test_entry_points(self):
Installer.from_ini_path(samples_dir / 'entrypoints_valid' / 'pyproject.toml').install_directly()
assert_isfile(self.tmpdir / 'site-packages' / 'package1-0.1.dist-info' / 'entry_points.txt')
def test_pip_install(self):
ins = Installer.from_ini_path(samples_dir / 'package1' / 'pyproject.toml', python='mock_python',
user=False)
with MockCommand('mock_python') as mock_py:
ins.install()
calls = mock_py.get_calls()
assert len(calls) == 1
cmd = calls[0]['argv']
assert cmd[1:4] == ['-m', 'pip', 'install']
assert cmd[4].endswith('package1')
def test_symlink_other_python(self):
if os.name == 'nt':
raise SkipTest('symlink')
(self.tmpdir / 'site-packages2').mkdir()
(self.tmpdir / 'scripts2').mkdir()
# Called by Installer._auto_user() :
script1 = ("#!{python}\n"
"import sysconfig\n"
"print(True)\n" # site.ENABLE_USER_SITE
"print({purelib!r})" # sysconfig.get_path('purelib')
).format(python=sys.executable,
purelib=str(self.tmpdir / 'site-packages2'))
# Called by Installer._get_dirs() :
script2 = ("#!{python}\n"
"import json, sys\n"
"json.dump({{'purelib': {purelib!r}, 'scripts': {scripts!r}, 'data': {data!r} }}, "
"sys.stdout)"
).format(python=sys.executable,
purelib=str(self.tmpdir / 'site-packages2'),
scripts=str(self.tmpdir / 'scripts2'),
data=str(self.tmpdir / 'data'),
)
with MockCommand('mock_python', content=script1):
ins = Installer.from_ini_path(samples_dir / 'package1' / 'pyproject.toml', python='mock_python',
symlink=True)
with MockCommand('mock_python', content=script2):
ins.install()
assert_islink(self.tmpdir / 'site-packages2' / 'package1',
to=samples_dir / 'package1' / 'package1')
assert_isfile(self.tmpdir / 'scripts2' / 'pkg_script')
with (self.tmpdir / 'scripts2' / 'pkg_script').open() as f:
assert f.readline().strip() == "#!mock_python"
def test_install_requires(self):
ins = Installer.from_ini_path(samples_dir / 'requires-requests.toml',
user=False, python='mock_python')
with MockCommand('mock_python') as mockpy:
ins.install_requirements()
calls = mockpy.get_calls()
assert len(calls) == 1
assert calls[0]['argv'][1:5] == ['-m', 'pip', 'install', '-r']
@skipIf(not core_samples_dir.is_dir(), "Missing flit_core samples")
def test_install_reqs_my_python_if_needed_pep621(self):
ins = Installer.from_ini_path(
core_samples_dir / 'pep621_nodynamic' / 'pyproject.toml',
deps='none',
)
# This shouldn't try to get version & docstring from the module
ins.install_reqs_my_python_if_needed()
def test_extras_error(self):
with pytest.raises(DependencyError):
Installer.from_ini_path(samples_dir / 'requires-requests.toml',
user=False, deps='none', extras='dev')
@skipIf(not core_samples_dir.is_dir(), "Missing flit_core samples")
def test_install_data_dir(self):
Installer.from_ini_path(
core_samples_dir / 'with_data_dir' / 'pyproject.toml',
).install_directly()
assert_isfile(self.tmpdir / 'site-packages' / 'module1.py')
assert_isfile(self.tmpdir / 'data' / 'share' / 'man' / 'man1' / 'foo.1')
@skipIf(not core_samples_dir.is_dir(), "Missing flit_core samples")
def test_symlink_data_dir(self):
if os.name == 'nt':
raise SkipTest("symlink")
Installer.from_ini_path(
core_samples_dir / 'with_data_dir' / 'pyproject.toml', symlink=True
).install_directly()
assert_isfile(self.tmpdir / 'site-packages' / 'module1.py')
assert_islink(
self.tmpdir / 'data' / 'share' / 'man' / 'man1' / 'foo.1',
to=core_samples_dir / 'with_data_dir' / 'data' / 'share' / 'man' / 'man1' / 'foo.1'
)
@pytest.mark.parametrize(('deps', 'extras', 'installed'), [
('none', [], set()),
('develop', [], {'pytest ;', 'toml ;'}),
('production', [], {'toml ;'}),
('all', [], {'toml ;', 'pytest ;', 'requests ;'}),
])
def test_install_requires_extra(deps, extras, installed):
it = InstallTests()
try:
it.setUp()
ins = Installer.from_ini_path(samples_dir / 'extras' / 'pyproject.toml', python='mock_python',
user=False, deps=deps, extras=extras)
cmd = MockCommand('mock_python')
get_reqs = (
f"#!{sys.executable}\n"
"import sys\n"
f"with open({cmd.recording_file!r}, 'wb') as w, open(sys.argv[-1], 'rb') as r:\n"
" w.write(r.read())"
)
cmd.content = get_reqs
with cmd as mock_py:
ins.install_requirements()
with open(mock_py.recording_file) as f:
str_deps = f.read()
deps = str_deps.split('\n') if str_deps else []
assert set(deps) == installed
finally:
it.tearDown()
def test_requires_dist_to_pip_requirement():
rd = 'pathlib2 (>=2.3); python_version == "2.7"'
assert _requires_dist_to_pip_requirement(rd) == \
'pathlib2>=2.3 ; python_version == "2.7"'
def test_test_writable_dir_win():
with tempfile.TemporaryDirectory() as td:
assert install._test_writable_dir_win(td) is True
# Ironically, I don't know how to make a non-writable dir on Windows,
# so although the functionality is for Windows, the test is for Posix
if os.name != 'posix':
return
# Remove write permissions from the directory
os.chmod(td, 0o444)
try:
assert install._test_writable_dir_win(td) is False
finally:
os.chmod(td, 0o644)
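Distilled from the tests above, a brief sketch of the editable (symlink) install flow; the pyproject.toml path is illustrative.
# Sketch of the symlink install that test_symlink_package exercises.
import pathlib
from flit.install import Installer

ins = Installer.from_ini_path(pathlib.Path("pyproject.toml"), symlink=True)
ins.install()  # links the package into site-packages, writes scripts and .dist-info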
|
InstallTests
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/callbacks/timer.py
|
{
"start": 1203,
"end": 8283
}
|
class ____(Callback):
"""The Timer callback tracks the time spent in the training, validation, and test loops and interrupts the Trainer
if the given time limit for the training loop is reached.
Args:
        duration: A string in the format DD:HH:MM:SS (days, hours, minutes, seconds), or a
            :class:`datetime.timedelta`, or a dict of keyword arguments compatible with :class:`~datetime.timedelta`.
interval: Determines if the interruption happens on epoch level or mid-epoch.
Can be either ``"epoch"`` or ``"step"``.
verbose: Set this to ``False`` to suppress logging messages.
Raises:
MisconfigurationException:
If ``duration`` is not in the expected format.
MisconfigurationException:
If ``interval`` is not one of the supported choices.
Example::
from lightning.pytorch import Trainer
from lightning.pytorch.callbacks import Timer
# stop training after 12 hours
timer = Timer(duration="00:12:00:00")
# or provide a datetime.timedelta
from datetime import timedelta
timer = Timer(duration=timedelta(weeks=1))
# or provide a dictionary
timer = Timer(duration=dict(weeks=4, days=2))
# force training to stop after given time limit
trainer = Trainer(callbacks=[timer])
# query training/validation/test time (in seconds)
timer.time_elapsed("train")
timer.start_time("validate")
timer.end_time("test")
"""
def __init__(
self,
duration: Optional[Union[str, timedelta, dict[str, int]]] = None,
interval: str = Interval.step,
verbose: bool = True,
) -> None:
super().__init__()
if isinstance(duration, str):
duration_match = re.fullmatch(r"(\d+):(\d\d):(\d\d):(\d\d)", duration.strip())
if not duration_match:
raise MisconfigurationException(
f"`Timer(duration={duration!r})` is not a valid duration. "
"Expected a string in the format DD:HH:MM:SS."
)
duration = timedelta(
days=int(duration_match.group(1)),
hours=int(duration_match.group(2)),
minutes=int(duration_match.group(3)),
seconds=int(duration_match.group(4)),
)
elif isinstance(duration, dict):
duration = timedelta(**duration)
if interval not in set(Interval):
raise MisconfigurationException(
f"Unsupported parameter value `Timer(interval={interval})`. Possible choices are:"
f" {', '.join(set(Interval))}"
)
self._duration = duration.total_seconds() if duration is not None else None
self._interval = interval
self._verbose = verbose
self._start_time: dict[RunningStage, Optional[float]] = dict.fromkeys(RunningStage)
self._end_time: dict[RunningStage, Optional[float]] = dict.fromkeys(RunningStage)
self._offset = 0
def start_time(self, stage: str = RunningStage.TRAINING) -> Optional[float]:
"""Return the start time of a particular stage (in seconds)"""
stage = RunningStage(stage)
return self._start_time[stage]
def end_time(self, stage: str = RunningStage.TRAINING) -> Optional[float]:
"""Return the end time of a particular stage (in seconds)"""
stage = RunningStage(stage)
return self._end_time[stage]
def time_elapsed(self, stage: str = RunningStage.TRAINING) -> float:
"""Return the time elapsed for a particular stage (in seconds)"""
start = self.start_time(stage)
end = self.end_time(stage)
offset = self._offset if stage == RunningStage.TRAINING else 0
if start is None:
return offset
if end is None:
return time.monotonic() - start + offset
return end - start + offset
def time_remaining(self, stage: str = RunningStage.TRAINING) -> Optional[float]:
"""Return the time remaining for a particular stage (in seconds)"""
if self._duration is not None:
return self._duration - self.time_elapsed(stage)
return None
@override
def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._start_time[RunningStage.TRAINING] = time.monotonic()
@override
def on_train_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._end_time[RunningStage.TRAINING] = time.monotonic()
@override
def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._start_time[RunningStage.VALIDATING] = time.monotonic()
@override
def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._end_time[RunningStage.VALIDATING] = time.monotonic()
@override
def on_test_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._start_time[RunningStage.TESTING] = time.monotonic()
@override
def on_test_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self._end_time[RunningStage.TESTING] = time.monotonic()
@override
def on_fit_start(self, trainer: "pl.Trainer", *args: Any, **kwargs: Any) -> None:
# this checks the time after the state is reloaded, regardless of the interval.
# this is necessary in case we load a state whose timer is already depleted
if self._duration is None:
return
self._check_time_remaining(trainer)
@override
def on_train_batch_end(self, trainer: "pl.Trainer", *args: Any, **kwargs: Any) -> None:
if self._interval != Interval.step or self._duration is None:
return
self._check_time_remaining(trainer)
@override
def on_train_epoch_end(self, trainer: "pl.Trainer", *args: Any, **kwargs: Any) -> None:
if self._interval != Interval.epoch or self._duration is None:
return
self._check_time_remaining(trainer)
@override
def state_dict(self) -> dict[str, Any]:
return {"time_elapsed": {stage.value: self.time_elapsed(stage) for stage in RunningStage}}
@override
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
time_elapsed = state_dict.get("time_elapsed", {})
self._offset = time_elapsed.get(RunningStage.TRAINING.value, 0)
def _check_time_remaining(self, trainer: "pl.Trainer") -> None:
assert self._duration is not None
should_stop = self.time_elapsed() >= self._duration
should_stop = trainer.strategy.broadcast(should_stop)
trainer.should_stop = trainer.should_stop or should_stop
if should_stop and self._verbose:
elapsed = timedelta(seconds=int(self.time_elapsed(RunningStage.TRAINING)))
rank_zero_info(f"Time limit reached. Elapsed time is {elapsed}. Signaling Trainer to stop.")
|
Timer
|
python
|
pytorch__pytorch
|
torch/_inductor/select_algorithm.py
|
{
"start": 11655,
"end": 59634
}
|
class ____(TritonKernel):
"""
A specialized kernel class for Triton templates that handles code generation
for templated Triton kernels.
This class extends TritonKernel to provide additional functionality for
template-based kernel generation, including support for subgraphs, workspace
arguments, and prologue/epilogue fusion.
"""
def __init__(
self,
kernel_name,
input_nodes: tuple[ir.IRNode],
output_node,
defines,
num_stages,
num_warps,
grid_fn,
meta,
call_sizes,
num_consumer_groups=0,
num_buffers_warp_spec=0,
use_jit=False,
tma_store=False,
prefix_args=0,
suffix_args=0,
epilogue_fn=identity,
subgraphs: Optional[list[ir.ComputedBuffer]] = None,
workspace_arg: Optional[WorkspaceArg] = None,
prologue_loads_all_inputs=False,
hint_override: Optional[int] = None,
) -> None:
numel = sympy_product(output_node.get_size())
if tma_store:
assert len(output_node.get_size()) == 2, (
"TMA store only supported for 2D with templates"
)
tiling = {
"x": output_node.get_size()[0],
"y": output_node.get_size()[1],
"r0_": sympy.S.One,
}
else:
tiling = {
"x": numel,
"r0_": sympy.S.One,
}
super().__init__(
tiling,
features=SIMDKernelFeatures([], numel),
hint_override=hint_override,
)
self.input_nodes = input_nodes
self.output_node = output_node
self.named_input_nodes = {} # type: ignore[var-annotated]
self.defines = defines
self.kernel_name = kernel_name
self.use_jit = use_jit
self.tma_store = tma_store
self.num_stages = num_stages
self.num_warps = num_warps
self.num_consumer_groups = num_consumer_groups
self.num_buffers_warp_spec = num_buffers_warp_spec
self.grid_fn = grid_fn
self.meta = meta
self.call_sizes = call_sizes
# for templates with fixed epilogues
self.prefix_args = prefix_args
self.suffix_args = suffix_args
# pyrefly: ignore [invalid-type-var]
self.epilogue_fn = epilogue_fn
self.render_hooks = {} # type: ignore[var-annotated]
self.triton_meta: Optional[dict[str, object]] = None
# For Templated Attention this can be a list of ir.Subgraph
self.subgraphs: Optional[list[ir.ComputedBuffer]] = subgraphs
# Some templates use extra global memory as a workspace
self.workspace_arg = workspace_arg
if workspace_arg is not None:
self.args.workspace_args.append(workspace_arg)
# The following attributes (body, template_mask, output_val) are all
# used for triton kernel codegen.
# They are swapped onto the TritonTemplateKernel object by
# `set_subgraph_body`
self.subgraph_bodies: dict[str, SubgraphInfo] = {}
# input buffers which we are allowed to prologue fuse into
self.prologue_supported_inputs: OrderedSet[str] = OrderedSet()
# input buffers which we are fusing into
self.prologue_fused_inputs: OrderedSet[str] = OrderedSet()
# input buffers which we are fusing into, which preserve a zero mask
self.prologue_fused_inputs_preserve_zero: OrderedSet[str] = OrderedSet()
# The following attributes are all used for triton kernel codegen.
# They are swapped onto the TritonTemplateKernel object by
# `set_subgraph_body`
# NB: the names here must match the fields in SubgraphInfo
self.body: IndentedBuffer = FakeIndentedBuffer()
self.compute: IndentedBuffer = FakeIndentedBuffer()
self.indexing_code: IndentedBuffer = FakeIndentedBuffer()
self.loads: IndentedBuffer = FakeIndentedBuffer()
self.stores: IndentedBuffer = FakeIndentedBuffer()
self.template_mask: Optional[str] = None
self.template_out_shape: Optional[Union[str, tuple[str]]] = None
self.ops_handler: Optional[V.WrapperHandler] = None # type: ignore[name-defined]
# When caching is enabled, the generated code is not dependent on the input nodes names, or
# symbolic sizes names.
# However, some of the variables returned by generate_and_load that are computed during the
# triton template expansions (code generation) are dependent on those.
        # In order to cache the code generation and avoid redoing it for similar inputs that vary only by
        # input names or symbol names, we use a record-and-replay method.
# During template expansions we record all function calls that change input_dependent_preserved_state
# and replay them on a cache hit to regenerate them.
self.cached_replay_events: Optional[RecordedEventsType] = None
# Update each time an input is marked frozen, used to replay the freezing of inputs on a cache hit.
self.frozen_layouts_cnt = 0
# When prologue_loads_all_inputs is true, prologue_supported_inputs is populated during def_kernel
# by adding all inputs.
self.prologue_loads_all_inputs = prologue_loads_all_inputs
# Extra functions to be exposed during partial template rendering.
self.extra_template_env_fns: list[Callable[..., Any]] = []
# Tracking for intermediate variables
self.tmp_var_ctr = itertools.count()
def _gen_tmp_var(self) -> str:
return f"_tmp_var{next(self.tmp_var_ctr)}"
def input_dependent_preserved_state(self) -> str:
        # Not adding self.args.output_buffers on purpose, since we do not need to reproduce
        # it on a cache hit (it is never accessed).
return repr(
[
self.args.input_buffers,
self.args.sizevars,
self.args.workspace_args,
self.prologue_supported_inputs,
self.frozen_layouts_cnt,
]
)
def record_input_dependent_tracked_event(self) -> Callable[..., Any]:
def decorator(fn) -> Callable[..., Any]:
def wrapper(*args, **kwargs) -> Any:
pre_state = self.input_dependent_preserved_state()
result = fn(*args, **kwargs)
post_state = self.input_dependent_preserved_state()
if pre_state != post_state:
assert self.cached_replay_events is not None
self.cached_replay_events.append((fn.__name__, [*args], {**kwargs}))
return result
return wrapper
return decorator
def replay_cached_events(self, events: RecordedEventsType) -> None:
for f, args, kwargs in events:
getattr(self, f)(*args, **kwargs)
@contextlib.contextmanager
def set_subgraph_body(self, body_name: str):
assert all(
hasattr(self, field.name) for field in dataclasses.fields(SubgraphInfo)
)
old_state = {
key.name: getattr(self, key.name)
for key in dataclasses.fields(SubgraphInfo)
}
assert body_name in self.subgraph_bodies, body_name
subgraph = self.subgraph_bodies[body_name]
for key, value in subgraph.to_dict().items():
if value is None and key in subgraph.only_copy_if_non_none_fields:
continue
setattr(self, key, value)
context = (
contextlib.nullcontext
if not self.ops_handler
# pyrefly: ignore [not-callable]
else lambda: V.set_ops_handler(self.ops_handler(V.get_ops_handler()))
)
with context(): # type: ignore[operator]
yield
self.subgraph_bodies[body_name] = SubgraphInfo(
**{
key.name: getattr(self, key.name)
for key in dataclasses.fields(SubgraphInfo)
}
)
for key, value in old_state.items():
setattr(self, key, value)
@contextlib.contextmanager
def create_subgraph_body(self, body_name: str, clear_cse: bool = False):
assert body_name not in self.subgraph_bodies
self.subgraph_bodies[body_name] = SubgraphInfo(
IndentedBuffer(), None, None, cse=self.cse.clone() if clear_cse else None
)
with self.set_subgraph_body(body_name):
yield
def need_numel_args(self):
return False
def estimate_kernel_num_bytes(self):
"""
Estimate the total number of bytes this kernel takes.
For in/out nodes, sizes are counted twice: once for reading and
once for writing.
"""
ninplace_args = len(unique(self.args.inplace_buffers.values()))
num_bytes = []
for i, inp in enumerate(itertools.chain(self.input_nodes, (self.output_node,))):
size = V.graph.sizevars.size_hints(inp.get_size(), fallback=0)
numel = functools.reduce(operator.mul, size, 1)
dtype_size = get_dtype_size(inp.get_dtype())
num_bytes.append(numel * dtype_size * (1 + int(i < ninplace_args)))
return sum(num_bytes)
def estimate_flops(self) -> int:
for node in self.input_nodes:
for fx_node in node._current_origins:
f = count_flops_fx(fx_node)
if f is not None:
return V.graph.sizevars.size_hint(f, fallback=0)
return 0
def jit_lines(self):
if self.use_jit:
return "@triton.jit"
argdefs, _, signature, _ = self.args.python_argdefs()
triton_meta: dict[str, Any] = {
"signature": signature_to_meta(
signature,
size_dtype=self.index_dtype,
argdefs=argdefs,
is_template=True,
),
"device": DeviceProperties.create(self.output_node.get_device()),
"constants": {},
}
triton_meta["configs"] = [config_of(signature)]
for arg_num in equal_1_arg_indices(signature): # type: ignore[index]
triton_meta["constants"][signature[arg_num].name] = 1 # type: ignore[index,union-attr]
matrix_instr_nonkdim = self.meta.get("matrix_instr_nonkdim", None)
waves_per_eu = self.meta.get("waves_per_eu", None)
kpack = self.meta.get("kpack", None)
if matrix_instr_nonkdim:
triton_meta["matrix_instr_nonkdim"] = matrix_instr_nonkdim
if waves_per_eu:
triton_meta["waves_per_eu"] = waves_per_eu
if kpack:
triton_meta["kpack"] = kpack
self.triton_meta = triton_meta
inductor_meta = {
"kernel_name": str(Placeholder.DESCRIPTIVE_NAME),
**self.inductor_meta_common(),
**FixedGrid.setup_grid_as_args(),
}
if config.profile_bandwidth or config.benchmark_kernel:
num_gb = self.estimate_kernel_num_bytes() / 1e9
inductor_meta["kernel_num_gb"] = num_gb
if config.benchmark_kernel:
flops = self.estimate_flops()
inductor_meta["kernel_flop"] = flops
inductor_meta["config_args"] = self.meta
template_args = f"""
num_stages={self.num_stages},
num_warps={self.num_warps},
triton_meta={triton_meta!r},
inductor_meta={inductor_meta!r},
"""
if HAS_WARP_SPEC:
template_args += f"""
num_consumer_groups={self.num_consumer_groups},
num_buffers_warp_spec={self.num_buffers_warp_spec},
"""
return f"""
@triton_heuristics.template(
{template_args}
)
@triton.jit
"""
def gen_argdefs(self):
def hook():
# python_argdefs() cannot be run until after the rest of the template lazily adds more args
arg_defs, *_ = self.args.python_argdefs()
return f"{', '.join(x.full_name() for x in arg_defs)}"
return self._register_hook("<ARGDEFS>", hook, allow_overwriting=True)
def gen_defines(self):
return self.defines
def def_kernel(self, *argnames):
"""
Hook called from template code to generate function def and
needed args.
"""
assert all(isinstance(x, str) for x in argnames)
renames = IndentedBuffer(initial_indent=1)
named_args = self.input_nodes[
self.prefix_args : len(self.input_nodes) - self.suffix_args
]
assert len(argnames) == len(named_args), (
len(argnames),
len(named_args),
self.prefix_args,
len(self.input_nodes),
)
for input_node in self.input_nodes[: self.prefix_args]:
# get args in correct order
self.args.input(input_node.get_name())
for name, input_node in zip(argnames, named_args):
arg_name = f"arg_{name}"
self.named_input_nodes[name] = input_node
if input_node.get_name() in V.graph.removed_buffers:
continue
if input_node.get_name() in self.prologue_fused_inputs:
continue
self.args.input_buffers[input_node.get_name()] = arg_name
# The args may be duplicated, so renaming must be after args are de-duplicated.
for name in argnames:
input_node = self.named_input_nodes[name]
if self.prologue_loads_all_inputs:
self.prologue_supported_inputs.add(input_node.get_name())
if input_node.get_name() in V.graph.removed_buffers:
continue
if input_node.get_name() in self.prologue_fused_inputs:
continue
arg_name = self.args.input_buffers[input_node.get_name()]
if input_node.get_layout().offset == 0:
renames.writeline(f"{name} = {arg_name}")
else:
offset = texpr(self.rename_indexing(input_node.get_layout().offset))
renames.writeline(f"{name} = {arg_name} + {offset}")
for input_node in self.input_nodes[len(self.input_nodes) - self.suffix_args :]:
# get args in correct order
if input_node.get_name() in V.graph.removed_buffers:
continue
if input_node.get_name() in self.prologue_fused_inputs:
continue
self.args.input(input_node.get_name())
def hook():
# python_argdefs() cannot be run until after the rest of the template lazily adds more args
arg_defs, *_ = self.args.python_argdefs()
code = IndentedBuffer()
code.splice(gen_common_triton_imports())
code.splice(self.jit_lines())
code.writeline(
f"def {self.kernel_name}({', '.join(x.full_name() for x in arg_defs)}):"
)
with code.indent():
code.splice(self.defines)
code.splice(renames.getvalue())
self.codegen_prologue(code)
return code.getvalue()
return self._register_hook("<DEF_KERNEL>", hook)
def size(self, name: Optional[str], index: int):
"""
Hook called from template code to get the size of an arg.
Will add needed args to pass it in if it is dynamic.
"""
assert isinstance(index, int)
if name is None:
val = self.output_node.get_size()[index]
else:
assert isinstance(name, str)
val = self.named_input_nodes[name].get_size()[index]
return texpr(self.rename_indexing(val))
def stride(self, name, index=None):
"""
Hook called from template code to get the stride of an arg.
Will add needed args to pass it in if it is dynamic.
"""
if name is None:
val = self.output_node.get_stride()
else:
assert isinstance(name, str)
val = self.get_stride_and_maybe_freeze_layout(self.named_input_nodes[name])
if isinstance(index, int):
return texpr(self.rename_indexing(val[index]))
return ", ".join([texpr(self.rename_indexing(i)) for i in val])
def _get_subgraph(self, subgraph_number: int):
assert isinstance(subgraph_number, int)
assert isinstance(self.subgraphs, list)
assert subgraph_number < len(self.subgraphs), (
f"Invalid subgraph number provided to create_modification, {subgraph_number} must be < {len(self.subgraphs)}"
)
assert self.body.getvalue() == "", (
"Body should be clear before adding a modification"
)
return self.subgraphs[subgraph_number]
def _handle_scatter_graph(self, scatter_graph):
"""Handle processing for a single scatter graph.
Args:
scatter_graph: The scatter graph to process
"""
assert isinstance(scatter_graph, ir.ComputedBuffer), (
f"scatter_graph must be an instance of ComputeBuffer but got {type(scatter_graph)}"
)
def contiguous_strides(x):
# We always create a fresh contiguous grad for scattering into
return sum(
x_i * stride for x_i, stride in zip(x, scatter_graph.get_stride())
)
return scatter_graph.data.store_output( # type: ignore[attr-defined]
scatter_graph.name, contiguous_strides, []
)
def modification(
self,
subgraph_number: int,
output_name: Optional[str],
mask: Optional[str] = None,
**fixed_inputs,
) -> str:
"""This creates a modification function for a subgraph.
        To use this inside a template, the first argument should specify which subgraph to codegen for.
Args:
subgraph_number (int): The index of the subgraph in self.subgraphs
output_name (Optional[str]): The name of the output variable to store the result in
mask (Optional[str]): An optional mask to use for the store operation. If provided, this mask
will be applied to the store.
"""
num = 0
out = None
scatters = []
while f"mod_{subgraph_number}_{num}" in self.subgraph_bodies:
num += 1
with self.create_subgraph_body(f"mod_{subgraph_number}_{num}"):
subgraph = self._get_subgraph(subgraph_number)
modification_handler = ModificationWrapper(
self, subgraph_number, fixed_inputs, mask
)
with V.set_ops_handler(modification_handler):
assert isinstance(subgraph, (ir.ComputedBuffer, list)), (
f"Expected the subgraph to be a ComputedBuffer or a List[ComputedBuffer], got {type(subgraph)}"
)
# Handle scatter stores
if isinstance(subgraph, list):
for scatter_graph in subgraph:
scatters.append(self._handle_scatter_graph(scatter_graph))
elif isinstance(subgraph.data, ir.InputBuffer):
out = subgraph.data.make_loader()(())
else:
out = subgraph.data.inner_fn(())
self.codegen_body()
if output_name is not None:
assert isinstance(output_name, str)
assert out is not None
self.body.writeline(f"{output_name} = {out.value}")
else:
assert out is None
for scatter in scatters:
self.body.writeline(str(scatter))
body_val = self.body.getvalue()
self.cse.invalidate(OrderedSet())
return body_val
def load_input(
self,
input_name: str,
output_name: str,
indices: Union[list[Any], tuple[Any]],
mask: Optional[str] = None,
other: Optional[Union[float, int]] = 0.0,
indent_width: int = 4,
index_shape: Optional[tuple[str]] = None,
):
"""Loads an input and applies any necessary preprocessing or masking.
Args:
input_name (str): The name of the input to load.
indices (Union[List, Tuple]): The index for each dimension of the input.
            output_name (str): The name of the variable to store the loaded value.
mask (Optional[str]): An optional mask to use for the load operation.
other (Optional[Union[float, int]]): The value to use for masked elements. Default is 0.0.
indent_width (int): The number of spaces to use for indentation.
"""
input_node = self.named_input_nodes[input_name]
if not self.prologue_loads_all_inputs:
self.prologue_supported_inputs.add(input_node.get_name())
tilings = (sympy_product(input_node.get_size()), sympy.Integer(1))
groups = {
"x": tilings[0],
"r0_": tilings[1],
}
range_trees = self.construct_range_trees(
pid_cache=None,
inside_reduction=False,
is_reduction=False,
numels=groups,
no_x_dim=False,
)
load_code = None
with self.create_subgraph_body(f"<LOAD_INPUT_{input_name}>"):
assert isinstance(indices, (list, tuple))
assert isinstance(output_name, str)
assert isinstance(mask, (str, type(None)))
self.range_trees = range_trees
self.numels = {k: V.graph.sizevars.simplify(v) for k, v in groups.items()}
indices = list(map(OpOverrides.paren, indices))
index_symbols = [sympy.Symbol(x, integer=True) for x in indices]
lengths = [V.graph.sizevars.simplify(s) for s in input_node.get_size()]
assert len(indices) == len(lengths)
# glue to make generated code use same indexing from template
# TODO (from reviewers as well)
# in codegen_template,
# prologue_node.codegen(kernel.split_and_set_ranges(prologue_node.get_ranges()))
# the ranges need to reflect the group of the prologue input or it will error
            # not sure if there is any difference between the original range_tree_entry
            # and the new one from the correct lengths/groups... both actually seem to work
for name, range_tree_entry in zip(
indices, self.range_trees[0].construct_entries(lengths)
):
range_tree_entry.set_name(name)
contiguous_index = sympy_dot(
ir.FlexibleLayout.contiguous_strides(lengths), index_symbols
)
contiguous_index = self.rename_indexing(contiguous_index)
self.body.writeline("xindex = " + texpr(contiguous_index))
xindex_range_root = self.range_trees[0].lookup(
sympy.Integer(1), sympy_product(lengths)
)
xindex_range_root.set_name("xindex")
# Note - ["None" override_mask]
# MM Templates work by taking out of bounds index values and wrapping them around to 0
# so that no mask is required on the load: offs_a_m = `rm % M`
            # We should override the mask to be "None" instead of inheriting the mask that would
# have been loaded otherwise.
# We are using "None" for clarity in output code, but
# we could alternatively emit `xmask = tl.full([xindex.shape], True, tl.int1)`
self.template_mask = mask if mask is not None else "None"
self.template_out_shape = index_shape if index_shape else "xindex"
self.template_indices = indices
self.cse.invalidate(OrderedSet())
template_mask = self.template_mask
class StoreOutputSubstitution(V.WrapperHandler): # type: ignore[name-defined]
name = "StoreOutputSubstitution"
def store(
self,
name: str,
index: sympy.Expr,
value: "CSEVariable",
mode: "StoreMode" = None,
):
V.kernel.store_buffer_names.add(name)
V.kernel.cse.store_cache[name] = value
if name in V.kernel.prologue_fused_inputs:
# We load masked out values with 0, then apply a prologue.
                        # The masked out values may not necessarily be 0 any more
# so we need to reapply the mask.
value_dtype = value.dtype
value_str = str(value)
if template_mask != "None" and (
name not in V.kernel.prologue_fused_inputs_preserve_zero
or other != 0
):
value_str = (
f"tl.where({template_mask}, {value_str}, {other})"
)
if value_dtype != V.graph.get_buffer(name).dtype:
value_str = f"{value_str}.to({triton_type(V.graph.get_buffer(name).dtype)})"
# TODO: we should have intermediary var shapes
V.kernel.compute.writeline(
f"{output_name} = {value_str}.broadcast_to(xindex.shape)"
)
# pyrefly: ignore [bad-assignment]
self.ops_handler = StoreOutputSubstitution
input_node = self.named_input_nodes[input_name]
output_index = input_node.make_indexer()(index_symbols)
# in def_kernel above we define the inputs with the storage offset adjusted
# creating the load in input_node.make_indexer() will also adjust by storage offset
# so subtract here to not double increment
if not V.graph.sizevars.statically_known_equals(
input_node.layout.offset, 0
):
output_index = output_index - self.rename_indexing(
input_node.get_layout().offset
)
output_index = self.rename_indexing(output_index)
if output_index == contiguous_index:
output_index_str = "xindex"
else:
out_indexing = self.indexing(
output_index,
copy_shape=self.template_out_shape,
override_mask=self.template_mask,
)
from .codegen.triton import IndexingOptions
assert isinstance(out_indexing, IndexingOptions)
output_index_str = (
f"({out_indexing.index_str}).broadcast_to(xindex.shape)"
)
# Generate load code
load_code = f"{output_name} = tl.load({input_name} + ({output_index_str})"
if mask:
load_code += f", mask={mask}, other={other})"
else:
load_code += ")"
hook_key = f"<LOAD_INPUT_{input_name}>"
def hook():
with self.set_subgraph_body(hook_key):
self.cse.invalidate(OrderedSet())
self.codegen_body()
self.cse.invalidate(OrderedSet())
if input_node.get_name() not in self.prologue_fused_inputs:
assert load_code is not None
self.body.writeline(load_code)
return textwrap.indent(self.body.getvalue(), " " * indent_width).strip()
return self._register_hook(hook_key, hook)
def _generate_index_from_tma_index(
self,
output_name: str,
offset_name: str,
tma_index: sympy.Symbol,
block_size: str,
dim: int,
num_dims: int,
block_name: Optional[str] = None,
) -> list[str]:
"""
Generate the logic to compute the regular tl.load index from the provided
tma index. This is used to ensure variables can support fusions.
Args:
output_name (str): The output variable name.
offset_name (str): The name used for the intermediate offset.
tma_index (sympy.Symbol): The symbol used for the original TMA index.
block_size (str): The block size of the index.
dim (int): Which dimension to project the index in.
num_dims (int): The total number of dimensions in the output.
block_name (Optional[str]): The name of the block variable. If not passed
in then we aren't reusing standard symbol names.
Returns:
list[str]: The lines used to generate the index.
"""
if block_name:
# Generate the expected names for the structure:
# XBLOCK/YBLOCK and xoffset/yoffset. We append XBLOCK/YBLOCK
# to the top of the kernel so we can safely extract the tensor
# descriptor construction to the top of the kernel.
if block_name in self.prologue_cache:
assert self.prologue_cache[block_name] == block_size, (
f"Constant {block_name} must be used for all stores"
)
else:
self.prologue_cache[block_name] = block_size
self.prologue.writeline(f"{block_name}: tl.constexpr = {block_size}")
else:
block_name = block_size
line0 = f"{offset_name} = {texpr(tma_index)}"
expr = f"({offset_name} + tl.arange(0, {block_name}))"
prefix_none = "".join(["None, "] * dim)
suffix_none = ", ".join(["None"] * (num_dims - (dim + 1)))
line1 = f"{output_name} = {expr}[{prefix_none}:, {suffix_none}]"
return [line0, line1]
def _generated_mask_for_tma(
self,
index_name: str,
shape_val: str,
output_name: str,
) -> str:
"""
Generate the mask logic to feed to fusions for mask. The expectation
is that if we have X/Y there will be a variable named xmask and ymask.
Args:
index_name (str): The index used in the mask. Should be one of
xindex or yindex.
shape_val (str): The expression for the upper bound shape.
output_name (str): The expression used for the output.
Returns:
str: The mask generation line.
"""
return f"{output_name} = {index_name} < {shape_val}"
def store_output(
self,
indices: Union[list[Any], tuple[Any]],
val: str,
mask: Optional[str] = None,
indent_width: int = 4,
val_shape: Optional[tuple[str]] = None,
block_indexing: bool = False,
):
"""Stores the final output and appends any epilogue fusions if the buffer hasn't been optimized away.
Args:
indices (Union[List, Tuple]): The index for each dimension of the output. The dot product of
these indices and output strides must match `val`.
val (str): The value to store.
mask (Optional[str]): An optional mask to use for the store operation. If provided, this mask
will be applied to the store.
indent_width (int): The number of spaces to use for indentation. This is used when the call to
store_output is indented in the kernel definition.
block_indexing (bool): Are the input indices presented as offsets for creating the block (e.g.
inputs to TMA) or are they tensors that should be passed in directly.
"""
subgraph_name = self._get_store_output_subgraph_name(
next(self.store_output_ctr)
)
with self.create_subgraph_body(subgraph_name, clear_cse=True):
assert isinstance(indices, (list, tuple))
assert isinstance(val, str)
assert isinstance(mask, (str, type(None)))
assert isinstance(val_shape, (tuple, type(None)))
assert isinstance(block_indexing, bool)
assert self.template_mask is None
indices = list(map(OpOverrides.paren, indices))
index_symbols = [sympy.Symbol(x, integer=True) for x in indices]
lengths = [
V.graph.sizevars.simplify(s) for s in self.output_node.get_size()
]
assert len(indices) == len(lengths)
output_layout = self.output_node.get_layout()
self.template_out = val
if block_indexing:
assert val_shape, "Blocking indexing requires passing in val_shape"
assert len(val_shape) == 2, (
"Blocking indexing only supports 2D data at this time"
)
assert not mask, "Mask is not supported with blocking indexing"
intermediate_lines: list[str] = []
epilogue_index_symbols: list[sympy.Symbol] = []
if self.tma_store:
# Generate the expected indexing symbols.
# Note: TMA indices are expected to be in the
# format (x, y), but the range_tree is always
# (yindex, xindex).
index_order = [1, 0]
val_shape_copy = list(val_shape)
for i, range_tree in zip(index_order, self.range_trees[:-1]):
name = range_tree.name
symbol = range_tree.symbol()
epilogue_index_symbols.append(symbol)
lookup_output = range_tree.lookup(sympy.S.One, lengths[i])
old_name = lookup_output.symbol()
lookup_output.set_name(name)
# Update var_list and var_range
range_tree.var_list[range_tree.var_list.index(old_name)] = (
symbol
)
range_val = range_tree.var_ranges[old_name]
del range_tree.var_ranges[old_name]
range_tree.var_ranges[symbol] = range_val
intermediate_lines.extend(
self._generate_index_from_tma_index(
name,
"xoffset" if name == "xindex" else "yoffset",
index_symbols[i],
val_shape[i],
i,
len(index_order),
# pyrefly: ignore [missing-argument]
block_name=range_tree.symt.name,
)
)
# Generate the xmask and ymask
intermediate_lines.append(
self._generated_mask_for_tma(
name,
self.size(None, i),
"xmask" if name == "xindex" else "ymask",
)
)
# Update the val_shape information to use consistent naming
# after the remapping.
# pyrefly: ignore [missing-argument]
val_shape_copy[i] = range_tree.symt.name
# Reverse the index symbols because TMA is indexed
# as (x, y) whereas the variables will naturally be indexed
# as (y, x)
epilogue_index_symbols.reverse()
val_shape = tuple(val_shape_copy)
else:
mask_vars: list[str] = []
for i, (index, shape) in enumerate(zip(index_symbols, val_shape)):
index_name = self._gen_tmp_var()
offset_name = self._gen_tmp_var()
intermediate_lines.extend(
self._generate_index_from_tma_index(
index_name,
offset_name,
index,
shape,
i,
len(index_symbols),
)
)
epilogue_index_symbols.append(
sympy.Symbol(index_name, integer=True)
)
mask_name = self._gen_tmp_var()
intermediate_lines.append(
self._generated_mask_for_tma(
index_name,
self.size(None, i),
mask_name,
)
)
mask_vars.append(mask_name)
final_mask_var = self._gen_tmp_var()
final_mask_rhs = " & ".join(
f"{mask_name}" for mask_name in mask_vars
)
intermediate_lines.append(f"{final_mask_var} = {final_mask_rhs}")
self.template_mask = final_mask_var
index_symbols = epilogue_index_symbols
contiguous_index = sympy_dot(output_layout.stride, index_symbols)
if not self.tma_store:
# Convert to just use xindex.
contiguous_index = self.rename_indexing(contiguous_index)
intermediate_lines.append(f"xindex = {texpr(contiguous_index)}")
self.range_trees[0].lookup(
sympy.S.One, sympy_product(lengths)
).set_name("xindex")
index_symbols = epilogue_index_symbols
output_index = contiguous_index
# Write out the intermediate lines
for line in intermediate_lines:
self.body.writeline(line)
else:
assert not self.tma_store, "TMA store requires block indexing"
# glue to make generated code use same indexing from template
for name, range_tree_entry in zip(
indices, self.range_trees[0].construct_entries(lengths)
):
range_tree_entry.set_name(name)
contiguous_index = sympy_dot(
ir.FlexibleLayout.contiguous_strides(lengths), index_symbols
)
contiguous_index = self.rename_indexing(contiguous_index)
self.body.writeline("xindex = " + texpr(contiguous_index))
self.range_trees[0].lookup(
sympy.S.One, sympy_product(lengths)
).set_name("xindex")
self.template_mask = mask
self.template_indices = indices
output_index = self.output_node.get_layout().make_indexer()(
index_symbols
)
output_index = self.rename_indexing(output_index)
if output_index == contiguous_index:
output_index = sympy.Symbol("xindex", integer=True)
# pyrefly: ignore [bad-assignment]
self.template_out_shape = val_shape if val_shape else val
acc_dtype = (
triton_type_to_torch(self.meta["ACC_TYPE"])
if "ACC_TYPE" in self.meta
else torch.float32
)
epilogue_args = [
V.kernel.cse.namedvar(val, dtype=acc_dtype, shape=val_shape)
]
for input_node in itertools.chain(
self.input_nodes[: self.prefix_args],
self.input_nodes[len(self.input_nodes) - self.suffix_args :],
):
input_node.freeze_layout()
epilogue_arg = V.kernel.cse.generate(
self.compute,
input_node.make_loader()(index_symbols),
dtype=acc_dtype,
shape=input_node.get_size(),
)
epilogue_args.append(epilogue_arg)
# We update frozen_layouts_cnt in order to replay this function on a cache hit.
self.frozen_layouts_cnt += 1
V.ops.store(
self.output_node.get_name(),
output_index,
self.epilogue_fn(*epilogue_args),
mode="tma" if self.tma_store else None,
)
self.codegen_body()
def hook():
with self.set_subgraph_body(subgraph_name):
# more stuff might have been added since the codegen_body above
self.codegen_body()
self.cse.invalidate(OrderedSet())
return textwrap.indent(self.body.getvalue(), " " * indent_width).strip()
return self._register_hook(subgraph_name, hook)
def _register_hook(
self,
hook_name: str,
hook_fn: PartialRender.HookFn,
*,
allow_overwriting: bool = False,
) -> str:
"""
Register a hook function with a name.
``hook_name`` should match the string that will be replaced via
``hook_fn``, and should not already be in use for a hook.
If ``allow_overwriting`` is ``False``, will assert that there isn't
currently a registered hook of the same name before registering the new
one.
"""
if not allow_overwriting:
assert hook_name not in self.render_hooks, (
f"Tried to register the hook {hook_name} multiple times. If "
"desired, pass allow_overwriting=True to _register_hook"
)
self.render_hooks[hook_name] = hook_fn
return hook_name
def _register_extra_template_env_fns(self, *fns: Callable[..., Any]):
"""
Register some extra functions to expose when performing the initial
        template render, so that they're in scope to be used by jinja
expressions.
These can be used to, for example, implement extra replacement hooks,
if the given function:
* Returns the name of their hook, which should also be the string to
replace via the hook function. The convention is to use the format
<HOOK_NAME>.
* Assigns the corresponding entry in ``self.render_hooks`` to a hook
function.
"""
self.extra_template_env_fns.extend(fns)
def render(self, template, kwargs, record_input_dependent_tracked_event=False):
if record_input_dependent_tracked_event:
self.cached_replay_events = []
template_env = {
fn.__name__: (
self.record_input_dependent_tracked_event()(fn)
if record_input_dependent_tracked_event
else fn
)
for fn in [
self.def_kernel,
self.size,
self.stride,
self.store_output,
self.load_input,
self.make_load,
self.modification,
self.gen_argdefs,
self.gen_defines,
*self.extra_template_env_fns,
]
}
return PartialRender(
template.render(**template_env, **kwargs),
self.render_hooks,
)
def make_load(self, name, indices, mask):
"""
Optional helper called from template code to generate the code
        needed to load from a tensor.
"""
assert isinstance(indices, (list, tuple))
assert isinstance(name, str)
assert isinstance(mask, str)
stride = self.get_stride_and_maybe_freeze_layout(self.named_input_nodes[name])
indices = list(map(OpOverrides.paren, indices))
assert len(indices) == len(stride)
index = " + ".join(
f"{texpr(self.rename_indexing(s))} * {i}" for s, i in zip(stride, indices)
)
return f"tl.load({name} + ({index}), {mask}, other=0.0)"
def indexing(
self,
index: sympy.Expr,
*,
dense_indexing=False,
copy_shape=None,
override_mask=None,
block_ptr=False,
tma_compatibility_checker: Optional[TMACompatibilityChecker] = None,
):
"""
Override the default indexing to use our custom mask and force
dense indexing.
"""
return super().indexing(
index,
dense_indexing=False,
# We pass template_out as the shape to broadcast the indexing to as
# the mask might be broadcast to the output shape
copy_shape=self.template_out_shape,
override_mask=self.template_mask,
block_ptr=block_ptr,
tma_compatibility_checker=tma_compatibility_checker,
)
def codegen_range_tree(self):
pass # ignore default codegen
def additional_call_args_and_types(self):
if isinstance(self.grid_fn, SymbolicGridFn):
grid_args = self.grid_fn.sympy_call(*self.call_sizes, self.meta)
            assert len(grid_args) in (0, 3), "grid_fn should return 0 or 3 values"
return (grid_args, map(type, grid_args))
elif all(isinstance(x, (int, sympy.Integer)) for x in self.call_sizes):
grid_args = self.grid_fn(*map(int, self.call_sizes), self.meta)
            assert len(grid_args) in (0, 3), "grid_fn should return 0 or 3 values"
return (grid_args, map(type, grid_args))
return ((), ())
def call_kernel(
self, name: str, node: Optional[ir.IRNode] = None, deallocate_ws: bool = True
):
wrapper = V.graph.wrapper_code
_, call_args, _, arg_types = self.args.python_argdefs()
additional_call_args, additional_arg_types = (
self.additional_call_args_and_types()
)
if not additional_call_args:
assert not V.graph.cpp_wrapper, "cpp_wrapper requires SymbolicGridFn"
wrapper.add_import_once(f"import {self.grid_fn.__module__}")
meta = wrapper.add_meta_once(self.meta)
fn_name = f"{self.grid_fn.__module__}.{self.grid_fn.__name__}"
call_args.append(
f"*{fn_name}({', '.join(map(pexpr, self.call_sizes))}, {meta})"
)
arg_types.append(None)
call_args.extend(additional_call_args)
arg_types.extend(additional_arg_types)
if self.workspace_arg is not None:
wrapper.generate_workspace_allocation(self.workspace_arg)
wrapper.generate_kernel_call(
name,
call_args,
arg_types=arg_types,
triton_meta=self.triton_meta,
triton=True,
)
if self.workspace_arg is not None:
wrapper.generate_workspace_deallocation(self.workspace_arg)
def kernel_benchmark_extra_args(self) -> list[str]:
return [
str(x)
for x in self.grid_fn(
*V.graph.sizevars.size_hints(self.call_sizes), self.meta
)
]
def get_stride_and_maybe_freeze_layout(self, node) -> list[int]:
node.data.freeze_layout()
return node.get_stride()
@functools.cache
def _jinja2_env():
try:
import jinja2
return jinja2.Environment(
undefined=jinja2.StrictUndefined,
)
except ImportError:
return None
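The hooks registered above follow a deferred-substitution pattern: the template is rendered once with placeholder strings, and each hook fills in its placeholder only after all lazily-added args exist. A toy illustration of the idea (not Inductor's actual PartialRender):
# Toy stand-in for the placeholder/hook mechanism; illustrative only.
class ToyPartialRender:
    def __init__(self, code: str, hooks: dict):
        self.code, self.hooks = code, hooks

    def finalize(self) -> str:
        out = self.code
        for placeholder, hook in self.hooks.items():
            out = out.replace(placeholder, hook())  # hooks run last, lazily
        return out

hooks = {"<ARGDEFS>": lambda: "a_ptr, b_ptr, out_ptr"}
print(ToyPartialRender("def kernel(<ARGDEFS>): ...", hooks).finalize())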
|
TritonTemplateKernel
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/errors.py
|
{
"start": 20162,
"end": 20532
}
|
class ____(DagsterError):
def __init__(self, *args, **kwargs):
from dagster._utils.error import SerializableErrorInfo
self.load_error_infos = check.list_param(
kwargs.pop("load_error_infos"),
"load_error_infos",
SerializableErrorInfo,
)
super().__init__(*args, **kwargs)
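A hedged sketch of raising and inspecting this error; the SerializableErrorInfo constructor arguments shown are assumed from dagster._utils.error and may be simplified.
# Sketch: attaching structured per-location failures to the exception.
from dagster._core.errors import DagsterCodeLocationLoadError
from dagster._utils.error import SerializableErrorInfo

infos = [SerializableErrorInfo("import failed", stack=[], cls_name="ImportError")]
try:
    raise DagsterCodeLocationLoadError("1 location failed to load", load_error_infos=infos)
except DagsterCodeLocationLoadError as err:
    for info in err.load_error_infos:
        print(info.message)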
|
DagsterCodeLocationLoadError
|
python
|
django__django
|
tests/admin_inlines/models.py
|
{
"start": 6769,
"end": 6983
}
|
class ____(models.Model):
name = models.CharField(max_length=100, help_text="Help text for ReadOnlyInline")
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE)
# Models for #18433
|
ReadOnlyInline
|
python
|
readthedocs__readthedocs.org
|
readthedocs/api/v2/views/model_views.py
|
{
"start": 3519,
"end": 5452
}
|
class ____:
"""
    Helper to disable APIv2 listing endpoints.
    We are disabling the listing endpoints because they could cause a DoS
    when used without any filtering.
    This class disables these endpoints except:
    - version resource when passing ``?project__slug=``
    - build resource when using ``?commit=`` or ``?project__slug=``
    - project resource when passing ``?slug=``
    All other listings are disabled and return 410 GONE with an
    error message pointing the user to APIv3.
"""
def list(self, *args, **kwargs):
# Using private repos will list resources the user has access to.
if settings.ALLOW_PRIVATE_REPOS:
return super().list(*args, **kwargs)
disabled = True
# DRF strips whitespaces from query params, and if the final string is empty
# the filter is ignored. So we do the same to check if the filter is going to be used or not.
project_slug = self.request.GET.get("project__slug", "").strip()
commit = self.request.GET.get("commit", "").strip()
slug = self.request.GET.get("slug", "").strip()
# NOTE: keep list endpoint that specifies a resource
if any(
[
self.basename == "version" and project_slug,
self.basename == "build" and (commit or project_slug),
self.basename == "project" and slug,
]
):
disabled = False
if not disabled:
return super().list(*args, **kwargs)
return Response(
{
"error": "disabled",
"msg": (
"List endpoint have been disabled due to heavy resource usage. "
"Take into account than APIv2 is planned to be deprecated soon. "
"Please use APIv3: https://docs.readthedocs.io/page/api/v3.html"
),
},
status=status.HTTP_410_GONE,
)
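The mixin is meant to sit before a DRF viewset in the MRO so its list() runs first; a hedged sketch of that composition (the viewset itself is hypothetical).
# Hypothetical composition; queryset/serializer_class omitted for brevity.
from rest_framework import viewsets

class HypotheticalVersionViewSet(DisableListEndpoint, viewsets.ReadOnlyModelViewSet):
    # `basename` is normally supplied by the router registration; shown
    # inline only to make the guard's check above visible.
    basename = "version"  # listing stays enabled only with ?project__slug=<slug>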
|
DisableListEndpoint
|
python
|
instagram__MonkeyType
|
tests/test_util.py
|
{
"start": 2738,
"end": 3148
}
|
class ____:
@pytest.mark.parametrize(
'input_string, expected',
[
("foo", "Foo"),
("foo_bar", "FooBar"),
("fooBar", "FooBar"),
("FooBar", "FooBar"),
("_foo___bar_baz__", "FooBarBaz"),
],
)
def test_pascal_case(self, input_string: str, expected: str):
assert pascal_case(input_string) == expected
|
TestPascalCase
|
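The parametrized cases above fully pin down the expected behavior: split on underscores and camelCase boundaries, then capitalize each piece. A sketch that satisfies exactly those cases (an illustrative reimplementation, not MonkeyType's actual pascal_case):

import re

def pascal_case(name: str) -> str:
    # Split on runs of underscores, then on lower/upper camelCase boundaries,
    # and capitalize the first letter of every non-empty word.
    words = []
    for part in re.split(r"_+", name):
        words.extend(re.findall(r"[A-Za-z][a-z]*|[0-9]+", part))
    return "".join(w[0].upper() + w[1:] for w in words)

assert pascal_case("_foo___bar_baz__") == "FooBarBaz"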
python
|
pyparsing__pyparsing
|
pyparsing/core.py
|
{
"start": 150775,
"end": 151533
}
|
class ____(PositionToken):
"""Matches if current position is at the end of a line within the
parse string
"""
def __init__(self) -> None:
super().__init__()
self.whiteChars.discard("\n")
self.set_whitespace_chars(self.whiteChars, copy_defaults=False)
self.set_name("end of line")
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
if loc < len(instring):
if instring[loc] == "\n":
return loc + 1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
|
LineEnd
|
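A quick usage sketch of the class above, assuming it is pyparsing's LineEnd: it consumes a newline when one is present at the current position, and also matches at the very end of the input:

import pyparsing as pp

line = pp.Word(pp.alphas) + pp.LineEnd()

print(line.parse_string("hello\nworld"))  # ['hello', '\n'] - newline consumed
print(line.parse_string("hello"))         # ['hello'] - end of string matches too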
python
|
Pylons__pyramid
|
src/pyramid/i18n.py
|
{
"start": 545,
"end": 8435
}
|
class ____:
"""
An object providing translation and pluralizations related to
the current request's locale name. A
:class:`pyramid.i18n.Localizer` object is created using the
:func:`pyramid.i18n.get_localizer` function.
"""
def __init__(self, locale_name, translations):
self.locale_name = locale_name
self.translations = translations
self.pluralizer = None
self.translator = None
def translate(self, tstring, domain=None, mapping=None):
"""
Translate a :term:`translation string` to the current language
and interpolate any *replacement markers* in the result. The
``translate`` method accepts three arguments: ``tstring``
(required), ``domain`` (optional) and ``mapping`` (optional).
When called, it will translate the ``tstring`` translation
string using the current locale. If the current locale could not be
determined, the result of interpolation of the default value is
returned. The optional ``domain`` argument can be used to specify
or override the domain of the ``tstring`` (useful when ``tstring``
is a normal string rather than a translation string). The optional
``mapping`` argument can specify or override the ``tstring``
interpolation mapping, useful when the ``tstring`` argument is
a simple string instead of a translation string.
Example::
from pyramid.i18n import TranslationString
ts = TranslationString('Add ${item}', domain='mypackage',
mapping={'item':'Item'})
translated = localizer.translate(ts)
Example::
translated = localizer.translate('Add ${item}', domain='mypackage',
mapping={'item':'Item'})
"""
if self.translator is None:
self.translator = Translator(self.translations)
return self.translator(tstring, domain=domain, mapping=mapping)
def pluralize(self, singular, plural, n, domain=None, mapping=None):
"""
Return a string translation by using two
:term:`message identifier` objects as a singular/plural pair
and an ``n`` value representing the number that appears in the
message using gettext plural forms support. The ``singular``
and ``plural`` objects should be strings. There is no
reason to use translation string objects as arguments as all
metadata is ignored.
``n`` represents the number of elements. ``domain`` is the
translation domain to use to do the pluralization, and ``mapping``
is the interpolation mapping that should be used on the result. If
the ``domain`` is not supplied, a default domain is used (usually
``messages``).
Example::
num = 1
translated = localizer.pluralize('Add ${num} item',
'Add ${num} items',
num,
mapping={'num':num})
If using the gettext plural support, which is required for
languages that have pluralisation rules other than n != 1, the
``singular`` argument must be the message_id defined in the
translation file. The plural argument is not used in this case.
Example::
num = 1
translated = localizer.pluralize('item_plural',
'',
num,
mapping={'num':num})
"""
if self.pluralizer is None:
self.pluralizer = Pluralizer(self.translations)
return self.pluralizer(
singular, plural, n, domain=domain, mapping=mapping
)
def default_locale_negotiator(request):
"""The default :term:`locale negotiator`. Returns a locale name
or ``None``.
- First, the negotiator looks for the ``_LOCALE_`` attribute of
the request object (possibly set by a view or a listener for an
:term:`event`). If the attribute exists and it is not ``None``,
its value will be used.
- Then it looks for the ``request.params['_LOCALE_']`` value.
- Then it looks for the ``request.cookies['_LOCALE_']`` value.
- Finally, the negotiator returns ``None`` if the locale could not
be determined via any of the previous checks (when a locale
negotiator returns ``None``, it signifies that the
:term:`default locale name` should be used.)
"""
name = '_LOCALE_'
locale_name = getattr(request, name, None)
if locale_name is None:
locale_name = request.params.get(name)
if locale_name is None:
locale_name = request.cookies.get(name)
return locale_name
def negotiate_locale_name(request):
"""Negotiate and return the :term:`locale name` associated with
the current request."""
try:
registry = request.registry
except AttributeError:
registry = get_current_registry()
negotiator = registry.queryUtility(
ILocaleNegotiator, default=default_locale_negotiator
)
locale_name = negotiator(request)
if locale_name is None:
settings = registry.settings or {}
locale_name = settings.get('default_locale_name', 'en')
return locale_name
def get_locale_name(request):
"""
.. deprecated:: 1.5
Use :attr:`pyramid.request.Request.locale_name` directly instead.
Return the :term:`locale name` associated with the current request.
"""
return request.locale_name
def make_localizer(current_locale_name, translation_directories):
"""Create a :class:`pyramid.i18n.Localizer` object
corresponding to the provided locale name from the
translations found in the list of translation directories."""
translations = Translations()
translations._catalog = {}
locales_to_try = []
if '_' in current_locale_name:
locales_to_try = [current_locale_name.split('_')[0]]
locales_to_try.append(current_locale_name)
# intent: order locales left to right in least specific to most specific,
# e.g. ['de', 'de_DE']. This services the intent of creating a
# translations object that returns a "more specific" translation for a
# region, but will fall back to a "less specific" translation for the
# locale if necessary. Ordering from least specific to most specific
# allows us to call translations.add in the below loop to get this
# behavior.
for tdir in translation_directories:
locale_dirs = []
for lname in locales_to_try:
ldir = os.path.realpath(os.path.join(tdir, lname))
if os.path.isdir(ldir):
locale_dirs.append(ldir)
for locale_dir in locale_dirs:
messages_dir = os.path.join(locale_dir, 'LC_MESSAGES')
if not os.path.isdir(os.path.realpath(messages_dir)):
continue
for mofile in os.listdir(messages_dir):
mopath = os.path.realpath(os.path.join(messages_dir, mofile))
if mofile.endswith('.mo') and os.path.isfile(mopath):
with open(mopath, 'rb') as mofp:
domain = mofile[:-3]
dtrans = Translations(mofp, domain)
translations.add(dtrans)
return Localizer(
locale_name=current_locale_name, translations=translations
)
def get_localizer(request):
"""
.. deprecated:: 1.5
Use the :attr:`pyramid.request.Request.localizer` attribute directly
instead. Retrieve a :class:`pyramid.i18n.Localizer` object
corresponding to the current request's locale name.
"""
return request.localizer
|
Localizer
|
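A short usage sketch of the API above, assuming a translation directory laid out as <dir>/<locale>/LC_MESSAGES/<domain>.mo (the path below is a placeholder):

localizer = make_localizer('de_DE', ['/path/to/locale'])

greeting = localizer.translate('Add ${item}', domain='mypackage',
                               mapping={'item': 'Item'})
count_msg = localizer.pluralize('Add ${num} item', 'Add ${num} items', 2,
                                mapping={'num': 2})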
python
|
fastai__fastai
|
fastai/callback/fp16.py
|
{
"start": 6619,
"end": 6990
}
|
class ____(Callback):
"Use with NonNativeMixedPrecision callback (but it needs to run at the very beginning)"
order=-50
def before_fit(self): self.learn.model = convert_network(self.model, dtype=torch.float16)
def after_fit (self): self.learn.model = convert_network(self.model, dtype=torch.float32)
# %% ../../nbs/18_callback.fp16.ipynb 62
@docs
|
ModelToHalf
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/sparse_ops_test.py
|
{
"start": 1735,
"end": 11169
}
|
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
def testSparseEye(self):
def test_one(n, m, as_tensors):
expected = np.eye(n, m)
if as_tensors:
m = constant_op.constant(m)
n = constant_op.constant(n)
s = sparse_ops.sparse_eye(n, m)
d = sparse_ops.sparse_to_dense(s.indices, s.dense_shape, s.values)
self.assertAllEqual(self.evaluate(d), expected)
for n in range(2, 10, 2):
for m in range(2, 10, 2):
# Test with n and m as both constants and tensors.
test_one(n, m, True)
test_one(n, m, False)
def testDenseFromConstantToSparse(self):
expected_constant = np.reshape(np.arange(24, dtype=np.int64), (3, 4, 2))
tensor = constant_op.constant(expected_constant)
sparse = sparse_ops.from_dense(tensor)
dense = sparse_ops.sparse_to_dense(sparse.indices, sparse.dense_shape,
sparse.values)
constant = self.evaluate(dense)
self.assertAllEqual(expected_constant, constant)
def testTransposePreservesShape(self):
with ops.Graph().as_default():
t = sparse_tensor.SparseTensor(indices=[[0, 0]],
values=[0.],
dense_shape=[3, 4])
self.assertTrue(t.shape.is_fully_defined)
transposed = sparse_ops.sparse_transpose(t)
self.assertAllEqual(transposed.shape, [4, 3])
def testSparseExpandDims(self):
for rank in range(1, 4):
# Create a dummy input. When rank=3, shape=[2, 4, 6].
shape = np.arange(1, rank + 1) * 2
before = np.arange(np.prod(shape)).reshape(shape)
# Make entries sparse.
before *= np.random.binomial(1, .2, before.shape)
dense_shape = before.shape
indices = np.array(np.where(before)).T
values = before[before != 0]
# Try every possible valid value of axis.
for axis in range(-rank - 1, rank):
expected_after = np.expand_dims(before, axis)
for axis_as_tensor in [False, True]:
dense_shape_t = constant_op.constant(dense_shape, dtype=dtypes.int64)
indices_t = constant_op.constant(indices)
values_t = constant_op.constant(values)
before_t = sparse_tensor.SparseTensor(
indices=indices_t, values=values_t, dense_shape=dense_shape_t)
if axis_as_tensor:
axis = constant_op.constant(axis)
s = sparse_ops.sparse_expand_dims(before_t, axis)
d = sparse_ops.sparse_to_dense(s.indices, s.dense_shape, s.values)
self.assertAllEqual(self.evaluate(d), expected_after)
@parameterized.parameters([
(math_ops.abs, [1.0, -1.0, 3.0, -4.0], [1.0, 1.0, 3.0, 4.0]),
(math_ops.negative, [1.0, -1.0, 3.0, -4.0], [-1.0, 1.0, -3.0, 4.0]),
(math_ops.sign, [3.0, -2.0, 0.0, -4.0], [1.0, -1.0, 0.0, -1.0]),
(math_ops.square, [1.0, -1.0, 3.0, -4.0], [1.0, 1.0, 9.0, 16.0]),
(
math_ops.asinh,
[1.0, -1.0, 3.0, -4.0],
[0.8813736, -0.8813736, 1.8184465, -2.0947125],
),
(
math_ops.sin,
[1.0, -1.0, 3.0, -4.0],
[0.84147096, -0.84147096, 0.14112, 0.7568025],
),
(
math_ops.asin,
[1.0, -1.0, 0.4, -0.5],
[1.5707964, -1.5707964, 0.41151685, -0.5235988],
),
(
math_ops.tan,
[1.0, -1.0, 0.4, -0.5],
[1.5574077, -1.5574077, 0.42279324, -0.5463025],
),
(
math_ops.atan,
[0.4, -0.4, 1.0, 0.5],
[0.3805064, -0.3805064, 0.7853982, 0.4636476],
),
(
math_ops.atanh,
[0.4, -0.4, -0.5, 0.5],
[0.42364895, -0.42364895, -0.54930615, 0.54930615],
),
])
def testUnarySparseDispatch(self, op, values, expected):
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [2, 0], [2, 4]],
values=values,
dense_shape=[3, 6])
result = op(st)
result_value = self.evaluate(result)
self.assertAllEqual(result_value.indices, st.indices)
self.assertAllClose(result_value.values, expected)
self.assertAllEqual(result_value.dense_shape, st.dense_shape)
def testSparseToDenseGradient(self):
def f(sparse_values, default_value):
st = sparse_tensor.SparseTensor(
indices=[[0, 3, 6], [1, 4, 7], [2, 5, 8]],
values=sparse_values,
dense_shape=[3, 6, 9])
return sparse_ops.sparse_tensor_to_dense(st, default_value)
grads = gradient_checker.compute_gradient(
f, [constant_op.constant([1.0, 2.0, 3.0]),
constant_op.constant(0.0)])
epsilon = 1e-4
self.assertLess(gradient_checker.max_error(*grads), epsilon)
def testSparseTensorToDenseString(self):
sp = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=['a', 'b'], dense_shape=[2, 3])
dense = sparse_ops.sparse_tensor_to_dense(sp)
expected_dense = [[b'a', b'', b''], [b'', b'', b'b']]
result_dense = self.evaluate(dense)
self.assertAllEqual(expected_dense, result_dense)
def testDenseSparseTensorMatMul(self):
np.random.seed(42)
dense_numpy_array = np.random.rand(3, 3)
independent_dense_tf = constant_op.constant(
dense_numpy_array, dtype='float32')
sp = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[4., 8.], dense_shape=[3, 3])
dense_of_sparse = sparse_ops.sparse_to_dense(sp.indices, sp.shape,
sp.values)
result = sparse_ops.sparse_tensor_dense_matmul(
independent_dense_tf, sp, adjoint_a=False, adjoint_b=False)
expected = math_ops.matmul(independent_dense_tf, dense_of_sparse)
self.assertAllEqual(expected, result)
result = sparse_ops.sparse_tensor_dense_matmul(
independent_dense_tf, sp, adjoint_a=False, adjoint_b=True)
expected = math_ops.matmul(independent_dense_tf,
array_ops.transpose(dense_of_sparse))
self.assertAllEqual(expected, result)
result = sparse_ops.sparse_tensor_dense_matmul(
independent_dense_tf, sp, adjoint_a=True, adjoint_b=False)
expected = math_ops.matmul(
array_ops.transpose(independent_dense_tf), dense_of_sparse)
self.assertAllEqual(expected, result)
result = sparse_ops.sparse_tensor_dense_matmul(
independent_dense_tf, sp, adjoint_a=True, adjoint_b=True)
expected = math_ops.matmul(
array_ops.transpose(independent_dense_tf),
array_ops.transpose(dense_of_sparse))
self.assertAllEqual(expected, result)
def testMapValues(self):
# supplying no sparse tensor should result in ValueError
with self.assertRaises(ValueError):
sparse_ops.map_values(math_ops.abs, 0.0)
sp = sparse_ops.from_dense([[0.0, 1.0, 0.0], [-2.0, 1.0, 0.0]])
# helper function to check equality of sparse tensor
def assert_sparse_equal(expected, result):
self.assertAllEqual(expected.values, result.values, msg='Values differ')
self.assertAllEqual(
expected.indices, result.indices, msg='Indices differ')
self.assertAllEqual(
expected.dense_shape, result.dense_shape, msg='Shapes differ')
# check for a single sparse argument
expected = sparse_ops.from_dense([[0.0, 1.0, 0.0], [2.0, 1.0, 0.0]])
result = sparse_ops.map_values(math_ops.abs, sp)
assert_sparse_equal(expected, result)
# check correct passing of keyword argument, and handling of two sparse
# arguments at the same time
def mapping(arg1, arg2, kwarg):
self.assertEqual(kwarg, 'kwarg')
return arg1 + arg2
result = sparse_ops.map_values(mapping, sp, sp, kwarg='kwarg')
expected = sparse_ops.from_dense([[0.0, 2.0, 0.0], [-4.0, 2.0, 0.0]])
assert_sparse_equal(expected, result)
# check that index mismatches are correctly detected even if the `value`s
# have compatible shape
sp_incomp = sparse_ops.from_dense([[0.0, 1.0, 0.0], [-2.0, 0.0, 1.0]])
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
result = sparse_ops.map_values(mapping, sp, sp_incomp, kwarg='kwarg')
self.evaluate(result)
# check that shape mismatches are correctly detected
sp_incomp = sparse_tensor.SparseTensor(sp.indices, sp.values, (25, 25))
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
result = sparse_ops.map_values(mapping, sp, sp_incomp, kwarg='kwarg')
self.evaluate(result)
def testConstantStringToSparse(self):
# Test case for GitHub issue 40633.
tensor = constant_op.constant(list('ababa'))
sparse = sparse_ops.from_dense(tensor)
result = self.evaluate(sparse)
self.assertAllEqual([[0], [1], [2], [3], [4]], result.indices)
self.assertAllEqual([b'a', b'b', b'a', b'b', b'a'], result.values)
self.assertAllEqual([5], result.dense_shape)
def testSparseTensorToDenseQint(self):
x = np.asarray([1, 2])
y = np.asarray([[1, 0, 0], [0, 0, 2]])
for dtype in [dtypes.qint8, dtypes.qint16, dtypes.quint8, dtypes.quint16]:
sp = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]],
values=x.astype(dtype.as_numpy_dtype),
dense_shape=[2, 3])
v = self.evaluate(sparse_ops.sparse_tensor_to_dense(sp))
self.assertAllEqual(
y.astype(dtype.as_numpy_dtype), v.astype(dtype.as_numpy_dtype))
@test_util.run_all_in_graph_and_eager_modes
|
SparseOpsTest
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/operators/eks.py
|
{
"start": 38092,
"end": 42456
}
|
class ____(AwsBaseOperator[EksHook]):
"""
Deletes an Amazon EKS managed node group from an Amazon EKS Cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EksDeleteNodegroupOperator`
:param cluster_name: The name of the Amazon EKS Cluster associated with your nodegroup. (templated)
:param nodegroup_name: The name of the nodegroup to delete. (templated)
:param wait_for_completion: If True, waits for operator to complete. (default: False) (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check nodegroup state
:param waiter_max_attempts: The maximum number of attempts to check nodegroup state
:param deferrable: If True, the operator will wait asynchronously for the nodegroup to be deleted.
        This implies waiting for completion. This mode requires the aiobotocore module to be installed.
(default: False)
"""
aws_hook_class = EksHook
template_fields: Sequence[str] = aws_template_fields(
"cluster_name", "nodegroup_name", "wait_for_completion"
)
def __init__(
self,
cluster_name: str,
nodegroup_name: str,
region: str | None = None,
wait_for_completion: bool = False,
waiter_delay: int = 30,
waiter_max_attempts: int = 40,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
self.cluster_name = cluster_name
self.nodegroup_name = nodegroup_name
self.wait_for_completion = wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
if region is not None:
warnings.warn(
message="Parameter `region` is deprecated. Use the parameter `region_name` instead",
category=AirflowProviderDeprecationWarning,
stacklevel=2,
)
kwargs["region_name"] = region
super().__init__(**kwargs)
def execute(self, context: Context):
self.hook.delete_nodegroup(clusterName=self.cluster_name, nodegroupName=self.nodegroup_name)
if self.deferrable:
self.defer(
trigger=EksDeleteNodegroupTrigger(
cluster_name=self.cluster_name,
nodegroup_name=self.nodegroup_name,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay + 60),
)
elif self.wait_for_completion:
self.log.info("Waiting for nodegroup to delete. This will take some time.")
self.hook.conn.get_waiter("nodegroup_deleted").wait(
clusterName=self.cluster_name, nodegroupName=self.nodegroup_name
)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Error deleting nodegroup: {validated_event}")
|
EksDeleteNodegroupOperator
|
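A minimal DAG sketch using the operator above (cluster and nodegroup names are placeholders, and the schedule argument assumes Airflow 2.4+):

from datetime import datetime

from airflow import DAG
from airflow.providers.amazon.aws.operators.eks import EksDeleteNodegroupOperator

with DAG(dag_id="eks_cleanup", start_date=datetime(2024, 1, 1), schedule=None) as dag:
    delete_nodegroup = EksDeleteNodegroupOperator(
        task_id="delete_nodegroup",
        cluster_name="my-cluster",      # placeholder
        nodegroup_name="my-nodegroup",  # placeholder
        wait_for_completion=True,
    )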
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/errors.py
|
{
"start": 798,
"end": 937
}
|
class ____(PyCTError, NotImplementedError):
"""Raised for code patterns that AutoGraph does not support."""
|
UnsupportedLanguageElementError
|
python
|
pytorch__pytorch
|
torch/utils/_sympy/functions.py
|
{
"start": 47474,
"end": 47792
}
|
class ____(sympy.Function):
is_real = True
@classmethod
def eval(cls, number, ndigits):
# assert number.is_integer is not True, number
if isinstance(number, sympy.Number) and isinstance(ndigits, sympy.Integer):
return sympy.Float(round(float(number), int(ndigits)))
|
RoundDecimal
|
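The eval above only fires for fully numeric arguments, so symbolic expressions stay unevaluated until substitution; a short illustration using the class above:

import sympy

x = sympy.Symbol("x")

print(RoundDecimal(sympy.Float(3.14159), 2))  # folds immediately to Float(3.14)

expr = RoundDecimal(x, 1)               # stays unevaluated: x is not a Number
print(expr.subs(x, sympy.Float(2.75)))  # now folds, to Float(2.8)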
python
|
Textualize__textual
|
src/textual/_spatial_map.py
|
{
"start": 316,
"end": 3764
}
|
class ____(Generic[ValueType]):
"""A spatial map allows for data to be associated with rectangular regions
in Euclidean space, and efficiently queried.
When the SpatialMap is populated, a reference to each value is placed into one or
more buckets associated with a regular grid that covers 2D space.
The SpatialMap is able to quickly retrieve the values under a given "window" region
by combining the values in the grid squares under the visible area.
"""
def __init__(self, grid_width: int = 100, grid_height: int = 20) -> None:
"""Create a spatial map with the given grid size.
Args:
grid_width: Width of a grid square.
grid_height: Height of a grid square.
"""
self._grid_size = (grid_width, grid_height)
self.total_region = Region()
self._map: defaultdict[GridCoordinate, list[ValueType]] = defaultdict(list)
self._fixed: list[ValueType] = []
def _region_to_grid_coordinates(self, region: Region) -> Iterable[GridCoordinate]:
"""Get the grid squares under a region.
Args:
region: A region.
Returns:
Iterable of grid coordinates (tuple of 2 values).
"""
# (x1, y1) is the coordinate of the top left cell
# (x2, y2) is the coordinate of the bottom right cell
x1, y1, width, height = region
x2 = x1 + width - 1
y2 = y1 + height - 1
grid_width, grid_height = self._grid_size
return product(
range(x1 // grid_width, x2 // grid_width + 1),
range(y1 // grid_height, y2 // grid_height + 1),
)
def insert(
self, regions_and_values: Iterable[tuple[Region, Offset, bool, bool, ValueType]]
) -> None:
"""Insert values into the Spatial map.
Values are associated with their region in Euclidean space, and a boolean that
indicates fixed regions. Fixed regions don't scroll and are always visible.
Args:
regions_and_values: An iterable of (REGION, OFFSET, FIXED, OVERLAY, VALUE).
"""
append_fixed = self._fixed.append
get_grid_list = self._map.__getitem__
_region_to_grid = self._region_to_grid_coordinates
total_region = self.total_region
for region, offset, fixed, overlay, value in regions_and_values:
if fixed:
append_fixed(value)
else:
if not overlay:
total_region = total_region.union(region)
for grid in _region_to_grid(region + offset):
get_grid_list(grid).append(value)
self.total_region = total_region
def get_values_in_region(self, region: Region) -> list[ValueType]:
"""Get a superset of all the values that intersect with a given region.
Note that this may return false positives.
Args:
region: A region.
Returns:
Values under the region.
"""
results: list[ValueType] = self._fixed.copy()
add_results = results.extend
get_grid_values = self._map.get
for grid_coordinate in self._region_to_grid_coordinates(region):
grid_values = get_grid_values(grid_coordinate)
if grid_values is not None:
add_results(grid_values)
unique_values = list(dict.fromkeys(results))
return unique_values
|
SpatialMap
|
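A brief usage sketch of the class above, assuming Textual's Region and Offset geometry types:

from textual.geometry import Offset, Region

smap: SpatialMap[str] = SpatialMap(grid_width=100, grid_height=20)

# insert() expects (region, offset, fixed, overlay, value) tuples.
smap.insert([
    (Region(0, 0, 10, 5), Offset(0, 0), False, False, "widget-a"),
    (Region(500, 300, 10, 5), Offset(0, 0), False, False, "widget-b"),
    (Region(0, 0, 80, 1), Offset(0, 0), True, False, "header"),  # fixed: always returned
])

# Superset query: may contain false positives, never false negatives.
print(smap.get_values_in_region(Region(0, 0, 120, 25)))
# ['header', 'widget-a'] - "widget-b" lives in grid squares outside the window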
python
|
protocolbuffers__protobuf
|
upb/bazel/amalgamate.py
|
{
"start": 1835,
"end": 5021
}
|
class ____:
def __init__(self, h_out, c_out):
self.include_paths = ["."]
self.included = set()
self.output_h = open(h_out, "w")
self.output_c = open(c_out, "w")
self.h_out = h_out.split("/")[-1]
def amalgamate(self, h_files, c_files):
self.h_files = set(h_files)
self.output_c.write("/* Amalgamated source file */\n")
self.output_c.write('#include "%s"\n' % (self.h_out))
if self.h_out == "ruby-upb.h":
self.output_h.write("// Ruby is still using proto3 enum semantics for proto2\n")
self.output_h.write("#define UPB_DISABLE_CLOSED_ENUM_CHECKING\n")
self.output_h.write("/* Amalgamated source file */\n")
port_def = self._find_include_file("upb/port/def.inc")
port_undef = self._find_include_file("upb/port/undef.inc")
self._process_file(port_def, self.output_h)
self._process_file(port_def, self.output_c)
for file in c_files:
self._process_file(file, self.output_c)
self._process_file(port_undef, self.output_h)
self._process_file(port_undef, self.output_c)
def _process_file(self, infile_name, outfile):
lines = open(infile_name).readlines()
has_copyright = lines[0].startswith(
"// Protocol Buffers - Google's data interchange format"
)
if has_copyright:
while not lines[0].startswith(
"// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH"
" DAMAGE"
) and not lines[0].startswith(
"// https://developers.google.com/open-source/licenses/bsd"
):
lines.pop(0)
lines.pop(0)
for line in lines:
if not self._process_include(line):
outfile.write(line)
def _find_include_file(self, name):
for h_file in self.h_files:
if h_file.endswith(name):
return h_file
def _process_include(self, line):
include = parse_include(line)
if not include:
return False
if not (
include.startswith("upb")
or include.startswith("google")
):
return False
if include and (include.endswith("port/def.inc") or include.endswith("port/undef.inc")):
# Skip, we handle this separately
return True
if include.endswith("hpp"):
# Skip, we don't support the amalgamation from C++.
return True
if "decode_fast" in include:
# Skip, we don't support the fasttable parser in the amalgamation.
return True
if re.search(r"stage\d/", include):
return True
elif include in self.included:
return True
else:
# Include this upb header inline.
h_file = self._find_include_file(include)
if h_file:
self.h_files.remove(h_file)
self.included.add(include)
self._process_file(h_file, self.output_h)
return True
raise RuntimeError("Couldn't find include: " + include + ", h_files=" + repr(self.h_files))
# ---- main ----
c_out = sys.argv[1]
h_out = sys.argv[2]
amalgamator = Amalgamator(h_out, c_out)
c_files = []
h_files = []
for arg in sys.argv[3:]:
arg = arg.strip()
if arg.endswith(".h") or arg.endswith(".inc"):
h_files.append(arg)
else:
c_files.append(arg)
amalgamator.amalgamate(h_files, c_files)
|
Amalgamator
|
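The script above calls a parse_include helper that is outside this excerpt; a plausible reconstruction, labeled as an assumption, would extract the quoted path from an #include line:

import re

# Hypothetical reconstruction of the parse_include() helper used above:
# return the quoted include path, or None for any other line.
def parse_include(line):
    match = re.match(r'\s*#include\s+"([^"]+)"', line)
    return match.group(1) if match else None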
python
|
encode__django-rest-framework
|
tests/test_validators.py
|
{
"start": 35754,
"end": 35904
}
|
class ____(models.Model):
slug = models.CharField(max_length=100, unique_for_year='published')
published = models.DateField()
|
UniqueForYearModel
|
python
|
chroma-core__chroma
|
chromadb/utils/data_loaders.py
|
{
"start": 222,
"end": 1021
}
|
class ____(DataLoader[List[Optional[Image]]]):
def __init__(self, max_workers: int = multiprocessing.cpu_count()) -> None:
try:
self._PILImage = importlib.import_module("PIL.Image")
self._max_workers = max_workers
except ImportError:
raise ValueError(
"The PIL python package is not installed. Please install it with `pip install pillow`"
)
def _load_image(self, uri: Optional[URI]) -> Optional[Image]:
return np.array(self._PILImage.open(uri)) if uri is not None else None
def __call__(self, uris: Sequence[Optional[URI]]) -> List[Optional[Image]]:
with ThreadPoolExecutor(max_workers=self._max_workers) as executor:
return list(executor.map(self._load_image, uris))
|
ImageLoader
|
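A short usage sketch of the loader above; the file paths are placeholders:

loader = ImageLoader(max_workers=4)

# None entries pass through as None; real URIs come back as numpy arrays.
images = loader(["/tmp/cat.png", None, "/tmp/dog.png"])
for img in images:
    print(None if img is None else img.shape)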
python
|
django__django
|
tests/test_utils/tests.py
|
{
"start": 51860,
"end": 54090
}
|
class ____(SimpleTestCase):
def test_equal(self):
valid_tests = (
("http://example.com/?", "http://example.com/"),
("http://example.com/?x=1&", "http://example.com/?x=1"),
("http://example.com/?x=1&y=2", "http://example.com/?y=2&x=1"),
("http://example.com/?x=1&y=2", "http://example.com/?y=2&x=1"),
(
"http://example.com/?x=1&y=2&a=1&a=2",
"http://example.com/?a=1&a=2&y=2&x=1",
),
("/path/to/?x=1&y=2&z=3", "/path/to/?z=3&y=2&x=1"),
("?x=1&y=2&z=3", "?z=3&y=2&x=1"),
("/test_utils/no_template_used/", reverse_lazy("no_template_used")),
)
for url1, url2 in valid_tests:
with self.subTest(url=url1):
self.assertURLEqual(url1, url2)
def test_not_equal(self):
invalid_tests = (
# Protocol must be the same.
("http://example.com/", "https://example.com/"),
("http://example.com/?x=1&x=2", "https://example.com/?x=2&x=1"),
("http://example.com/?x=1&y=bar&x=2", "https://example.com/?y=bar&x=2&x=1"),
# Parameters of the same name must be in the same order.
("/path/to?a=1&a=2", "/path/to/?a=2&a=1"),
)
for url1, url2 in invalid_tests:
with self.subTest(url=url1), self.assertRaises(AssertionError):
self.assertURLEqual(url1, url2)
def test_message(self):
msg = (
"Expected 'http://example.com/?x=1&x=2' to equal "
"'https://example.com/?x=2&x=1'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertURLEqual(
"http://example.com/?x=1&x=2", "https://example.com/?x=2&x=1"
)
def test_msg_prefix(self):
msg = (
"Prefix: Expected 'http://example.com/?x=1&x=2' to equal "
"'https://example.com/?x=2&x=1'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertURLEqual(
"http://example.com/?x=1&x=2",
"https://example.com/?x=2&x=1",
msg_prefix="Prefix",
)
|
AssertURLEqualTests
|
python
|
huggingface__transformers
|
src/transformers/models/tvp/modeling_tvp.py
|
{
"start": 7107,
"end": 12778
}
|
class ____(nn.Module):
"""
Takes input of both image and video (multi-frame)
"""
def __init__(self, config):
super().__init__()
# sequence embedding
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.row_position_embeddings = nn.Embedding(config.max_grid_row_position_embeddings, config.hidden_size)
self.col_position_embeddings = nn.Embedding(config.max_grid_col_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(1, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.max_grid_row_position_embeddings = config.max_grid_row_position_embeddings
self.max_grid_col_position_embeddings = config.max_grid_col_position_embeddings
def interpolate_pos_encoding(self, embedding: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
        This method interpolates the pre-trained position embeddings, so the model can be used on collections of
        high-resolution images (high-resolution videos).
"""
h0 = w0 = 1
# if height dimension is to be interpolated
if height > self.max_grid_row_position_embeddings:
h0 = height / self.max_grid_row_position_embeddings
# if width dimension is to be interpolated
if width > self.max_grid_col_position_embeddings:
w0 = width / self.max_grid_col_position_embeddings
embedding = embedding.permute(0, 3, 1, 2) # (batch_size, hidden_dim, height, width)
embedding = nn.functional.interpolate(
embedding,
scale_factor=(h0, w0),
mode="bicubic",
align_corners=False,
)
embedding = embedding.permute(0, 2, 3, 1) # (batch_size, height, width, hidden_dim)
return embedding
def add_2d_positional_embeddings(self, grid, interpolate_pos_encoding: bool = False):
"""
Args:
grid: (batch_size, height, width, hidden_dim)
interpolate_pos_encoding: (`bool`, *optional*, defaults to `False`):
Whether to interpolate the pre-trained position encodings.
Returns:
grid + col_position_embeddings.view(*col_shape): (batch_size, *, height, width, hidden_dim)
"""
batch_size, height, width, hidden_dim = grid.shape
# add row-wise position embeddings
# (height, )
row_height = min(self.max_grid_row_position_embeddings, height)
row_position_ids = torch.arange(row_height, dtype=torch.long, device=grid.device)
# (height, hidden_dim)
row_position_embeddings = self.row_position_embeddings(row_position_ids)
row_shape = (1,) * (len(grid.shape) - 3) + (row_height, 1, hidden_dim)
# (batch_size, height, 1, hidden_dim)
row_position_embeddings = row_position_embeddings.view(*row_shape)
# add column-wise position embeddings
row_width = min(self.max_grid_col_position_embeddings, width)
col_position_ids = torch.arange(row_width, dtype=torch.long, device=grid.device)
# (width, hidden_dim)
col_position_embeddings = self.col_position_embeddings(col_position_ids)
col_shape = (batch_size, 1, row_width, hidden_dim)
# (batch_size, 1, width, hidden_dim)
col_position_embeddings = col_position_embeddings.view(*col_shape)
# (batch_size, height, width, hidden_dim)
positional_embeddings = row_position_embeddings + col_position_embeddings
# This interpolation gets triggered ONLY when the input image dim is larger in any dimension than the original position embeddings
if interpolate_pos_encoding and (
height > self.max_grid_row_position_embeddings or width > self.max_grid_col_position_embeddings
):
grid = grid + self.interpolate_pos_encoding(positional_embeddings, height, width)
else:
grid = grid + positional_embeddings
return grid
def forward(self, grid, interpolate_pos_encoding: bool = False):
"""
Args:
grid: Array of shape (batch_size, num_frames, height, width, num_channels).
            It contains processed frames extracted from videos and is generated by the Tvp image preprocessor. Note
            that num_frames can be 1.
interpolate_pos_encoding: (bool, *optional*, defaults to `False`):
Whether to interpolate the pre-trained position encodings.
Returns:
embeddings: The embedding of grid with size (batch_size, height*width, num_channels)
"""
batch_size, num_frames, height, width, num_channels = grid.shape
# temporal mean pooling, (batch_size, height, width, hidden_size)
grid = grid.mean(1)
grid = self.add_2d_positional_embeddings(grid, interpolate_pos_encoding=interpolate_pos_encoding)
# image token sequence, (batch_size, height*width, num_channels)
visual_tokens = grid.view(batch_size, -1, num_channels)
visual_tokens_shape = visual_tokens.shape[:-1]
device = visual_tokens.device
# image token type embeddings.
token_type_ids = torch.zeros(visual_tokens_shape, dtype=torch.long, device=device)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = visual_tokens + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
|
TvpVisualInputEmbedding
|
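The interpolation above only ever upsamples: each scale factor stays at 1.0 unless the incoming grid exceeds the pre-trained table in that dimension. A tiny numeric illustration of that computation (the table size of 100 is chosen for illustration):

max_rows = max_cols = 100  # stand-ins for max_grid_{row,col}_position_embeddings

for height, width in [(80, 120), (150, 90), (100, 100)]:
    h0 = height / max_rows if height > max_rows else 1.0
    w0 = width / max_cols if width > max_cols else 1.0
    print((height, width), "->", (h0, w0))
# (80, 120)  -> (1.0, 1.2)   only the width is interpolated
# (150, 90)  -> (1.5, 1.0)   only the height is interpolated
# (100, 100) -> (1.0, 1.0)   no interpolation needed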
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/strings_ops/string_to_hash_bucket_op_test.py
|
{
"start": 1028,
"end": 4128
}
|
class ____(test.TestCase):
@test_util.run_deprecated_v1
def testStringToOneHashBucketFast(self):
with self.cached_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket_fast(input_string, 1)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
self.assertAllEqual([0, 0, 0], result)
@test_util.run_deprecated_v1
def testStringToHashBucketsFast(self):
with self.cached_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket_fast(input_string, 10)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c', 'd']})
# Fingerprint64('a') -> 12917804110809363939 -> mod 10 -> 9
# Fingerprint64('b') -> 11795596070477164822 -> mod 10 -> 2
# Fingerprint64('c') -> 11430444447143000872 -> mod 10 -> 2
# Fingerprint64('d') -> 4470636696479570465 -> mod 10 -> 5
self.assertAllEqual([9, 2, 2, 5], result)
@test_util.run_deprecated_v1
def testStringToOneHashBucketLegacyHash(self):
with self.cached_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket(input_string, 1)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
self.assertAllEqual([0, 0, 0], result)
@test_util.run_deprecated_v1
def testStringToHashBucketsLegacyHash(self):
with self.cached_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket(input_string, 10)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
# Hash64('a') -> 2996632905371535868 -> mod 10 -> 8
# Hash64('b') -> 5795986006276551370 -> mod 10 -> 0
# Hash64('c') -> 14899841994519054197 -> mod 10 -> 7
self.assertAllEqual([8, 0, 7], result)
def testStringToOneHashBucketStrongOneHashBucket(self):
with self.cached_session():
input_string = constant_op.constant(['a', 'b', 'c'])
output = string_ops.string_to_hash_bucket_strong(
input_string, 1, key=[123, 345])
self.assertAllEqual([0, 0, 0], self.evaluate(output))
def testStringToHashBucketsStrong(self):
with self.cached_session():
input_string = constant_op.constant(['a', 'b', 'c'])
output = string_ops.string_to_hash_bucket_strong(
input_string, 10, key=[98765, 132])
# key = [98765, 132]
# StrongKeyedHash(key, 'a') -> 7157389809176466784 -> mod 10 -> 4
# StrongKeyedHash(key, 'b') -> 15805638358933211562 -> mod 10 -> 2
# StrongKeyedHash(key, 'c') -> 18100027895074076528 -> mod 10 -> 8
self.assertAllEqual([4, 2, 8], self.evaluate(output))
def testStringToHashBucketsStrongInvalidKey(self):
with self.cached_session():
input_string = constant_op.constant(['a', 'b', 'c'])
with self.assertRaisesOpError('Key must have 2 elements'):
string_ops.string_to_hash_bucket_strong(
input_string, 10, key=[98765]).eval()
if __name__ == '__main__':
test.main()
|
StringToHashBucketOpTest
|
python
|
getsentry__sentry
|
src/sentry/rules/conditions/new_high_priority_issue.py
|
{
"start": 387,
"end": 1595
}
|
class ____(EventCondition):
id = "sentry.rules.conditions.high_priority_issue.NewHighPriorityIssueCondition"
label = "Sentry marks a new issue as high priority"
def is_new(self, state: EventState) -> bool:
if not self.rule or self.rule.environment_id is None:
return state.is_new
return state.is_new_group_environment
def passes(self, event: GroupEvent, state: EventState) -> bool:
is_new = self.is_new(state)
return is_new and event.group.priority == PriorityLevel.HIGH
def get_activity(
self, start: datetime, end: datetime, limit: int
) -> Sequence[ConditionActivity]:
first_seen = (
Group.objects.filter(
project=self.project,
first_seen__gte=start,
first_seen__lt=end,
priority=PriorityLevel.HIGH,
)
.order_by("-first_seen")[:limit]
.values_list("id", "first_seen")
)
return [
ConditionActivity(
group_id=g[0], type=ConditionActivityType.NEW_HIGH_PRIORITY_ISSUE, timestamp=g[1]
)
for g in first_seen
]
|
NewHighPriorityIssueCondition
|
python
|
numba__numba
|
numba/core/callwrapper.py
|
{
"start": 112,
"end": 2435
}
|
class ____(object):
"""
A utility class to handle argument unboxing and cleanup
"""
def __init__(self, context, builder, api, env_manager, endblk, nargs):
self.context = context
self.builder = builder
self.api = api
self.env_manager = env_manager
self.arg_count = 0 # how many function arguments have been processed
self.cleanups = []
self.nextblk = endblk
def add_arg(self, obj, ty):
"""
Unbox argument and emit code that handles any error during unboxing.
Args are cleaned up in reverse order of the parameter list, and
cleanup begins as soon as unboxing of any argument fails. E.g. failure
on arg2 will result in control flow going through:
arg2.err -> arg1.err -> arg0.err -> arg.end (returns)
"""
# Unbox argument
native = self.api.to_native_value(ty, obj)
# If an error occurred, go to the cleanup block for
# the previous argument
with cgutils.if_unlikely(self.builder, native.is_error):
self.builder.branch(self.nextblk)
# Define the cleanup function for the argument
def cleanup_arg():
# Native value reflection
self.api.reflect_native_value(ty, native.value, self.env_manager)
# Native value cleanup
if native.cleanup is not None:
native.cleanup()
# NRT cleanup
# (happens after the native value cleanup as the latter
# may need the native value)
if self.context.enable_nrt:
self.context.nrt.decref(self.builder, ty, native.value)
self.cleanups.append(cleanup_arg)
# Write the on-error cleanup block for this argument
cleanupblk = self.builder.append_basic_block(
"arg%d.err" % self.arg_count)
with self.builder.goto_block(cleanupblk):
cleanup_arg()
# Go to next cleanup block
self.builder.branch(self.nextblk)
self.nextblk = cleanupblk
self.arg_count += 1
return native.value
def emit_cleanup(self):
"""
Emit the cleanup code after returning from the wrapped function.
"""
for dtor in self.cleanups:
dtor()
|
_ArgManager
|
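The error-block chaining above is easiest to see in a plain-Python analogue: each argument pushes its own cleanup, and a failure on argument k unwinds arguments k-1 down to 0, mirroring the arg2.err -> arg1.err -> arg0.err flow in the docstring. This is a conceptual sketch, not the LLVM IR the class actually emits:

def unbox_all(args, unbox):
    # unbox(arg) returns (value, cleanup) or raises on failure.
    cleanups = []
    values = []
    try:
        for arg in args:
            value, cleanup = unbox(arg)
            values.append(value)
            cleanups.append(cleanup)
    except Exception:
        # Unwind already-unboxed arguments in reverse order.
        for dtor in reversed(cleanups):
            dtor()
        raise
    return values, cleanups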
python
|
joke2k__faker
|
faker/providers/person/en_NZ/__init__.py
|
{
"start": 105,
"end": 40961
}
|
class ____(PersonProvider):
formats = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}-{{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}-{{last_name}}",
)
# Names compiled from the following sources:
#
# https://www.dia.govt.nz/diawebsite.nsf/wpg_URL/Services-Births-Deaths-and-Marriages-Most-Popular-Male-and-Female-First-Names
first_names_male: Dict[str, float] = OrderedDict(
(
("Aaron", 9912.0),
("Adam", 7639),
("Adrian", 2420),
("Aidan", 1521),
("Aiden", 782),
("Alan", 5689),
("Alex", 2819),
("Alexander", 7783),
("Alistair", 429),
("Allan", 3148),
("Allen", 51),
("Andre", 127),
("Andrew", 25593),
("Angus", 1680),
("Anthony", 12549),
("Antony", 1594),
("Archer", 381),
("Archie", 774),
("Arlo", 584),
("Arthur", 630),
("Asher", 319),
("Ashley", 861),
("Ashton", 1236),
("Austin", 688),
("Bailey", 1304),
("Barry", 3624),
("Beau", 491),
("Beauden", 125),
("Ben", 2427),
("Benjamin", 15497),
("Bernard", 657),
("Bevan", 634),
("Blair", 2863),
("Blake", 3500),
("Bodhi", 70),
("Brad", 450),
("Bradley", 3910),
("Brandon", 1000),
("Braxton", 741),
("Brayden", 317),
("Brendan", 2010),
("Brendon", 3163),
("Brent", 5564),
("Brett", 4598),
("Brian", 6247),
("Brodie", 216),
("Brooklyn", 406),
("Bruce", 6079),
("Bryan", 1435),
("Caleb", 5374),
("Callum", 2364),
("Cameron", 7756),
("Campbell", 422),
("Carl", 3304),
("Carlos", 122),
("Carter", 1308),
("Charles", 3933),
("Charlie", 2367),
("Chase", 174),
("Christian", 1138),
("Christopher", 23459),
("Clayton", 59),
("Clinton", 1004),
("Cody", 2482),
("Cohen", 99),
("Cole", 648),
("Colin", 3980),
("Connor", 4632),
("Conor", 54),
("Cooper", 2113),
("Corey", 1656),
("Cory", 129),
("Craig", 12702),
("Cruz", 52),
("Damian", 1084),
("Damon", 211),
("Daniel", 23515),
("Darren", 3143),
("Darrin", 217),
("Darryl", 1517),
("Darryn", 260),
("Daryl", 421),
("David", 36792),
("Dean", 6096),
("Declan", 108),
("Denis", 66),
("Dennis", 1129),
("Derek", 1307),
("Desmond", 224),
("Dillon", 63),
("Dion", 1283),
("Dominic", 801),
("Donald", 2405),
("Douglas", 2587),
("Duncan", 471),
("Dwayne", 57),
("Dylan", 6564),
("Edward", 4864),
("Eli", 961),
("Elijah", 2137),
("Elliot", 54),
("Eric", 808),
("Ethan", 6578),
("Ezra", 309),
("Felix", 769),
("Finn", 2084),
("Fletcher", 447),
("Flynn", 1577),
("Francis", 420),
("Frank", 46),
("Fraser", 51),
("Frederick", 49),
("Gabriel", 739),
("Gareth", 2087),
("Garry", 1840),
("Gary", 5520),
("Gavin", 3197),
("Geoffrey", 4439),
("George", 7320),
("Gerald", 104),
("Gerard", 614),
("Glen", 2709),
("Glenn", 3983),
("Gordon", 1444),
("Graeme", 4705),
("Graham", 3746),
("Grant", 8355),
("Grayson", 259),
("Gregory", 7916),
("Hamish", 5758),
("Harley", 403),
("Harrison", 2800),
("Harry", 2454),
("Harvey", 192),
("Hayden", 5209),
("Henry", 3111),
("Hudson", 281),
("Hugh", 101),
("Hugo", 543),
("Hunter", 3044),
("Ian", 7592),
("Isaac", 4208),
("Isaiah", 349),
("Israel", 52),
("Ivan", 236),
("Jack", 9468),
("Jackson", 3088),
("Jacob", 8612),
("Jake", 2421),
("Jakob", 46),
("James", 27224),
("Jamie", 5064),
("Jared", 2840),
("Jarrod", 773),
("Jason", 14737),
("Jasper", 246),
("Jaxon", 623),
("Jayden", 4541),
("Jeffrey", 2826),
("Jeremy", 4775),
("Jesse", 3965),
("Joel", 2932),
("John", 26867),
("Jonathan", 7957),
("Jonathon", 349),
("Jordan", 6499),
("Joseph", 10061),
("Josh", 56),
("Joshua", 17109),
("Josiah", 52),
("Julian", 232),
("Justin", 3882),
("Kaleb", 492),
("Kane", 1236),
("Karl", 3822),
("Kayden", 123),
("Keanu", 54),
("Keegan", 351),
("Keith", 2175),
("Kelly", 58),
("Kelvin", 1262),
("Kenneth", 3240),
("Kerry", 2404),
("Kevin", 9358),
("Kieran", 1525),
("Kim", 125),
("Kingston", 692),
("Kurt", 515),
("Kyle", 2568),
("Lachlan", 2965),
("Lance", 2958),
("Lawrence", 226),
("Lee", 872),
("Leo", 1872),
("Leon", 967),
("Leonard", 102),
("Leslie", 1126),
("Levi", 2986),
("Lewis", 324),
("Liam", 8629),
("Lincoln", 857),
("Lindsay", 883),
("Lloyd", 46),
("Logan", 5063),
("Louis", 863),
("Luca", 1318),
("Lucas", 3329),
("Luka", 119),
("Lukas", 70),
("Luke", 8296),
("Malcolm", 2398),
("Marcus", 1129),
("Mark", 23154),
("Martin", 4260),
("Mason", 2613),
("Mathew", 3107),
("Matthew", 23181),
("Maurice", 385),
("Max", 3988),
("Maxwell", 172),
("Mervyn", 162),
("Micah", 52),
("Michael", 40099),
("Micheal", 49),
("Mitchell", 2730),
("Morgan", 58),
("Murray", 4843),
("Nate", 48),
("Nathan", 8920),
("Nathaniel", 329),
("Neil", 3392),
("Neville", 1268),
("Nicholas", 13132),
("Nigel", 4435),
("Nikau", 53),
("Nixon", 219),
("Noah", 3511),
("Noel", 778),
("Norman", 221),
("Oliver", 6515),
("Oscar", 1987),
("Owen", 484),
("Patrick", 6219),
("Paul", 22959),
("Peter", 23996),
("Philip", 7036),
("Phillip", 5977),
("Phoenix", 882),
("Quentin", 67),
("Quinn", 742),
("Raymond", 4404),
("Regan", 1182),
("Reuben", 1678),
("Rex", 561),
("Rhys", 967),
("Richard", 17664),
("Ricky", 806),
("Riley", 2771),
("Robert", 19791),
("Robin", 1431),
("Rodney", 1936),
("Roger", 2612),
("Roman", 429),
("Ronald", 1769),
("Rory", 220),
("Ross", 4823),
("Roy", 101),
("Russell", 2863),
("Ryan", 9965),
("Ryder", 727),
("Sam", 2347),
("Samuel", 15565),
("Scott", 9481),
("Sean", 5201),
("Sebastian", 1031),
("Seth", 780),
("Shane", 10213),
("Shannon", 1082),
("Shaun", 4397),
("Shayne", 296),
("Simon", 9846),
("Sione", 165),
("Spencer", 52),
("Stefan", 52),
("Stephen", 18603),
("Steven", 11007),
("Stewart", 499),
("Stuart", 4662),
("Taine", 204),
("Taylor", 1356),
("Terence", 1154),
("Terry", 860),
("Theo", 311),
("Theodore", 429),
("Thomas", 15382),
("Timothy", 10924),
("Toby", 1490),
("Todd", 1264),
("Tom", 47),
("Tony", 5670),
("Travis", 65),
("Trent", 524),
("Trevor", 3194),
("Tristan", 111),
("Troy", 2423),
("Tyler", 3765),
("Tyrone", 231),
("Tyson", 531),
("Vaughan", 322),
("Vincent", 907),
("Walter", 57),
("Warren", 3223),
("Warwick", 295),
("Wayne", 8542),
("William", 18322),
("Wyatt", 58),
("Xavier", 1879),
("Zac", 111),
("Zachary", 2569),
("Zane", 761),
("Zion", 217),
("Anaru", 735),
("Ari", 984),
("Ariki", 1178),
("Hemi", 1360),
("Hoani", 574),
("Ihaia", 476),
("Kahu", 700),
("Kahurangi", 939),
("Kauri", 1613),
("Manaaki", 574),
("Manaia", 1434),
("Manawa", 536),
("Matiu", 455),
("Mikaere", 1413),
("Nikau", 1942),
("Niko", 972),
("Nikora", 1766),
("Rawiri", 1553),
("Tai", 793),
("Tama", 1257),
("Tamati", 1766),
("Tane", 1698),
("Tangaroa", 605),
("Te Ariki", 1423),
("Te Koha", 537),
("Tiare", 476),
("Wiremu", 1923),
)
)
first_names_female: Dict[str, float] = OrderedDict(
(
("Aaliyah", 1042.0),
("Abbey", 40),
("Abby", 503),
("Abigail", 2017),
("Addison", 538),
("Adrienne", 625),
("Aimee", 2315),
("Alana", 1194),
("Aleisha", 102),
("Alexandra", 2689),
("Alexis", 789),
("Alice", 3252),
("Alicia", 683),
("Alison", 3444),
("Alyssa", 1032),
("Amaia", 45),
("Amanda", 7667),
("Amber", 3661),
("Amelia", 4060),
("Amy", 7061),
("Anahera", 140),
("Andrea", 5003),
("Angel", 695),
("Angela", 9634),
("Angelina", 43),
("Anika", 46),
("Anita", 1526),
("Ann", 1834),
("Anna", 9371),
("Annabelle", 457),
("Anne", 3879),
("Annette", 2348),
("April", 49),
("Arabella", 42),
("Aria", 1025),
("Ariana", 473),
("Aroha", 50),
("Ashlee", 464),
("Ashleigh", 3158),
("Ashley", 2477),
("Aurora", 251),
("Ava", 2487),
("Ayla", 612),
("Bailey", 150),
("Barbara", 3531),
("Belinda", 1254),
("Bella", 1238),
("Beverley", 996),
("Billie", 45),
("Brenda", 2451),
("Briana", 49),
("Brianna", 740),
("Bridget", 1611),
("Britney", 64),
("Brittany", 1239),
("Bronwyn", 2406),
("Brooke", 3634),
("Brooklyn", 782),
("Caitlin", 3370),
("Caitlyn", 454),
("Carla", 323),
("Carmen", 233),
("Carol", 3626),
("Caroline", 2530),
("Carolyn", 3212),
("Casey", 1097),
("Cassandra", 489),
("Catherine", 7765),
("Chantelle", 55),
("Charlie", 215),
("Charlotte", 7759),
("Chelsea", 1943),
("Cherie", 1064),
("Cheryl", 1781),
("Cheyenne", 345),
("Chloe", 4582),
("Christina", 2675),
("Christine", 10604),
("Cindy", 65),
("Claire", 3174),
("Clara", 41),
("Clare", 55),
("Claudia", 804),
("Colleen", 1367),
("Courtney", 2941),
("Crystal", 828),
("Daisy", 197),
("Danielle", 4151),
("Dawn", 62),
("Debbie", 1389),
("Deborah", 8819),
("Debra", 3094),
("Denise", 3577),
("Destiny", 190),
("Diana", 977),
("Diane", 3952),
("Dianne", 2314),
("Donna", 7054),
("Dorothy", 303),
("Eden", 1578),
("Eilish", 52),
("Elaine", 381),
("Eleanor", 155),
("Elise", 48),
("Elizabeth", 11869),
("Ella", 5301),
("Ellen", 124),
("Ellie", 443),
("Elsie", 97),
("Emilia", 145),
("Emily", 7766),
("Emma", 13245),
("Erin", 1624),
("Esther", 88),
("Eva", 1637),
("Evelyn", 634),
("Evie", 419),
("Faith", 735),
("Fiona", 6039),
("Florence", 291),
("Frances", 1212),
("Frankie", 195),
("Freya", 218),
("Gabriella", 94),
("Gabrielle", 808),
("Gail", 1253),
("Gaylene", 82),
("Gemma", 2120),
("Georgia", 5613),
("Georgina", 786),
("Gillian", 1388),
("Gina", 301),
("Glenda", 859),
("Glenys", 410),
("Gloria", 127),
("Grace", 6036),
("Haley", 173),
("Hannah", 9082),
("Harmony", 300),
("Harper", 1186),
("Harriet", 210),
("Hayley", 4951),
("Hazel", 814),
("Heather", 4351),
("Heidi", 353),
("Helen", 7775),
("Holly", 4402),
("Hope", 142),
("Imogen", 293),
("Indi", 42),
("Indie", 494),
("Irene", 166),
("Isabel", 499),
("Isabella", 4257),
("Isabelle", 1182),
("Isla", 2246),
("Isobel", 85),
("Ivy", 577),
("Jacqueline", 5559),
("Jade", 3234),
("Jaime", 61),
("Jamie", 1066),
("Jan", 1587),
("Jane", 4932),
("Janet", 2253),
("Janette", 69),
("Janice", 1881),
("Janine", 2641),
("Jasmine", 3786),
("Jean", 64),
("Jeanette", 900),
("Jemma", 200),
("Jenna", 1162),
("Jennifer", 9991),
("Jessica", 12989),
("Jessie", 1123),
("Jill", 455),
("Jillian", 1571),
("Joan", 199),
("Joanna", 2716),
("Joanne", 9329),
("Jocelyn", 557),
("Jodi", 56),
("Jodie", 359),
("Jolene", 313),
("Jordan", 797),
("Jorja", 456),
("Josephine", 570),
("Joy", 487),
("Judith", 4677),
("Julia", 2092),
("Julie", 8289),
("Justine", 1127),
("Kaitlin", 45),
("Kaitlyn", 358),
("Karen", 13524),
("Karla", 62),
("Karyn", 429),
("Kate", 5782),
("Katelyn", 294),
("Katherine", 3912),
("Kathleen", 2503),
("Kathryn", 5104),
("Katie", 3455),
("Katrina", 3184),
("Kay", 1205),
("Kaye", 227),
("Kayla", 2806),
("Keira", 759),
("Kellie", 66),
("Kelly", 6137),
("Kelsey", 718),
("Kerry", 1917),
("Khloe", 98),
("Kim", 5667),
("Kimberley", 1578),
("Kiri", 130),
("Kirsten", 1183),
("Kirsty", 2083),
("Kristy", 172),
("Krystal", 650),
("Kyla", 41),
("Kylie", 3692),
("Laura", 4669),
("Lauren", 3275),
("Layla", 536),
("Leah", 1894),
("Leanne", 3478),
("Leonie", 52),
("Lesley", 1453),
("Libby", 48),
("Lilly", 813),
("Lily", 3546),
("Linda", 6288),
("Lisa", 11891),
("Lois", 278),
("Lola", 343),
("Lorraine", 1675),
("Louise", 4580),
("Lucia", 235),
("Lucy", 4938),
("Luna", 53),
("Lydia", 335),
("Lynda", 1972),
("Lynette", 3666),
("Lynley", 228),
("Lynn", 53),
("Lynne", 1025),
("Lynnette", 120),
("MacKenzie", 67),
("Mackenzie", 1039),
("Maddison", 1846),
("Madeleine", 780),
("Madeline", 184),
("Madison", 3128),
("Maia", 1937),
("Manaia", 204),
("Maree", 2270),
("Margaret", 5517),
("Maria", 5541),
("Marian", 60),
("Marie", 2582),
("Marilyn", 546),
("Marion", 370),
("Mary", 5891),
("Matilda", 570),
("Maureen", 1099),
("Maya", 432),
("Megan", 5869),
("Melanie", 4476),
("Melissa", 6898),
("Mia", 2627),
("Michaela", 687),
("Michele", 1082),
("Michelle", 12961),
("Mikaela", 48),
("Mikayla", 1492),
("Mila", 1139),
("Millie", 711),
("Molly", 1590),
("Monica", 56),
("Monique", 1859),
("Morgan", 646),
("Mya", 352),
("Nadine", 126),
("Naomi", 421),
("Natalie", 4112),
("Natasha", 5533),
("Nevaeh", 673),
("Ngaire", 116),
("Niamh", 49),
("Nicola", 10395),
("Nicole", 6011),
("Nikita", 1263),
("Nikki", 57),
("Nina", 379),
("Olive", 525),
("Olivia", 8816),
("Paige", 3719),
("Pamela", 2677),
("Paris", 551),
("Patricia", 5007),
("Paula", 3667),
("Pauline", 2404),
("Payton", 44),
("Penelope", 1213),
("Peyton", 621),
("Philippa", 1359),
("Phoebe", 1380),
("Piper", 580),
("Pippa", 416),
("Poppy", 842),
("Quinn", 213),
("Rachael", 3210),
("Rachel", 9769),
("Rachelle", 64),
("Raewyn", 3039),
("Rebecca", 11608),
("Rebekah", 1255),
("Renee", 3387),
("Rhonda", 131),
("Riley", 676),
("Robyn", 5598),
("Rochelle", 2086),
("Rose", 1384),
("Rosemary", 1918),
("Ruby", 4332),
("Ruth", 1616),
("Sadie", 151),
("Sally", 2445),
("Samantha", 7549),
("Sandra", 7429),
("Sara", 1121),
("Sarah", 19901),
("Sasha", 44),
("Savannah", 443),
("Scarlett", 1045),
("Shakira", 52),
("Shania", 338),
("Shannon", 2446),
("Sharlene", 220),
("Sharon", 7243),
("Shelley", 2569),
("Sheree", 169),
("Sheryl", 1688),
("Shirley", 1673),
("Shona", 1210),
("Sienna", 1358),
("Sinead", 53),
("Skye", 97),
("Skyla", 105),
("Skylar", 41),
("Sofia", 630),
("Sonia", 246),
("Sonya", 632),
("Sophia", 2595),
("Sophie", 7868),
("Stacey", 3037),
("Stella", 1323),
("Stephanie", 5794),
("Summer", 1477),
("Susan", 12686),
("Suzanne", 4705),
("Tamara", 312),
("Tania", 6879),
("Tanya", 1595),
("Tara", 503),
("Tayla", 1823),
("Taylor", 1499),
("Tegan", 318),
("Teresa", 2294),
("Tessa", 1439),
("Thea", 279),
("Tiana", 388),
("Tina", 2124),
("Toni", 2572),
("Tori", 50),
("Tracey", 6914),
("Tracy", 3999),
("Trinity", 401),
("Tyla", 98),
("Valerie", 394),
("Vanessa", 3941),
("Vicki", 3171),
("Vicky", 198),
("Victoria", 4823),
("Violet", 506),
("Virginia", 54),
("Vivienne", 802),
("Wendy", 6832),
("Whitney", 50),
("Willow", 743),
("Yvonne", 1822),
("Zara", 1292),
("Zoe", 3973),
("Zoey", 165),
("Amaia", 667),
("Ana", 730),
("Anahera", 1760),
("Anika", 1432),
("Aria", 1960),
("Ariana", 1729),
("Aroha", 1796),
("Ataahua", 876),
("Awhina", 583),
("Hana", 536),
("Hinewai", 536),
("Huia", 528),
("Kahurangi", 730),
("Kaia", 1576),
("Kora", 878),
("Mahi", 556),
("Maia", 1960),
("Manaia", 912),
("Maraea", 703),
("Mareikura", 948),
("Mereana", 637),
("Miriama", 614),
("Nia", 667),
("Ria", 703),
("Terina", 528),
("Tia", 1695),
("Tiare", 671),
("Tui", 1251),
("Waimarie", 671),
("Wikitoria", 583),
)
)
first_names: Dict[str, float] = first_names_male.copy()
first_names.update(first_names_female)
# New Zealand surnames compiled (and cleaned up) from the following sources:
#
# NZ Cemetery plot data:
# https://catalogue.data.govt.nz/dataset?q=cemetery+plots
last_names = OrderedDict(
(
("Smith", 948.0),
("Anderson", 394),
("Jones", 386),
("Taylor", 364),
("Brown", 350),
("Williams", 337),
("Thompson", 295),
("Scott", 266),
("Harris", 253),
("Mitchell", 217),
("Thomas", 214),
("Campbell", 193),
("Jackson", 191),
("Stewart", 188),
("Martin", 186),
("Turner", 174),
("Moore", 173),
("Simpson", 171),
("Hart", 166),
("Bell", 163),
("Evans", 161),
("Hansen", 160),
("Gray", 156),
("Henderson", 155),
("Edwards", 153),
("McDonald", 152),
("Davis", 150),
("Ward", 150),
("Cameron", 149),
("Wood", 149),
("MacDonald", 148),
("Reid", 140),
("Cook", 138),
("Bailey", 137),
("Adams", 136),
("Mason", 136),
("Baker", 135),
("Green", 134),
("Jensen", 134),
("Parker", 132),
("Neal", 131),
("Russell", 131),
("Carter", 128),
("Allen", 127),
("Roberts", 127),
("Knight", 126),
("Morgan", 126),
("Murphy", 126),
("Miller", 124),
("Morris", 124),
("McKay", 122),
("Morrison", 121),
("Wallace", 121),
("Stevens", 119),
("Johnston", 113),
("Jenkins", 111),
("Lewis", 110),
("Davies", 109),
("Oliver", 109),
("Ryan", 109),
("Marshall", 108),
("Webb", 108),
("Patchett", 107),
("Hughes", 106),
("Graham", 104),
("Wells", 104),
("Harrison", 103),
("Larsen", 103),
("Matthews", 103),
("Phillips", 102),
("Clarke", 100),
("Gibson", 99),
("Lucas", 99),
("Price", 97),
("O'Sullivan", 96),
("Barnes", 94),
("Gardiner", 92),
("Richards", 91),
("Boyce", 90),
("Duncan", 89),
("Fisher", 89),
("Gill", 89),
("O'Brien", 89),
("Gordon", 88),
("Olsen", 88),
("Powell", 86),
("Black", 85),
("Kennedy", 85),
("Dixon", 84),
("Jamieson", 84),
("O'Connor", 84),
("Sinclair", 84),
("Perry", 83),
("Williamson", 83),
("Day", 82),
("Pedersen", 81),
("Currie", 80),
("Grant", 80),
("Rush", 80),
("McEwen", 79),
("Wilton", 79),
("Kelly", 78),
("Nicholson", 77),
("Coleman", 76),
("Davidson", 76),
("Gardner", 76),
("Saunders", 76),
("Rogers", 75),
("Bryant", 74),
("Ferguson", 74),
("Ford", 73),
("Fowler", 73),
("McLean", 73),
("Holland", 72),
("Lloyd", 72),
("Page", 72),
("Francis", 71),
("Smart", 71),
("Weston", 71),
("Chapman", 70),
("Crawford", 70),
("Shaw", 70),
("Sullivan", 70),
("Webster", 70),
("Millar", 69),
("Burton", 68),
("Fuller", 68),
("Hamilton", 68),
("West", 68),
("Burns", 67),
("Cox", 67),
("Cresswell", 67),
("Holdaway", 67),
("Hodson", 66),
("Kerr", 66),
("Brooks", 64),
("Fletcher", 64),
("McCallum", 64),
("Allan", 63),
("Buchanan", 63),
("Carr", 63),
("Lee", 63),
("Pickering", 63),
("Pope", 63),
("Rowe", 63),
("Woolley", 63),
("McLeod", 62),
("Barnett", 61),
("Berry", 61),
("Lane", 61),
("Tapp", 61),
("Bartlett", 60),
("Elliott", 60),
("Pearson", 60),
("Wilkinson", 60),
("Atkinson", 59),
("Butler", 59),
("Douglas", 59),
("Pratt", 59),
("Cole", 58),
("Hayward", 58),
("Little", 58),
("Newman", 58),
("Simmons", 58),
("Barrett", 57),
("Cooksley", 57),
("Freeman", 57),
("Higgins", 57),
("Hope", 57),
("McGregor", 57),
("McMillan", 57),
("Rose", 57),
("Sutton", 57),
("Wong", 57),
("Harper", 56),
("Osborne", 56),
("Stevenson", 56),
("Bird", 55),
("Boyd", 55),
("Dick", 55),
("Field", 55),
("Greer", 55),
("Greig", 55),
("Nielsen", 55),
("Reynolds", 55),
("Forrest", 54),
("Bradley", 53),
("Gibbons", 53),
("Howard", 53),
("MacKenzie", 53),
("Nelson", 53),
("Todd", 53),
("Waters", 53),
("Ball", 52),
("Davey", 52),
("Holmes", 52),
("Rodgers", 52),
("Stratford", 52),
("Griffiths", 51),
("Small", 51),
("Watt", 51),
("Andrew", 50),
("Bishop", 50),
("Dunn", 50),
("Goodwin", 50),
("Gore", 50),
("Healy", 50),
("May", 50),
("Munro", 50),
("Parsons", 50),
("Poole", 50),
("Watts", 50),
("Hills", 49),
("Peters", 49),
("Vercoe", 49),
("Armstrong", 48),
("Bright", 48),
("Burgess", 48),
("Collis", 48),
("O'Neill", 48),
("Spencer", 48),
("Ritchie", 47),
("Alexander", 46),
("Curtis", 46),
("Freeth", 46),
("Nicol", 46),
("Robson", 46),
("Satherley", 46),
("Stuart", 46),
("Waugh", 46),
("Woods", 46),
("Coley", 45),
("Fitzgerald", 45),
("Fleming", 45),
("Herd", 45),
("Morton", 45),
("Beattie", 44),
("Clifford", 44),
("Costello", 44),
("Dawson", 44),
("Donaldson", 44),
("Fox", 44),
("Hay", 44),
("Jellyman", 44),
("Joe", 44),
("Johansen", 44),
("Knowles", 44),
("Lawson", 44),
("O'Donnell", 44),
("Patterson", 44),
("Payne", 44),
("Read", 44),
("Casey", 43),
("Chandler", 43),
("Donald", 43),
("Gilchrist", 43),
("Hyde", 43),
("McIntosh", 43),
("Paton", 43),
("Robb", 43),
("Rutherford", 43),
("Pike", 42),
("Dillon", 41),
("Drummond", 41),
("Hickey", 41),
("Hooper", 41),
("Jordan", 41),
("Judd", 41),
("Kenny", 41),
("Low", 41),
("Marfell", 41),
("Newton", 41),
("O'Leary", 41),
("Tucker", 41),
("Carson", 40),
("Dean", 40),
("Dickson", 40),
("George", 40),
("Ham", 40),
("McCarthy", 40),
("McIntyre", 40),
("Moran", 40),
("O'Connell", 40),
("Parkes", 40),
("Short", 40),
("Barr", 39),
("Baxter", 39),
("Dalton", 39),
("Forbes", 39),
("Hawkins", 39),
("Ireland", 39),
("Miles", 39),
("Nash", 39),
("Owen", 39),
("Perano", 39),
("Sowman", 39),
("Whyte", 39),
("Bush", 38),
("Drake", 38),
("Eden", 38),
("Giles", 38),
("Hoare", 38),
("Hubbard", 38),
("Hudson", 38),
("MacKay", 38),
("McKinnon", 38),
("Mears", 38),
("Prentice", 38),
("Schwass", 38),
("Simonsen", 38),
("Walton", 38),
("Wheeler", 38),
("Wratt", 38),
("Avery", 37),
("Barker", 37),
("Blake", 37),
("Conway", 37),
("Holloway", 37),
("Horton", 37),
("Manning", 37),
("Nolan", 37),
("Pritchard", 37),
("Bishell", 36),
("Blair", 36),
("Christiansen", 36),
("Fulton", 36),
("Gibbs", 36),
("Griffin", 36),
("Hook", 36),
("McGill", 36),
("Mercer", 36),
("Middleton", 36),
("Rayner", 36),
("Stone", 36),
("Terry", 36),
("Walsh", 36),
("Craig", 35),
("Craven", 35),
("Ellery", 35),
("Findlay", 35),
("Maxwell", 35),
("North", 35),
("Reardon", 35),
("Tait", 35),
("Baldwin", 34),
("Butcher", 34),
("Caldwell", 34),
("Doyle", 34),
("Eaton", 34),
("Flood", 34),
("Gifford", 34),
("Guy", 34),
("Jennings", 34),
("Leslie", 34),
("McMahon", 34),
("McNabb", 34),
("Paterson", 34),
("Porter", 34),
("Reeves", 34),
("Seymour", 34),
("Trask", 34),
("Warren", 34),
("Watkins", 34),
("Wills", 34),
("Best", 33),
("Bull", 33),
("Dawick", 33),
("Dobson", 33),
("Gledhill", 33),
("Hardy", 33),
("Hayes", 33),
("Kendall", 33),
("McCormick", 33),
("McPherson", 33),
("Pollard", 33),
("Rasmussen", 33),
("Shailer", 33),
("Shepherd", 33),
("Sheridan", 33),
("Simmonds", 33),
("Steele", 33),
("Booth", 32),
("Edmonds", 32),
("Gunn", 32),
("Hood", 32),
("Humphrey", 32),
("Hutchinson", 32),
("Laurenson", 32),
("Long", 32),
("Lowe", 32),
("Manson", 32),
("McGrath", 32),
("McKenna", 32),
("Muir", 32),
("O'Keefe", 32),
("Potter", 32),
("Searle", 32),
("Stubbs", 32),
("Wall", 32),
("Wallis", 32),
("Browne", 31),
("Carroll", 31),
("Cunningham", 31),
("Foley", 31),
("Franklin", 31),
("Furness", 31),
("Gilbert", 31),
("Hopkins", 31),
("Jefferies", 31),
("Johnstone", 31),
("Linton", 31),
("Mann", 31),
("Norton", 31),
("Rees", 31),
("Rowlands", 31),
("Sanders", 31),
("Bond", 30),
("Chambers", 30),
("Cragg", 30),
("Davison", 30),
("Gee", 30),
("Gleeson", 30),
("Gullery", 30),
("Hadfield", 30),
("Haines", 30),
("Hepburn", 30),
("Howell", 30),
("Jeffries", 30),
("Lamb", 30),
("Law", 30),
("MacPherson", 30),
("McIsaac", 30),
("Millard", 30),
("Paul", 30),
("Pearce", 30),
("Prouse", 30),
("Ramsay", 30),
("Rowland", 30),
("Spelman", 30),
("Waghorn", 30),
("Willis", 30),
("Zimmerman", 30),
("Aitken", 29),
("Booker", 29),
("Bruce", 29),
("Burrell", 29),
("Burt", 29),
("Funnell", 29),
("Gilmore", 29),
("Guthrie", 29),
("Hewitt", 29),
("Hogg", 29),
("Lammas", 29),
("Lang", 29),
("Lyons", 29),
("McDowall", 29),
("Neilson", 29),
("Norman", 29),
("Reed", 29),
("Rickard", 29),
("Stokes", 29),
("Stratton", 29),
("Strawbridge", 29),
("York", 29),
("Alve", 28),
("Baldick", 28),
("Banks", 28),
("Beard", 28),
("Bowden", 28),
("Boyle", 28),
("Carpenter", 28),
("Connolly", 28),
("Cooke", 28),
("Craw", 28),
("Cumming", 28),
("Drew", 28),
("Fairhall", 28),
("Gillespie", 28),
("Gillies", 28),
("Healey", 28),
("Horn", 28),
("Ingram", 28),
("Knox", 28),
("Lancaster", 28),
("Landon-Lane", 28),
("Marsh", 28),
("Mortimer", 28),
("Riley", 28),
("Sixtus", 28),
("Turnbull", 28),
("Warner", 28),
("Aldridge", 27),
("Allerby", 27),
("Arnold", 27),
("Blackwell", 27),
("Blick", 27),
("Boon", 27),
("Bowater", 27),
("Broughan", 27),
("Davenport", 27),
("Foote", 27),
("Forsyth", 27),
("Laing", 27),
("Mayo", 27),
("McFarlane", 27),
("McMurray", 27),
("Monk", 27),
("Orr", 27),
("Procter", 27),
("Shannon", 27),
("Southee", 27),
("Stace", 27),
("Waller", 27),
("Webby", 27),
("Arnott", 26),
("Baird", 26),
("Bary", 26),
("Bassett", 26),
("Buckley", 26),
("Burke", 26),
("Claridge", 26),
("Clunies-Ross", 26),
("Croad", 26),
("Dyer", 26),
("Ewart", 26),
("Faulkner", 26),
("Fenton", 26),
("Gibb", 26),
("Huddleston", 26),
("Jarvis", 26),
("Kay", 26),
("Kemp", 26),
("McLachlan", 26),
("Middlemiss", 26),
("Moody", 26),
("Mudgway", 26),
("Nicholas", 26),
("Reader", 26),
("Robert", 26),
("Steer", 26),
("Thornton", 26),
("Toms", 26),
("Twidle", 26),
("Vincent", 26),
("Way", 26),
("Whittaker", 26),
("Batchelar", 25),
("Boniface", 25),
("Botham", 25),
("Buick", 25),
("Burnett", 25),
("Ching", 25),
("Christie", 25),
("Corlett", 25),
("Coutts", 25),
("Eglinton", 25),
("Enright", 25),
("Foot", 25),
("Frost", 25),
("Gaskin", 25),
("Hanson", 25),
("Hardie", 25),
("Henry", 25),
("Hoskins", 25),
("Lambert", 25),
("Learmonth", 25),
("Logan", 25),
("Matheson", 25),
("McManaway", 25),
("Meads", 25),
("Meredith", 25),
("Montgomery", 25),
("Murdoch", 25),
("Orchard", 25),
("Perrin", 25),
("Peterson", 25),
("Priest", 25),
("Rossiter", 25),
("Shand", 25),
("Skinner", 25),
("Soper", 25),
("Street", 25),
("Tanner", 25),
("Aberhart", 24),
("Berkahn", 24),
("Burr", 24),
("Cairns", 24),
("Corbett", 24),
("Dalziel", 24),
("Doherty", 24),
("Esson", 24),
("Farland", 24),
("Godfrey", 24),
("Guard", 24),
("Hume", 24),
("Irving", 24),
("Jacques", 24),
("Kirk", 24),
("Love", 24),
("Lyon", 24),
)
)
|
Provider
|
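The Provider record above ends with weighted name tables: the numeric values appear to be relative sampling weights, as in Faker-style providers, rather than literal counts. A minimal sketch (standard library only; the three-entry table is an illustrative subset, not the real data) of how such a table can drive weighted selection:

import random

# Illustrative subset of the weighted surname table above.
last_names = {"Smith": 948.0, "Anderson": 394, "Jones": 386}

def random_last_name() -> str:
    # random.choices accepts per-item weights directly, so the table
    # values can be used as-is.
    return random.choices(list(last_names), weights=list(last_names.values()))[0]

print(random_last_name())  # "Smith" roughly 55% of the time with these three weights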
python
|
encode__django-rest-framework
|
tests/test_response.py
|
{
"start": 7761,
"end": 8436
}
|
class ____(TestCase):
def test_should_allow_posting_json(self):
response = self.client.post('/json', data='{"test": 123}', content_type='application/json')
self.assertEqual(response.status_code, 200)
def test_should_not_allow_posting_xml(self):
response = self.client.post('/json', data='<test>123</test>', content_type='application/xml')
self.assertEqual(response.status_code, 415)
def test_should_not_allow_posting_a_form(self):
response = self.client.post('/json', data={'test': 123})
self.assertEqual(response.status_code, 415)
@override_settings(ROOT_URLCONF='tests.test_response')
|
UnsupportedMediaTypeTests
|
python
|
doocs__leetcode
|
solution/0400-0499/0438.Find All Anagrams in a String/Solution.py
|
{
"start": 0,
"end": 406
}
|
class ____:
def findAnagrams(self, s: str, p: str) -> List[int]:
m, n = len(s), len(p)
ans = []
if m < n:
return ans
cnt1 = Counter(p)
cnt2 = Counter(s[: n - 1])
for i in range(n - 1, m):
cnt2[s[i]] += 1
if cnt1 == cnt2:
ans.append(i - n + 1)
cnt2[s[i - n + 1]] -= 1
return ans
|
Solution
|
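The find-anagrams record above assumes `Counter` (collections) and `List` (typing) are imported, and its `cnt1 == cnt2` check relies on Counter equality treating zero counts as absent, which is only guaranteed since Python 3.10. A self-contained sketch of the same sliding-window idea that also works on older versions by dropping zeroed keys:

from collections import Counter

def find_anagrams(s: str, p: str) -> list[int]:
    m, n = len(s), len(p)
    if m < n:
        return []
    need = Counter(p)
    window = Counter(s[: n - 1])   # first n-1 chars; the loop adds one per step
    ans = []
    for i in range(n - 1, m):
        window[s[i]] += 1          # grow the window to length n
        if window == need:
            ans.append(i - n + 1)  # window start index
        left = s[i - n + 1]
        window[left] -= 1          # shrink from the left...
        if window[left] == 0:
            del window[left]       # ...and drop zero counts for pre-3.10 equality
    return ans

print(find_anagrams("cbaebabacd", "abc"))  # [0, 6]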
python
|
Pylons__pyramid
|
src/pyramid/authentication.py
|
{
"start": 40771,
"end": 42497
}
|
class ____(CallbackAuthenticationPolicy):
"""A :app:`Pyramid` authentication policy which gets its data from the
configured :term:`session`. For this authentication policy to work, you
will have to follow the instructions in the :ref:`sessions_chapter` to
configure a :term:`session factory`.
Constructor Arguments
``prefix``
A prefix used when storing the authentication parameters in the
session. Defaults to 'auth.'. Optional.
``callback``
Default: ``None``. A callback passed the userid and the
request, expected to return ``None`` if the userid doesn't
exist or a sequence of principal identifiers (possibly empty) if
the user does exist. If ``callback`` is ``None``, the userid
will be assumed to exist with no principals. Optional.
``debug``
Default: ``False``. If ``debug`` is ``True``, log messages to the
Pyramid debug logger about the results of various authentication
steps. The output from debugging is useful for reporting to maillist
or IRC channels when asking for support.
"""
def __init__(self, prefix='auth.', callback=None, debug=False):
self.callback = callback
self.debug = debug
self.helper = SessionAuthenticationHelper(prefix)
def remember(self, request, userid, **kw):
"""Store a userid in the session."""
return self.helper.remember(request, userid, **kw)
def forget(self, request):
"""Remove the stored userid from the session."""
return self.helper.forget(request)
def unauthenticated_userid(self, request):
return self.helper.authenticated_userid(request)
|
SessionAuthenticationPolicy
|
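As its docstring notes, SessionAuthenticationPolicy only works once a session factory is configured. A minimal Pyramid 1.x-style wiring sketch; the groupfinder callback and the cookie secret are placeholders:

from pyramid.authentication import SessionAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.config import Configurator
from pyramid.session import SignedCookieSessionFactory

def groupfinder(userid, request):
    # Hypothetical callback: return a sequence of principals for a known
    # userid, or None if the userid does not exist.
    return []

def make_app():
    config = Configurator()
    # The policy stores the userid in the session, so a session factory
    # must be set first.
    config.set_session_factory(SignedCookieSessionFactory("CHANGE-ME"))
    config.set_authentication_policy(SessionAuthenticationPolicy(callback=groupfinder))
    config.set_authorization_policy(ACLAuthorizationPolicy())
    return config.make_wsgi_app()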
python
|
chroma-core__chroma
|
chromadb/db/migrations.py
|
{
"start": 1008,
"end": 1390
}
|
class ____(Exception):
def __init__(self, dir: str, db_version: int, source_version: int):
super().__init__(
f"Inconsistent migration versions in {dir}:"
+ f"db version was {db_version}, source version was {source_version}."
+ " Has the migration sequence been modified since being applied to the DB?"
)
|
InconsistentVersionError
|
python
|
sphinx-doc__sphinx
|
tests/roots/test-add_enumerable_node/enumerable_node.py
|
{
"start": 242,
"end": 527
}
|
class ____(Directive):
required_arguments = 1
has_content = True
def run(self):
figure_node = my_figure()
figure_node += nodes.image(uri=self.arguments[0])
figure_node += nodes.caption(text=''.join(self.content))
return [figure_node]
|
MyFigure
|
python
|
huggingface__transformers
|
examples/modular-transformers/modeling_super.py
|
{
"start": 7742,
"end": 10820
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: SuperConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
|
SuperAttention
|
python
|
pypa__pip
|
tests/unit/test_options.py
|
{
"start": 8507,
"end": 14799
}
|
class ____(AddFakeCommandMixin):
@pytest.mark.parametrize("option", ["verbose", "quiet"])
@pytest.mark.parametrize("value", range(4))
def test_cli_long(self, option: str, value: int) -> None:
flags = [f"--{option}"] * value
# FakeCommand intentionally returns the wrong type.
opt1, args1 = cast(tuple[Values, list[str]], main(flags + ["fake"]))
opt2, args2 = cast(tuple[Values, list[str]], main(["fake"] + flags))
assert getattr(opt1, option) == getattr(opt2, option) == value
@pytest.mark.parametrize("option", ["verbose", "quiet"])
@pytest.mark.parametrize("value", range(1, 4))
def test_cli_short(self, option: str, value: int) -> None:
flag = "-" + option[0] * value
# FakeCommand intentionally returns the wrong type.
opt1, args1 = cast(tuple[Values, list[str]], main([flag, "fake"]))
opt2, args2 = cast(tuple[Values, list[str]], main(["fake", flag]))
assert getattr(opt1, option) == getattr(opt2, option) == value
@pytest.mark.parametrize("option", ["verbose", "quiet"])
@pytest.mark.parametrize("value", range(4))
def test_env_var(
self, option: str, value: int, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setenv("PIP_" + option.upper(), str(value))
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake"]))
assert getattr(options, option) == value
@pytest.mark.parametrize("option", ["verbose", "quiet"])
@pytest.mark.parametrize("value", range(3))
def test_env_var_integrate_cli(
self, option: str, value: int, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setenv("PIP_" + option.upper(), str(value))
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake", "--" + option]))
assert getattr(options, option) == value + 1
@pytest.mark.parametrize("option", ["verbose", "quiet"])
@pytest.mark.parametrize("value", [-1, "foobar"])
def test_env_var_invalid(
self,
option: str,
value: Any,
monkeypatch: pytest.MonkeyPatch,
capsys: pytest.CaptureFixture[str],
) -> None:
monkeypatch.setenv("PIP_" + option.upper(), str(value))
with assert_option_error(capsys, expected="a non-negative integer"):
main(["fake"])
# Undocumented, support for backward compatibility
@pytest.mark.parametrize("option", ["verbose", "quiet"])
@pytest.mark.parametrize("value", ["no", "false"])
def test_env_var_false(
self, option: str, value: str, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setenv("PIP_" + option.upper(), str(value))
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake"]))
assert getattr(options, option) == 0
# Undocumented, support for backward compatibility
@pytest.mark.parametrize("option", ["verbose", "quiet"])
@pytest.mark.parametrize("value", ["yes", "true"])
def test_env_var_true(
self, option: str, value: str, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setenv("PIP_" + option.upper(), str(value))
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake"]))
assert getattr(options, option) == 1
@pytest.mark.parametrize("option", ["verbose", "quiet"])
@pytest.mark.parametrize("value", range(4))
def test_config_file(
self, option: str, value: int, monkeypatch: pytest.MonkeyPatch
) -> None:
with tmpconfig(option, value) as name:
monkeypatch.setenv("PIP_CONFIG_FILE", name)
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake"]))
assert getattr(options, option) == value
@pytest.mark.parametrize("option", ["verbose", "quiet"])
@pytest.mark.parametrize("value", range(3))
def test_config_file_integrate_cli(
self, option: str, value: int, monkeypatch: pytest.MonkeyPatch
) -> None:
with tmpconfig(option, value) as name:
monkeypatch.setenv("PIP_CONFIG_FILE", name)
# FakeCommand intentionally returns the wrong type.
options, args = cast(
tuple[Values, list[str]], main(["fake", "--" + option])
)
assert getattr(options, option) == value + 1
@pytest.mark.parametrize("option", ["verbose", "quiet"])
@pytest.mark.parametrize("value", [-1, "foobar"])
def test_config_file_invalid(
self,
option: str,
value: Any,
monkeypatch: pytest.MonkeyPatch,
capsys: pytest.CaptureFixture[str],
) -> None:
with tmpconfig(option, value) as name:
monkeypatch.setenv("PIP_CONFIG_FILE", name)
with assert_option_error(capsys, expected="non-negative integer"):
main(["fake"])
# Undocumented, support for backward compatibility
@pytest.mark.parametrize("option", ["verbose", "quiet"])
@pytest.mark.parametrize("value", ["no", "false"])
def test_config_file_false(
self, option: str, value: str, monkeypatch: pytest.MonkeyPatch
) -> None:
with tmpconfig(option, value) as name:
monkeypatch.setenv("PIP_CONFIG_FILE", name)
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake"]))
assert getattr(options, option) == 0
# Undocumented, support for backward compatibility
@pytest.mark.parametrize("option", ["verbose", "quiet"])
@pytest.mark.parametrize("value", ["yes", "true"])
def test_config_file_true(
self, option: str, value: str, monkeypatch: pytest.MonkeyPatch
) -> None:
with tmpconfig(option, value) as name:
monkeypatch.setenv("PIP_CONFIG_FILE", name)
# FakeCommand intentionally returns the wrong type.
options, args = cast(tuple[Values, list[str]], main(["fake"]))
assert getattr(options, option) == 1
|
TestCountOptions
|
python
|
kamyu104__LeetCode-Solutions
|
Python/word-ladder.py
|
{
"start": 1283,
"end": 2150
}
|
class ____(object):
def ladderLength(self, beginWord, endWord, wordList):
"""
:type beginWord: str
:type endWord: str
:type wordList: List[str]
:rtype: int
"""
lookup = set(wordList)
if endWord not in lookup:
return 0
ladder = 2
q = [beginWord]
while q:
new_q = []
for word in q:
for i in xrange(len(word)):
for j in ascii_lowercase:
new_word = word[:i] + j + word[i+1:]
if new_word == endWord:
return ladder
if new_word in lookup:
lookup.remove(new_word)
new_q.append(new_word)
q = new_q
ladder += 1
return 0
|
Solution2
|
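The Solution2 record above is Python 2 (`xrange`) and assumes `from string import ascii_lowercase`. A Python 3 port of the same level-by-level BFS, with the classic example as a check:

from string import ascii_lowercase

def ladder_length(begin: str, end: str, word_list: list[str]) -> int:
    lookup = set(word_list)
    if end not in lookup:
        return 0
    ladder, q = 2, [begin]
    while q:
        new_q = []
        for word in q:
            for i in range(len(word)):
                for ch in ascii_lowercase:
                    cand = word[:i] + ch + word[i + 1:]
                    if cand == end:
                        return ladder
                    if cand in lookup:
                        lookup.remove(cand)  # mark visited on first sight
                        new_q.append(cand)
        q = new_q
        ladder += 1
    return 0

# hit -> hot -> dot -> dog -> cog: 5 words on the ladder
print(ladder_length("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]))  # 5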
python
|
pyparsing__pyparsing
|
tests/test_unit.py
|
{
"start": 4512,
"end": 5234
}
|
class ____(TestCase):
def runTest(self):
print(
"Beginning test of pyparsing, version",
pp.__version__,
pp.__version_time__,
)
config_options = []
if PYTHON_JIT_ENABLED:
config_options.append("JIT enabled")
if PYTHON_FREE_THREADED:
config_options.append("free_threaded")
config_options_str = f" ({','.join(config_options)})"
print(
f"Python version {sys.version}"
f"{config_options_str if config_options else ''}"
)
print(f"__version_info__ : {pp.__version_info__}")
print(f"__version_info__ repr: {repr(pp.__version_info__)}")
|
Test01_PyparsingTestInit
|
python
|
kamyu104__LeetCode-Solutions
|
Python/intersection-of-two-arrays-ii.py
|
{
"start": 2512,
"end": 3266
}
|
class ____(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
nums1.sort(), nums2.sort()  # Ensure sorted input; sorting doesn't count toward the stated time.
res = []
it1, it2 = 0, 0
while it1 < len(nums1) and it2 < len(nums2):
if nums1[it1] < nums2[it2]:
it1 += 1
elif nums1[it1] > nums2[it2]:
it2 += 1
else:
res += nums1[it1],
it1 += 1
it2 += 1
return res
# If the given array is not sorted, and the memory is limited.
# Time: O(max(m, n) * log(max(m, n)))
# Space: O(1)
# Two pointers solution.
|
Solution
|
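A quick check of the two-pointer merge above, as a standalone function (sorting up front rather than in place):

def intersect(nums1: list[int], nums2: list[int]) -> list[int]:
    a, b = sorted(nums1), sorted(nums2)   # the two-pointer walk needs sorted input
    res, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            i += 1
        elif a[i] > b[j]:
            j += 1
        else:                             # equal: part of the intersection
            res.append(a[i])
            i += 1
            j += 1
    return res

print(intersect([4, 9, 5], [9, 4, 9, 8, 4]))  # [4, 9]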
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/coercions.py
|
{
"start": 30354,
"end": 30448
}
|
class ____(_Deannotate, _ReturnsStringKey, RoleImpl):
__slots__ = ()
|
DDLConstraintColumnImpl
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/dates.py
|
{
"start": 33172,
"end": 37519
}
|
class ____:
"""
A simple wrapper around a `dateutil.rrule` allowing flexible
date tick specifications.
"""
def __init__(self, freq, tzinfo=None, **kwargs):
"""
Parameters
----------
freq : {YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY}
Tick frequency. These constants are defined in `dateutil.rrule`,
but they are accessible from `matplotlib.dates` as well.
tzinfo : `datetime.tzinfo`, optional
Time zone information. The default is None.
**kwargs
Additional keyword arguments are passed to the `dateutil.rrule`.
"""
kwargs['freq'] = freq
self._base_tzinfo = tzinfo
self._update_rrule(**kwargs)
def set(self, **kwargs):
"""Set parameters for an existing wrapper."""
self._construct.update(kwargs)
self._update_rrule(**self._construct)
def _update_rrule(self, **kwargs):
tzinfo = self._base_tzinfo
# rrule does not play nicely with timezones - especially pytz time
# zones - so it's best to use naive datetimes and attach timezones
# once the results are returned
if 'dtstart' in kwargs:
dtstart = kwargs['dtstart']
if dtstart.tzinfo is not None:
if tzinfo is None:
tzinfo = dtstart.tzinfo
else:
dtstart = dtstart.astimezone(tzinfo)
kwargs['dtstart'] = dtstart.replace(tzinfo=None)
if 'until' in kwargs:
until = kwargs['until']
if until.tzinfo is not None:
if tzinfo is not None:
until = until.astimezone(tzinfo)
else:
raise ValueError('until cannot be aware if dtstart '
'is naive and tzinfo is None')
kwargs['until'] = until.replace(tzinfo=None)
self._construct = kwargs.copy()
self._tzinfo = tzinfo
self._rrule = rrule(**self._construct)
def _attach_tzinfo(self, dt, tzinfo):
# pytz zones are attached by "localizing" the datetime
if hasattr(tzinfo, 'localize'):
return tzinfo.localize(dt, is_dst=True)
return dt.replace(tzinfo=tzinfo)
def _aware_return_wrapper(self, f, returns_list=False):
"""Decorator function that allows rrule methods to handle tzinfo."""
# This is only necessary if we're actually attaching a tzinfo
if self._tzinfo is None:
return f
# All datetime arguments must be naive. If they are not naive, they are
# converted to the _tzinfo zone before dropping the zone.
def normalize_arg(arg):
if isinstance(arg, datetime.datetime) and arg.tzinfo is not None:
if arg.tzinfo is not self._tzinfo:
arg = arg.astimezone(self._tzinfo)
return arg.replace(tzinfo=None)
return arg
def normalize_args(args, kwargs):
args = tuple(normalize_arg(arg) for arg in args)
kwargs = {kw: normalize_arg(arg) for kw, arg in kwargs.items()}
return args, kwargs
# There are two kinds of functions we care about - ones that return
# dates and ones that return lists of dates.
if not returns_list:
def inner_func(*args, **kwargs):
args, kwargs = normalize_args(args, kwargs)
dt = f(*args, **kwargs)
return self._attach_tzinfo(dt, self._tzinfo)
else:
def inner_func(*args, **kwargs):
args, kwargs = normalize_args(args, kwargs)
dts = f(*args, **kwargs)
return [self._attach_tzinfo(dt, self._tzinfo) for dt in dts]
return functools.wraps(f)(inner_func)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
f = getattr(self._rrule, name)
if name in {'after', 'before'}:
return self._aware_return_wrapper(f)
elif name in {'xafter', 'xbefore', 'between'}:
return self._aware_return_wrapper(f, returns_list=True)
else:
return f
def __setstate__(self, state):
self.__dict__.update(state)
|
rrulewrapper
|
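rrulewrapper is normally consumed through `matplotlib.dates.RRuleLocator` to position date ticks. A minimal sketch; the specific rule and plotted data are arbitrary:

import datetime

import matplotlib.pyplot as plt
from matplotlib.dates import MONTHLY, DateFormatter, RRuleLocator, rrulewrapper

# Tick on the first day of every third month.
rule = rrulewrapper(MONTHLY, interval=3, bymonthday=1)

fig, ax = plt.subplots()
ax.plot([datetime.date(2024, m, 15) for m in range(1, 13)], range(12))
ax.xaxis.set_major_locator(RRuleLocator(rule))
ax.xaxis.set_major_formatter(DateFormatter("%Y-%m-%d"))
fig.autofmt_xdate()  # rotate the tick labels so the dates stay readable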
python
|
neetcode-gh__leetcode
|
python/0143-reorder-list.py
|
{
"start": 0,
"end": 682
}
|
class ____:
def reorderList(self, head: ListNode) -> None:
# find middle
slow, fast = head, head.next
while fast and fast.next:
slow = slow.next
fast = fast.next.next
# reverse second half
second = slow.next
prev = slow.next = None
while second:
tmp = second.next
second.next = prev
prev = second
second = tmp
# merge two halves
first, second = head, prev
while second:
tmp1, tmp2 = first.next, second.next
first.next = second
second.next = tmp1
first, second = tmp1, tmp2
|
Solution
|
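The reorder-list record assumes a `ListNode` type that is not shown. A self-contained sketch: a minimal node class, the same three phases (find middle, reverse second half, interleave), and a round-trip check:

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def build(values):
    head = cur = ListNode(values[0])
    for v in values[1:]:
        cur.next = ListNode(v)
        cur = cur.next
    return head

def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

def reorder(head: ListNode) -> None:
    slow, fast = head, head.next          # find the middle node
    while fast and fast.next:
        slow, fast = slow.next, fast.next.next
    second, prev = slow.next, None        # detach and reverse the second half
    slow.next = None
    while second:
        second.next, prev, second = prev, second, second.next
    first = head                          # interleave the two halves
    while prev:
        tmp1, tmp2 = first.next, prev.next
        first.next, prev.next = prev, tmp1
        first, prev = tmp1, tmp2

head = build([1, 2, 3, 4, 5])
reorder(head)
print(to_list(head))  # [1, 5, 2, 4, 3]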
python
|
astropy__astropy
|
astropy/extern/configobj/configobj.py
|
{
"start": 5919,
"end": 6018
}
|
class ____(ConfigObjError):
"""Base class for the two interpolation errors."""
|
InterpolationError
|
python
|
apache__airflow
|
airflow-core/src/airflow/dag_processing/dagbag.py
|
{
"start": 7174,
"end": 29485
}
|
class ____(LoggingMixin):
"""
A dagbag is a collection of dags, parsed out of a folder tree, with high-level configuration settings.
Some possible settings are the database to use as a backend and which executor
to use to fire off tasks. This makes it easier to run distinct environments
for, say, production and development, tests, or different teams or security
profiles. What would have been system-level settings are now dagbag-level, so
that one system can run multiple, independent sets of settings.
:param dag_folder: the folder to scan to find DAGs
:param include_examples: whether to include the examples that ship
with airflow or not
:param safe_mode: when ``False``, scans all python modules for dags.
When ``True`` uses heuristics (files containing ``DAG`` and ``airflow`` strings)
to filter python modules to scan for dags.
:param load_op_links: Should the extra operator link be loaded via plugins when
de-serializing the DAG? This flag is set to False in Scheduler so that Extra Operator links
are not loaded to not run User code in Scheduler.
:param collect_dags: when True, collects dags during class initialization.
:param known_pools: If not none, then generate warnings if a Task attempts to use an unknown pool.
"""
def __init__(
self,
dag_folder: str | Path | None = None, # todo AIP-66: rename this to path
include_examples: bool | ArgNotSet = NOTSET,
safe_mode: bool | ArgNotSet = NOTSET,
load_op_links: bool = True,
collect_dags: bool = True,
known_pools: set[str] | None = None,
bundle_path: Path | None = None,
bundle_name: str | None = None,
):
super().__init__()
self.bundle_path = bundle_path
self.bundle_name = bundle_name
dag_folder = dag_folder or settings.DAGS_FOLDER
self.dag_folder = dag_folder
self.dags: dict[str, DAG] = {}
# the file's last modified timestamp when we last read it
self.file_last_changed: dict[str, datetime] = {}
# Store import errors with relative file paths as keys (relative to bundle_path)
self.import_errors: dict[str, str] = {}
self.captured_warnings: dict[str, tuple[str, ...]] = {}
self.has_logged = False
# Only used by SchedulerJob to compare the dag_hash to identify change in DAGs
self.dags_hash: dict[str, str] = {}
self.known_pools = known_pools
self.dagbag_import_error_tracebacks = conf.getboolean("core", "dagbag_import_error_tracebacks")
self.dagbag_import_error_traceback_depth = conf.getint("core", "dagbag_import_error_traceback_depth")
if collect_dags:
self.collect_dags(
dag_folder=dag_folder,
include_examples=(
include_examples
if is_arg_set(include_examples)
else conf.getboolean("core", "LOAD_EXAMPLES")
),
safe_mode=(
safe_mode if is_arg_set(safe_mode) else conf.getboolean("core", "DAG_DISCOVERY_SAFE_MODE")
),
)
# Should the extra operator link be loaded via plugins?
# This flag is set to False in Scheduler so that Extra Operator links are not loaded
self.load_op_links = load_op_links
def size(self) -> int:
""":return: the amount of dags contained in this dagbag"""
return len(self.dags)
@property
def dag_ids(self) -> list[str]:
"""
Get DAG ids.
:return: a list of DAG IDs in this bag
"""
return list(self.dags)
@provide_session
def get_dag(self, dag_id, session: Session = NEW_SESSION):
"""
Get the DAG out of the dictionary, and refreshes it if expired.
:param dag_id: DAG ID
"""
# Avoid circular import
from airflow.models.dag import DagModel
dag = self.dags.get(dag_id)
# If DAG Model is absent, we can't check last_expired property. Is the DAG not yet synchronized?
if (orm_dag := DagModel.get_current(dag_id, session=session)) is None:
return dag
is_expired = (
orm_dag.last_expired and dag and dag.last_loaded and dag.last_loaded < orm_dag.last_expired
)
if is_expired:
# Remove associated dags so we can re-add them.
self.dags.pop(dag_id, None)
if dag is None or is_expired:
# Reprocess source file.
found_dags = self.process_file(
filepath=correct_maybe_zipped(orm_dag.fileloc), only_if_updated=False
)
# If the source file no longer exports `dag_id`, delete it from self.dags
if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
return self.dags[dag_id]
self.dags.pop(dag_id, None)
return self.dags.get(dag_id)
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
"""Given a path to a python module or zip file, import the module and look for dag objects within."""
from airflow.sdk.definitions._internal.contextmanager import DagContext
# if the source file no longer exists in the DB or in the filesystem,
# return an empty list
# todo: raise exception?
if filepath is None or not os.path.isfile(filepath):
return []
try:
# This failed before in what may have been a git sync
# race condition
file_last_changed_on_disk = datetime.fromtimestamp(os.path.getmtime(filepath))
if (
only_if_updated
and filepath in self.file_last_changed
and file_last_changed_on_disk == self.file_last_changed[filepath]
):
return []
except Exception as e:
self.log.exception(e)
return []
# Ensure we don't pick up anything else we didn't mean to
DagContext.autoregistered_dags.clear()
self.captured_warnings.pop(filepath, None)
with _capture_with_reraise() as captured_warnings:
if filepath.endswith(".py") or not zipfile.is_zipfile(filepath):
mods = self._load_modules_from_file(filepath, safe_mode)
else:
mods = self._load_modules_from_zip(filepath, safe_mode)
if captured_warnings:
formatted_warnings = []
for msg in captured_warnings:
category = msg.category.__name__
if (module := msg.category.__module__) != "builtins":
category = f"{module}.{category}"
formatted_warnings.append(f"{msg.filename}:{msg.lineno}: {category}: {msg.message}")
self.captured_warnings[filepath] = tuple(formatted_warnings)
found_dags = self._process_modules(filepath, mods, file_last_changed_on_disk)
self.file_last_changed[filepath] = file_last_changed_on_disk
return found_dags
@property
def dag_warnings(self) -> set[DagWarning]:
"""Get the set of DagWarnings for the bagged dags."""
from airflow.models.dagwarning import DagWarning, DagWarningType
# None means this feature is not enabled. Empty set means we don't know about any pools at all!
if self.known_pools is None:
return set()
def get_pools(dag) -> dict[str, set[str]]:
return {dag.dag_id: {task.pool for task in dag.tasks}}
pool_dict: dict[str, set[str]] = {}
for dag in self.dags.values():
pool_dict.update(get_pools(dag))
warnings: set[DagWarning] = set()
for dag_id, dag_pools in pool_dict.items():
nonexistent_pools = dag_pools - self.known_pools
if nonexistent_pools:
warnings.add(
DagWarning(
dag_id,
DagWarningType.NONEXISTENT_POOL,
f"Dag '{dag_id}' references non-existent pools: {sorted(nonexistent_pools)!r}",
)
)
return warnings
def _get_relative_fileloc(self, filepath: str) -> str:
"""
Get the relative file location for a given filepath.
:param filepath: Absolute path to the file
:return: Relative path from bundle_path, or original filepath if no bundle_path
"""
if self.bundle_path:
return str(Path(filepath).relative_to(self.bundle_path))
return filepath
def _load_modules_from_file(self, filepath, safe_mode):
from airflow.sdk.definitions._internal.contextmanager import DagContext
def handler(signum, frame):
"""Handle SIGSEGV signal and let the user know that the import failed."""
msg = f"Received SIGSEGV signal while processing {filepath}."
self.log.error(msg)
relative_filepath = self._get_relative_fileloc(filepath)
self.import_errors[relative_filepath] = msg
try:
signal.signal(signal.SIGSEGV, handler)
except ValueError:
self.log.warning("SIGSEGV signal handler registration failed. Not in the main thread")
if not might_contain_dag(filepath, safe_mode):
# Don't want to spam user with skip messages
if not self.has_logged:
self.has_logged = True
self.log.info("File %s assumed to contain no DAGs. Skipping.", filepath)
return []
self.log.debug("Importing %s", filepath)
mod_name = get_unique_dag_module_name(filepath)
if mod_name in sys.modules:
del sys.modules[mod_name]
DagContext.current_autoregister_module_name = mod_name
def parse(mod_name, filepath):
try:
loader = importlib.machinery.SourceFileLoader(mod_name, filepath)
spec = importlib.util.spec_from_loader(mod_name, loader)
new_module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = new_module
loader.exec_module(new_module)
return [new_module]
except KeyboardInterrupt:
# re-raise ctrl-c
raise
except BaseException as e:
# Normally you shouldn't catch BaseException, but in this case we want to, as, pytest.skip
# raises an exception which does not inherit from Exception, and we want to catch that here.
# This would also catch `exit()` in a dag file
DagContext.autoregistered_dags.clear()
self.log.exception("Failed to import: %s", filepath)
relative_filepath = self._get_relative_fileloc(filepath)
if self.dagbag_import_error_tracebacks:
self.import_errors[relative_filepath] = traceback.format_exc(
limit=-self.dagbag_import_error_traceback_depth
)
else:
self.import_errors[relative_filepath] = str(e)
return []
dagbag_import_timeout = settings.get_dagbag_import_timeout(filepath)
if not isinstance(dagbag_import_timeout, (int, float)):
raise TypeError(
f"Value ({dagbag_import_timeout}) from get_dagbag_import_timeout must be int or float"
)
if dagbag_import_timeout <= 0: # no parsing timeout
return parse(mod_name, filepath)
timeout_msg = (
f"DagBag import timeout for {filepath} after {dagbag_import_timeout}s.\n"
"Please take a look at these docs to improve your DAG import time:\n"
f"* {get_docs_url('best-practices.html#top-level-python-code')}\n"
f"* {get_docs_url('best-practices.html#reducing-dag-complexity')}"
)
with timeout(dagbag_import_timeout, error_message=timeout_msg):
return parse(mod_name, filepath)
def _load_modules_from_zip(self, filepath, safe_mode):
from airflow.sdk.definitions._internal.contextmanager import DagContext
mods = []
with zipfile.ZipFile(filepath) as current_zip_file:
for zip_info in current_zip_file.infolist():
zip_path = Path(zip_info.filename)
if zip_path.suffix not in [".py", ".pyc"] or len(zip_path.parts) > 1:
continue
if zip_path.stem == "__init__":
self.log.warning("Found %s at root of %s", zip_path.name, filepath)
self.log.debug("Reading %s from %s", zip_info.filename, filepath)
if not might_contain_dag(zip_info.filename, safe_mode, current_zip_file):
# todo: create ignore list
# Don't want to spam user with skip messages
if not self.has_logged:
self.has_logged = True
self.log.info(
"File %s:%s assumed to contain no DAGs. Skipping.", filepath, zip_info.filename
)
continue
mod_name = zip_path.stem
if mod_name in sys.modules:
del sys.modules[mod_name]
DagContext.current_autoregister_module_name = mod_name
try:
sys.path.insert(0, filepath)
current_module = importlib.import_module(mod_name)
mods.append(current_module)
except Exception as e:
DagContext.autoregistered_dags.clear()
fileloc = os.path.join(filepath, zip_info.filename)
self.log.exception("Failed to import: %s", fileloc)
relative_fileloc = self._get_relative_fileloc(fileloc)
if self.dagbag_import_error_tracebacks:
self.import_errors[relative_fileloc] = traceback.format_exc(
limit=-self.dagbag_import_error_traceback_depth
)
else:
self.import_errors[relative_fileloc] = str(e)
finally:
if sys.path[0] == filepath:
del sys.path[0]
return mods
def _process_modules(self, filepath, mods, file_last_changed_on_disk):
from airflow.sdk import DAG
from airflow.sdk.definitions._internal.contextmanager import DagContext
top_level_dags = {(o, m) for m in mods for o in m.__dict__.values() if isinstance(o, DAG)}
top_level_dags.update(DagContext.autoregistered_dags)
DagContext.current_autoregister_module_name = None
DagContext.autoregistered_dags.clear()
found_dags = []
for dag, mod in top_level_dags:
dag.fileloc = mod.__file__
relative_fileloc = self._get_relative_fileloc(dag.fileloc)
dag.relative_fileloc = relative_fileloc
try:
dag.validate()
_validate_executor_fields(dag, self.bundle_name)
self.bag_dag(dag=dag)
except AirflowClusterPolicySkipDag:
pass
except Exception as e:
self.log.exception("Failed to bag_dag: %s", dag.fileloc)
self.import_errors[relative_fileloc] = f"{type(e).__name__}: {e}"
self.file_last_changed[dag.fileloc] = file_last_changed_on_disk
else:
found_dags.append(dag)
return found_dags
def bag_dag(self, dag: DAG):
"""
Add the DAG into the bag.
:raises: AirflowDagCycleException if a cycle is detected.
:raises: AirflowDagDuplicatedIdException if this dag already exists in the bag.
"""
dag.check_cycle() # throws exception if a task cycle is found
dag.resolve_template_files()
dag.last_loaded = timezone.utcnow()
try:
# Check policies
settings.dag_policy(dag)
for task in dag.tasks:
# The listeners are not supported when ending a task via a trigger on asynchronous operators.
if getattr(task, "end_from_trigger", False) and get_listener_manager().has_listeners:
raise AirflowException(
"Listeners are not supported with end_from_trigger=True for deferrable operators. "
f"Task {task.task_id} in DAG {dag.dag_id} has end_from_trigger=True with listeners from plugins. "
"Set end_from_trigger=False to use listeners."
)
settings.task_policy(task)
except (AirflowClusterPolicyViolation, AirflowClusterPolicySkipDag):
raise
except Exception as e:
self.log.exception(e)
raise AirflowClusterPolicyError(e)
from airflow.sdk.exceptions import AirflowDagCycleException
try:
prev_dag = self.dags.get(dag.dag_id)
if prev_dag and prev_dag.fileloc != dag.fileloc:
raise AirflowDagDuplicatedIdException(
dag_id=dag.dag_id,
incoming=dag.fileloc,
existing=self.dags[dag.dag_id].fileloc,
)
self.dags[dag.dag_id] = dag
self.log.debug("Loaded DAG %s", dag)
except (AirflowDagCycleException, AirflowDagDuplicatedIdException):
# There was an error in bagging the dag. Remove it from the list of dags
self.log.exception("Exception bagging dag: %s", dag.dag_id)
raise
def collect_dags(
self,
dag_folder: str | Path | None = None,
only_if_updated: bool = True,
include_examples: bool = conf.getboolean("core", "LOAD_EXAMPLES"),
safe_mode: bool = conf.getboolean("core", "DAG_DISCOVERY_SAFE_MODE"),
):
"""
Look for python modules in a given path, import them, and add them to the dagbag collection.
Note that if a ``.airflowignore`` file is found while processing
the directory, it will behave much like a ``.gitignore``,
ignoring files that match any of the patterns specified
in the file.
**Note**: The patterns in ``.airflowignore`` are interpreted as either
un-anchored regexes or gitignore-like glob expressions, depending on
the ``DAG_IGNORE_FILE_SYNTAX`` configuration parameter.
"""
self.log.info("Filling up the DagBag from %s", dag_folder)
dag_folder = dag_folder or self.dag_folder
# Used to store stats around DagBag processing
stats = []
# Ensure dag_folder is a str -- it may have been a pathlib.Path
dag_folder = correct_maybe_zipped(str(dag_folder))
files_to_parse = list_py_file_paths(dag_folder, safe_mode=safe_mode)
if include_examples:
from airflow import example_dags
example_dag_folder = next(iter(example_dags.__path__))
files_to_parse.extend(list_py_file_paths(example_dag_folder, safe_mode=safe_mode))
for filepath in files_to_parse:
try:
file_parse_start_dttm = timezone.utcnow()
found_dags = self.process_file(filepath, only_if_updated=only_if_updated, safe_mode=safe_mode)
file_parse_end_dttm = timezone.utcnow()
stats.append(
FileLoadStat(
file=filepath.replace(settings.DAGS_FOLDER, ""),
duration=file_parse_end_dttm - file_parse_start_dttm,
dag_num=len(found_dags),
task_num=sum(len(dag.tasks) for dag in found_dags),
dags=str([dag.dag_id for dag in found_dags]),
warning_num=len(self.captured_warnings.get(filepath, [])),
)
)
except Exception as e:
self.log.exception(e)
self.dagbag_stats = sorted(stats, key=lambda x: x.duration, reverse=True)
def dagbag_report(self):
"""Print a report around DagBag loading stats."""
stats = self.dagbag_stats
dag_folder = self.dag_folder
duration = sum((o.duration for o in stats), timedelta()).total_seconds()
dag_num = sum(o.dag_num for o in stats)
task_num = sum(o.task_num for o in stats)
table = tabulate(stats, headers="keys")
report = textwrap.dedent(
f"""\n
-------------------------------------------------------------------
DagBag loading stats for {dag_folder}
-------------------------------------------------------------------
Number of DAGs: {dag_num}
Total task number: {task_num}
DagBag parsing time: {duration}\n{table}
"""
)
return report
@provide_session
def sync_bag_to_db(
dagbag: DagBag,
bundle_name: str,
bundle_version: str | None,
*,
session: Session = NEW_SESSION,
) -> None:
"""Save attributes about list of DAG to the DB."""
from airflow.dag_processing.collection import update_dag_parsing_results_in_db
import_errors = {(bundle_name, rel_path): error for rel_path, error in dagbag.import_errors.items()}
# Build the set of all files that were parsed and include files with import errors
# in case they are not in file_last_changed
files_parsed = set(import_errors)
if dagbag.bundle_path:
files_parsed.update(
(bundle_name, dagbag._get_relative_fileloc(abs_filepath))
for abs_filepath in dagbag.file_last_changed
)
update_dag_parsing_results_in_db(
bundle_name,
bundle_version,
[LazyDeserializedDAG.from_dag(dag) for dag in dagbag.dags.values()],
import_errors,
None, # file parsing duration is not well defined when parsing multiple files / multiple DAGs.
dagbag.dag_warnings,
session=session,
files_parsed=files_parsed,
)
|
DagBag
|
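A minimal usage sketch for the DagBag record above; the folder path and dag id are placeholders, and the import path mirrors the record's own module path:

from airflow.dag_processing.dagbag import DagBag

# Parse a folder, skipping the example dags that ship with Airflow.
bag = DagBag(dag_folder="/opt/airflow/dags", include_examples=False)

print(f"{bag.size()} dags parsed")
for rel_path, err in bag.import_errors.items():
    print(f"import error in {rel_path}: {err}")

# get_dag() re-parses the source file if the DB says the dag expired.
dag = bag.get_dag("my_dag_id")
if dag is not None:
    print(dag.dag_id, [t.task_id for t in dag.tasks])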
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 144378,
"end": 144495
}
|
class ____(BaseModel, extra="forbid"):
upsert: "PointInsertOperations" = Field(..., description="")
|
UpsertOperation
|
python
|
hynek__structlog
|
tests/test_stdlib.py
|
{
"start": 44837,
"end": 49125
}
|
class ____:
def test_sync_bl(self, abl, cl):
"""
AsyncBoundLogger.sync_bl works outside of loops.
"""
abl.sync_bl.info("test")
assert [
CapturedCall(method_name="info", args=(), kwargs={"event": "test"})
] == cl.calls
@pytest.mark.asyncio
async def test_protocol(self, abl):
"""
AsyncBoundLogger is a proper BindableLogger.
"""
assert isinstance(abl, BindableLogger)
@pytest.mark.asyncio
async def test_correct_levels(self, abl, cl, stdlib_log_method):
"""
The proxy methods call the correct upstream methods.
"""
await getattr(abl.bind(foo="bar"), stdlib_log_method)("42")
aliases = {"warn": "warning"}
expect = aliases.get(stdlib_log_method, stdlib_log_method)
assert expect == cl.calls[0].method_name
@pytest.mark.asyncio
async def test_correct_level_fatal(self, abl, cl):
"""
fatal, that I have no idea why we support, maps to critical.
"""
await abl.bind(foo="bar").fatal("42")
assert "critical" == cl.calls[0].method_name
@pytest.mark.asyncio
async def test_log_method(self, abl, cl):
"""
The `log` method is proxied too.
"""
await abl.bind(foo="bar").log(logging.ERROR, "42")
assert "error" == cl.calls[0].method_name
@pytest.mark.asyncio
async def test_exception(self, abl, cl):
"""
`exception` makes sure 'exc_info' is set, if it's not set already.
"""
try:
raise ValueError("omg")
except ValueError:
await abl.exception("oops")
ei = cl.calls[0].kwargs["exc_info"]
assert ValueError is ei[0]
assert ("omg",) == ei[1].args
@pytest.mark.asyncio
async def test_exception_do_not_overwrite(self, abl, cl):
"""
`exception` leaves exc_info be, if it's set.
"""
o1 = object()
o2 = object()
o3 = object()
try:
raise ValueError("omg")
except ValueError:
await abl.exception("oops", exc_info=(o1, o2, o3))
ei = cl.calls[0].kwargs["exc_info"]
assert (o1, o2, o3) == ei
@pytest.mark.asyncio
async def test_bind_unbind(self, cl):
"""
new/bind/unbind/try_unbind are correctly propagated.
"""
l1 = AsyncBoundLogger(cl, context={}, processors=[])
l2 = l1.bind(x=42)
assert l1 is not l2
assert l1.sync_bl is not l2.sync_bl
assert {} == l1._context
assert {"x": 42} == l2._context
l3 = l2.new(y=23)
assert l2 is not l3
assert l2.sync_bl is not l3.sync_bl
assert {"y": 23} == l3._context
l4 = l3.unbind("y")
assert {} == l4._context
assert l3 is not l4
# N.B. x isn't bound anymore.
l5 = l4.try_unbind("x")
assert {} == l5._context
assert l4 is not l5
@pytest.mark.asyncio
async def test_integration(self, capsys):
"""
Configure and log an actual entry.
"""
configure(
processors=[add_log_level, JSONRenderer()],
logger_factory=PrintLogger,
wrapper_class=AsyncBoundLogger,
cache_logger_on_first_use=True,
)
logger = get_logger()
await logger.bind(foo="bar").info("baz", x="42")
assert {
"foo": "bar",
"x": "42",
"event": "baz",
"level": "info",
} == json.loads(capsys.readouterr().out)
@pytest.mark.parametrize("log_level", [None, 45])
def test_recreate_defaults(log_level):
"""
Recreate defaults configures structlog and -- if asked -- logging.
"""
logging.basicConfig(
stream=sys.stderr,
level=1,
force=True,
)
recreate_defaults(log_level=log_level)
assert BoundLogger is _CONFIG.default_wrapper_class
assert dict is _CONFIG.default_context_class
assert isinstance(_CONFIG.logger_factory, LoggerFactory)
log = get_logger().bind()
if log_level is not None:
assert log_level == log.getEffectiveLevel()
else:
assert 1 == log.getEffectiveLevel()
|
TestAsyncBoundLogger
|
python
|
dask__distributed
|
distributed/worker_state_machine.py
|
{
"start": 20172,
"end": 20386
}
|
class ____(GatherDepDoneEvent):
""":class:`GatherDep` instruction terminated:
network failure while trying to communicate with remote worker
"""
__slots__ = ()
@dataclass
|
GatherDepNetworkFailureEvent
|
python
|
Pylons__pyramid
|
docs/conf.py
|
{
"start": 11117,
"end": 13690
}
|
class ____(Directive):
def run(self):
return [nodes.raw(
'',
format='latex')]
def app_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
"""custom role for :app: marker, does nothing in particular except allow
:app:`Pyramid` to work (for later search and replace)."""
if 'class' in options:
assert 'classes' not in options
options['classes'] = options['class']
del options['class']
return [nodes.inline(rawtext, utils.unescape(text), **options)], []
def setup(app):
app.add_role('app', app_role)
app.add_directive('frontmatter', FrontMatter)
app.add_directive('mainmatter', MainMatter)
app.add_directive('backmatter', BackMatter)
# turn off all line numbers in latex formatting
## from pygments.formatters import LatexFormatter
## from sphinx.highlighting import PygmentsBridge
## class NoLinenosLatexFormatter(LatexFormatter):
## def __init__(self, **options):
## LatexFormatter.__init__(self, **options)
## self.linenos = False
## PygmentsBridge.latex_formatter = NoLinenosLatexFormatter
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'The Pyramid Web Framework, Version %s' \
% release
epub_author = 'Chris McDonough'
epub_publisher = 'Agendaless Consulting'
epub_copyright = '2008-%d' % thisyear
# The language of the text. It defaults to the language option
# or en if the language is not set.
epub_language = 'en'
# The scheme of the identifier. Typical schemes are ISBN or URL.
epub_scheme = 'ISBN'
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
epub_identifier = '0615445675'
# A unique identification for the text.
epub_uid = 'The Pyramid Web Framework, Version %s' \
% release
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['_static/opensearch.xml', '_static/doctools.js',
'_static/jquery.js', '_static/searchtools.js', '_static/underscore.js',
'_static/basic.css', 'search.html', '_static/websupport.js']
# The depth of the table of contents in toc.ncx.
epub_tocdepth = 3
# For a list of all settings, visit http://sphinx-doc.org/config.html
# -- Options for linkcheck builder -------------------------------------------
# List of items to ignore when running linkcheck
linkcheck_ignore = [
r'http://localhost:\d+',
r'http://localhost',
r'https://web.libera.chat/#pyramid', # JavaScript "anchor"
]
|
BackMatter
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/cloud_build.py
|
{
"start": 2212,
"end": 5637
}
|
class ____(GoogleCloudBaseOperator):
"""
Cancels a build in progress.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildCancelBuildOperator`
:param id_: The ID of the build.
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param location: The location of the project.
"""
template_fields: Sequence[str] = ("project_id", "id_", "gcp_conn_id", "location")
operator_extra_links = (CloudBuildLink(),)
def __init__(
self,
*,
id_: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.id_ = id_
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"region": self.location,
}
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.cancel_build(
id_=self.id_,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
context["task_instance"].xcom_push(key="id", value=result.id)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildLink.persist(
context=context,
project_id=project_id,
build_id=result.id,
)
return Build.to_dict(result)
|
CloudBuildCancelBuildOperator
|
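A usage sketch for the operator above; the project, upstream task id, and XCom key are placeholders. `id_` is listed in `template_fields`, so a templated XCom pull works here:

from airflow.providers.google.cloud.operators.cloud_build import (
    CloudBuildCancelBuildOperator,
)

# Hypothetical task inside a DAG definition.
cancel_build = CloudBuildCancelBuildOperator(
    task_id="cancel_build",
    id_="{{ ti.xcom_pull(task_ids='create_build', key='id') }}",
    project_id="my-gcp-project",
    location="global",
)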
python
|
fastapi__sqlmodel
|
docs_src/tutorial/fastapi/relationships/tutorial001.py
|
{
"start": 1334,
"end": 5078
}
|
class ____(TeamPublic):
heroes: List[HeroPublic] = []
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def get_session():
with Session(engine) as session:
yield session
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(*, session: Session = Depends(get_session), hero: HeroCreate):
db_hero = Hero.model_validate(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=List[HeroPublic])
def read_heroes(
*,
session: Session = Depends(get_session),
offset: int = 0,
limit: int = Query(default=100, le=100),
):
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroPublicWithTeam)
def read_hero(*, session: Session = Depends(get_session), hero_id: int):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
@app.patch("/heroes/{hero_id}", response_model=HeroPublic)
def update_hero(
*, session: Session = Depends(get_session), hero_id: int, hero: HeroUpdate
):
db_hero = session.get(Hero, hero_id)
if not db_hero:
raise HTTPException(status_code=404, detail="Hero not found")
hero_data = hero.model_dump(exclude_unset=True)
db_hero.sqlmodel_update(hero_data)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.delete("/heroes/{hero_id}")
def delete_hero(*, session: Session = Depends(get_session), hero_id: int):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
session.delete(hero)
session.commit()
return {"ok": True}
@app.post("/teams/", response_model=TeamPublic)
def create_team(*, session: Session = Depends(get_session), team: TeamCreate):
db_team = Team.model_validate(team)
session.add(db_team)
session.commit()
session.refresh(db_team)
return db_team
@app.get("/teams/", response_model=List[TeamPublic])
def read_teams(
*,
session: Session = Depends(get_session),
offset: int = 0,
limit: int = Query(default=100, le=100),
):
teams = session.exec(select(Team).offset(offset).limit(limit)).all()
return teams
@app.get("/teams/{team_id}", response_model=TeamPublicWithHeroes)
def read_team(*, team_id: int, session: Session = Depends(get_session)):
team = session.get(Team, team_id)
if not team:
raise HTTPException(status_code=404, detail="Team not found")
return team
@app.patch("/teams/{team_id}", response_model=TeamPublic)
def update_team(
*,
session: Session = Depends(get_session),
team_id: int,
team: TeamUpdate,
):
db_team = session.get(Team, team_id)
if not db_team:
raise HTTPException(status_code=404, detail="Team not found")
team_data = team.model_dump(exclude_unset=True)
db_team.sqlmodel_update(team_data)
session.add(db_team)
session.commit()
session.refresh(db_team)
return db_team
@app.delete("/teams/{team_id}")
def delete_team(*, session: Session = Depends(get_session), team_id: int):
team = session.get(Team, team_id)
if not team:
raise HTTPException(status_code=404, detail="Team not found")
session.delete(team)
session.commit()
return {"ok": True}
|
TeamPublicWithHeroes
|
python
|
kamyu104__LeetCode-Solutions
|
Python/convert-to-base-2.py
|
{
"start": 32,
"end": 380
}
|
class ____(object):
def baseNeg2(self, N):
"""
:type N: int
:rtype: str
"""
result = []
while N:
result.append(str(-N & 1)) # N % -2
N = -(N >> 1) # N //= -2
result.reverse()
return "".join(result) if result else "0"
# Time: O(logn)
# Space: O(1)
|
Solution
|
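A worked check of the negabinary conversion above, as a standalone function. For n = 6 the digits come out as 11010, i.e. 16 - 8 + 0 - 2 + 0 = 6:

def base_neg2(n: int) -> str:
    digits = []
    while n:
        digits.append(str(-n & 1))  # least-significant digit: parity of n
        n = -(n >> 1)               # floor-divide by -2
    return "".join(reversed(digits)) or "0"

print(base_neg2(6))   # "11010"
print(base_neg2(0))   # "0"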
python
|
ApeWorX__ape
|
src/ape_pm/dependency.py
|
{
"start": 12916,
"end": 19618
}
|
class ____(DependencyAPI):
"""
A dependency installed from Python tooling, such as `pip`.
"""
site_package: Optional[str] = None
"""
The Python site-package name, such as ``"snekmate"``. Cannot be used
with ``pypi:``. Requires the dependency to have been installed
either via ``pip`` or something similar.
"""
pypi: Optional[str] = None
"""
The ``pypi`` reference, such as ``"snekmate"``. Cannot be used with
``python:``. When set, downloads the dependency from ``pypi``
using HTTP directly (not ``pip``).
"""
version: Optional[str] = None
"""
Optionally specify the version expected to be installed.
"""
@model_validator(mode="before")
@classmethod
def validate_model(cls, values):
if "python" in values:
# `.python` is the old key but we have to always support it
# so dependencies-of-dependencies always work, even when referencing
# older projects.
values["site_package"] = values.pop("python")
if "name" not in values:
if name := values.get("site_package") or values.get("pypi"):
values["name"] = name
else:
raise ValueError(
"Must set either 'pypi:' or 'site_package': when using Python dependencies"
)
return values
@cached_property
def path(self) -> Optional[Path]:
if self.pypi:
# Is pypi: specified; has no special path.
return None
elif python := self.site_package:
try:
return get_package_path(python)
except ValueError as err:
raise ProjectError(str(err)) from err
return None
@property
def package_id(self) -> str:
if pkg_id := (self.pypi or self.site_package):
return pkg_id
raise ProjectError("Must provide either 'pypi:' or 'python:' for python-base dependencies.")
@property
def python(self) -> Optional[str]:
# For backwards-compat; serves as an undocumented alias.
return self.site_package
@property
def version_id(self) -> str:
if self.version:
# It is helpful to just return the cfg version here
# so uninstalled dependencies can attempt to set up their caches.
return self.version
elif self.pypi:
# Version available in package data.
vers = self._get_version_from_package_data()
elif self.site_package:
# Python dependency not installed; attempt to use latest from pypi.
if pkg_vers := self.version_from_package_data:
return pkg_vers
# Force the user to specify the version, as it is not installed and not
# available on PyPI.
raise ProjectError(
f"Dependency '{self.name}' not installed. Either install or specify the `version:` to continue."
)
else:
raise ProjectError(
"Must provide either 'pypi:' or 'python:' for python-base dependencies."
)
return vers
@property
def uri(self) -> str:
if self.pypi:
return self.download_archive_url
elif self.site_package and (path := self.path):
# Local site-package path.
return path.as_uri()
else:
raise ProjectError(
"Must provide either 'pypi:' or 'python:' for python-base dependencies."
)
@cached_property
def package_data(self) -> dict:
url = f"https://pypi.org/pypi/{self.package_id}/json"
response = requests.get(url)
try:
response.raise_for_status()
except requests.HTTPError as err:
if err.response.status_code == 404:
# There is no available package data on PyPI; use empty dict.
return {}
else:
# It should have worked in this case, so it is good to raise an error.
raise ProjectError(
f"Problem downloading package data for '{self.package_id}': {err}"
)
return response.json()
@cached_property
def version_from_package_data(self) -> Optional[str]:
return self.package_data.get("info", {}).get("version")
@cached_property
def download_archive_url(self) -> str:
if not (version := self.version):
if not (version := self.version_from_package_data):
# Not sure if this is possible, but just in case API data changes or something.
raise ProjectError(f"Unable to find version for package '{self.package_id}'.")
releases = self.package_data.get("releases", {})
if version not in releases:
raise ProjectError(f"Version '{version}' not found for package '{self.package_id}'.")
# Find the first zip file in the specified release.
for file_info in releases[version]:
if file_info.get("packagetype") != "sdist":
continue
return file_info["url"]
raise ProjectError(
f"No zip file found for package '{self.package_id}' with version '{version}' on PyPI."
)
def fetch(self, destination: Path):
if self.pypi:
self._fetch_from_pypi(destination)
elif path := self.path:
# 'python:' key.
_fetch_local(path, destination, config_override=self.config_override)
def _fetch_from_pypi(self, destination: Path):
archive_path = self._fetch_archive_file(destination)
extract_archive(archive_path)
archive_path.unlink(missing_ok=True)
def _fetch_archive_file(self, destination) -> Path:
logger.info(f"Fetching python dependency '{self.package_id}' from 'pypi.")
download_url = self.download_archive_url
filename = download_url.split("/")[-1]
destination.mkdir(exist_ok=True, parents=True)
archive_destination = destination / filename
with requests.get(download_url, stream=True) as response:
response.raise_for_status()
with open(archive_destination, "wb") as file:
for chunk in response.iter_content(chunk_size=8192): # 8 KB
file.write(chunk)
return archive_destination
def _get_version_from_package_data(self) -> str:
if vers := self.version_from_package_data:
return vers
# I doubt this is a possible condition, but just in case.
raise ProjectError(f"Missing version from PyPI for package '{self.package_id}'.")
|
PythonDependency
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 532976,
"end": 533427
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of CopyProjectV2"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "project_v2")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
project_v2 = sgqlc.types.Field("ProjectV2", graphql_name="projectV2")
"""The copied project."""
|
CopyProjectV2Payload
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1593086,
"end": 1593249
}
|
class ____(sgqlc.types.Union):
"""Types that can be inside Project Cards."""
__schema__ = github_schema
__types__ = (Issue, PullRequest)
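
# Hypothetical sgqlc selection over this union: member types are reached
# through inline fragments via `__as__`, e.g. when selecting a card's content:
#
#   content = card.content
#   content.__as__(Issue).title
#   content.__as__(PullRequest).title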
|
ProjectCardItem
|
python
|
walkccc__LeetCode
|
solutions/2049. Count Nodes With the Highest Score/2049.py
|
{
"start": 0,
"end": 701
}
|
class ____:
  def countHighestScoreNodes(self, parents: list[int]) -> int:
    tree = [[] for _ in range(len(parents))]

    for i, parent in enumerate(parents):
      if parent == -1:
        continue
      tree[parent].append(i)

    ans = 0
    maxScore = 0

    def dfs(u: int) -> int:  # Returns the node count of u's subtree.
      nonlocal ans
      nonlocal maxScore
      count = 1
      score = 1
      for v in tree[u]:
        childCount = dfs(v)
        count += childCount
        score *= childCount
      # Multiply by the size of the component above u; `or 1` handles the root.
      score *= (len(parents) - count) or 1
      if score > maxScore:
        maxScore = score
        ans = 1
      elif score == maxScore:
        ans += 1
      return count

    dfs(0)
    return ans
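
# Worked example (illustrative): parents = [-1, 2, 0, 2, 0]
#   tree: 0 -> [2, 4], 2 -> [1, 3]
#   remove 0: components of sizes 3 and 1  -> score 3 * 1 = 3
#   remove 1: one component of size 4      -> score 4
#   remove 2: components of sizes 1, 1, 2  -> score 1 * 1 * 2 = 2
#   remove 3 or 4: one component of size 4 -> score 4
# The best score 4 is reached by nodes 1, 3, and 4, so the answer is 3.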
|
Solution
|