diff --git a/fixtures/backup/model_dependencies/detailed.json b/fixtures/backup/model_dependencies/detailed.json
index 87a66b8cadc792..68d4d162241c28 100644
--- a/fixtures/backup/model_dependencies/detailed.json
+++ b/fixtures/backup/model_dependencies/detailed.json
@@ -737,30 +737,6 @@
"table_name": "sentry_artifactbundle",
"uniques": []
},
- "sentry.artifactbundleflatfileindex": {
- "dangling": false,
- "foreign_keys": {
- "project_id": {
- "kind": "ImplicitForeignKey",
- "model": "sentry.project",
- "nullable": false
- }
- },
- "model": "sentry.artifactbundleflatfileindex",
- "relocation_dependencies": [],
- "relocation_scope": "Excluded",
- "silos": [
- "Region"
- ],
- "table_name": "sentry_artifactbundleflatfileindex",
- "uniques": [
- [
- "dist_name",
- "project_id",
- "release_name"
- ]
- ]
- },
"sentry.artifactbundleindex": {
"dangling": false,
"foreign_keys": {
@@ -2119,34 +2095,6 @@
]
]
},
- "sentry.flatfileindexstate": {
- "dangling": false,
- "foreign_keys": {
- "artifact_bundle": {
- "kind": "FlexibleForeignKey",
- "model": "sentry.artifactbundle",
- "nullable": false
- },
- "flat_file_index": {
- "kind": "FlexibleForeignKey",
- "model": "sentry.artifactbundleflatfileindex",
- "nullable": false
- }
- },
- "model": "sentry.flatfileindexstate",
- "relocation_dependencies": [],
- "relocation_scope": "Excluded",
- "silos": [
- "Region"
- ],
- "table_name": "sentry_flatfileindexstate",
- "uniques": [
- [
- "artifact_bundle",
- "flat_file_index"
- ]
- ]
- },
"sentry.group": {
"dangling": false,
"foreign_keys": {
@@ -3289,6 +3237,24 @@
]
]
},
+ "sentry.monitorenvbrokendetection": {
+ "dangling": false,
+ "foreign_keys": {
+ "monitor_incident": {
+ "kind": "FlexibleForeignKey",
+ "model": "sentry.monitorincident",
+ "nullable": false
+ }
+ },
+ "model": "sentry.monitorenvbrokendetection",
+ "relocation_dependencies": [],
+ "relocation_scope": "Excluded",
+ "silos": [
+ "Region"
+ ],
+ "table_name": "sentry_monitorenvbrokendetection",
+ "uniques": []
+ },
"sentry.monitorenvironment": {
"dangling": false,
"foreign_keys": {
diff --git a/fixtures/backup/model_dependencies/flat.json b/fixtures/backup/model_dependencies/flat.json
index d3b183546af522..de92ea42c392b3 100644
--- a/fixtures/backup/model_dependencies/flat.json
+++ b/fixtures/backup/model_dependencies/flat.json
@@ -106,9 +106,6 @@
"sentry.file",
"sentry.organization"
],
- "sentry.artifactbundleflatfileindex": [
- "sentry.project"
- ],
"sentry.artifactbundleindex": [
"sentry.artifactbundle",
"sentry.organization"
@@ -291,10 +288,6 @@
"sentry.fileblob",
"sentry.organization"
],
- "sentry.flatfileindexstate": [
- "sentry.artifactbundle",
- "sentry.artifactbundleflatfileindex"
- ],
"sentry.group": [
"sentry.project",
"sentry.release"
@@ -456,6 +449,9 @@
"sentry.monitorlocation",
"sentry.project"
],
+ "sentry.monitorenvbrokendetection": [
+ "sentry.monitorincident"
+ ],
"sentry.monitorenvironment": [
"sentry.environment",
"sentry.monitor"
diff --git a/fixtures/backup/model_dependencies/sorted.json b/fixtures/backup/model_dependencies/sorted.json
index 6f268249602ca4..f8b0e4f4049cd3 100644
--- a/fixtures/backup/model_dependencies/sorted.json
+++ b/fixtures/backup/model_dependencies/sorted.json
@@ -109,7 +109,6 @@
"sentry.authidentity",
"sentry.authenticator",
"sentry.assistantactivity",
- "sentry.artifactbundleflatfileindex",
"sentry.artifactbundle",
"sentry.appconnectbuild",
"sentry.apikey",
@@ -175,7 +174,6 @@
"sentry.groupcommitresolution",
"sentry.groupbookmark",
"sentry.groupassignee",
- "sentry.flatfileindexstate",
"sentry.fileblobindex",
"sentry.exporteddatablob",
"sentry.environmentproject",
@@ -208,6 +206,7 @@
"sentry.organizationmemberteamreplica",
"sentry.notificationactionproject",
"sentry.monitorincident",
+ "sentry.monitorenvbrokendetection",
"sentry.incident",
"sentry.dashboardwidgetquery",
"sentry.alertruletrigger",
diff --git a/fixtures/backup/model_dependencies/truncate.json b/fixtures/backup/model_dependencies/truncate.json
index ea15145708227c..f1bf22ceed0965 100644
--- a/fixtures/backup/model_dependencies/truncate.json
+++ b/fixtures/backup/model_dependencies/truncate.json
@@ -109,7 +109,6 @@
"sentry_authidentity",
"auth_authenticator",
"sentry_assistant_activity",
- "sentry_artifactbundleflatfileindex",
"sentry_artifactbundle",
"sentry_appconnectbuild",
"sentry_apikey",
@@ -175,7 +174,6 @@
"sentry_groupcommitresolution",
"sentry_groupbookmark",
"sentry_groupasignee",
- "sentry_flatfileindexstate",
"sentry_fileblobindex",
"sentry_exporteddatablob",
"sentry_environmentproject",
@@ -208,6 +206,7 @@
"sentry_organizationmember_teamsreplica",
"sentry_notificationactionproject",
"sentry_monitorincident",
+ "sentry_monitorenvbrokendetection",
"sentry_incident",
"sentry_dashboardwidgetquery",
"sentry_alertruletrigger",
diff --git a/fixtures/js-stubs/group.ts b/fixtures/js-stubs/group.ts
index 6da2c6da9c79b0..27662363a478ee 100644
--- a/fixtures/js-stubs/group.ts
+++ b/fixtures/js-stubs/group.ts
@@ -39,6 +39,7 @@ export function GroupFixture(params: Partial<Group> = {}): Group {
pluginContexts: [],
pluginIssues: [],
priority: PriorityLevel.MEDIUM,
+ priorityLockedAt: null,
project: ProjectFixture({
platform: 'javascript',
}),
diff --git a/migrations_lockfile.txt b/migrations_lockfile.txt
index 09f771acb7fa5a..872543c08d7e1d 100644
--- a/migrations_lockfile.txt
+++ b/migrations_lockfile.txt
@@ -9,5 +9,5 @@ feedback: 0004_index_together
hybridcloud: 0013_add_orgauthtokenreplica_token_index
nodestore: 0002_nodestore_no_dictfield
replays: 0004_index_together
-sentry: 0660_fix_cron_monitor_invalid_orgs
+sentry: 0666_monitor_incident_default_grouphash
social_auth: 0002_default_auto_field
diff --git a/pyproject.toml b/pyproject.toml
index ce2b18186d3fea..13e699b27c5321 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -287,7 +287,6 @@ module = [
"sentry.incidents.endpoints.organization_incident_comment_details",
"sentry.incidents.endpoints.organization_incident_index",
"sentry.incidents.logic",
- "sentry.incidents.models",
"sentry.incidents.subscription_processor",
"sentry.incidents.tasks",
"sentry.integrations.aws_lambda.integration",
@@ -371,9 +370,7 @@ module = [
"sentry.issues.endpoints.group_events",
"sentry.issues.endpoints.organization_group_index",
"sentry.issues.endpoints.source_map_debug",
- "sentry.issues.occurrence_consumer",
"sentry.issues.search",
- "sentry.issues.status_change",
"sentry.middleware.access_log",
"sentry.middleware.auth",
"sentry.middleware.ratelimit",
@@ -627,6 +624,7 @@ disable_error_code = [
module = [
"sentry.buffer.base",
"sentry.buffer.redis",
+ "sentry.eventstore.reprocessing.redis",
"sentry.utils.redis",
"sentry.utils.redis_metrics",
"sentry.utils.locking.backends.redis",
diff --git a/requirements-base.txt b/requirements-base.txt
index fa6f922654d677..518dda18cf1ce4 100644
--- a/requirements-base.txt
+++ b/requirements-base.txt
@@ -65,7 +65,7 @@ sentry-arroyo>=2.16.2
sentry-kafka-schemas>=0.1.58
sentry-ophio==0.1.5
sentry-redis-tools>=0.1.7
-sentry-relay>=0.8.45
+sentry-relay>=0.8.48
sentry-sdk>=1.39.2
snuba-sdk>=2.0.29
simplejson>=3.17.6
diff --git a/requirements-dev-frozen.txt b/requirements-dev-frozen.txt
index bde98736cf313d..ae4c46b61f9e18 100644
--- a/requirements-dev-frozen.txt
+++ b/requirements-dev-frozen.txt
@@ -180,7 +180,7 @@ sentry-forked-djangorestframework-stubs==3.14.5.post1
sentry-kafka-schemas==0.1.58
sentry-ophio==0.1.5
sentry-redis-tools==0.1.7
-sentry-relay==0.8.45
+sentry-relay==0.8.48
sentry-sdk==1.39.2
sentry-usage-accountant==0.0.10
simplejson==3.17.6
diff --git a/requirements-frozen.txt b/requirements-frozen.txt
index b0ef79fddf89a1..004b1d338a4f08 100644
--- a/requirements-frozen.txt
+++ b/requirements-frozen.txt
@@ -122,7 +122,7 @@ sentry-arroyo==2.16.2
sentry-kafka-schemas==0.1.58
sentry-ophio==0.1.5
sentry-redis-tools==0.1.7
-sentry-relay==0.8.45
+sentry-relay==0.8.48
sentry-sdk==1.39.2
sentry-usage-accountant==0.0.10
simplejson==3.17.6
diff --git a/src/sentry/api/bases/incident.py b/src/sentry/api/bases/incident.py
index a13b8fbdf855c4..aa18754573d9b3 100644
--- a/src/sentry/api/bases/incident.py
+++ b/src/sentry/api/bases/incident.py
@@ -4,7 +4,7 @@
from sentry import features
from sentry.api.bases.organization import OrganizationEndpoint, OrganizationPermission
from sentry.api.exceptions import ResourceDoesNotExist
-from sentry.incidents.models import Incident
+from sentry.incidents.models.incident import Incident
class IncidentPermission(OrganizationPermission):
diff --git a/src/sentry/api/bases/sentryapps.py b/src/sentry/api/bases/sentryapps.py
index 8ba7c8b923c3f7..b058f931cbdf00 100644
--- a/src/sentry/api/bases/sentryapps.py
+++ b/src/sentry/api/bases/sentryapps.py
@@ -364,7 +364,10 @@ def has_object_permission(self, request: Request, view, installation):
# TODO(hybrid-cloud): Replace this RPC with an org member lookup when that exists?
org_context = organization_service.get_organization_by_id(
- id=installation.organization_id, user_id=request.user.id
+ id=installation.organization_id,
+ user_id=request.user.id,
+ include_teams=False,
+ include_projects=False,
)
if (
org_context.member is None
diff --git a/src/sentry/api/endpoints/auth_index.py b/src/sentry/api/endpoints/auth_index.py
index 6944fc86f22b96..dfd764a6e30eff 100644
--- a/src/sentry/api/endpoints/auth_index.py
+++ b/src/sentry/api/endpoints/auth_index.py
@@ -1,6 +1,5 @@
import logging
-from django.conf import settings
from django.contrib.auth import logout
from django.contrib.auth.models import AnonymousUser
from django.utils.http import url_has_allowed_host_and_scheme
@@ -33,10 +32,6 @@
PREFILLED_SU_MODAL_KEY = "prefilled_su_modal"
-DISABLE_SU_FORM_U2F_CHECK_FOR_LOCAL = getattr(
- settings, "DISABLE_SU_FORM_U2F_CHECK_FOR_LOCAL", False
-)
-
@control_silo_endpoint
class BaseAuthIndexEndpoint(Endpoint):
@@ -66,7 +61,9 @@ def _reauthenticate_with_sso(request: Request, org_id: int) -> None:
if not url_has_allowed_host_and_scheme(redirect, allowed_hosts=(request.get_host(),)):
redirect = None
initiate_login(request, redirect)
- organization_context = organization_service.get_organization_by_id(id=org_id)
+ organization_context = organization_service.get_organization_by_id(
+ id=org_id, include_teams=False, include_projects=False
+ )
assert organization_context, "Failed to fetch organization in _reauthenticate_with_sso"
raise SsoRequired(
organization=organization_context.organization,
@@ -153,6 +150,15 @@ def _validate_superuser(
SSO and if they do not, we redirect them back to the SSO login.
"""
+ logger.info(
+ "auth-index.validate_superuser",
+ extra={
+ "validator": validator,
+ "user": request.user.id,
+ "raise_exception": not DISABLE_SSO_CHECK_FOR_LOCAL_DEV,
+ "verify_authenticator": verify_authenticator,
+ },
+ )
# Disable exception for missing password or u2f code if we're running locally
validator.is_valid(raise_exception=not DISABLE_SSO_CHECK_FOR_LOCAL_DEV)
@@ -242,17 +248,17 @@ def put(self, request: Request) -> Response:
if not DISABLE_SSO_CHECK_FOR_LOCAL_DEV and not is_self_hosted():
if Superuser.org_id:
- superuser_org = organization_service.get_organization_by_id(id=Superuser.org_id)
+ superuser_org = organization_service.get_organization_by_id(
+ id=Superuser.org_id, include_teams=False, include_projects=False
+ )
- verify_authenticator = (
- False
- if superuser_org is None
- else features.has(
+ if superuser_org is not None:
+ has_u2f_flag = features.has(
"organizations:u2f-superuser-form",
superuser_org.organization,
actor=request.user,
)
- )
+ verify_authenticator = has_u2f_flag
if verify_authenticator:
if not Authenticator.objects.filter(
@@ -261,6 +267,15 @@ def put(self, request: Request) -> Response:
return Response(
{"detail": {"code": "no_u2f"}}, status=status.HTTP_403_FORBIDDEN
)
+ logger.info(
+ "auth-index.put",
+ extra={
+ "organization": superuser_org,
+ "u2f_flag": has_u2f_flag,
+ "user": request.user.id,
+ "verify_authenticator": verify_authenticator,
+ },
+ )
try:
authenticated = self._validate_superuser(validator, request, verify_authenticator)
except ValidationError:
diff --git a/src/sentry/api/endpoints/chunk.py b/src/sentry/api/endpoints/chunk.py
index fefc9541b01495..13e7d645d47536 100644
--- a/src/sentry/api/endpoints/chunk.py
+++ b/src/sentry/api/endpoints/chunk.py
@@ -14,6 +14,7 @@
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import region_silo_endpoint
from sentry.api.bases.organization import OrganizationEndpoint, OrganizationReleasePermission
+from sentry.api.utils import generate_region_url
from sentry.models.files.fileblob import FileBlob
from sentry.ratelimits.config import RateLimitConfig
from sentry.utils.files import get_max_file_size
@@ -81,7 +82,12 @@ def get(self, request: Request, organization) -> Response:
url = relative_url.lstrip(API_PREFIX)
# Otherwise, if we do not support them, return an absolute, versioned endpoint with a default, system-wide prefix
else:
- url = absolute_uri(relative_url)
+ # We need to generate region-specific upload URLs when possible to avoid hitting the API proxy,
+ # which tends to cause timeouts and performance issues for uploads.
+ base_url = None
+ if options.get("hybrid_cloud.use_region_specific_upload_url"):
+ base_url = generate_region_url()
+ url = absolute_uri(relative_url, base_url)
else:
# If user overridden upload url prefix, we want an absolute, versioned endpoint, with user-configured prefix
url = absolute_uri(relative_url, endpoint)
diff --git a/src/sentry/api/endpoints/group_ai_autofix.py b/src/sentry/api/endpoints/group_ai_autofix.py
index e53edea874e4b5..a9db0ac70e500f 100644
--- a/src/sentry/api/endpoints/group_ai_autofix.py
+++ b/src/sentry/api/endpoints/group_ai_autofix.py
@@ -112,6 +112,7 @@ def _call_autofix(
repos: list[dict],
event_entries: list[dict],
additional_context: str,
+ timeout_secs: int,
):
response = requests.post(
f"{settings.SEER_AUTOFIX_URL}/v0/automation/autofix",
@@ -123,10 +124,12 @@ def _call_autofix(
"issue": {
"id": group.id,
"title": group.title,
- "short_id": group.short_id,
+ "short_id": group.qualified_short_id,
"events": [{"entries": event_entries}],
},
"additional_context": additional_context,
+ "timeout_secs": timeout_secs,
+ "last_updated": datetime.now().isoformat(),
"invoking_user": (
{
"id": user.id,
@@ -192,7 +195,12 @@ def post(self, request: Request, group: Group) -> Response:
try:
self._call_autofix(
- request.user, group, repos, event_entries, data.get("additional_context", "")
+ request.user,
+ group,
+ repos,
+ event_entries,
+ data.get("additional_context", ""),
+ TIMEOUT_SECONDS,
)
# Mark the task as completed after TIMEOUT_SECONDS
diff --git a/src/sentry/api/endpoints/integrations/sentry_apps/details.py b/src/sentry/api/endpoints/integrations/sentry_apps/details.py
index 1248e6c6fa601b..78d52975bdef9a 100644
--- a/src/sentry/api/endpoints/integrations/sentry_apps/details.py
+++ b/src/sentry/api/endpoints/integrations/sentry_apps/details.py
@@ -58,7 +58,7 @@ def put(self, request: Request, sentry_app) -> Response:
status=403,
)
owner_context = organization_service.get_organization_by_id(
- id=sentry_app.owner_id, user_id=None
+ id=sentry_app.owner_id, user_id=None, include_projects=False, include_teams=False
)
if (
owner_context
diff --git a/src/sentry/api/endpoints/organization_ddm.py b/src/sentry/api/endpoints/organization_ddm.py
index 8e1d01e9db3e41..fec9593b918e04 100644
--- a/src/sentry/api/endpoints/organization_ddm.py
+++ b/src/sentry/api/endpoints/organization_ddm.py
@@ -17,10 +17,7 @@
from sentry.exceptions import InvalidParams
from sentry.models.organization import Organization
from sentry.models.project import Project
-from sentry.sentry_metrics.querying.errors import (
- LatestReleaseNotFoundError,
- TooManyCodeLocationsRequestedError,
-)
+from sentry.sentry_metrics.querying.errors import LatestReleaseNotFoundError
from sentry.sentry_metrics.querying.metadata import (
MetricCodeLocations,
MetricCorrelations,
@@ -126,8 +123,6 @@ def get(self, request: Request, organization) -> Response:
)
except LatestReleaseNotFoundError as e:
return Response(status=404, data={"detail": str(e)})
- except TooManyCodeLocationsRequestedError as e:
- return Response(status=400, data={"detail": str(e)})
response[meta_type.value] = serialize(
data, request.user, METRIC_META_TYPE_SERIALIZER[meta_type.value]
diff --git a/src/sentry/api/endpoints/organization_events.py b/src/sentry/api/endpoints/organization_events.py
index 03f6d5361bdef7..be83db23f3dbbe 100644
--- a/src/sentry/api/endpoints/organization_events.py
+++ b/src/sentry/api/endpoints/organization_events.py
@@ -26,7 +26,7 @@
from sentry.snuba.metrics.extraction import MetricSpecType
from sentry.snuba.referrer import Referrer
from sentry.types.ratelimit import RateLimit, RateLimitCategory
-from sentry.utils.snuba import SnubaError, SnubaTSResult
+from sentry.utils.snuba import SnubaError
logger = logging.getLogger(__name__)
@@ -324,8 +324,13 @@ def fn(offset, limit) -> dict[str, Any]:
try:
widget = DashboardWidget.objects.get(id=dashboard_widget_id)
does_widget_have_split = widget.discover_widget_split is not None
+ has_override_feature = features.has(
+ "organizations:performance-discover-widget-split-override-save",
+ organization,
+ actor=request.user,
+ )
- if does_widget_have_split:
+ if does_widget_have_split and not has_override_feature:
# This is essentially cached behaviour and we skip the check
split_query = scoped_query
if widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS:
@@ -347,12 +352,15 @@ def fn(offset, limit) -> dict[str, Any]:
has_errors = len(error_results["data"]) > 0
except SnubaError:
has_errors = False
+ error_results = None
original_results = _data_fn(scopedDataset, offset, limit, scoped_query)
- if isinstance(original_results, SnubaTSResult):
- dataset_meta = original_results.data.get("meta", {})
+ if original_results.get("data"):
+ dataset_meta = original_results.get("data").get("meta", {})
else:
- dataset_meta = list(original_results.values())[0].data.get("meta", {})
+ dataset_meta = (
+ list(original_results.values())[0].get("data").get("meta", {})
+ )
using_metrics = dataset_meta.get("isMetricsData", False) or dataset_meta.get(
"isMetricsExtractedData", False
)
diff --git a/src/sentry/api/endpoints/organization_events_stats.py b/src/sentry/api/endpoints/organization_events_stats.py
index 357980de4e12d4..7f237696d793e5 100644
--- a/src/sentry/api/endpoints/organization_events_stats.py
+++ b/src/sentry/api/endpoints/organization_events_stats.py
@@ -16,7 +16,6 @@
from sentry.models.organization import Organization
from sentry.snuba import (
discover,
- errors,
functions,
metrics_enhanced_performance,
metrics_performance,
@@ -214,7 +213,6 @@ def get(self, request: Request, organization: Organization) -> Response:
if dataset
in [
discover,
- errors,
functions,
metrics_performance,
metrics_enhanced_performance,
@@ -323,8 +321,13 @@ def fn(
try:
widget = DashboardWidget.objects.get(id=dashboard_widget_id)
does_widget_have_split = widget.discover_widget_split is not None
+ has_override_feature = features.has(
+ "organizations:performance-discover-widget-split-override-save",
+ organization,
+ actor=request.user,
+ )
- if does_widget_have_split:
+ if does_widget_have_split and not has_override_feature:
# This is essentially cached behaviour and we skip the check
split_query = query
if widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS:
diff --git a/src/sentry/api/endpoints/organization_metrics.py b/src/sentry/api/endpoints/organization_metrics.py
index f8a0f9c0a203f3..39038ffe4c82ea 100644
--- a/src/sentry/api/endpoints/organization_metrics.py
+++ b/src/sentry/api/endpoints/organization_metrics.py
@@ -35,7 +35,6 @@
InvalidMetricsQueryError,
LatestReleaseNotFoundError,
MetricsQueryExecutionError,
- TooManyCodeLocationsRequestedError,
)
from sentry.sentry_metrics.querying.metadata import MetricCodeLocations, get_metric_code_locations
from sentry.sentry_metrics.querying.samples_list import get_sample_list_executor_cls
@@ -67,6 +66,13 @@ class MetricMetaType(Enum):
MetricMetaType.CODE_LOCATIONS.value: MetricCodeLocationsSerializer(),
}
+DEFAULT_USE_CASE_IDS = [
+ UseCaseID.TRANSACTIONS,
+ UseCaseID.SESSIONS,
+ UseCaseID.SPANS,
+ UseCaseID.CUSTOM,
+]
+
def get_use_case_id(request: Request) -> UseCaseID:
"""
@@ -83,6 +89,22 @@ def get_use_case_id(request: Request) -> UseCaseID:
)
+def get_use_case_ids(request: Request) -> Sequence[UseCaseID]:
+ """
+ Gets use case ids from the query params and validates them against the `UseCaseID` enum type.
+
+ If an empty list is supplied, the use case ids in `DEFAULT_USE_CASE_IDS` will be used.
+ """
+
+ try:
+ use_case_params = request.GET.getlist("useCase", DEFAULT_USE_CASE_IDS)
+ return [string_to_use_case_id(use_case_param) for use_case_param in use_case_params]
+ except ValueError:
+ raise ParseError(
+ detail=f"Invalid useCase parameter. Please use one of: {[uc.value for uc in UseCaseID]}"
+ )
+
+
@region_silo_endpoint
class OrganizationMetricsDetailsEndpoint(OrganizationEndpoint):
publish_status = {
@@ -101,7 +123,7 @@ def get(self, request: Request, organization) -> Response:
start, end = get_date_range_from_params(request.GET)
metrics = get_metrics_meta(
- projects=projects, use_case_id=get_use_case_id(request), start=start, end=end
+ projects=projects, use_case_ids=get_use_case_ids(request), start=start, end=end
)
return Response(metrics, status=200)
@@ -452,6 +474,7 @@ class MetricsSamplesSerializer(serializers.Serializer):
field = serializers.ListField(required=True, allow_empty=False, child=serializers.CharField())
max = serializers.FloatField(required=False)
min = serializers.FloatField(required=False)
+ operation = serializers.CharField(required=False)
query = serializers.CharField(required=False)
referrer = serializers.CharField(required=False)
sort = serializers.CharField(required=False)
@@ -510,6 +533,7 @@ def get(self, request: Request, organization: Organization) -> Response:
params,
snuba_params,
serialized["field"],
+ serialized.get("operation"),
serialized.get("query", ""),
serialized.get("min"),
serialized.get("max"),
@@ -585,8 +609,6 @@ def get(self, request: Request, organization) -> Response:
)
except LatestReleaseNotFoundError as e:
return Response(status=404, data={"detail": str(e)})
- except TooManyCodeLocationsRequestedError as e:
- return Response(status=400, data={"detail": str(e)})
response[meta_type.value] = serialize(
data, request.user, METRIC_META_TYPE_SERIALIZER[meta_type.value]
diff --git a/src/sentry/api/endpoints/organization_profiling_functions.py b/src/sentry/api/endpoints/organization_profiling_functions.py
index 4ca349f751defb..bb8f4aa2084426 100644
--- a/src/sentry/api/endpoints/organization_profiling_functions.py
+++ b/src/sentry/api/endpoints/organization_profiling_functions.py
@@ -91,24 +91,25 @@ def get(self, request: Request, organization: Organization) -> Response:
return Response(serializer.errors, status=400)
data = serializer.validated_data
- top_functions = functions.query(
- selected_columns=[
- "project.id",
- "fingerprint",
- "package",
- "function",
- "count()",
- "examples()",
- ],
- query=data.get("query"),
- params=params,
- orderby=["-count()"],
- limit=TOP_FUNCTIONS_LIMIT,
- referrer=Referrer.API_PROFILING_FUNCTION_TRENDS_TOP_EVENTS.value,
- auto_aggregations=True,
- use_aggregate_conditions=True,
- transform_alias_to_input_format=True,
- )
+ with handle_query_errors():
+ top_functions = functions.query(
+ selected_columns=[
+ "project.id",
+ "fingerprint",
+ "package",
+ "function",
+ "count()",
+ "examples()",
+ ],
+ query=data.get("query"),
+ params=params,
+ orderby=["-count()"],
+ limit=TOP_FUNCTIONS_LIMIT,
+ referrer=Referrer.API_PROFILING_FUNCTION_TRENDS_TOP_EVENTS.value,
+ auto_aggregations=True,
+ use_aggregate_conditions=True,
+ transform_alias_to_input_format=True,
+ )
def get_event_stats(_columns, query, params, _rollup, zerofill_results, _comparison_delta):
rollup = get_rollup_from_range(params["end"] - params["start"])
diff --git a/src/sentry/api/endpoints/project_details.py b/src/sentry/api/endpoints/project_details.py
index d6ca3460395363..a5f85689a7394f 100644
--- a/src/sentry/api/endpoints/project_details.py
+++ b/src/sentry/api/endpoints/project_details.py
@@ -817,10 +817,10 @@ def put(self, request: Request, project) -> Response:
"sentry:replay_rage_click_issues",
bool(options["sentry:replay_rage_click_issues"]),
)
- if "sentry:feedback_user_report_notification" in options:
+ if "sentry:feedback_user_report_notifications" in options:
project.update_option(
- "sentry:feedback_user_report_notification",
- bool(options["sentry:feedback_user_report_notification"]),
+ "sentry:feedback_user_report_notifications",
+ bool(options["sentry:feedback_user_report_notifications"]),
)
if "sentry:feedback_ai_spam_detection" in options:
project.update_option(
diff --git a/src/sentry/api/endpoints/rule_snooze.py b/src/sentry/api/endpoints/rule_snooze.py
index e1cf268de7205d..98eb0eb9423a9c 100644
--- a/src/sentry/api/endpoints/rule_snooze.py
+++ b/src/sentry/api/endpoints/rule_snooze.py
@@ -12,7 +12,7 @@
from sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint
from sentry.api.serializers import Serializer, register, serialize
from sentry.api.serializers.rest_framework.base import CamelSnakeSerializer
-from sentry.incidents.models import AlertRule
+from sentry.incidents.models.alert_rule import AlertRule
from sentry.models.organization import Organization
from sentry.models.organizationmember import OrganizationMember
from sentry.models.rule import Rule
diff --git a/src/sentry/api/endpoints/seer_rpc.py b/src/sentry/api/endpoints/seer_rpc.py
index c2c2537721b15b..cde7f37b58a68e 100644
--- a/src/sentry/api/endpoints/seer_rpc.py
+++ b/src/sentry/api/endpoints/seer_rpc.py
@@ -5,6 +5,7 @@
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
+from django.core.exceptions import ObjectDoesNotExist
from rest_framework.exceptions import (
AuthenticationFailed,
NotFound,
@@ -25,6 +26,7 @@
from sentry.services.hybrid_cloud.sig import SerializableFunctionValueException
from sentry.silo.base import SiloMode
from sentry.utils import json
+from sentry.utils.env import in_test_environment
def compare_signature(url: str, body: bytes, signature: str) -> bool:
@@ -131,8 +133,13 @@ def post(self, request: Request, method_name: str) -> Response:
except SerializableFunctionValueException as e:
capture_exception()
raise ParseError from e
+ except ObjectDoesNotExist as e:
+ # Let this fall through, this is normal.
+ capture_exception()
+ raise NotFound from e
except Exception as e:
- # Produce more detailed log
+ if in_test_environment():
+ raise
if settings.DEBUG:
raise Exception(f"Problem processing seer rpc endpoint {method_name}") from e
capture_exception()
@@ -174,7 +181,36 @@ def on_autofix_complete(*, issue_id: int, status: str, steps: list[dict], fix: d
group.save()
+def get_autofix_state(*, issue_id: int) -> dict:
+ group: Group = Group.objects.get(id=issue_id)
+
+ metadata = group.data.get("metadata", {})
+ autofix_data = metadata.get("autofix", {})
+
+ return autofix_data
+
+
seer_method_registry = {
"on_autofix_step_update": on_autofix_step_update,
"on_autofix_complete": on_autofix_complete,
+ "get_autofix_state": get_autofix_state,
}
+
+
+def generate_request_signature(url_path: str, body: bytes) -> str:
+ """
+ Generate a signature for the request body
+ with the first shared secret. If there are other
+ shared secrets in the list, they are only to be used
+ for verification during key rotation.
+ """
+ if not settings.SEER_RPC_SHARED_SECRET:
+ raise RpcAuthenticationSetupException("Cannot sign RPC requests without RPC_SHARED_SECRET")
+
+ signature_input = b"%s:%s" % (
+ url_path.encode("utf8"),
+ body,
+ )
+ secret = settings.SEER_RPC_SHARED_SECRET[0]
+ signature = hmac.new(secret.encode("utf-8"), signature_input, hashlib.sha256).hexdigest()
+ return f"rpc0:{signature}"
diff --git a/src/sentry/api/endpoints/team_alerts_triggered.py b/src/sentry/api/endpoints/team_alerts_triggered.py
index 60ab51d1fd351a..7ba02e36b837ab 100644
--- a/src/sentry/api/endpoints/team_alerts_triggered.py
+++ b/src/sentry/api/endpoints/team_alerts_triggered.py
@@ -14,8 +14,8 @@
from sentry.api.serializers import serialize
from sentry.api.serializers.models.alert_rule import AlertRuleSerializer
from sentry.api.utils import get_date_range_from_params
-from sentry.incidents.models import (
- AlertRule,
+from sentry.incidents.models.alert_rule import AlertRule
+from sentry.incidents.models.incident import (
IncidentActivity,
IncidentActivityType,
IncidentProject,
diff --git a/src/sentry/api/endpoints/team_projects.py b/src/sentry/api/endpoints/team_projects.py
index 50604514966f8e..108215210be5d2 100644
--- a/src/sentry/api/endpoints/team_projects.py
+++ b/src/sentry/api/endpoints/team_projects.py
@@ -4,7 +4,7 @@
from rest_framework.request import Request
from rest_framework.response import Response
-from sentry import audit_log, features
+from sentry import audit_log
from sentry.api.api_owners import ApiOwner
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import EnvironmentMixin, region_silo_endpoint
@@ -182,11 +182,7 @@ def post(self, request: Request, team) -> Response:
# XXX: create sample event?
# Turns on some inbound filters by default for new Javascript platform projects
- if (
- features.has("organizations:default-inbound-filters", team.organization)
- and project.platform
- and project.platform.startswith("javascript")
- ):
+ if project.platform and project.platform.startswith("javascript"):
set_default_inbound_filters(project, team.organization)
self.create_audit_entry(
diff --git a/src/sentry/api/helpers/group_index/update.py b/src/sentry/api/helpers/group_index/update.py
index 8f627049fd9ff1..a5b82494774ee4 100644
--- a/src/sentry/api/helpers/group_index/update.py
+++ b/src/sentry/api/helpers/group_index/update.py
@@ -597,7 +597,6 @@ def update_groups(
acting_user=acting_user,
status_details=result.get("statusDetails", {}),
sender=update_groups,
- activity_type=activity_type,
)
# XXX (ahmed): hack to get the activities to work properly on issues page. Not sure of
diff --git a/src/sentry/api/permissions.py b/src/sentry/api/permissions.py
index afba82240fecaf..333fb579833cc6 100644
--- a/src/sentry/api/permissions.py
+++ b/src/sentry/api/permissions.py
@@ -78,16 +78,36 @@ class (that is not StaffPermission) require this mixin because staff does not gi
staff_allowed_methods = {"GET", "POST", "PUT", "DELETE"}
def has_permission(self, request, *args, **kwargs) -> bool:
- # Check for staff before calling super to avoid catching exceptions from super
- if request.method in self.staff_allowed_methods and is_active_staff(request):
+ """
+ Calls the parent class's has_permission method. If it returns False or
+ raises an exception and the method is allowed by the mixin, we then check
+ if the request is from an active staff member. Raised exceptions are not caught
+ if the method is not allowed by the mixin or the request is not from an active staff member.
+ """
+ try:
+ if super().has_permission(request, *args, **kwargs):
+ return True
+ except Exception:
+ if not (request.method in self.staff_allowed_methods and is_active_staff(request)):
+ raise
return True
- return super().has_permission(request, *args, **kwargs)
+ return request.method in self.staff_allowed_methods and is_active_staff(request)
def has_object_permission(self, request, *args, **kwargs) -> bool:
- # Check for staff before calling super to avoid catching exceptions from super
- if request.method in self.staff_allowed_methods and is_active_staff(request):
+ """
+ Calls the parent class's has_object_permission method. If it returns False or
+ raises an exception and the method is allowed by the mixin, we then check
+ if the request is from an active staff member. Raised exceptions are not caught
+ if the method is not allowed by the mixin or the request is not from an active staff member.
+ """
+ try:
+ if super().has_object_permission(request, *args, **kwargs):
+ return True
+ except Exception:
+ if not (request.method in self.staff_allowed_methods and is_active_staff(request)):
+ raise
return True
- return super().has_object_permission(request, *args, **kwargs)
+ return request.method in self.staff_allowed_methods and is_active_staff(request)
def is_not_2fa_compliant(self, request, *args, **kwargs) -> bool:
return super().is_not_2fa_compliant(request, *args, **kwargs) and not is_active_staff(
diff --git a/src/sentry/api/serializers/models/alert_rule.py b/src/sentry/api/serializers/models/alert_rule.py
index 063560be0bf40e..ae3bf16b8fcedd 100644
--- a/src/sentry/api/serializers/models/alert_rule.py
+++ b/src/sentry/api/serializers/models/alert_rule.py
@@ -11,7 +11,7 @@
from sentry import features
from sentry.api.serializers import Serializer, register, serialize
from sentry.api.serializers.models.rule import RuleSerializer
-from sentry.incidents.models import (
+from sentry.incidents.models.alert_rule import (
AlertRule,
AlertRuleActivity,
AlertRuleActivityType,
@@ -19,8 +19,8 @@
AlertRuleMonitorType,
AlertRuleTrigger,
AlertRuleTriggerAction,
- Incident,
)
+from sentry.incidents.models.incident import Incident
from sentry.models.actor import ACTOR_TYPES, Actor, actor_type_to_string
from sentry.models.rule import Rule
from sentry.models.rulesnooze import RuleSnooze
diff --git a/src/sentry/api/serializers/models/alert_rule_trigger.py b/src/sentry/api/serializers/models/alert_rule_trigger.py
index da668a14342490..c7bb264383077f 100644
--- a/src/sentry/api/serializers/models/alert_rule_trigger.py
+++ b/src/sentry/api/serializers/models/alert_rule_trigger.py
@@ -5,7 +5,7 @@
from sentry.api.serializers import Serializer, register, serialize
from sentry.incidents.endpoints.utils import translate_threshold
-from sentry.incidents.models import (
+from sentry.incidents.models.alert_rule import (
AlertRuleTrigger,
AlertRuleTriggerAction,
AlertRuleTriggerExclusion,
diff --git a/src/sentry/api/serializers/models/alert_rule_trigger_action.py b/src/sentry/api/serializers/models/alert_rule_trigger_action.py
index 060d6f4d0615a6..7e552f9d6c2e23 100644
--- a/src/sentry/api/serializers/models/alert_rule_trigger_action.py
+++ b/src/sentry/api/serializers/models/alert_rule_trigger_action.py
@@ -1,7 +1,7 @@
import logging
from sentry.api.serializers import Serializer, register
-from sentry.incidents.models import AlertRuleTriggerAction
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
logger = logging.getLogger(__name__)
diff --git a/src/sentry/api/serializers/models/incident.py b/src/sentry/api/serializers/models/incident.py
index 29e3c48a89c779..2ec3f96f7dd536 100644
--- a/src/sentry/api/serializers/models/incident.py
+++ b/src/sentry/api/serializers/models/incident.py
@@ -4,7 +4,7 @@
from sentry.api.serializers import Serializer, register, serialize
from sentry.api.serializers.models.alert_rule import AlertRuleSerializer
-from sentry.incidents.models import (
+from sentry.incidents.models.incident import (
Incident,
IncidentActivity,
IncidentProject,
diff --git a/src/sentry/api/serializers/models/incidentactivity.py b/src/sentry/api/serializers/models/incidentactivity.py
index 36c35db2077d0b..598e5db1ebe86a 100644
--- a/src/sentry/api/serializers/models/incidentactivity.py
+++ b/src/sentry/api/serializers/models/incidentactivity.py
@@ -1,7 +1,7 @@
from django.db.models import prefetch_related_objects
from sentry.api.serializers import Serializer, register
-from sentry.incidents.models import IncidentActivity
+from sentry.incidents.models.incident import IncidentActivity
from sentry.services.hybrid_cloud.user.serial import serialize_generic_user
from sentry.services.hybrid_cloud.user.service import user_service
diff --git a/src/sentry/api/serializers/models/incidentseen.py b/src/sentry/api/serializers/models/incidentseen.py
index d9e92469c12640..ec96c57523529f 100644
--- a/src/sentry/api/serializers/models/incidentseen.py
+++ b/src/sentry/api/serializers/models/incidentseen.py
@@ -1,5 +1,5 @@
from sentry.api.serializers import Serializer, register
-from sentry.incidents.models import IncidentSeen
+from sentry.incidents.models.incident import IncidentSeen
from sentry.services.hybrid_cloud.user.serial import serialize_generic_user
from sentry.services.hybrid_cloud.user.service import user_service
diff --git a/src/sentry/api/serializers/models/project.py b/src/sentry/api/serializers/models/project.py
index 4475c5146e411d..59c2519e113013 100644
--- a/src/sentry/api/serializers/models/project.py
+++ b/src/sentry/api/serializers/models/project.py
@@ -233,8 +233,8 @@ def format_options(attrs: dict[str, Any]) -> dict[str, Any]:
options.get(f"sentry:{FilterTypes.ERROR_MESSAGES}", [])
),
"feedback:branding": options.get("feedback:branding", "1") == "1",
- "sentry:feedback_user_report_notification": bool(
- options.get("sentry:feedback_user_report_notification")
+ "sentry:feedback_user_report_notifications": bool(
+ options.get("sentry:feedback_user_report_notifications")
),
"sentry:feedback_ai_spam_detection": bool(options.get("sentry:feedback_ai_spam_detection")),
"sentry:replay_rage_click_issues": options.get("sentry:replay_rage_click_issues"),
diff --git a/src/sentry/api/utils.py b/src/sentry/api/utils.py
index ace64749aca00d..56d9d903b3d72c 100644
--- a/src/sentry/api/utils.py
+++ b/src/sentry/api/utils.py
@@ -20,6 +20,7 @@
from rest_framework.exceptions import APIException, ParseError
from rest_framework.request import Request
from sentry_sdk import Scope
+from urllib3.exceptions import MaxRetryError, ReadTimeoutError
from sentry import options
from sentry.auth.staff import is_active_staff
@@ -424,6 +425,7 @@ def handle_query_errors() -> Generator[None, None, None]:
error,
(
RateLimitExceeded,
+ ReadTimeoutError,
QueryMemoryLimitExceeded,
QueryExecutionTimeMaximum,
QueryTooManySimultaneous,
@@ -447,6 +449,12 @@ def handle_query_errors() -> Generator[None, None, None]:
):
sentry_sdk.capture_exception(error)
message = "Internal error. Your query failed to run."
+ elif isinstance(
+ error,
+ (MaxRetryError),
+ ):
+ sentry_sdk.capture_message(str(error), level="warning")
+ message = "Internal error. Your query failed to run."
else:
sentry_sdk.capture_exception(error)
raise APIException(detail=message)
diff --git a/src/sentry/auth/superuser.py b/src/sentry/auth/superuser.py
index f2f15518f24048..5747c7ea55c556 100644
--- a/src/sentry/auth/superuser.py
+++ b/src/sentry/auth/superuser.py
@@ -179,7 +179,15 @@ def __init__(self, request, allowed_ips=UNSET, org_id=UNSET, current_datetime=No
@staticmethod
def _needs_validation():
- if is_self_hosted() or DISABLE_SU_FORM_U2F_CHECK_FOR_LOCAL:
+ self_hosted = is_self_hosted()
+ logger.info(
+ "superuser.needs-validation",
+ extra={
+ "DISABLE_SU_FORM_U2F_CHECK_FOR_LOCAL": DISABLE_SU_FORM_U2F_CHECK_FOR_LOCAL,
+ "self_hosted": self_hosted,
+ },
+ )
+ if self_hosted or DISABLE_SU_FORM_U2F_CHECK_FOR_LOCAL:
return False
return settings.VALIDATE_SUPERUSER_ACCESS_CATEGORY_AND_REASON
diff --git a/src/sentry/backup/imports.py b/src/sentry/backup/imports.py
index c65ee6ca0d8624..85b1a65fe55a18 100644
--- a/src/sentry/backup/imports.py
+++ b/src/sentry/backup/imports.py
@@ -47,6 +47,20 @@
"import_in_global_scope",
)
+# We have to be careful when removing fields from our model schemas, since exports created using
+# the old-but-still-in-the-support-window versions could have those fields set in the data they
+# provide. This dict serves as a map of all fields that have been deleted on HEAD but are still
+# valid in at least one of the versions we support. For example, since our current version
+# support window is two minor versions back, if we delete a field at version 24.5.N, we must
+# include an entry in this map for that field until that version is out of the support window
+# (in this case, we can remove the shim once version 24.7.0 is released).
+#
+# NOTE TO FUTURE EDITORS: please keep the `DELETED_FIELDS` dict, and the subsequent `if` clause,
+# around even if the dict is empty, to ensure that there is a ready place to pop shims into. For
+# each entry in this dict, please leave a TODO comment pointing to a GitHub issue for removing
+# the shim, noting in the comment which self-hosted release will trigger the removal.
+DELETED_FIELDS: dict[str, set[str]] = {}
+
class ImportingError(Exception):
def __init__(self, context: RpcImportError) -> None:
@@ -135,6 +149,21 @@ def _import(
if decryptor is not None
else src.read().decode("utf-8")
)
+
+ if len(DELETED_FIELDS) > 0:
+ # Parse the content JSON and remove any fields that we have marked for deletion in the
+ # function.
+ shimmed_models = set(DELETED_FIELDS.keys())
+ content_as_json = json.loads(content) # type: ignore
+ for json_model in content_as_json:
+ if json_model["model"] in shimmed_models:
+ fields_to_remove = DELETED_FIELDS[json_model["model"]]
+ for field in fields_to_remove:
+ json_model["fields"].pop(field, None)
+
+ # Return the content to byte form, as that is what the Django deserializer expects.
+ content = json.dumps(content_as_json)
+
filters = []
if filter_by is not None:
filters.append(filter_by)
diff --git a/src/sentry/conf/api_pagination_allowlist_do_not_modify.py b/src/sentry/conf/api_pagination_allowlist_do_not_modify.py
new file mode 100644
index 00000000000000..445db3cad1a5ce
--- /dev/null
+++ b/src/sentry/conf/api_pagination_allowlist_do_not_modify.py
@@ -0,0 +1,101 @@
+"""
+ This list is tracking old api endpoints that don't correctly implement pagination.
+ The goal is to eventually add pagination for all and shrink this list.
+ DO NOT ADD ANY NEW APIS
+"""
+SENTRY_API_PAGINATION_ALLOWLIST_DO_NOT_MODIFY = {
+ "GroupTagsEndpoint",
+ "GroupIntegrationsEndpoint",
+ "ProjectServiceHookStatsEndpoint",
+ "OrganizationProcessingIssuesEndpoint",
+ "OrganizationEnvironmentsEndpoint",
+ "OrganizationMetricsTagDetailsEndpoint",
+ "OrganizationProjectsEndpoint",
+ "SentryInternalAppTokensEndpoint",
+ "ProjectPlatformsEndpoint",
+ "UserRolesEndpoint",
+ "AuthenticatorIndexEndpoint",
+ "OrganizationUserTeamsEndpoint",
+ "GroupParticipantsEndpoint",
+ "AssistantEndpoint",
+ "ApiTokensEndpoint",
+ "UserPermissionsConfigEndpoint",
+ "ProjectFiltersEndpoint",
+ "InternalQueueTasksEndpoint",
+ "TeamStatsEndpoint",
+ "OrganizationPluginsConfigsEndpoint",
+ "ProjectIssuesResolvedInReleaseEndpoint",
+ "OrganizationUsersEndpoint",
+ "ProjectEnvironmentsEndpoint",
+ "ProjectUserStatsEndpoint",
+ "ProjectUsersEndpoint",
+ "OrganizationEventsRootCauseAnalysisEndpoint",
+ "UserAuthenticatorIndexEndpoint",
+ "OrganizationDeriveCodeMappingsEndpoint",
+ "UserUserRolesEndpoint",
+ "UserSocialIdentitiesIndexEndpoint",
+ "OrganizationEventsNewTrendsStatsEndpoint",
+ "OrganizationMetricsTagsEndpoint",
+ "OrganizationIntegrationServerlessFunctionsEndpoint",
+ "OrganizationRepositoriesEndpoint",
+ "OrganizationSentryFunctionEndpoint",
+ "GroupSimilarIssuesEmbeddingsEndpoint",
+ "OrganizationMissingMembersEndpoint",
+ "OrganizationSdkUpdatesEndpoint",
+ "OrganizationUserReportsEndpoint",
+ "OrganizationReleasesEndpoint",
+ "ProjectGroupingConfigsEndpoint",
+ "OrganizationAccessRequestDetailsEndpoint",
+ "OrganizationIndexEndpoint",
+ "ProjectPluginsEndpoint",
+ "TeamGroupsOldEndpoint",
+ "UserPermissionsEndpoint",
+ "ReleaseThresholdEndpoint",
+ "UserIdentityConfigEndpoint",
+ "UserSubscriptionsEndpoint",
+ "BuiltinSymbolSourcesEndpoint",
+ "OrganizationRelayUsage",
+ "OrganizationApiKeyIndexEndpoint",
+ "OrganizationMetricsDetailsEndpoint",
+ "GroupStatsEndpoint",
+ "ProjectMemberIndexEndpoint",
+ "ProjectReleaseSetupCompletionEndpoint",
+ "OrganizationPluginsEndpoint",
+ "SentryAppRequestsEndpoint",
+ "ProjectSymbolSourcesEndpoint",
+ "SentryAppsStatsEndpoint",
+ "OrganizationStatsEndpoint",
+ "ProjectArtifactLookupEndpoint",
+ "UserNotificationSettingsProvidersEndpoint",
+ "IntegrationFeaturesEndpoint",
+ "OrganizationAuthProvidersEndpoint",
+ "UserEmailsEndpoint",
+ "ProjectCodeOwnersEndpoint",
+ "OrganizationUserDetailsEndpoint",
+ "OrgAuthTokensEndpoint",
+ "OrganizationProfilingFiltersEndpoint",
+ "GroupingConfigsEndpoint",
+ "ProjectStatsEndpoint",
+ "OrganizationRecentSearchesEndpoint",
+ "UserNotificationSettingsOptionsEndpoint",
+ "ProjectAgnosticRuleConditionsEndpoint",
+ "JiraServerSearchEndpointTest",
+ "GroupEventsEndpoint",
+ "PluginGroupEndpoint",
+ "KeyTransactionEndpoint",
+ "DiscoverSavedQueriesEndpoint",
+ "OrganizationEventsFacetsEndpoint",
+ "OrganizationEventsRelatedIssuesEndpoint",
+ "OrganizationEventsSpansHistogramEndpoint",
+ "OrganizationEventsVitalsEndpoint",
+ "OrganizationGroupIndexStatsEndpoint",
+ "OrganizationIssuesResolvedInReleaseEndpoint",
+ "OrganizationTagsEndpoint",
+ "ProjectGroupIndexEndpoint",
+ "ProjectTagsEndpoint",
+ "OrganizationSearchesEndpoint",
+ "OrganizationAlertRuleAvailableActionIndexEndpoint",
+ "JiraSearchEndpoint",
+ "GithubSharedSearchEndpoint",
+ "ProjectMonitorStatsEndpoint",
+}
diff --git a/src/sentry/conf/server.py b/src/sentry/conf/server.py
index 951c76fcf570a7..11d4deeb1a1574 100644
--- a/src/sentry/conf/server.py
+++ b/src/sentry/conf/server.py
@@ -17,11 +17,13 @@
from urllib.parse import urlparse
import sentry
+from sentry.conf.api_pagination_allowlist_do_not_modify import (
+ SENTRY_API_PAGINATION_ALLOWLIST_DO_NOT_MODIFY,
+)
from sentry.conf.types.kafka_definition import ConsumerDefinition
from sentry.conf.types.logging_config import LoggingConfig
from sentry.conf.types.role_dict import RoleDict
from sentry.conf.types.sdk_config import ServerSdkConfig
-from sentry.conf.types.topic_definition import TopicDefinition
from sentry.utils import json # NOQA (used in getsentry config)
from sentry.utils.celery import crontab_with_minute_jitter
from sentry.utils.types import Type, type_from_value
@@ -1455,6 +1457,8 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]:
"auth:enterprise-superuser-read-write": False,
# Enables user registration.
"auth:register": True,
+ # Enables activated alert rules
+ "organizations:activated-alert-rules": False,
# Enable advanced search features, like negation and wildcard matching.
"organizations:advanced-search": True,
# Enables alert creation on indexed events in UI (use for PoC/testing only)
@@ -1480,8 +1484,6 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]:
"organizations:codecov-commit-sha-from-git-blame": False,
# The overall flag for codecov integration, gated by plans.
"organizations:codecov-integration": False,
- # Enable the Commit Context feature
- "organizations:commit-context": True,
# Enable alerting based on crash free sessions/users
"organizations:crash-rate-alerts": True,
# Enable creating organizations within sentry
@@ -1517,8 +1519,6 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]:
"organizations:dashboards-rh-widget": False,
# Enables experimental WIP ddm related features
"organizations:ddm-experimental": False,
- # Enables ddm formula features
- "organizations:ddm-formulas": False,
# Delightful Developer Metrics (DDM):
# Enable sidebar menu item and all UI (requires custom-metrics flag as well)
"organizations:ddm-ui": False,
@@ -1528,10 +1528,10 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]:
"organizations:ddm-dashboard-import": False,
# Enable the default alert at project creation to be the high priority alert
"organizations:default-high-priority-alerts": False,
- # Enable inbound filters to be turned on by default for new Javascript Projects
- "organizations:default-inbound-filters": False,
# Enables automatically deriving of code mappings
"organizations:derive-code-mappings": True,
+ # Enables automatically deriving of PHP code mappings
+ "organizations:derive-code-mappings-php": False,
# Enable device.class as a selectable column
"organizations:device-classification": False,
# Enables synthesis of device.class in ingest
@@ -1721,6 +1721,8 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]:
"organizations:performance-database-view-percentiles": False,
# Enable UI sending a discover split for widget
"organizations:performance-discover-widget-split-ui": False,
+ # Enable backend overriding and always making a fresh split decision
+ "organizations:performance-discover-widget-split-override-save": False,
# Enables updated all events tab in a performance issue
"organizations:performance-issues-all-events-tab": False,
# Enable compressed assets performance issue type
@@ -1847,10 +1849,10 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]:
"organizations:session-replay-enable-canvas": False,
# Enable canvas replaying
"organizations:session-replay-enable-canvas-replayer": False,
- # Enable replay event linking in event processing
- "organizations:session-replay-event-linking": False,
# Enable linking from 'new issue' email notifs to the issue replay list
"organizations:session-replay-issue-emails": False,
+ # Enable mobile replay player
+ "organizations:session-replay-mobile-player": False,
# Enable the new event linking columns to be queried
"organizations:session-replay-new-event-counts": False,
# Enable Rage Click Issue Creation In Recording Consumer
@@ -2405,6 +2407,8 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]:
# Secret key for OpenAI
OPENAI_API_KEY: str | None = None
+SENTRY_API_PAGINATION_ALLOWLIST = SENTRY_API_PAGINATION_ALLOWLIST_DO_NOT_MODIFY
+
SENTRY_SCOPES = {
"org:read",
"org:write",
@@ -3459,6 +3463,7 @@ def build_cdc_postgres_init_db_volume(settings: Any) -> dict[str, dict[str, str]
KAFKA_SESSIONS_SUBSCRIPTIONS_RESULTS = "sessions-subscription-results"
KAFKA_METRICS_SUBSCRIPTIONS_RESULTS = "metrics-subscription-results"
KAFKA_INGEST_EVENTS = "ingest-events"
+KAFKA_INGEST_FEEDBACK_EVENTS = "ingest-feedback-events"
KAFKA_INGEST_EVENTS_DLQ = "ingest-events-dlq"
KAFKA_INGEST_ATTACHMENTS = "ingest-attachments"
KAFKA_INGEST_TRANSACTIONS = "ingest-transactions"
@@ -3511,6 +3516,7 @@ def build_cdc_postgres_init_db_volume(settings: Any) -> dict[str, dict[str, str]
"sessions-subscription-results": "default",
"metrics-subscription-results": "default",
"ingest-events": "default",
+ "ingest-feedback-events": "default",
"ingest-attachments": "default",
"ingest-transactions": "default",
"ingest-metrics": "default",
@@ -3531,9 +3537,16 @@ def build_cdc_postgres_init_db_volume(settings: Any) -> dict[str, dict[str, str]
"shared-resources-usage": "default",
}
+from typing import TypedDict
+
+
+class LegacyTopicDefinition(TypedDict):
+ cluster: str
+
+
# Cluster configuration for each Kafka topic by name.
# DEPRECATED
-KAFKA_TOPICS: Mapping[str, TopicDefinition] = {
+KAFKA_TOPICS: Mapping[str, LegacyTopicDefinition] = {
KAFKA_EVENTS: {"cluster": "default"},
KAFKA_EVENTS_COMMIT_LOG: {"cluster": "default"},
KAFKA_TRANSACTIONS: {"cluster": "default"},
@@ -3661,6 +3674,10 @@ def build_cdc_postgres_init_db_volume(settings: Any) -> dict[str, dict[str, str]
SENTRY_USE_UWSGI = True
+# Configure service wrapper for reprocessing2 state
+SENTRY_REPROCESSING_STORE = "sentry.eventstore.reprocessing.redis.RedisReprocessingStore"
+SENTRY_REPROCESSING_STORE_OPTIONS = {"cluster": "default"}
+
# When copying attachments for to-be-reprocessed events into processing store,
# how large is an individual file chunk? Each chunk is stored as Redis key.
SENTRY_REPROCESSING_ATTACHMENT_CHUNK_SIZE = 2**20
@@ -3834,7 +3851,7 @@ def build_cdc_postgres_init_db_volume(settings: Any) -> dict[str, dict[str, str]
ENABLE_ANALYTICS = False
MAX_SLOW_CONDITION_ISSUE_ALERTS = 100
-MAX_MORE_SLOW_CONDITION_ISSUE_ALERTS = 200
+MAX_MORE_SLOW_CONDITION_ISSUE_ALERTS = 300
MAX_FAST_CONDITION_ISSUE_ALERTS = 500
MAX_QUERY_SUBSCRIPTIONS_PER_ORG = 1000
diff --git a/src/sentry/conf/types/kafka_definition.py b/src/sentry/conf/types/kafka_definition.py
index 61820572647de8..2bdf08bad8684f 100644
--- a/src/sentry/conf/types/kafka_definition.py
+++ b/src/sentry/conf/types/kafka_definition.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from collections.abc import Callable, Mapping, Sequence
+from collections.abc import Mapping, Sequence
from enum import Enum
from typing import Any, Required, TypedDict
@@ -26,6 +26,7 @@ class Topic(Enum):
METRICS_SUBSCRIPTIONS_RESULTS = "metrics-subscription-results"
INGEST_EVENTS = "ingest-events"
INGEST_EVENTS_DLQ = "ingest-events-dlq"
+ INGEST_FEEDBACK_EVENTS = "ingest-feedback-events"
INGEST_ATTACHMENTS = "ingest-attachments"
INGEST_TRANSACTIONS = "ingest-transactions"
INGEST_METRICS = "ingest-metrics"
@@ -48,11 +49,8 @@ class Topic(Enum):
class ConsumerDefinition(TypedDict, total=False):
- # XXX: Eventually only Topic will be accepted here.
- # For backward compatibility with getsentry, we must also
- # support the physical override topic name (str, Callable[str], str)
- # while the migration is taking place
- topic: Required[Topic | str | Callable[[], str]]
+ # Default topic
+ topic: Required[Topic]
# Schema validation will be run if true
validate_schema: bool | None
@@ -70,7 +68,7 @@ class ConsumerDefinition(TypedDict, total=False):
synchronize_commit_group_default: str
synchronize_commit_log_topic_default: str
- dlq_topic: str
+ dlq_topic: Topic
dlq_max_invalid_ratio: float | None
dlq_max_consecutive_count: int | None
diff --git a/src/sentry/conf/types/topic_definition.py b/src/sentry/conf/types/topic_definition.py
index 41992b74d9ad78..bc5aaa44ddef80 100644
--- a/src/sentry/conf/types/topic_definition.py
+++ b/src/sentry/conf/types/topic_definition.py
@@ -5,3 +5,5 @@
class TopicDefinition(TypedDict):
cluster: str
+ # The topic name may be overridden from the default via KAFKA_TOPIC_OVERRIDES
+ real_topic_name: str
diff --git a/src/sentry/consumers/__init__.py b/src/sentry/consumers/__init__.py
index 1f4788fe70b247..e7e9b5a21987d2 100644
--- a/src/sentry/consumers/__init__.py
+++ b/src/sentry/consumers/__init__.py
@@ -186,48 +186,41 @@ def ingest_events_options() -> list[click.Option]:
),
]
-
-_INGEST_SPANS_OPTIONS = multiprocessing_options(default_max_batch_size=100) + [
- click.Option(["--output-topic", "output_topic"], type=str, default="snuba-spans"),
-]
-
# consumer name -> consumer definition
-# TODO: `topic` should gradually be migrated to the logical topic rather than the overridden
-# string. We support both currently for backward compatibility.
KAFKA_CONSUMERS: Mapping[str, ConsumerDefinition] = {
"ingest-profiles": {
- "topic": settings.KAFKA_PROFILES,
+ "topic": Topic.PROFILES,
"strategy_factory": "sentry.profiles.consumers.process.factory.ProcessProfileStrategyFactory",
},
"ingest-replay-recordings": {
- "topic": settings.KAFKA_INGEST_REPLAYS_RECORDINGS,
+ "topic": Topic.INGEST_REPLAYS_RECORDINGS,
"strategy_factory": "sentry.replays.consumers.recording.ProcessReplayRecordingStrategyFactory",
"click_options": ingest_replay_recordings_options(),
},
"ingest-replay-recordings-buffered": {
- "topic": settings.KAFKA_INGEST_REPLAYS_RECORDINGS,
+ "topic": Topic.INGEST_REPLAYS_RECORDINGS,
"strategy_factory": "sentry.replays.consumers.recording_buffered.RecordingBufferedStrategyFactory",
"click_options": ingest_replay_recordings_buffered_options(),
},
"ingest-monitors": {
- "topic": settings.KAFKA_INGEST_MONITORS,
+ "topic": Topic.INGEST_MONITORS,
"strategy_factory": "sentry.monitors.consumers.monitor_consumer.StoreMonitorCheckInStrategyFactory",
"click_options": ingest_monitors_options(),
},
"billing-metrics-consumer": {
- "topic": settings.KAFKA_SNUBA_GENERIC_METRICS,
+ "topic": Topic.SNUBA_GENERIC_METRICS,
"strategy_factory": "sentry.ingest.billing_metrics_consumer.BillingMetricsConsumerStrategyFactory",
},
# Known differences to 'sentry run occurrences-ingest-consumer':
# - ingest_consumer_types metric tag is missing. Use the kafka_topic and
# group_id tags provided by run_basic_consumer instead
"ingest-occurrences": {
- "topic": settings.KAFKA_INGEST_OCCURRENCES,
+ "topic": Topic.INGEST_OCCURRENCES,
"strategy_factory": "sentry.issues.run.OccurrenceStrategyFactory",
"click_options": multiprocessing_options(default_max_batch_size=20),
},
"events-subscription-results": {
- "topic": settings.KAFKA_EVENTS_SUBSCRIPTIONS_RESULTS,
+ "topic": Topic.EVENTS_SUBSCRIPTIONS_RESULTS,
"strategy_factory": "sentry.snuba.query_subscriptions.run.QuerySubscriptionStrategyFactory",
"click_options": multiprocessing_options(default_max_batch_size=100),
"static_args": {
@@ -235,7 +228,7 @@ def ingest_events_options() -> list[click.Option]:
},
},
"transactions-subscription-results": {
- "topic": settings.KAFKA_TRANSACTIONS_SUBSCRIPTIONS_RESULTS,
+ "topic": Topic.TRANSACTIONS_SUBSCRIPTIONS_RESULTS,
"strategy_factory": "sentry.snuba.query_subscriptions.run.QuerySubscriptionStrategyFactory",
"click_options": multiprocessing_options(default_max_batch_size=100),
"static_args": {
@@ -252,7 +245,7 @@ def ingest_events_options() -> list[click.Option]:
},
},
"sessions-subscription-results": {
- "topic": settings.KAFKA_SESSIONS_SUBSCRIPTIONS_RESULTS,
+ "topic": Topic.SESSIONS_SUBSCRIPTIONS_RESULTS,
"strategy_factory": "sentry.snuba.query_subscriptions.run.QuerySubscriptionStrategyFactory",
"click_options": multiprocessing_options(),
"static_args": {
@@ -260,7 +253,7 @@ def ingest_events_options() -> list[click.Option]:
},
},
"metrics-subscription-results": {
- "topic": settings.KAFKA_METRICS_SUBSCRIPTIONS_RESULTS,
+ "topic": Topic.METRICS_SUBSCRIPTIONS_RESULTS,
"strategy_factory": "sentry.snuba.query_subscriptions.run.QuerySubscriptionStrategyFactory",
"click_options": multiprocessing_options(default_max_batch_size=100),
"static_args": {
@@ -268,7 +261,15 @@ def ingest_events_options() -> list[click.Option]:
},
},
"ingest-events": {
- "topic": settings.KAFKA_INGEST_EVENTS,
+ "topic": Topic.INGEST_EVENTS,
+ "strategy_factory": "sentry.ingest.consumer.factory.IngestStrategyFactory",
+ "click_options": ingest_events_options(),
+ "static_args": {
+ "consumer_type": "events",
+ },
+ },
+ "ingest-feedback-events": {
+ "topic": settings.KAFKA_INGEST_FEEDBACK_EVENTS,
"strategy_factory": "sentry.ingest.consumer.factory.IngestStrategyFactory",
"click_options": ingest_events_options(),
"static_args": {
@@ -276,7 +277,7 @@ def ingest_events_options() -> list[click.Option]:
},
},
"ingest-attachments": {
- "topic": settings.KAFKA_INGEST_ATTACHMENTS,
+ "topic": Topic.INGEST_ATTACHMENTS,
"strategy_factory": "sentry.ingest.consumer.factory.IngestStrategyFactory",
"click_options": ingest_events_options(),
"static_args": {
@@ -284,7 +285,7 @@ def ingest_events_options() -> list[click.Option]:
},
},
"ingest-transactions": {
- "topic": settings.KAFKA_INGEST_TRANSACTIONS,
+ "topic": Topic.INGEST_TRANSACTIONS,
"strategy_factory": "sentry.ingest.consumer.factory.IngestStrategyFactory",
"click_options": ingest_events_options(),
"static_args": {
@@ -292,29 +293,29 @@ def ingest_events_options() -> list[click.Option]:
},
},
"ingest-metrics": {
- "topic": settings.KAFKA_INGEST_METRICS,
+ "topic": Topic.INGEST_METRICS,
"strategy_factory": "sentry.sentry_metrics.consumers.indexer.parallel.MetricsConsumerStrategyFactory",
"click_options": _METRICS_INDEXER_OPTIONS,
"static_args": {
"ingest_profile": "release-health",
},
- "dlq_topic": settings.KAFKA_INGEST_METRICS_DLQ,
+ "dlq_topic": Topic.INGEST_METRICS_DLQ,
"dlq_max_invalid_ratio": 0.01,
"dlq_max_consecutive_count": 1000,
},
"ingest-generic-metrics": {
- "topic": settings.KAFKA_INGEST_PERFORMANCE_METRICS,
+ "topic": Topic.INGEST_PERFORMANCE_METRICS,
"strategy_factory": "sentry.sentry_metrics.consumers.indexer.parallel.MetricsConsumerStrategyFactory",
"click_options": _METRICS_INDEXER_OPTIONS,
"static_args": {
"ingest_profile": "performance",
},
- "dlq_topic": settings.KAFKA_INGEST_GENERIC_METRICS_DLQ,
+ "dlq_topic": Topic.INGEST_GENERIC_METRICS_DLQ,
"dlq_max_invalid_ratio": 0.01,
"dlq_max_consecutive_count": 1000,
},
"generic-metrics-last-seen-updater": {
- "topic": settings.KAFKA_SNUBA_GENERIC_METRICS,
+ "topic": Topic.SNUBA_GENERIC_METRICS,
"strategy_factory": "sentry.sentry_metrics.consumers.last_seen_updater.LastSeenUpdaterStrategyFactory",
"click_options": _METRICS_LAST_SEEN_UPDATER_OPTIONS,
"static_args": {
@@ -322,7 +323,7 @@ def ingest_events_options() -> list[click.Option]:
},
},
"metrics-last-seen-updater": {
- "topic": settings.KAFKA_SNUBA_METRICS,
+ "topic": Topic.SNUBA_METRICS,
"strategy_factory": "sentry.sentry_metrics.consumers.last_seen_updater.LastSeenUpdaterStrategyFactory",
"click_options": _METRICS_LAST_SEEN_UPDATER_OPTIONS,
"static_args": {
@@ -330,28 +331,28 @@ def ingest_events_options() -> list[click.Option]:
},
},
"post-process-forwarder-issue-platform": {
- "topic": settings.KAFKA_EVENTSTREAM_GENERIC,
+ "topic": Topic.EVENTSTREAM_GENERIC,
"strategy_factory": "sentry.eventstream.kafka.dispatch.EventPostProcessForwarderStrategyFactory",
"synchronize_commit_log_topic_default": "snuba-generic-events-commit-log",
"synchronize_commit_group_default": "generic_events_group",
"click_options": _POST_PROCESS_FORWARDER_OPTIONS,
},
"post-process-forwarder-transactions": {
- "topic": settings.KAFKA_TRANSACTIONS,
+ "topic": Topic.TRANSACTIONS,
"strategy_factory": "sentry.eventstream.kafka.dispatch.EventPostProcessForwarderStrategyFactory",
"synchronize_commit_log_topic_default": "snuba-transactions-commit-log",
"synchronize_commit_group_default": "transactions_group",
"click_options": _POST_PROCESS_FORWARDER_OPTIONS,
},
"post-process-forwarder-errors": {
- "topic": settings.KAFKA_EVENTS,
+ "topic": Topic.EVENTS,
"strategy_factory": "sentry.eventstream.kafka.dispatch.EventPostProcessForwarderStrategyFactory",
"synchronize_commit_log_topic_default": "snuba-commit-log",
"synchronize_commit_group_default": "snuba-consumers",
"click_options": _POST_PROCESS_FORWARDER_OPTIONS,
},
"process-spans": {
- "topic": settings.KAFKA_SNUBA_SPANS,
+ "topic": Topic.SNUBA_SPANS,
"strategy_factory": "sentry.spans.consumers.process.factory.ProcessSpansStrategyFactory",
},
**settings.SENTRY_KAFKA_CONSUMERS,
@@ -405,15 +406,8 @@ def get_stream_processor(
strategy_factory_cls = import_string(consumer_definition["strategy_factory"])
consumer_topic = consumer_definition["topic"]
- if isinstance(consumer_topic, Topic):
- default_topic = consumer_topic.value
- real_topic = settings.KAFKA_TOPIC_OVERRIDES.get(default_topic, default_topic)
- else:
- # TODO: Deprecated, remove once this way is no longer used
- if not isinstance(consumer_topic, str):
- real_topic = consumer_topic()
- else:
- real_topic = consumer_topic
+ default_topic = consumer_topic.value
+ real_topic = settings.KAFKA_TOPIC_OVERRIDES.get(default_topic, default_topic)
if topic is None:
topic = real_topic
@@ -496,10 +490,6 @@ def build_consumer_config(group_id: str):
validate_schema = consumer_definition.get("validate_schema") or False
if validate_schema:
- # TODO: Remove this later but for now we can only validate if `topic_def` is
- # the logical topic and not the legacy override topic
- assert isinstance(consumer_topic, Topic)
-
strategy_factory = ValidateSchemaStrategyFactoryWrapper(
consumer_topic.value, validate_schema, strategy_factory
)
@@ -517,7 +507,8 @@ def build_consumer_config(group_id: str):
f"Cannot enable DLQ for consumer: {consumer_name}, no DLQ topic has been defined for it"
) from e
try:
- cluster_setting = get_topic_definition(dlq_topic)["cluster"]
+ dlq_topic_defn = get_topic_definition(dlq_topic)
+ cluster_setting = dlq_topic_defn["cluster"]
except ValueError as e:
raise click.BadParameter(
f"Cannot enable DLQ for consumer: {consumer_name}, DLQ topic {dlq_topic} is not configured in this environment"
@@ -527,7 +518,7 @@ def build_consumer_config(group_id: str):
dlq_producer = KafkaProducer(producer_config)
dlq_policy = DlqPolicy(
- KafkaDlqProducer(dlq_producer, ArroyoTopic(dlq_topic)),
+ KafkaDlqProducer(dlq_producer, ArroyoTopic(dlq_topic_defn["real_topic_name"])),
DlqLimit(
max_invalid_ratio=consumer_definition["dlq_max_invalid_ratio"],
max_consecutive_count=consumer_definition["dlq_max_consecutive_count"],
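Aside: a minimal, self-contained sketch of the topic-resolution flow this hunk relies on, assuming a get_topic_definition() helper that fills the cluster and real_topic_name fields added to TopicDefinition; the override mapping and cluster value below are illustrative, not Sentry's actual configuration.

    from enum import Enum

    class Topic(Enum):
        # Illustrative subset of the logical topic enum used above.
        INGEST_METRICS = "ingest-metrics"
        INGEST_METRICS_DLQ = "ingest-metrics-dlq"

    # A deployment may remap a logical topic to a differently named physical topic.
    KAFKA_TOPIC_OVERRIDES = {"ingest-metrics-dlq": "ingest-metrics-dlq-legacy"}

    def get_topic_definition(topic: Topic) -> dict:
        # Mirrors the TopicDefinition shape: cluster plus the resolved physical name.
        default = topic.value
        return {
            "cluster": "default",
            "real_topic_name": KAFKA_TOPIC_OVERRIDES.get(default, default),
        }

    assert get_topic_definition(Topic.INGEST_METRICS)["real_topic_name"] == "ingest-metrics"
    assert get_topic_definition(Topic.INGEST_METRICS_DLQ)["real_topic_name"] == "ingest-metrics-dlq-legacy"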
diff --git a/src/sentry/deletions/__init__.py b/src/sentry/deletions/__init__.py
index 81ba3a368a4625..9148230eeeee16 100644
--- a/src/sentry/deletions/__init__.py
+++ b/src/sentry/deletions/__init__.py
@@ -78,7 +78,6 @@
descendants, such as Event, so it can more efficiently bulk delete rows.
"""
-
from .base import BulkModelDeletionTask, ModelDeletionTask, ModelRelation # NOQA
from .defaults.artifactbundle import ArtifactBundleDeletionTask
from .manager import DeletionTaskManager
@@ -89,7 +88,11 @@
def load_defaults():
from sentry import models
from sentry.discover.models import DiscoverSavedQuery
- from sentry.incidents.models import AlertRule, AlertRuleTrigger, AlertRuleTriggerAction
+ from sentry.incidents.models.alert_rule import (
+ AlertRule,
+ AlertRuleTrigger,
+ AlertRuleTriggerAction,
+ )
from sentry.models.commitfilechange import CommitFileChange
from sentry.monitors import models as monitor_models
diff --git a/src/sentry/deletions/defaults/alert_rule_trigger.py b/src/sentry/deletions/defaults/alert_rule_trigger.py
index fe36d050fc8c5e..9aa1469390a79f 100644
--- a/src/sentry/deletions/defaults/alert_rule_trigger.py
+++ b/src/sentry/deletions/defaults/alert_rule_trigger.py
@@ -3,7 +3,7 @@
class AlertRuleTriggerDeletionTask(ModelDeletionTask):
def get_child_relations(self, instance):
- from sentry.incidents.models import AlertRuleTriggerAction
+ from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
return [
ModelRelation(AlertRuleTriggerAction, {"alert_rule_trigger_id": instance.id}),
diff --git a/src/sentry/deletions/defaults/alertrule.py b/src/sentry/deletions/defaults/alertrule.py
index 39340100891b77..2911d3fbe620fa 100644
--- a/src/sentry/deletions/defaults/alertrule.py
+++ b/src/sentry/deletions/defaults/alertrule.py
@@ -7,7 +7,7 @@ class AlertRuleDeletionTask(ModelDeletionTask):
manager_name = "objects_with_snapshots"
def get_child_relations(self, instance):
- from sentry.incidents.models import AlertRuleTrigger
+ from sentry.incidents.models.alert_rule import AlertRuleTrigger
return [
ModelRelation(AlertRuleTrigger, {"alert_rule_id": instance.id}),
diff --git a/src/sentry/deletions/defaults/organization.py b/src/sentry/deletions/defaults/organization.py
index 43cda85e52a54e..659dc77fc3e766 100644
--- a/src/sentry/deletions/defaults/organization.py
+++ b/src/sentry/deletions/defaults/organization.py
@@ -19,7 +19,8 @@ def should_proceed(self, instance):
def get_child_relations(self, instance):
from sentry.deletions.defaults.discoversavedquery import DiscoverSavedQueryDeletionTask
from sentry.discover.models import DiscoverSavedQuery, TeamKeyTransaction
- from sentry.incidents.models import AlertRule, Incident
+ from sentry.incidents.models.alert_rule import AlertRule
+ from sentry.incidents.models.incident import Incident
from sentry.models.artifactbundle import ArtifactBundle
from sentry.models.commitauthor import CommitAuthor
from sentry.models.dashboard import Dashboard
diff --git a/src/sentry/deletions/defaults/project.py b/src/sentry/deletions/defaults/project.py
index cca0783a987410..37aa34cdd98c54 100644
--- a/src/sentry/deletions/defaults/project.py
+++ b/src/sentry/deletions/defaults/project.py
@@ -7,7 +7,8 @@ class ProjectDeletionTask(ModelDeletionTask):
def get_child_relations(self, instance):
from sentry import models
from sentry.discover.models import DiscoverSavedQueryProject
- from sentry.incidents.models import AlertRule, IncidentProject
+ from sentry.incidents.models.alert_rule import AlertRule
+ from sentry.incidents.models.incident import IncidentProject
from sentry.models.projectteam import ProjectTeam
from sentry.monitors.models import Monitor
from sentry.replays.models import ReplayRecordingSegment
diff --git a/src/sentry/deletions/defaults/team.py b/src/sentry/deletions/defaults/team.py
index f799550c155120..5b3847af48e71d 100644
--- a/src/sentry/deletions/defaults/team.py
+++ b/src/sentry/deletions/defaults/team.py
@@ -17,7 +17,7 @@ def mark_deletion_in_progress(self, instance_list):
instance.update(status=TeamStatus.DELETION_IN_PROGRESS)
def delete_instance(self, instance):
- from sentry.incidents.models import AlertRule
+ from sentry.incidents.models.alert_rule import AlertRule
from sentry.models.rule import Rule
AlertRule.objects.filter(owner_id=instance.actor_id).update(owner=None)
diff --git a/src/sentry/dynamic_sampling/tasks/utils.py b/src/sentry/dynamic_sampling/tasks/utils.py
index 13e333744ca662..c44acf0526c27c 100644
--- a/src/sentry/dynamic_sampling/tasks/utils.py
+++ b/src/sentry/dynamic_sampling/tasks/utils.py
@@ -1,7 +1,5 @@
from functools import wraps
-import sentry_sdk
-
from sentry import features
from sentry.dynamic_sampling.tasks.common import TimeoutException
from sentry.dynamic_sampling.tasks.logging import log_task_execution, log_task_timeout
@@ -38,12 +36,9 @@ def _wrapper():
try:
func(context=context)
except TimeoutException:
- sentry_sdk.set_extra("context-data", context.to_dict())
log_task_timeout(context)
raise
else:
- sentry_sdk.set_extra("context-data", context.to_dict())
- sentry_sdk.capture_message(f"timing for {task_name}")
log_task_execution(context)
return _wrapper
diff --git a/src/sentry/event_manager.py b/src/sentry/event_manager.py
index fcac517bf48539..cbf999469d39ab 100644
--- a/src/sentry/event_manager.py
+++ b/src/sentry/event_manager.py
@@ -1492,12 +1492,10 @@ def _save_aggregate(
):
raise HashDiscarded("Load shedding group creation", reason="load_shed")
- with sentry_sdk.start_span(
- op="event_manager.create_group_transaction"
- ) as span, metrics.timer(
- "event_manager.create_group_transaction"
- ) as metric_tags, transaction.atomic(
- router.db_for_write(GroupHash)
+ with (
+ sentry_sdk.start_span(op="event_manager.create_group_transaction") as span,
+ metrics.timer("event_manager.create_group_transaction") as metric_tags,
+ transaction.atomic(router.db_for_write(GroupHash)),
):
span.set_tag("create_group_transaction.outcome", "no_group")
metric_tags["create_group_transaction.outcome"] = "no_group"
@@ -2696,10 +2694,12 @@ def _calculate_span_grouping(jobs: Sequence[Job], projects: ProjectsMapping) ->
@metrics.wraps("save_event.detect_performance_problems")
-def _detect_performance_problems(jobs: Sequence[Job], projects: ProjectsMapping) -> None:
+def _detect_performance_problems(
+ jobs: Sequence[Job], projects: ProjectsMapping, is_standalone_spans: bool = False
+) -> None:
for job in jobs:
job["performance_problems"] = detect_performance_problems(
- job["data"], projects[job["project_id"]]
+ job["data"], projects[job["project_id"]], is_standalone_spans=is_standalone_spans
)
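Aside: the _save_aggregate hunk above is a pure reformat into the parenthesized with-statement form, which Python supports natively from 3.10 onward. A standalone illustration with stand-in context managers:

    from contextlib import nullcontext

    # Several context managers, one per line, trailing comma allowed.
    with (
        nullcontext("span") as span,
        nullcontext({"outcome": None}) as metric_tags,
        nullcontext(),
    ):
        metric_tags["outcome"] = "no_group"
        print(span, metric_tags)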
diff --git a/src/sentry/eventstore/reprocessing/__init__.py b/src/sentry/eventstore/reprocessing/__init__.py
new file mode 100644
index 00000000000000..a0262cf36a4370
--- /dev/null
+++ b/src/sentry/eventstore/reprocessing/__init__.py
@@ -0,0 +1,13 @@
+from django.conf import settings
+
+from sentry.eventstore.reprocessing.base import ReprocessingStore
+from sentry.utils.services import LazyServiceWrapper
+
+reprocessing_store = LazyServiceWrapper(
+ ReprocessingStore,
+ settings.SENTRY_REPROCESSING_STORE,
+ settings.SENTRY_REPROCESSING_STORE_OPTIONS,
+)
+
+
+__all__ = ["reprocessing_store"]
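Aside: LazyServiceWrapper defers importing and constructing the backend until the wrapper is first used, resolving it from a dotted path in settings. A minimal sketch of that pattern; the dotted path and options shown are assumptions about sensible defaults, not quoted from sentry.conf.server.

    from importlib import import_module
    from typing import Any

    def import_string(path: str) -> Any:
        # Resolve "pkg.module.ClassName" to the class object.
        module_path, _, class_name = path.rpartition(".")
        return getattr(import_module(module_path), class_name)

    class LazyBackend:
        # Stand-in for LazyServiceWrapper: build the configured backend lazily
        # and delegate attribute access to it afterwards.
        def __init__(self, dotted_path: str, options: dict[str, Any]) -> None:
            self._dotted_path = dotted_path
            self._options = options
            self._backend: Any = None

        def __getattr__(self, name: str) -> Any:
            # Only reached for names not found on the wrapper itself.
            if self._backend is None:
                self._backend = import_string(self._dotted_path)(**self._options)
            return getattr(self._backend, name)

    # Nothing is imported until a backend method such as add_hash() is first called.
    store = LazyBackend(
        "sentry.eventstore.reprocessing.redis.RedisReprocessingStore",
        {"cluster": "default"},
    )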
diff --git a/src/sentry/eventstore/reprocessing/base.py b/src/sentry/eventstore/reprocessing/base.py
new file mode 100644
index 00000000000000..59feb6f6625c35
--- /dev/null
+++ b/src/sentry/eventstore/reprocessing/base.py
@@ -0,0 +1,72 @@
+from collections.abc import Sequence
+from datetime import datetime
+from typing import Any
+
+from sentry.utils.services import Service
+
+
+class ReprocessingStore(Service):
+ __all__ = (
+ "event_count_for_hashes",
+ "pop_batched_events",
+ "get_old_primary_hashes",
+ "expire_hash",
+ "add_hash",
+ "get_remaining_event_count",
+ "rename_key",
+ "mark_event_reprocessed",
+ "start_reprocessing",
+ "get_pending",
+ "get_progress",
+ )
+
+ def __init__(self, **options: Any) -> None:
+ pass
+
+ def event_count_for_hashes(
+ self, project_id: int, group_id: int, old_primary_hashes: Sequence[str]
+ ) -> int:
+ raise NotImplementedError()
+
+ def pop_batched_events(
+ self, project_id: int, group_id: int, primary_hash: str
+ ) -> tuple[list[str], datetime | None, datetime | None]:
+ raise NotImplementedError()
+
+ def get_old_primary_hashes(self, project_id: int, group_id: int) -> set[Any]:
+ raise NotImplementedError()
+
+ def expire_hash(
+ self,
+ project_id: int,
+ group_id: int,
+ event_id: str,
+ date_val: datetime,
+ old_primary_hash: str,
+ ) -> None:
+ raise NotImplementedError()
+
+ def add_hash(self, project_id: int, group_id: int, hash: str) -> None:
+ raise NotImplementedError()
+
+ def get_remaining_event_count(
+ self, project_id: int, old_group_id: int, datetime_to_event: list[tuple[datetime, str]]
+ ) -> int:
+ raise NotImplementedError()
+
+ def rename_key(self, project_id: int, old_group_id: int) -> str | None:
+ raise NotImplementedError()
+
+ def mark_event_reprocessed(self, group_id: int, num_events: int) -> bool:
+ raise NotImplementedError()
+
+ def start_reprocessing(
+ self, group_id: int, date_created: Any, sync_count: int, event_count: int
+ ) -> None:
+ raise NotImplementedError()
+
+ def get_pending(self, group_id: int) -> Any:
+ raise NotImplementedError()
+
+ def get_progress(self, group_id: int) -> dict[str, Any] | None:
+ raise NotImplementedError()
diff --git a/src/sentry/eventstore/reprocessing/redis.py b/src/sentry/eventstore/reprocessing/redis.py
new file mode 100644
index 00000000000000..046fb709cfaebd
--- /dev/null
+++ b/src/sentry/eventstore/reprocessing/redis.py
@@ -0,0 +1,172 @@
+import uuid
+from collections.abc import Sequence
+from datetime import datetime
+from typing import Any
+
+import redis
+from django.conf import settings
+
+from sentry.utils import json
+from sentry.utils.dates import to_datetime, to_timestamp
+from sentry.utils.redis import redis_clusters
+
+from .base import ReprocessingStore
+
+
+def _get_sync_counter_key(group_id: int) -> str:
+ return f"re2:count:{group_id}"
+
+
+def _get_info_reprocessed_key(group_id: int) -> str:
+ return f"re2:info:{group_id}"
+
+
+def _get_old_primary_hash_subset_key(project_id: int, group_id: int, primary_hash: str) -> str:
+ return f"re2:tombstones:{{{project_id}:{group_id}:{primary_hash}}}"
+
+
+def _get_remaining_key(project_id: int, group_id: int) -> str:
+ return f"re2:remaining:{{{project_id}:{group_id}}}"
+
+
+class RedisReprocessingStore(ReprocessingStore):
+ def __init__(self, **options: dict[str, Any]) -> None:
+ cluster = options.pop("cluster", "default")
+ assert isinstance(cluster, str), "cluster option must be a string"
+ self.redis = redis_clusters.get(cluster)
+
+ def event_count_for_hashes(
+ self, project_id: int, group_id: int, old_primary_hashes: Sequence[str]
+ ) -> int:
+ # Events for a group are split and bucketed by their primary hashes. If flushing is to be
+ # performed on a per-group basis, the event count needs to be summed up across all buckets
+ # belonging to a single group.
+ event_count = 0
+ for primary_hash in old_primary_hashes:
+ key = _get_old_primary_hash_subset_key(project_id, group_id, primary_hash)
+ event_count += self.redis.llen(key)
+ return event_count
+
+ def pop_batched_events(
+ self, project_id: int, group_id: int, primary_hash: str
+ ) -> tuple[list[str], datetime | None, datetime | None]:
+ """
+ For redis key pointing to a list of buffered events structured like
+ `event id;datetime of event`, returns a list of event IDs, the
+ earliest datetime, and the latest datetime.
+ """
+ event_ids_batch = []
+ min_datetime: datetime | None = None
+ max_datetime: datetime | None = None
+ key = _get_old_primary_hash_subset_key(project_id, group_id, primary_hash)
+
+ for row in self.redis.lrange(key, 0, -1):
+ datetime_raw, event_id = row.split(";")
+ parsed_datetime = to_datetime(float(datetime_raw))
+
+ assert parsed_datetime is not None
+
+ if min_datetime is None or parsed_datetime < min_datetime:
+ min_datetime = parsed_datetime
+ if max_datetime is None or parsed_datetime > max_datetime:
+ max_datetime = parsed_datetime
+
+ event_ids_batch.append(event_id)
+
+ self.redis.delete(key)
+
+ return event_ids_batch, min_datetime, max_datetime
+
+ def get_old_primary_hashes(self, project_id: int, group_id: int) -> set[Any]:
+ # This is a meta key that contains old primary hashes. These hashes are then
+ # combined with other values to construct a key that points to a list of
+ # tombstonable events.
+ primary_hash_set_key = f"re2:tombstone-primary-hashes:{project_id}:{group_id}"
+
+ return self.redis.smembers(primary_hash_set_key)
+
+ def expire_hash(
+ self,
+ project_id: int,
+ group_id: int,
+ event_id: str,
+ date_val: datetime,
+ old_primary_hash: str,
+ ) -> None:
+ event_key = _get_old_primary_hash_subset_key(project_id, group_id, old_primary_hash)
+ self.redis.lpush(event_key, f"{to_timestamp(date_val)};{event_id}")
+ self.redis.expire(event_key, settings.SENTRY_REPROCESSING_TOMBSTONES_TTL)
+
+ def add_hash(self, project_id: int, group_id: int, hash: str) -> None:
+ primary_hash_set_key = f"re2:tombstone-primary-hashes:{project_id}:{group_id}"
+
+ self.redis.sadd(primary_hash_set_key, hash)
+ self.redis.expire(primary_hash_set_key, settings.SENTRY_REPROCESSING_TOMBSTONES_TTL)
+
+ def get_remaining_event_count(
+ self, project_id: int, old_group_id: int, datetime_to_event: list[tuple[datetime, str]]
+ ) -> int:
+ # We explicitly cluster by only project_id and group_id here such that our
+ # RENAME command later succeeds.
+ key = _get_remaining_key(project_id, old_group_id)
+
+ if datetime_to_event:
+ llen = self.redis.lpush(
+ key,
+ *(
+ f"{to_timestamp(datetime)};{event_id}"
+ for datetime, event_id in datetime_to_event
+ ),
+ )
+ self.redis.expire(key, settings.SENTRY_REPROCESSING_SYNC_TTL)
+ else:
+ llen = self.redis.llen(key)
+ return llen
+
+ def rename_key(self, project_id: int, old_group_id: int) -> str | None:
+ key = _get_remaining_key(project_id, old_group_id)
+ new_key = f"{key}:{uuid.uuid4().hex}"
+ try:
+ # Rename `key` to a new temp key that is passed to the Celery task. We
+ # use `renamenx` instead of `rename` only to detect UUID collisions.
+ assert self.redis.renamenx(key, new_key), "UUID collision for new_key?"
+
+ return new_key
+ except redis.exceptions.ResponseError:
+ # `key` does not exist in Redis. `ResponseError` is a bit too broad
+ # but it seems we'd have to do string matching on the error message
+ # otherwise.
+ return None
+
+ def mark_event_reprocessed(self, group_id: int, num_events: int) -> bool:
+ # refresh the TTL of the metadata:
+ self.redis.expire(
+ _get_info_reprocessed_key(group_id), settings.SENTRY_REPROCESSING_SYNC_TTL
+ )
+ key = _get_sync_counter_key(group_id)
+ self.redis.expire(key, settings.SENTRY_REPROCESSING_SYNC_TTL)
+ return self.redis.decrby(key, num_events) == 0
+
+ def start_reprocessing(
+ self, group_id: int, date_created: Any, sync_count: int, event_count: int
+ ) -> None:
+ self.redis.setex(
+ _get_sync_counter_key(group_id), settings.SENTRY_REPROCESSING_SYNC_TTL, sync_count
+ )
+ self.redis.setex(
+ _get_info_reprocessed_key(group_id),
+ settings.SENTRY_REPROCESSING_SYNC_TTL,
+ json.dumps(
+ {"dateCreated": date_created, "syncCount": sync_count, "totalEvents": event_count}
+ ),
+ )
+
+ def get_pending(self, group_id: int) -> tuple[int | None, int]:
+ pending_key = _get_sync_counter_key(group_id)
+ pending = self.redis.get(pending_key)
+ ttl = self.redis.ttl(pending_key)
+ return pending, ttl
+
+ def get_progress(self, group_id: int) -> dict[str, Any] | None:
+ info = self.redis.get(_get_info_reprocessed_key(group_id))
+ return info
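Aside: the per-hash Redis lists written by expire_hash and drained by pop_batched_events hold rows encoded as "<unix timestamp>;<event id>". A self-contained round-trip of that encoding, without Redis; the helper names here are hypothetical.

    from datetime import datetime, timezone

    def encode_row(date_val: datetime, event_id: str) -> str:
        # Same row shape the store lpush-es: "<unix timestamp>;<event id>".
        return f"{date_val.timestamp()};{event_id}"

    def decode_rows(rows: list[str]) -> tuple[list[str], datetime | None, datetime | None]:
        # Mirrors pop_batched_events: collect event ids and track min/max datetimes.
        event_ids: list[str] = []
        min_dt = max_dt = None
        for row in rows:
            ts_raw, event_id = row.split(";")
            dt = datetime.fromtimestamp(float(ts_raw), tz=timezone.utc)
            min_dt = dt if min_dt is None or dt < min_dt else min_dt
            max_dt = dt if max_dt is None or dt > max_dt else max_dt
            event_ids.append(event_id)
        return event_ids, min_dt, max_dt

    rows = [
        encode_row(datetime(2024, 1, 2, tzinfo=timezone.utc), "b" * 32),
        encode_row(datetime(2024, 1, 1, tzinfo=timezone.utc), "a" * 32),
    ]
    print(decode_rows(rows))  # ids in list order, min = Jan 1, max = Jan 2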
diff --git a/src/sentry/eventstream/kafka/backend.py b/src/sentry/eventstream/kafka/backend.py
index 4ec2aa728aa37f..97fe9b98de8435 100644
--- a/src/sentry/eventstream/kafka/backend.py
+++ b/src/sentry/eventstream/kafka/backend.py
@@ -7,9 +7,9 @@
from confluent_kafka import KafkaError
from confluent_kafka import Message as KafkaMessage
from confluent_kafka import Producer
-from django.conf import settings
from sentry import options
+from sentry.conf.types.kafka_definition import Topic
from sentry.eventstream.base import EventStreamEventType, GroupStates
from sentry.eventstream.snuba import KW_SKIP_SEMANTIC_PARTITIONING, SnubaProtocolEventStream
from sentry.killswitches import killswitch_matches_context
@@ -24,15 +24,15 @@
class KafkaEventStream(SnubaProtocolEventStream):
def __init__(self, **options: Any) -> None:
- self.topic = settings.KAFKA_EVENTS
- self.transactions_topic = settings.KAFKA_TRANSACTIONS
- self.issue_platform_topic = settings.KAFKA_EVENTSTREAM_GENERIC
- self.__producers: MutableMapping[str, Producer] = {}
+ self.topic = Topic.EVENTS
+ self.transactions_topic = Topic.TRANSACTIONS
+ self.issue_platform_topic = Topic.EVENTSTREAM_GENERIC
+ self.__producers: MutableMapping[Topic, Producer] = {}
- def get_transactions_topic(self, project_id: int) -> str:
+ def get_transactions_topic(self, project_id: int) -> Topic:
return self.transactions_topic
- def get_producer(self, topic: str) -> Producer:
+ def get_producer(self, topic: Topic) -> Producer:
if topic not in self.__producers:
cluster_name = get_topic_definition(topic)["cluster"]
cluster_options = get_kafka_producer_cluster_options(cluster_name)
@@ -202,9 +202,11 @@ def _send(
assert isinstance(extra_data, tuple)
+ real_topic = get_topic_definition(topic)["real_topic_name"]
+
try:
producer.produce(
- topic=topic,
+ topic=real_topic,
key=str(project_id).encode("utf-8") if not skip_semantic_partitioning else None,
value=json.dumps((self.EVENT_PROTOCOL_VERSION, _type) + extra_data),
on_delivery=self.delivery_callback,
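Aside: with this change the producer cache is keyed on the logical Topic and the physical name is resolved only when producing. A compact sketch with a stand-in producer; the override mapping is illustrative.

    from enum import Enum

    class Topic(Enum):
        EVENTS = "events"
        TRANSACTIONS = "transactions"

    OVERRIDES = {"transactions": "transactions-us"}

    class FakeProducer:
        # Stand-in for confluent_kafka.Producer; just records what was produced.
        def __init__(self) -> None:
            self.sent: list[tuple[str, bytes]] = []

        def produce(self, topic: str, value: bytes) -> None:
            self.sent.append((topic, value))

    class EventStream:
        def __init__(self) -> None:
            self._producers: dict[Topic, FakeProducer] = {}

        def get_producer(self, topic: Topic) -> FakeProducer:
            # One producer per logical topic, created lazily and reused.
            if topic not in self._producers:
                self._producers[topic] = FakeProducer()
            return self._producers[topic]

        def send(self, topic: Topic, payload: bytes) -> None:
            real_topic = OVERRIDES.get(topic.value, topic.value)
            self.get_producer(topic).produce(topic=real_topic, value=payload)

    stream = EventStream()
    stream.send(Topic.TRANSACTIONS, b"{}")
    print(stream.get_producer(Topic.TRANSACTIONS).sent)  # [('transactions-us', b'{}')]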
diff --git a/src/sentry/features/__init__.py b/src/sentry/features/__init__.py
index fb45a3a496a149..779b40f18cec8d 100644
--- a/src/sentry/features/__init__.py
+++ b/src/sentry/features/__init__.py
@@ -70,6 +70,7 @@
default_manager.add("relocation:enabled", SystemFeature, FeatureHandlerStrategy.INTERNAL)
# Organization scoped features that are in development or in customer trials.
+default_manager.add("organizations:activated-alert-rules", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:alert-allow-indexed", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:alert-crash-free-metrics", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:alert-filters", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
@@ -88,14 +89,13 @@
default_manager.add("organizations:dashboards-import", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:dashboards-mep", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:dashboards-rh-widget", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
-default_manager.add("organizations:default-inbound-filters", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:ddm-experimental", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
-default_manager.add("organizations:ddm-formulas", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:ddm-dashboard-import", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:ddm-ui", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
default_manager.add("organizations:ddm-metrics-api-unit-normalization", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:default-high-priority-alerts", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
default_manager.add("organizations:derive-code-mappings", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
+default_manager.add("organizations:derive-code-mappings-php", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
default_manager.add("organizations:device-class-synthesis", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
default_manager.add("organizations:device-classification", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:discover-events-rate-limit", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
@@ -176,6 +176,7 @@
default_manager.add("organizations:performance-database-view", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:performance-db-main-thread-detector", OrganizationFeature)
default_manager.add("organizations:performance-discover-widget-split-ui", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
+default_manager.add("organizations:performance-discover-widget-split-override-save", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:performance-file-io-main-thread-detector", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
default_manager.add("organizations:performance-issues-all-events-tab", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:performance-issues-compressed-assets-detector", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
@@ -240,8 +241,8 @@
default_manager.add("organizations:session-replay-count-query-optimize", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:session-replay-enable-canvas-replayer", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:session-replay-enable-canvas", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
-default_manager.add("organizations:session-replay-event-linking", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
default_manager.add("organizations:session-replay-issue-emails", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
+default_manager.add("organizations:session-replay-mobile-player", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:session-replay-new-event-counts", OrganizationFeature, FeatureHandlerStrategy.REMOTE)
default_manager.add("organizations:session-replay-recording-scrubbing", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
default_manager.add("organizations:session-replay-rage-click-issue-creation", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
diff --git a/src/sentry/features/permanent.py b/src/sentry/features/permanent.py
index 38c3e415b35633..3c22b5cecdb565 100644
--- a/src/sentry/features/permanent.py
+++ b/src/sentry/features/permanent.py
@@ -16,7 +16,6 @@ def register_permanent_features(manager: FeatureManager):
"organizations:advanced-search",
"organizations:app-store-connect-multiple",
"organizations:change-alerts",
- "organizations:commit-context",
"organizations:codecov-integration",
"organizations:crash-rate-alerts",
"organizations:custom-symbol-sources",
diff --git a/src/sentry/grouping/enhancer/__init__.py b/src/sentry/grouping/enhancer/__init__.py
index 35df2d586804b5..dbb17c9bf44d9a 100644
--- a/src/sentry/grouping/enhancer/__init__.py
+++ b/src/sentry/grouping/enhancer/__init__.py
@@ -3,7 +3,6 @@
import base64
import logging
import os
-import random
import zlib
from collections.abc import Sequence
from hashlib import md5
@@ -19,8 +18,7 @@
from sentry_ophio.enhancers import Cache as RustCache
from sentry_ophio.enhancers import Enhancements as RustEnhancements
-from sentry import options, projectoptions
-from sentry.features.rollout import in_random_rollout
+from sentry import projectoptions
from sentry.grouping.component import GroupingComponent
from sentry.stacktraces.functions import set_in_app
from sentry.utils import metrics
@@ -150,37 +148,25 @@ def merge_rust_enhancements(
def parse_rust_enhancements(
- source: Literal["config_structure", "config_string"], input: str | bytes, force_parsing=False
+ source: Literal["config_structure", "config_string"], input: str | bytes
) -> RustEnhancements | None:
"""
Parses ``RustEnhancements`` from either a msgpack-encoded `config_structure`,
or from the text representation called `config_string`.
-
- Parsing itself is controlled via an option, but can be forced via `force_parsing`.
"""
rust_enhancements = None
- parse_rust_enhancements = force_parsing
- if not force_parsing:
- try:
- parse_rust_enhancements = random.random() < options.get(
- "grouping.rust_enhancers.parse_rate"
- )
- except Exception:
- parse_rust_enhancements = False
-
- if parse_rust_enhancements:
- try:
- if source == "config_structure":
- assert isinstance(input, bytes)
- rust_enhancements = RustEnhancements.from_config_structure(input, RUST_CACHE)
- else:
- assert isinstance(input, str)
- rust_enhancements = RustEnhancements.parse(input, RUST_CACHE)
+ try:
+ if source == "config_structure":
+ assert isinstance(input, bytes)
+ rust_enhancements = RustEnhancements.from_config_structure(input, RUST_CACHE)
+ else:
+ assert isinstance(input, str)
+ rust_enhancements = RustEnhancements.parse(input, RUST_CACHE)
- metrics.incr("rust_enhancements.parsing_performed", tags={"source": source})
- except Exception:
- logger.exception("failed parsing Rust Enhancements from `%s`", source)
+ metrics.incr("rust_enhancements.parsing_performed", tags={"source": source})
+ except Exception:
+ logger.exception("failed parsing Rust Enhancements from `%s`", source)
return rust_enhancements
@@ -201,13 +187,6 @@ def apply_rust_enhancements(
if not rust_enhancements:
return None
- try:
- use_rust_enhancements = in_random_rollout("grouping.rust_enhancers.modify_frames_rate")
- except Exception:
- use_rust_enhancements = False
- if not use_rust_enhancements:
- return None
-
try:
e = exception_data or {}
e = {
@@ -249,13 +228,6 @@ def compare_rust_enhancers(
sentry_sdk.capture_message("Rust Enhancements mismatch")
-def prefer_rust_enhancers():
- try:
- return in_random_rollout("grouping.rust_enhancers.prefer_rust_result")
- except Exception:
- return False
-
-
class Enhancements:
# NOTE: You must add a version to ``VERSIONS`` any time attributes are added
# to this class, s.t. no enhancements lacking these attributes are loaded
@@ -299,13 +271,15 @@ def apply_modifications_to_frame(
self.rust_enhancements, match_frames, exception_data
)
- if rust_enhanced_frames and prefer_rust_enhancers():
+ if rust_enhanced_frames:
for frame, (category, in_app) in zip(frames, rust_enhanced_frames):
if in_app is not None:
set_in_app(frame, in_app)
if category is not None:
set_path(frame, "data", "category", value=category)
return
+ else:
+ logger.error("Rust enhancements were not applied successfully")
in_memory_cache: dict[str, str] = {}
@@ -476,8 +450,8 @@ def loads(cls, data) -> Enhancements:
@classmethod
@sentry_sdk.tracing.trace
- def from_config_string(self, s, bases=None, id=None, force_rust_parsing=False) -> Enhancements:
- rust_enhancements = parse_rust_enhancements("config_string", s, force_rust_parsing)
+ def from_config_string(self, s, bases=None, id=None) -> Enhancements:
+ rust_enhancements = parse_rust_enhancements("config_string", s)
try:
tree = enhancements_grammar.parse(s)
@@ -815,9 +789,7 @@ def _load_configs() -> dict[str, Enhancements]:
fn = fn.replace("@", ":")
# NOTE: we want to force parsing the `RustEnhancements` here, as the base rules
# are required for inheritance, and because they are well tested.
- enhancements = Enhancements.from_config_string(
- f.read(), id=fn[:-4], force_rust_parsing=True
- )
+ enhancements = Enhancements.from_config_string(f.read(), id=fn[:-4])
rv[fn[:-4]] = enhancements
return rv
diff --git a/src/sentry/hybridcloud/apigateway/proxy.py b/src/sentry/hybridcloud/apigateway/proxy.py
index 0ec1e15729a1e8..c6b354aa9aa371 100644
--- a/src/sentry/hybridcloud/apigateway/proxy.py
+++ b/src/sentry/hybridcloud/apigateway/proxy.py
@@ -1,6 +1,7 @@
"""
Utilities related to proxying a request to a region silo
"""
+
from __future__ import annotations
import logging
@@ -166,6 +167,10 @@ def proxy_region_request(
data=_body_with_length(request),
stream=True,
timeout=timeout,
+ # By default, external_request resolves redirects for every verb except HEAD.
+ # We explicitly disable this behavior so the original sentry.io request is not
+ # misrepresented by the response body of the redirect target.
+ allow_redirects=False,
)
except Timeout:
# remote silo timeout. Use DRF timeout instead
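Aside: allow_redirects=False is standard requests behavior; the client returns the 3xx response itself (with its Location header) instead of following it, so the gateway can relay the redirect to the caller untouched. A tiny illustration; the URL is a placeholder.

    import requests

    # With redirects disabled, a 302 is surfaced as-is rather than being
    # replaced by the body of the redirect target.
    resp = requests.get("https://example.com/some-redirecting-endpoint", allow_redirects=False)
    if resp.is_redirect:
        print(resp.status_code, resp.headers.get("Location"))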
diff --git a/src/sentry/incidents/action_handlers.py b/src/sentry/incidents/action_handlers.py
index b70db725c2cdb3..5e67e57bb17398 100644
--- a/src/sentry/incidents/action_handlers.py
+++ b/src/sentry/incidents/action_handlers.py
@@ -13,13 +13,8 @@
from sentry.charts.types import ChartSize
from sentry.constants import CRASH_RATE_ALERT_AGGREGATE_ALIAS
from sentry.incidents.charts import build_metric_alert_chart
-from sentry.incidents.models import (
- INCIDENT_STATUS,
- AlertRuleThresholdType,
- AlertRuleTriggerAction,
- IncidentStatus,
- TriggerStatus,
-)
+from sentry.incidents.models.alert_rule import AlertRuleThresholdType, AlertRuleTriggerAction
+from sentry.incidents.models.incident import INCIDENT_STATUS, IncidentStatus, TriggerStatus
from sentry.models.rulesnooze import RuleSnooze
from sentry.models.user import User
from sentry.notifications.types import NotificationSettingEnum
diff --git a/src/sentry/incidents/charts.py b/src/sentry/incidents/charts.py
index 226e47b5fb647c..292af8a180d585 100644
--- a/src/sentry/incidents/charts.py
+++ b/src/sentry/incidents/charts.py
@@ -15,7 +15,8 @@
from sentry.charts import backend as charts
from sentry.charts.types import ChartSize, ChartType
from sentry.incidents.logic import translate_aggregate_field
-from sentry.incidents.models import AlertRule, Incident
+from sentry.incidents.models.alert_rule import AlertRule
+from sentry.incidents.models.incident import Incident
from sentry.models.apikey import ApiKey
from sentry.models.organization import Organization
from sentry.models.user import User
diff --git a/src/sentry/incidents/endpoints/bases.py b/src/sentry/incidents/endpoints/bases.py
index 8ea857498c9af4..efb7349500ba6d 100644
--- a/src/sentry/incidents/endpoints/bases.py
+++ b/src/sentry/incidents/endpoints/bases.py
@@ -6,7 +6,7 @@
from sentry.api.bases.organization import OrganizationAlertRulePermission, OrganizationEndpoint
from sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
-from sentry.incidents.models import AlertRule, AlertRuleTrigger, AlertRuleTriggerAction
+from sentry.incidents.models.alert_rule import AlertRule, AlertRuleTrigger, AlertRuleTriggerAction
class ProjectAlertRuleEndpoint(ProjectEndpoint):
diff --git a/src/sentry/incidents/endpoints/organization_alert_rule_available_action_index.py b/src/sentry/incidents/endpoints/organization_alert_rule_available_action_index.py
index 751a80c647786e..265f8cf7428598 100644
--- a/src/sentry/incidents/endpoints/organization_alert_rule_available_action_index.py
+++ b/src/sentry/incidents/endpoints/organization_alert_rule_available_action_index.py
@@ -19,7 +19,7 @@
get_opsgenie_teams,
get_pagerduty_services,
)
-from sentry.incidents.models import AlertRuleTriggerAction
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
from sentry.incidents.serializers import ACTION_TARGET_TYPE_TO_STRING
from sentry.models.organization import Organization
from sentry.services.hybrid_cloud.app import RpcSentryAppInstallation, app_service
diff --git a/src/sentry/incidents/endpoints/organization_alert_rule_index.py b/src/sentry/incidents/endpoints/organization_alert_rule_index.py
index 585e55efbca61c..3dcb2b12627e7d 100644
--- a/src/sentry/incidents/endpoints/organization_alert_rule_index.py
+++ b/src/sentry/incidents/endpoints/organization_alert_rule_index.py
@@ -34,7 +34,8 @@
from sentry.constants import ObjectStatus
from sentry.exceptions import InvalidParams
from sentry.incidents.logic import get_slack_actions_with_async_lookups
-from sentry.incidents.models import AlertRule, Incident
+from sentry.incidents.models.alert_rule import AlertRule
+from sentry.incidents.models.incident import Incident
from sentry.incidents.serializers import AlertRuleSerializer as DrfAlertRuleSerializer
from sentry.incidents.utils.sentry_apps import trigger_sentry_app_action_creators_for_incidents
from sentry.integrations.slack.utils import RedisRuleStatus
diff --git a/src/sentry/incidents/endpoints/organization_incident_comment_details.py b/src/sentry/incidents/endpoints/organization_incident_comment_details.py
index aedd0123f4307f..e715a4ab2dbb17 100644
--- a/src/sentry/incidents/endpoints/organization_incident_comment_details.py
+++ b/src/sentry/incidents/endpoints/organization_incident_comment_details.py
@@ -10,7 +10,7 @@
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.incidents.logic import delete_comment, update_comment
-from sentry.incidents.models import IncidentActivity, IncidentActivityType
+from sentry.incidents.models.incident import IncidentActivity, IncidentActivityType
class CommentSerializer(serializers.Serializer):
diff --git a/src/sentry/incidents/endpoints/organization_incident_comment_index.py b/src/sentry/incidents/endpoints/organization_incident_comment_index.py
index 89343fd6cdd7cf..dfe9db275e6839 100644
--- a/src/sentry/incidents/endpoints/organization_incident_comment_index.py
+++ b/src/sentry/incidents/endpoints/organization_incident_comment_index.py
@@ -13,7 +13,7 @@
extract_user_ids_from_mentions,
)
from sentry.incidents.logic import create_incident_activity
-from sentry.incidents.models import IncidentActivityType
+from sentry.incidents.models.incident import IncidentActivityType
class CommentSerializer(serializers.Serializer, MentionsMixin):
diff --git a/src/sentry/incidents/endpoints/organization_incident_details.py b/src/sentry/incidents/endpoints/organization_incident_details.py
index e618aacfed5a8d..ab8d04e75b5602 100644
--- a/src/sentry/incidents/endpoints/organization_incident_details.py
+++ b/src/sentry/incidents/endpoints/organization_incident_details.py
@@ -9,7 +9,7 @@
from sentry.api.serializers import serialize
from sentry.api.serializers.models.incident import DetailedIncidentSerializer
from sentry.incidents.logic import update_incident_status
-from sentry.incidents.models import IncidentStatus, IncidentStatusMethod
+from sentry.incidents.models.incident import IncidentStatus, IncidentStatusMethod
class IncidentSerializer(serializers.Serializer):
diff --git a/src/sentry/incidents/endpoints/organization_incident_index.py b/src/sentry/incidents/endpoints/organization_incident_index.py
index 5f4d5a3cc6bf42..88279d697295d1 100644
--- a/src/sentry/incidents/endpoints/organization_incident_index.py
+++ b/src/sentry/incidents/endpoints/organization_incident_index.py
@@ -15,12 +15,8 @@
from sentry.api.serializers import serialize
from sentry.api.serializers.models.incident import IncidentSerializer
from sentry.exceptions import InvalidParams
-from sentry.incidents.models import (
- AlertRuleActivity,
- AlertRuleActivityType,
- Incident,
- IncidentStatus,
-)
+from sentry.incidents.models.alert_rule import AlertRuleActivity, AlertRuleActivityType
+from sentry.incidents.models.incident import Incident, IncidentStatus
from sentry.snuba.dataset import Dataset
from sentry.utils.dates import ensure_aware
diff --git a/src/sentry/incidents/endpoints/project_alert_rule_task_details.py b/src/sentry/incidents/endpoints/project_alert_rule_task_details.py
index f502c841732084..60849ecb63bb80 100644
--- a/src/sentry/incidents/endpoints/project_alert_rule_task_details.py
+++ b/src/sentry/incidents/endpoints/project_alert_rule_task_details.py
@@ -7,7 +7,7 @@
from sentry.api.base import region_silo_endpoint
from sentry.api.bases.project import ProjectEndpoint, ProjectSettingPermission
from sentry.api.serializers import serialize
-from sentry.incidents.models import AlertRule
+from sentry.incidents.models.alert_rule import AlertRule
from sentry.integrations.slack.utils import RedisRuleStatus
diff --git a/src/sentry/incidents/endpoints/utils.py b/src/sentry/incidents/endpoints/utils.py
index 2b258cdd5aac09..2007e4f8312d53 100644
--- a/src/sentry/incidents/endpoints/utils.py
+++ b/src/sentry/incidents/endpoints/utils.py
@@ -1,5 +1,5 @@
from sentry.api.helpers.teams import get_teams
-from sentry.incidents.models import AlertRule, AlertRuleThresholdType
+from sentry.incidents.models.alert_rule import AlertRule, AlertRuleThresholdType
def parse_team_params(request, organization, teams):
diff --git a/src/sentry/incidents/logic.py b/src/sentry/incidents/logic.py
index aa5e5e5f134ad0..3ad3022b77c91b 100644
--- a/src/sentry/incidents/logic.py
+++ b/src/sentry/incidents/logic.py
@@ -18,7 +18,7 @@
from sentry.auth.access import SystemAccess
from sentry.constants import CRASH_RATE_ALERT_AGGREGATE_ALIAS, ObjectStatus
from sentry.incidents import tasks
-from sentry.incidents.models import (
+from sentry.incidents.models.alert_rule import (
AlertRule,
AlertRuleActivationCondition,
AlertRuleActivity,
@@ -30,6 +30,8 @@
AlertRuleTrigger,
AlertRuleTriggerAction,
AlertRuleTriggerExclusion,
+)
+from sentry.incidents.models.incident import (
Incident,
IncidentActivity,
IncidentActivityType,
diff --git a/src/sentry/incidents/models/__init__.py b/src/sentry/incidents/models/__init__.py
new file mode 100644
index 00000000000000..8f6adf234bba98
--- /dev/null
+++ b/src/sentry/incidents/models/__init__.py
@@ -0,0 +1,10 @@
+from .alert_rule import AlertRule, AlertRuleStatus, AlertRuleThresholdType, AlertRuleTriggerAction
+from .incident import Incident
+
+__all__ = (
+ "AlertRule",
+ "AlertRuleStatus",
+ "AlertRuleThresholdType",
+ "AlertRuleTriggerAction",
+ "Incident",
+)
diff --git a/src/sentry/incidents/models.py b/src/sentry/incidents/models/alert_rule.py
similarity index 63%
rename from src/sentry/incidents/models.py
rename to src/sentry/incidents/models/alert_rule.py
index 434734d74f8470..3ca802ac017794 100644
--- a/src/sentry/incidents/models.py
+++ b/src/sentry/incidents/models/alert_rule.py
@@ -6,44 +6,39 @@
from datetime import timedelta
from enum import Enum
from typing import Any, ClassVar, Self
-from uuid import uuid4
from django.conf import settings
from django.core.cache import cache
-from django.db import IntegrityError, models, router, transaction
+from django.db import models
from django.db.models import QuerySet
from django.db.models.signals import post_delete, post_save
from django.utils import timezone
-from sentry.backup.dependencies import PrimaryKeyMap, get_model_name
+from sentry.backup.dependencies import PrimaryKeyMap
from sentry.backup.helpers import ImportFlags
from sentry.backup.scopes import ImportScope, RelocationScope
from sentry.constants import ObjectStatus
from sentry.db.models import (
- ArrayField,
BoundedPositiveIntegerField,
FlexibleForeignKey,
JSONField,
Model,
- OneToOneCascadeDeletes,
- UUIDField,
region_silo_only_model,
sane_repr,
)
from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey
from sentry.db.models.manager import BaseManager
+from sentry.incidents.models.incident import IncidentTrigger
from sentry.incidents.utils.constants import INCIDENTS_SNUBA_SUBSCRIPTION_TYPE
from sentry.incidents.utils.types import AlertRuleActivationConditionType
from sentry.models.actor import Actor
from sentry.models.notificationaction import AbstractNotificationAction, ActionService, ActionTarget
-from sentry.models.organization import Organization
from sentry.models.project import Project
from sentry.models.team import Team
from sentry.services.hybrid_cloud.user.service import user_service
from sentry.snuba.models import QuerySubscription
from sentry.snuba.subscriptions import bulk_create_snuba_subscriptions, delete_snuba_subscription
from sentry.utils import metrics
-from sentry.utils.retries import TimedRetryPolicy
alert_subscription_callback_registry: dict[
AlertRuleMonitorType, Callable[[QuerySubscription], bool]
@@ -74,306 +69,6 @@ def invoke_alert_subscription_callback(
logger = logging.getLogger(__name__)
-@region_silo_only_model
-class IncidentProject(Model):
- __relocation_scope__ = RelocationScope.Excluded
-
- project = FlexibleForeignKey("sentry.Project", db_index=False, db_constraint=False)
- incident = FlexibleForeignKey("sentry.Incident")
-
- class Meta:
- app_label = "sentry"
- db_table = "sentry_incidentproject"
- unique_together = (("project", "incident"),)
-
-
-@region_silo_only_model
-class IncidentSeen(Model):
- __relocation_scope__ = RelocationScope.Excluded
-
- incident = FlexibleForeignKey("sentry.Incident")
- user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, on_delete="CASCADE", db_index=False)
- last_seen = models.DateTimeField(default=timezone.now)
-
- class Meta:
- app_label = "sentry"
- db_table = "sentry_incidentseen"
- unique_together = (("user_id", "incident"),)
-
-
-class IncidentManager(BaseManager["Incident"]):
- CACHE_KEY = "incidents:active:%s:%s"
-
- def fetch_for_organization(self, organization, projects):
- return self.filter(organization=organization, projects__in=projects).distinct()
-
- @classmethod
- def _build_active_incident_cache_key(cls, alert_rule_id, project_id):
- return cls.CACHE_KEY % (alert_rule_id, project_id)
-
- def get_active_incident(self, alert_rule, project):
- cache_key = self._build_active_incident_cache_key(alert_rule.id, project.id)
- incident = cache.get(cache_key)
- if incident is None:
- try:
- incident = (
- Incident.objects.filter(
- type=IncidentType.ALERT_TRIGGERED.value,
- alert_rule=alert_rule,
- projects=project,
- )
- .exclude(status=IncidentStatus.CLOSED.value)
- .order_by("-date_added")[0]
- )
- except IndexError:
- # Set this to False so that we can have a negative cache as well.
- incident = False
- cache.set(cache_key, incident)
- if incident is False:
- incident = None
- elif not incident:
- # If we had a falsey not None value in the cache, then we stored that there
- # are no current active incidents. Just set to None
- incident = None
-
- return incident
-
- @classmethod
- def clear_active_incident_cache(cls, instance, **kwargs):
- for project in instance.projects.all():
- cache.delete(cls._build_active_incident_cache_key(instance.alert_rule_id, project.id))
- assert (
- cache.get(cls._build_active_incident_cache_key(instance.alert_rule_id, project.id))
- is None
- )
-
- @classmethod
- def clear_active_incident_project_cache(cls, instance, **kwargs):
- cache.delete(
- cls._build_active_incident_cache_key(
- instance.incident.alert_rule_id, instance.project_id
- )
- )
- assert (
- cache.get(
- cls._build_active_incident_cache_key(
- instance.incident.alert_rule_id, instance.project_id
- )
- )
- is None
- )
-
- @TimedRetryPolicy.wrap(timeout=5, exceptions=(IntegrityError,))
- def create(self, organization, **kwargs):
- """
- Creates an Incident. Fetches the maximum identifier value for the org
- and increments it by one. If two incidents are created for the
- Organization at the same time then an integrity error will be thrown,
- and we'll retry again several times. I prefer to lock optimistically
- here since if we're creating multiple Incidents a second for an
- Organization then we're likely failing at making Incidents useful.
- """
- with transaction.atomic(router.db_for_write(Organization)):
- result = self.filter(organization=organization).aggregate(models.Max("identifier"))
- identifier = result["identifier__max"]
- if identifier is None:
- identifier = 1
- else:
- identifier += 1
-
- return super().create(organization=organization, identifier=identifier, **kwargs)
-
-
-class IncidentType(Enum):
- DETECTED = 0
- ALERT_TRIGGERED = 2
-
-
-class IncidentStatus(Enum):
- OPEN = 1
- CLOSED = 2
- WARNING = 10
- CRITICAL = 20
-
-
-class IncidentStatusMethod(Enum):
- MANUAL = 1
- RULE_UPDATED = 2
- RULE_TRIGGERED = 3
-
-
-INCIDENT_STATUS = {
- IncidentStatus.OPEN: "Open",
- IncidentStatus.CLOSED: "Resolved",
- IncidentStatus.CRITICAL: "Critical",
- IncidentStatus.WARNING: "Warning",
-}
-
-
-@region_silo_only_model
-class Incident(Model):
- __relocation_scope__ = RelocationScope.Organization
-
- objects: ClassVar[IncidentManager] = IncidentManager()
-
- organization = FlexibleForeignKey("sentry.Organization")
- projects = models.ManyToManyField(
- "sentry.Project", related_name="incidents", through=IncidentProject
- )
- alert_rule = FlexibleForeignKey("sentry.AlertRule", on_delete=models.PROTECT)
- # Incrementing id that is specific to the org.
- identifier = models.IntegerField()
- # Identifier used to match incoming events from the detection algorithm
- detection_uuid = UUIDField(null=True, db_index=True)
- status = models.PositiveSmallIntegerField(default=IncidentStatus.OPEN.value)
- status_method = models.PositiveSmallIntegerField(
- default=IncidentStatusMethod.RULE_TRIGGERED.value
- )
- type = models.PositiveSmallIntegerField()
- title = models.TextField()
- # When we suspect the incident actually started
- date_started = models.DateTimeField(default=timezone.now)
- # When we actually detected the incident
- date_detected = models.DateTimeField(default=timezone.now)
- date_added = models.DateTimeField(default=timezone.now)
- date_closed = models.DateTimeField(null=True)
-
- class Meta:
- app_label = "sentry"
- db_table = "sentry_incident"
- unique_together = (("organization", "identifier"),)
- indexes = (models.Index(fields=("alert_rule", "type", "status")),)
-
- @property
- def current_end_date(self):
- """
- Returns the current end of the incident. Either the date it was closed,
- or the current time if it's still open.
- """
- return self.date_closed if self.date_closed else timezone.now()
-
- @property
- def duration(self):
- return self.current_end_date - self.date_started
-
- def normalize_before_relocation_import(
- self, pk_map: PrimaryKeyMap, scope: ImportScope, flags: ImportFlags
- ) -> int | None:
- old_pk = super().normalize_before_relocation_import(pk_map, scope, flags)
- if old_pk is None:
- return None
-
- # Generate a new UUID, if one exists.
- if self.detection_uuid:
- self.detection_uuid = uuid4()
- return old_pk
-
-
-@region_silo_only_model
-class PendingIncidentSnapshot(Model):
- __relocation_scope__ = RelocationScope.Organization
-
- incident = OneToOneCascadeDeletes("sentry.Incident", db_constraint=False)
- target_run_date = models.DateTimeField(db_index=True, default=timezone.now)
- date_added = models.DateTimeField(default=timezone.now)
-
- class Meta:
- app_label = "sentry"
- db_table = "sentry_pendingincidentsnapshot"
-
-
-@region_silo_only_model
-class IncidentSnapshot(Model):
- __relocation_scope__ = RelocationScope.Organization
-
- incident = OneToOneCascadeDeletes("sentry.Incident", db_constraint=False)
- event_stats_snapshot = FlexibleForeignKey("sentry.TimeSeriesSnapshot", db_constraint=False)
- unique_users = models.IntegerField()
- total_events = models.IntegerField()
- date_added = models.DateTimeField(default=timezone.now)
-
- class Meta:
- app_label = "sentry"
- db_table = "sentry_incidentsnapshot"
-
-
-@region_silo_only_model
-class TimeSeriesSnapshot(Model):
- __relocation_scope__ = RelocationScope.Organization
- __relocation_dependencies__ = {"sentry.Incident"}
-
- start = models.DateTimeField()
- end = models.DateTimeField()
- values = ArrayField(of=ArrayField(models.FloatField()))
- period = models.IntegerField()
- date_added = models.DateTimeField(default=timezone.now)
-
- class Meta:
- app_label = "sentry"
- db_table = "sentry_timeseriessnapshot"
-
- @classmethod
- def query_for_relocation_export(cls, q: models.Q, pk_map: PrimaryKeyMap) -> models.Q:
- pks = IncidentSnapshot.objects.filter(
- incident__in=pk_map.get_pks(get_model_name(Incident))
- ).values_list("event_stats_snapshot_id", flat=True)
-
- return q & models.Q(pk__in=pks)
-
-
-class IncidentActivityType(Enum):
- CREATED = 1
- STATUS_CHANGE = 2
- COMMENT = 3
- DETECTED = 4
-
-
-@region_silo_only_model
-class IncidentActivity(Model):
- __relocation_scope__ = RelocationScope.Organization
-
- incident = FlexibleForeignKey("sentry.Incident")
- user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, on_delete="CASCADE", null=True)
- type: models.Field[int | IncidentActivityType, int] = models.IntegerField()
- value = models.TextField(null=True)
- previous_value = models.TextField(null=True)
- comment = models.TextField(null=True)
- date_added = models.DateTimeField(default=timezone.now)
- notification_uuid = models.UUIDField("notification_uuid", null=True)
-
- class Meta:
- app_label = "sentry"
- db_table = "sentry_incidentactivity"
-
- def normalize_before_relocation_import(
- self, pk_map: PrimaryKeyMap, scope: ImportScope, flags: ImportFlags
- ) -> int | None:
- old_pk = super().normalize_before_relocation_import(pk_map, scope, flags)
- if old_pk is None:
- return None
-
- # Generate a new UUID, if one exists.
- if self.notification_uuid:
- self.notification_uuid = uuid4()
- return old_pk
-
-
-@region_silo_only_model
-class IncidentSubscription(Model):
- __relocation_scope__ = RelocationScope.Organization
-
- incident = FlexibleForeignKey("sentry.Incident", db_index=False)
- user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, on_delete="CASCADE")
- date_added = models.DateTimeField(default=timezone.now)
-
- class Meta:
- app_label = "sentry"
- db_table = "sentry_incidentsubscription"
- unique_together = (("incident", "user_id"),)
-
- __repr__ = sane_repr("incident_id", "user_id")
-
-
class AlertRuleStatus(Enum):
PENDING = 0
SNAPSHOT = 4
@@ -578,9 +273,9 @@ def _validate_actor(self):
if self.owner_id is not None and self.team_id is None and self.user_id is None:
raise ValueError("AlertRule with owner requires either team_id or user_id")
- def save(self, **kwargs: Any) -> None:
+ def save(self, *args, **kwargs: Any) -> None:
self._validate_actor()
- return super().save(**kwargs)
+ return super().save(*args, **kwargs)
@property
def created_by_id(self):
@@ -652,61 +347,6 @@ def subscribe_projects(
return []
-class TriggerStatus(Enum):
- ACTIVE = 0
- RESOLVED = 1
-
-
-class IncidentTriggerManager(BaseManager["IncidentTrigger"]):
- CACHE_KEY = "incident:triggers:%s"
-
- @classmethod
- def _build_cache_key(cls, incident_id):
- return cls.CACHE_KEY % incident_id
-
- def get_for_incident(self, incident):
- """
- Fetches the IncidentTriggers associated with an Incident. Attempts to fetch from
- cache then hits the database.
- """
- cache_key = self._build_cache_key(incident.id)
- triggers = cache.get(cache_key)
- if triggers is None:
- triggers = list(IncidentTrigger.objects.filter(incident=incident))
- cache.set(cache_key, triggers, 3600)
-
- return triggers
-
- @classmethod
- def clear_incident_cache(cls, instance, **kwargs):
- cache.delete(cls._build_cache_key(instance.id))
- assert cache.get(cls._build_cache_key(instance.id)) is None
-
- @classmethod
- def clear_incident_trigger_cache(cls, instance, **kwargs):
- cache.delete(cls._build_cache_key(instance.incident_id))
- assert cache.get(cls._build_cache_key(instance.incident_id)) is None
-
-
-@region_silo_only_model
-class IncidentTrigger(Model):
- __relocation_scope__ = RelocationScope.Organization
-
- objects: ClassVar[IncidentTriggerManager] = IncidentTriggerManager()
-
- incident = FlexibleForeignKey("sentry.Incident", db_index=False)
- alert_rule_trigger = FlexibleForeignKey("sentry.AlertRuleTrigger")
- status = models.SmallIntegerField()
- date_modified = models.DateTimeField(default=timezone.now, null=False)
- date_added = models.DateTimeField(default=timezone.now)
-
- class Meta:
- app_label = "sentry"
- db_table = "sentry_incidenttrigger"
- unique_together = (("incident", "alert_rule_trigger"),)
- indexes = (models.Index(fields=("alert_rule_trigger", "incident_id")),)
-
-
class AlertRuleTriggerManager(BaseManager["AlertRuleTrigger"]):
CACHE_KEY = "alert_rule_triggers:alert_rule:%s"
@@ -834,7 +474,7 @@ class AlertRuleTriggerAction(AbstractNotificationAction):
Type = ActionService
TargetType = ActionTarget
- _type_registrations = {}
+ _type_registrations: dict[ActionService, TypeRegistration] = {}
INTEGRATION_TYPES = frozenset(
(
@@ -989,11 +629,3 @@ def clean_expired_alerts(subscription: QuerySubscription) -> bool:
post_save.connect(AlertRuleTriggerManager.clear_alert_rule_trigger_cache, sender=AlertRule)
post_save.connect(AlertRuleTriggerManager.clear_trigger_cache, sender=AlertRuleTrigger)
post_delete.connect(AlertRuleTriggerManager.clear_trigger_cache, sender=AlertRuleTrigger)
-
-post_save.connect(IncidentManager.clear_active_incident_cache, sender=Incident)
-post_save.connect(IncidentManager.clear_active_incident_project_cache, sender=IncidentProject)
-post_delete.connect(IncidentManager.clear_active_incident_project_cache, sender=IncidentProject)
-
-post_delete.connect(IncidentTriggerManager.clear_incident_cache, sender=Incident)
-post_save.connect(IncidentTriggerManager.clear_incident_trigger_cache, sender=IncidentTrigger)
-post_delete.connect(IncidentTriggerManager.clear_incident_trigger_cache, sender=IncidentTrigger)
diff --git a/src/sentry/incidents/models/incident.py b/src/sentry/incidents/models/incident.py
new file mode 100644
index 00000000000000..fff82e641fd0c5
--- /dev/null
+++ b/src/sentry/incidents/models/incident.py
@@ -0,0 +1,395 @@
+from __future__ import annotations
+
+import logging
+from enum import Enum
+from typing import ClassVar
+from uuid import uuid4
+
+from django.conf import settings
+from django.core.cache import cache
+from django.db import IntegrityError, models, router, transaction
+from django.db.models.signals import post_delete, post_save
+from django.utils import timezone
+
+from sentry.backup.dependencies import PrimaryKeyMap, get_model_name
+from sentry.backup.helpers import ImportFlags
+from sentry.backup.scopes import ImportScope, RelocationScope
+from sentry.db.models import (
+ ArrayField,
+ FlexibleForeignKey,
+ Model,
+ OneToOneCascadeDeletes,
+ UUIDField,
+ region_silo_only_model,
+ sane_repr,
+)
+from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey
+from sentry.db.models.manager import BaseManager
+from sentry.models.organization import Organization
+from sentry.utils.retries import TimedRetryPolicy
+
+logger = logging.getLogger(__name__)
+
+
+@region_silo_only_model
+class IncidentProject(Model):
+ __relocation_scope__ = RelocationScope.Excluded
+
+ project = FlexibleForeignKey("sentry.Project", db_index=False, db_constraint=False)
+ incident = FlexibleForeignKey("sentry.Incident")
+
+ class Meta:
+ app_label = "sentry"
+ db_table = "sentry_incidentproject"
+ unique_together = (("project", "incident"),)
+
+
+@region_silo_only_model
+class IncidentSeen(Model):
+ __relocation_scope__ = RelocationScope.Excluded
+
+ incident = FlexibleForeignKey("sentry.Incident")
+ user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, on_delete="CASCADE", db_index=False)
+ last_seen = models.DateTimeField(default=timezone.now)
+
+ class Meta:
+ app_label = "sentry"
+ db_table = "sentry_incidentseen"
+ unique_together = (("user_id", "incident"),)
+
+
+class IncidentManager(BaseManager["Incident"]):
+ CACHE_KEY = "incidents:active:%s:%s"
+
+ def fetch_for_organization(self, organization, projects):
+ return self.filter(organization=organization, projects__in=projects).distinct()
+
+ @classmethod
+ def _build_active_incident_cache_key(cls, alert_rule_id, project_id):
+ return cls.CACHE_KEY % (alert_rule_id, project_id)
+
+ def get_active_incident(self, alert_rule, project):
+ cache_key = self._build_active_incident_cache_key(alert_rule.id, project.id)
+ incident = cache.get(cache_key)
+ if incident is None:
+ try:
+ incident = (
+ Incident.objects.filter(
+ type=IncidentType.ALERT_TRIGGERED.value,
+ alert_rule=alert_rule,
+ projects=project,
+ )
+ .exclude(status=IncidentStatus.CLOSED.value)
+ .order_by("-date_added")[0]
+ )
+ except IndexError:
+ # Set this to False so that we can have a negative cache as well.
+ incident = False
+ cache.set(cache_key, incident)
+ if incident is False:
+ incident = None
+ elif not incident:
+            # A falsey, non-None value in the cache means we previously recorded that
+            # there is no active incident. Normalize it to None for callers.
+ incident = None
+
+ return incident
+
+ @classmethod
+ def clear_active_incident_cache(cls, instance, **kwargs):
+ for project in instance.projects.all():
+ cache.delete(cls._build_active_incident_cache_key(instance.alert_rule_id, project.id))
+ assert (
+ cache.get(cls._build_active_incident_cache_key(instance.alert_rule_id, project.id))
+ is None
+ )
+
+ @classmethod
+ def clear_active_incident_project_cache(cls, instance, **kwargs):
+ cache.delete(
+ cls._build_active_incident_cache_key(
+ instance.incident.alert_rule_id, instance.project_id
+ )
+ )
+ assert (
+ cache.get(
+ cls._build_active_incident_cache_key(
+ instance.incident.alert_rule_id, instance.project_id
+ )
+ )
+ is None
+ )
+
+ @TimedRetryPolicy.wrap(timeout=5, exceptions=(IntegrityError,))
+ def create(self, organization, **kwargs):
+ """
+        Creates an Incident. Fetches the maximum identifier value for the org
+        and increments it by one. If two incidents are created for the
+        Organization at the same time an IntegrityError is raised, and we
+        retry several times. We lock optimistically here since, if we're
+        creating multiple Incidents a second for an Organization, we're
+        likely failing at making Incidents useful anyway.
+ """
+ with transaction.atomic(router.db_for_write(Organization)):
+ result = self.filter(organization=organization).aggregate(models.Max("identifier"))
+ identifier = result["identifier__max"]
+ if identifier is None:
+ identifier = 1
+ else:
+ identifier += 1
+
+ return super().create(organization=organization, identifier=identifier, **kwargs)
+
+
+class IncidentType(Enum):
+ DETECTED = 0
+ ALERT_TRIGGERED = 2
+
+
+class IncidentStatus(Enum):
+ OPEN = 1
+ CLOSED = 2
+ WARNING = 10
+ CRITICAL = 20
+
+
+class IncidentStatusMethod(Enum):
+ MANUAL = 1
+ RULE_UPDATED = 2
+ RULE_TRIGGERED = 3
+
+
+INCIDENT_STATUS = {
+ IncidentStatus.OPEN: "Open",
+ IncidentStatus.CLOSED: "Resolved",
+ IncidentStatus.CRITICAL: "Critical",
+ IncidentStatus.WARNING: "Warning",
+}
+
+
+@region_silo_only_model
+class Incident(Model):
+ __relocation_scope__ = RelocationScope.Organization
+
+ objects: ClassVar[IncidentManager] = IncidentManager()
+
+ organization = FlexibleForeignKey("sentry.Organization")
+ projects = models.ManyToManyField(
+ "sentry.Project", related_name="incidents", through=IncidentProject
+ )
+ alert_rule = FlexibleForeignKey("sentry.AlertRule", on_delete=models.PROTECT)
+ # Incrementing id that is specific to the org.
+ identifier = models.IntegerField()
+ # Identifier used to match incoming events from the detection algorithm
+ detection_uuid = UUIDField(null=True, db_index=True)
+ status = models.PositiveSmallIntegerField(default=IncidentStatus.OPEN.value)
+ status_method = models.PositiveSmallIntegerField(
+ default=IncidentStatusMethod.RULE_TRIGGERED.value
+ )
+ type = models.PositiveSmallIntegerField()
+ title = models.TextField()
+ # When we suspect the incident actually started
+ date_started = models.DateTimeField(default=timezone.now)
+ # When we actually detected the incident
+ date_detected = models.DateTimeField(default=timezone.now)
+ date_added = models.DateTimeField(default=timezone.now)
+ date_closed = models.DateTimeField(null=True)
+
+ class Meta:
+ app_label = "sentry"
+ db_table = "sentry_incident"
+ unique_together = (("organization", "identifier"),)
+ indexes = (models.Index(fields=("alert_rule", "type", "status")),)
+
+ @property
+ def current_end_date(self):
+ """
+ Returns the current end of the incident. Either the date it was closed,
+ or the current time if it's still open.
+ """
+ return self.date_closed if self.date_closed else timezone.now()
+
+ @property
+ def duration(self):
+ return self.current_end_date - self.date_started
+
+ def normalize_before_relocation_import(
+ self, pk_map: PrimaryKeyMap, scope: ImportScope, flags: ImportFlags
+ ) -> int | None:
+ old_pk = super().normalize_before_relocation_import(pk_map, scope, flags)
+ if old_pk is None:
+ return None
+
+        # Replace the existing UUID, if set, with a freshly generated one.
+ if self.detection_uuid:
+ self.detection_uuid = uuid4()
+ return old_pk
+
+
+@region_silo_only_model
+class PendingIncidentSnapshot(Model):
+ __relocation_scope__ = RelocationScope.Organization
+
+ incident = OneToOneCascadeDeletes("sentry.Incident", db_constraint=False)
+ target_run_date = models.DateTimeField(db_index=True, default=timezone.now)
+ date_added = models.DateTimeField(default=timezone.now)
+
+ class Meta:
+ app_label = "sentry"
+ db_table = "sentry_pendingincidentsnapshot"
+
+
+@region_silo_only_model
+class IncidentSnapshot(Model):
+ __relocation_scope__ = RelocationScope.Organization
+
+ incident = OneToOneCascadeDeletes("sentry.Incident", db_constraint=False)
+ event_stats_snapshot = FlexibleForeignKey("sentry.TimeSeriesSnapshot", db_constraint=False)
+ unique_users = models.IntegerField()
+ total_events = models.IntegerField()
+ date_added = models.DateTimeField(default=timezone.now)
+
+ class Meta:
+ app_label = "sentry"
+ db_table = "sentry_incidentsnapshot"
+
+
+@region_silo_only_model
+class TimeSeriesSnapshot(Model):
+ __relocation_scope__ = RelocationScope.Organization
+ __relocation_dependencies__ = {"sentry.Incident"}
+
+ start = models.DateTimeField()
+ end = models.DateTimeField()
+ values = ArrayField(of=ArrayField(models.FloatField()))
+ period = models.IntegerField()
+ date_added = models.DateTimeField(default=timezone.now)
+
+ class Meta:
+ app_label = "sentry"
+ db_table = "sentry_timeseriessnapshot"
+
+ @classmethod
+ def query_for_relocation_export(cls, q: models.Q, pk_map: PrimaryKeyMap) -> models.Q:
+ pks = IncidentSnapshot.objects.filter(
+ incident__in=pk_map.get_pks(get_model_name(Incident))
+ ).values_list("event_stats_snapshot_id", flat=True)
+
+ return q & models.Q(pk__in=pks)
+
+
+class IncidentActivityType(Enum):
+ CREATED = 1
+ STATUS_CHANGE = 2
+ COMMENT = 3
+ DETECTED = 4
+
+
+@region_silo_only_model
+class IncidentActivity(Model):
+ __relocation_scope__ = RelocationScope.Organization
+
+ incident = FlexibleForeignKey("sentry.Incident")
+ user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, on_delete="CASCADE", null=True)
+ type: models.Field = models.IntegerField()
+ value = models.TextField(null=True)
+ previous_value = models.TextField(null=True)
+ comment = models.TextField(null=True)
+ date_added = models.DateTimeField(default=timezone.now)
+ notification_uuid = models.UUIDField("notification_uuid", null=True)
+
+ class Meta:
+ app_label = "sentry"
+ db_table = "sentry_incidentactivity"
+
+ def normalize_before_relocation_import(
+ self, pk_map: PrimaryKeyMap, scope: ImportScope, flags: ImportFlags
+ ) -> int | None:
+ old_pk = super().normalize_before_relocation_import(pk_map, scope, flags)
+ if old_pk is None:
+ return None
+
+        # Replace the existing UUID, if set, with a freshly generated one.
+ if self.notification_uuid:
+ self.notification_uuid = uuid4()
+ return old_pk
+
+
+@region_silo_only_model
+class IncidentSubscription(Model):
+ __relocation_scope__ = RelocationScope.Organization
+
+ incident = FlexibleForeignKey("sentry.Incident", db_index=False)
+ user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, on_delete="CASCADE")
+ date_added = models.DateTimeField(default=timezone.now)
+
+ class Meta:
+ app_label = "sentry"
+ db_table = "sentry_incidentsubscription"
+ unique_together = (("incident", "user_id"),)
+
+ __repr__ = sane_repr("incident_id", "user_id")
+
+
+class TriggerStatus(Enum):
+ ACTIVE = 0
+ RESOLVED = 1
+
+
+class IncidentTriggerManager(BaseManager["IncidentTrigger"]):
+ CACHE_KEY = "incident:triggers:%s"
+
+ @classmethod
+ def _build_cache_key(cls, incident_id):
+ return cls.CACHE_KEY % incident_id
+
+ def get_for_incident(self, incident):
+ """
+        Fetches the IncidentTriggers associated with an Incident. Attempts to fetch
+        from the cache first, falling back to the database on a miss.
+ """
+ cache_key = self._build_cache_key(incident.id)
+ triggers = cache.get(cache_key)
+ if triggers is None:
+ triggers = list(IncidentTrigger.objects.filter(incident=incident))
+ cache.set(cache_key, triggers, 3600)
+
+ return triggers
+
+ @classmethod
+ def clear_incident_cache(cls, instance, **kwargs):
+ cache.delete(cls._build_cache_key(instance.id))
+ assert cache.get(cls._build_cache_key(instance.id)) is None
+
+ @classmethod
+ def clear_incident_trigger_cache(cls, instance, **kwargs):
+ cache.delete(cls._build_cache_key(instance.incident_id))
+ assert cache.get(cls._build_cache_key(instance.incident_id)) is None
+
+
+@region_silo_only_model
+class IncidentTrigger(Model):
+ __relocation_scope__ = RelocationScope.Organization
+
+ objects: ClassVar[IncidentTriggerManager] = IncidentTriggerManager()
+
+ incident = FlexibleForeignKey("sentry.Incident", db_index=False)
+ alert_rule_trigger = FlexibleForeignKey("sentry.AlertRuleTrigger")
+ status = models.SmallIntegerField()
+ date_modified = models.DateTimeField(default=timezone.now, null=False)
+ date_added = models.DateTimeField(default=timezone.now)
+
+ class Meta:
+ app_label = "sentry"
+ db_table = "sentry_incidenttrigger"
+ unique_together = (("incident", "alert_rule_trigger"),)
+ indexes = (models.Index(fields=("alert_rule_trigger", "incident_id")),)
+
+
+post_save.connect(IncidentManager.clear_active_incident_cache, sender=Incident)
+post_save.connect(IncidentManager.clear_active_incident_project_cache, sender=IncidentProject)
+post_delete.connect(IncidentManager.clear_active_incident_project_cache, sender=IncidentProject)
+
+post_delete.connect(IncidentTriggerManager.clear_incident_cache, sender=Incident)
+post_save.connect(IncidentTriggerManager.clear_incident_trigger_cache, sender=IncidentTrigger)
+post_delete.connect(IncidentTriggerManager.clear_incident_trigger_cache, sender=IncidentTrigger)
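One detail of the moved IncidentManager worth calling out is the negative-cache convention in get_active_incident: a cache miss returns None, while a cached False means "we already looked and there is no active incident". A minimal sketch of the same pattern, with lookup_active_incident standing in as an illustrative placeholder for the real query:

    def get_active_incident_cached(cache, cache_key, lookup_active_incident):
        cached = cache.get(cache_key)
        if cached is None:
            # Cache miss: run the query and store False as an explicit "nothing found" marker.
            cached = lookup_active_incident() or False
            cache.set(cache_key, cached)
        # Map the cached negative result back to None for callers.
        return cached or None
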
diff --git a/src/sentry/incidents/receivers.py b/src/sentry/incidents/receivers.py
index a26024e8afeea7..de8c5e03703a51 100644
--- a/src/sentry/incidents/receivers.py
+++ b/src/sentry/incidents/receivers.py
@@ -3,7 +3,8 @@
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
-from sentry.incidents.models import AlertRule, IncidentTrigger
+from sentry.incidents.models.alert_rule import AlertRule
+from sentry.incidents.models.incident import IncidentTrigger
from sentry.models.project import Project
diff --git a/src/sentry/incidents/serializers/__init__.py b/src/sentry/incidents/serializers/__init__.py
index d544785ec92df7..1f5aed9e2af469 100644
--- a/src/sentry/incidents/serializers/__init__.py
+++ b/src/sentry/incidents/serializers/__init__.py
@@ -1,4 +1,4 @@
-from sentry.incidents.models import AlertRuleTriggerAction
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
from sentry.snuba.dataset import Dataset
from sentry.snuba.models import SnubaQuery, SnubaQueryEventType
diff --git a/src/sentry/incidents/serializers/alert_rule.py b/src/sentry/incidents/serializers/alert_rule.py
index 8d3d009fb87a4e..559cdd3269f23b 100644
--- a/src/sentry/incidents/serializers/alert_rule.py
+++ b/src/sentry/incidents/serializers/alert_rule.py
@@ -26,7 +26,12 @@
translate_aggregate_field,
update_alert_rule,
)
-from sentry.incidents.models import AlertRule, AlertRuleThresholdType, AlertRuleTrigger
+from sentry.incidents.models.alert_rule import (
+ AlertRule,
+ AlertRuleMonitorType,
+ AlertRuleThresholdType,
+ AlertRuleTrigger,
+)
from sentry.snuba.dataset import Dataset
from sentry.snuba.entity_subscription import (
ENTITY_TIME_COLUMNS,
@@ -196,6 +201,16 @@ def validate_threshold_type(self, threshold_type):
% [item.value for item in AlertRuleThresholdType]
)
+ def validate_monitor_type(self, monitor_type):
+ if monitor_type > 0 and not features.has(
+ "organizations:activated-alert-rules",
+ self.context["organization"],
+ actor=self.context.get("user", None),
+ ):
+ raise serializers.ValidationError("Invalid monitor type")
+
+ return AlertRuleMonitorType(monitor_type)
+
def validate(self, data):
"""
Performs validation on an alert rule's data.
diff --git a/src/sentry/incidents/serializers/alert_rule_trigger.py b/src/sentry/incidents/serializers/alert_rule_trigger.py
index c8820301190607..e6aa7d7fffad18 100644
--- a/src/sentry/incidents/serializers/alert_rule_trigger.py
+++ b/src/sentry/incidents/serializers/alert_rule_trigger.py
@@ -10,7 +10,7 @@
rewrite_trigger_action_fields,
update_alert_rule_trigger,
)
-from sentry.incidents.models import AlertRuleTrigger, AlertRuleTriggerAction
+from sentry.incidents.models.alert_rule import AlertRuleTrigger, AlertRuleTriggerAction
from .alert_rule_trigger_action import AlertRuleTriggerActionSerializer
diff --git a/src/sentry/incidents/serializers/alert_rule_trigger_action.py b/src/sentry/incidents/serializers/alert_rule_trigger_action.py
index 810852c293efaa..c326db9c97d47b 100644
--- a/src/sentry/incidents/serializers/alert_rule_trigger_action.py
+++ b/src/sentry/incidents/serializers/alert_rule_trigger_action.py
@@ -8,7 +8,7 @@
create_alert_rule_trigger_action,
update_alert_rule_trigger_action,
)
-from sentry.incidents.models import AlertRuleTriggerAction
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
from sentry.incidents.serializers import (
ACTION_TARGET_TYPE_TO_STRING,
STRING_TO_ACTION_TARGET_TYPE,
diff --git a/src/sentry/incidents/subscription_processor.py b/src/sentry/incidents/subscription_processor.py
index 70b7fb531091d4..9024252512b0cd 100644
--- a/src/sentry/incidents/subscription_processor.py
+++ b/src/sentry/incidents/subscription_processor.py
@@ -22,11 +22,14 @@
deduplicate_trigger_actions,
update_incident_status,
)
-from sentry.incidents.models import (
+from sentry.incidents.models.alert_rule import (
AlertRule,
AlertRuleMonitorType,
AlertRuleThresholdType,
AlertRuleTrigger,
+ invoke_alert_subscription_callback,
+)
+from sentry.incidents.models.incident import (
Incident,
IncidentActivity,
IncidentStatus,
@@ -34,7 +37,6 @@
IncidentTrigger,
IncidentType,
TriggerStatus,
- invoke_alert_subscription_callback,
)
from sentry.incidents.tasks import handle_trigger_action
from sentry.incidents.utils.types import QuerySubscriptionUpdate
diff --git a/src/sentry/incidents/tasks.py b/src/sentry/incidents/tasks.py
index 148ad317ddcb1c..6cfe422c3fc00e 100644
--- a/src/sentry/incidents/tasks.py
+++ b/src/sentry/incidents/tasks.py
@@ -7,10 +7,9 @@
from django.urls import reverse
from sentry.auth.access import from_user
-from sentry.incidents.models import (
+from sentry.incidents.models.alert_rule import AlertRuleStatus, AlertRuleTriggerAction
+from sentry.incidents.models.incident import (
INCIDENT_STATUS,
- AlertRuleStatus,
- AlertRuleTriggerAction,
Incident,
IncidentActivity,
IncidentActivityType,
@@ -240,7 +239,7 @@ def handle_trigger_action(
)
def auto_resolve_snapshot_incidents(alert_rule_id: int, **kwargs: Any) -> None:
from sentry.incidents.logic import update_incident_status
- from sentry.incidents.models import AlertRule
+ from sentry.incidents.models.alert_rule import AlertRule
try:
alert_rule = AlertRule.objects_with_snapshots.get(id=alert_rule_id)
diff --git a/src/sentry/incidents/utils/sentry_apps.py b/src/sentry/incidents/utils/sentry_apps.py
index 3eeefbb6093a56..d4b6cd175fcb3c 100644
--- a/src/sentry/incidents/utils/sentry_apps.py
+++ b/src/sentry/incidents/utils/sentry_apps.py
@@ -5,7 +5,7 @@
from sentry.auth.access import NoAccess
from sentry.incidents.logic import get_filtered_actions
-from sentry.incidents.models import AlertRuleTriggerAction
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
from sentry.incidents.serializers import AlertRuleTriggerActionSerializer
from sentry.services.hybrid_cloud.app import app_service
diff --git a/src/sentry/ingest/types.py b/src/sentry/ingest/types.py
index f0dd0c6ad36bf5..0f07507dd8ef43 100644
--- a/src/sentry/ingest/types.py
+++ b/src/sentry/ingest/types.py
@@ -6,19 +6,3 @@ class ConsumerType:
Events = "events" # consumes simple events ( from the Events topic)
Attachments = "attachments" # consumes events with attachments ( from the Attachments topic)
Transactions = "transactions" # consumes transaction events ( from the Transactions topic)
-
- @staticmethod
- def all():
- return (ConsumerType.Events, ConsumerType.Attachments, ConsumerType.Transactions)
-
- @staticmethod
- def get_topic_name(consumer_type):
- from django.conf import settings
-
- if consumer_type == ConsumerType.Events:
- return settings.KAFKA_INGEST_EVENTS
- elif consumer_type == ConsumerType.Attachments:
- return settings.KAFKA_INGEST_ATTACHMENTS
- elif consumer_type == ConsumerType.Transactions:
- return settings.KAFKA_INGEST_TRANSACTIONS
- raise ValueError("Invalid consumer type", consumer_type)
diff --git a/src/sentry/integrations/bitbucket/uninstalled.py b/src/sentry/integrations/bitbucket/uninstalled.py
index 90ce7563e513c7..071e1faed76df0 100644
--- a/src/sentry/integrations/bitbucket/uninstalled.py
+++ b/src/sentry/integrations/bitbucket/uninstalled.py
@@ -8,9 +8,8 @@
from sentry.constants import ObjectStatus
from sentry.integrations.utils import AtlassianConnectValidationError, get_integration_from_jwt
from sentry.models.integrations.integration import Integration
-from sentry.models.organization import Organization
-from sentry.models.repository import Repository
from sentry.services.hybrid_cloud.integration import integration_service
+from sentry.services.hybrid_cloud.repository import repository_service
@control_silo_endpoint
@@ -44,15 +43,12 @@ def post(self, request: Request, *args, **kwargs) -> Response:
org_integrations = integration_service.get_organization_integrations(
integration_id=integration.id
)
- organizations = Organization.objects.filter(
- id__in=[oi.organization_id for oi in org_integrations]
- )
- # TODO: Replace with repository_service; support status write
- Repository.objects.filter(
- organization_id__in=organizations.values_list("id", flat=True),
- provider="integrations:bitbucket",
- integration_id=integration.id,
- ).update(status=ObjectStatus.DISABLED)
+ for oi in org_integrations:
+ repository_service.disable_repositories_for_integration(
+ organization_id=oi.organization_id,
+ integration_id=integration.id,
+ provider="integrations:bitbucket",
+ )
return self.respond()
diff --git a/src/sentry/integrations/discord/actions/metric_alert.py b/src/sentry/integrations/discord/actions/metric_alert.py
index 44d5c7816bb643..a4578a84c411cf 100644
--- a/src/sentry/integrations/discord/actions/metric_alert.py
+++ b/src/sentry/integrations/discord/actions/metric_alert.py
@@ -4,7 +4,8 @@
from sentry import features
from sentry.incidents.charts import build_metric_alert_chart
-from sentry.incidents.models import AlertRuleTriggerAction, Incident, IncidentStatus
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
+from sentry.incidents.models.incident import Incident, IncidentStatus
from sentry.integrations.discord.client import DiscordClient
from sentry.integrations.discord.message_builder.metric_alerts import (
DiscordMetricAlertMessageBuilder,
diff --git a/src/sentry/integrations/discord/message_builder/metric_alerts.py b/src/sentry/integrations/discord/message_builder/metric_alerts.py
index 653b036dfa31c7..dc43aeac8cbeaa 100644
--- a/src/sentry/integrations/discord/message_builder/metric_alerts.py
+++ b/src/sentry/integrations/discord/message_builder/metric_alerts.py
@@ -3,7 +3,8 @@
import time
from datetime import datetime
-from sentry.incidents.models import AlertRule, Incident, IncidentStatus
+from sentry.incidents.models.alert_rule import AlertRule
+from sentry.incidents.models.incident import Incident, IncidentStatus
from sentry.integrations.discord.message_builder import INCIDENT_COLOR_MAPPING, LEVEL_TO_COLOR
from sentry.integrations.discord.message_builder.base.base import DiscordMessageBuilder
from sentry.integrations.discord.message_builder.base.embed.base import DiscordMessageEmbed
diff --git a/src/sentry/integrations/metric_alerts.py b/src/sentry/integrations/metric_alerts.py
index 618612d0f7fbed..cc628c6a042d8b 100644
--- a/src/sentry/integrations/metric_alerts.py
+++ b/src/sentry/integrations/metric_alerts.py
@@ -7,10 +7,9 @@
from sentry.constants import CRASH_RATE_ALERT_AGGREGATE_ALIAS
from sentry.incidents.logic import get_incident_aggregates
-from sentry.incidents.models import (
+from sentry.incidents.models.alert_rule import AlertRule, AlertRuleThresholdType
+from sentry.incidents.models.incident import (
INCIDENT_STATUS,
- AlertRule,
- AlertRuleThresholdType,
Incident,
IncidentStatus,
IncidentTrigger,
diff --git a/src/sentry/integrations/msteams/card_builder/incident_attachment.py b/src/sentry/integrations/msteams/card_builder/incident_attachment.py
index 9ab336eb2582d3..72e6d0c67c1bf8 100644
--- a/src/sentry/integrations/msteams/card_builder/incident_attachment.py
+++ b/src/sentry/integrations/msteams/card_builder/incident_attachment.py
@@ -2,7 +2,7 @@
from typing import Literal
-from sentry.incidents.models import Incident, IncidentStatus
+from sentry.incidents.models.incident import Incident, IncidentStatus
from sentry.integrations.metric_alerts import incident_attachment_info
from sentry.integrations.msteams.card_builder.block import (
AdaptiveCard,
diff --git a/src/sentry/integrations/msteams/utils.py b/src/sentry/integrations/msteams/utils.py
index c1a3f64ae05b84..b88de0a990b39c 100644
--- a/src/sentry/integrations/msteams/utils.py
+++ b/src/sentry/integrations/msteams/utils.py
@@ -3,7 +3,8 @@
import enum
import logging
-from sentry.incidents.models import AlertRuleTriggerAction, Incident, IncidentStatus
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
+from sentry.incidents.models.incident import Incident, IncidentStatus
from sentry.models.integrations.integration import Integration
from sentry.services.hybrid_cloud.integration import integration_service
diff --git a/src/sentry/integrations/opsgenie/utils.py b/src/sentry/integrations/opsgenie/utils.py
index 189ba1cf309fae..06c031c1dbd31f 100644
--- a/src/sentry/integrations/opsgenie/utils.py
+++ b/src/sentry/integrations/opsgenie/utils.py
@@ -4,7 +4,8 @@
from typing import Any, cast
from sentry.constants import ObjectStatus
-from sentry.incidents.models import AlertRuleTriggerAction, Incident, IncidentStatus
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
+from sentry.incidents.models.incident import Incident, IncidentStatus
from sentry.integrations.metric_alerts import incident_attachment_info
from sentry.services.hybrid_cloud.integration import integration_service
from sentry.services.hybrid_cloud.integration.model import RpcOrganizationIntegration
diff --git a/src/sentry/integrations/pagerduty/utils.py b/src/sentry/integrations/pagerduty/utils.py
index 1b7a853d7fcd7c..91a403802a095e 100644
--- a/src/sentry/integrations/pagerduty/utils.py
+++ b/src/sentry/integrations/pagerduty/utils.py
@@ -6,7 +6,8 @@
from django.db import router, transaction
from django.http import Http404
-from sentry.incidents.models import AlertRuleTriggerAction, Incident, IncidentStatus
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
+from sentry.incidents.models.incident import Incident, IncidentStatus
from sentry.integrations.metric_alerts import incident_attachment_info
from sentry.models.integrations.organization_integration import OrganizationIntegration
from sentry.services.hybrid_cloud.integration import integration_service
diff --git a/src/sentry/integrations/repository/metric_alert.py b/src/sentry/integrations/repository/metric_alert.py
index 21bd2b7339680d..2c446687339a90 100644
--- a/src/sentry/integrations/repository/metric_alert.py
+++ b/src/sentry/integrations/repository/metric_alert.py
@@ -3,7 +3,8 @@
from dataclasses import dataclass
from logging import Logger, getLogger
-from sentry.incidents.models import AlertRuleTriggerAction, Incident
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
+from sentry.incidents.models.incident import Incident
from sentry.integrations.repository.base import BaseNewNotificationMessage, BaseNotificationMessage
from sentry.models.notificationmessage import NotificationMessage
@@ -22,9 +23,11 @@ def from_model(cls, instance: NotificationMessage) -> MetricAlertNotificationMes
error_code=instance.error_code,
error_details=instance.error_details,
message_identifier=instance.message_identifier,
- parent_notification_message_id=instance.parent_notification_message.id
- if instance.parent_notification_message
- else None,
+ parent_notification_message_id=(
+ instance.parent_notification_message.id
+ if instance.parent_notification_message
+ else None
+ ),
incident=instance.incident,
trigger_action=instance.trigger_action,
date_added=instance.date_added,
diff --git a/src/sentry/integrations/slack/message_builder/incidents.py b/src/sentry/integrations/slack/message_builder/incidents.py
index bf59b5a0bd1018..e8e37ea400742e 100644
--- a/src/sentry/integrations/slack/message_builder/incidents.py
+++ b/src/sentry/integrations/slack/message_builder/incidents.py
@@ -1,6 +1,6 @@
from datetime import datetime
-from sentry.incidents.models import Incident, IncidentStatus
+from sentry.incidents.models.incident import Incident, IncidentStatus
from sentry.integrations.metric_alerts import incident_attachment_info
from sentry.integrations.slack.message_builder import (
INCIDENT_COLOR_MAPPING,
diff --git a/src/sentry/integrations/slack/message_builder/metric_alerts.py b/src/sentry/integrations/slack/message_builder/metric_alerts.py
index 6f4566ab4886fe..3ea351d89981a2 100644
--- a/src/sentry/integrations/slack/message_builder/metric_alerts.py
+++ b/src/sentry/integrations/slack/message_builder/metric_alerts.py
@@ -1,4 +1,5 @@
-from sentry.incidents.models import AlertRule, Incident, IncidentStatus
+from sentry.incidents.models.alert_rule import AlertRule
+from sentry.incidents.models.incident import Incident, IncidentStatus
from sentry.integrations.metric_alerts import metric_alert_attachment_info
from sentry.integrations.slack.message_builder import (
INCIDENT_COLOR_MAPPING,
diff --git a/src/sentry/integrations/slack/unfurl/metric_alerts.py b/src/sentry/integrations/slack/unfurl/metric_alerts.py
index 166c268421ef2f..460d0df36b13a7 100644
--- a/src/sentry/integrations/slack/unfurl/metric_alerts.py
+++ b/src/sentry/integrations/slack/unfurl/metric_alerts.py
@@ -12,7 +12,8 @@
from sentry import features
from sentry.incidents.charts import build_metric_alert_chart
-from sentry.incidents.models import AlertRule, Incident
+from sentry.incidents.models.alert_rule import AlertRule
+from sentry.incidents.models.incident import Incident
from sentry.integrations.slack.message_builder.metric_alerts import SlackMetricAlertMessageBuilder
from sentry.models.integrations.integration import Integration
from sentry.models.organization import Organization
diff --git a/src/sentry/integrations/slack/utils/notifications.py b/src/sentry/integrations/slack/utils/notifications.py
index 62f624f68f35d4..dc87acf545f6eb 100644
--- a/src/sentry/integrations/slack/utils/notifications.py
+++ b/src/sentry/integrations/slack/utils/notifications.py
@@ -8,7 +8,8 @@
from sentry import features
from sentry.constants import ObjectStatus
from sentry.incidents.charts import build_metric_alert_chart
-from sentry.incidents.models import AlertRuleTriggerAction, Incident, IncidentStatus
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
+from sentry.incidents.models.incident import Incident, IncidentStatus
from sentry.integrations.repository import get_default_metric_alert_repository
from sentry.integrations.repository.metric_alert import (
MetricAlertNotificationMessageRepository,
diff --git a/src/sentry/integrations/slack/webhooks/action.py b/src/sentry/integrations/slack/webhooks/action.py
index 8dffd76b55c058..b258439d3ad657 100644
--- a/src/sentry/integrations/slack/webhooks/action.py
+++ b/src/sentry/integrations/slack/webhooks/action.py
@@ -776,7 +776,7 @@ def post(self, request: Request) -> Response:
use_block_kit = False
if len(org_integrations):
org_context = organization_service.get_organization_by_id(
- id=org_integrations[0].organization_id
+ id=org_integrations[0].organization_id, include_projects=False, include_teams=False
)
if org_context:
use_block_kit = any(
diff --git a/src/sentry/integrations/slack/webhooks/event.py b/src/sentry/integrations/slack/webhooks/event.py
index 2d41db1f02fcf8..3bd70488286be6 100644
--- a/src/sentry/integrations/slack/webhooks/event.py
+++ b/src/sentry/integrations/slack/webhooks/event.py
@@ -133,7 +133,9 @@ def on_link_shared(self, request: Request, slack_request: SlackDMRequest) -> boo
)
organization_id = ois[0].organization_id if len(ois) > 0 else None
organization_context = (
- organization_service.get_organization_by_id(id=organization_id, user_id=None)
+ organization_service.get_organization_by_id(
+ id=organization_id, user_id=None, include_projects=False, include_teams=False
+ )
if organization_id
else None
)
diff --git a/src/sentry/integrations/utils/scope.py b/src/sentry/integrations/utils/scope.py
index aa8a1d4dcfe9aa..54b54609d51d02 100644
--- a/src/sentry/integrations/utils/scope.py
+++ b/src/sentry/integrations/utils/scope.py
@@ -89,7 +89,9 @@ def bind_org_context_from_integration(
check_tag_for_scope_bleed("integration_id", integration_id, add_to_scope=False)
elif len(org_integrations) == 1:
org_integration = org_integrations[0]
- org = organization_service.get_organization_by_id(id=org_integration.organization_id)
+ org = organization_service.get_organization_by_id(
+ id=org_integration.organization_id, include_teams=False, include_projects=False
+ )
if org is not None:
bind_organization_context(org.organization)
else:
diff --git a/src/sentry/issues/attributes.py b/src/sentry/issues/attributes.py
index 5e0e699662f779..9b15f831558564 100644
--- a/src/sentry/issues/attributes.py
+++ b/src/sentry/issues/attributes.py
@@ -6,7 +6,7 @@
import requests
import urllib3
-from arroyo import Topic
+from arroyo import Topic as ArroyoTopic
from arroyo.backends.kafka import KafkaPayload, KafkaProducer, build_kafka_configuration
from django.conf import settings
from django.db.models import F, Window
@@ -16,6 +16,7 @@
from sentry_kafka_schemas.schema_types.group_attributes_v1 import GroupAttributesSnapshot
from sentry import options
+from sentry.conf.types.kafka_definition import Topic
from sentry.models.group import Group
from sentry.models.groupassignee import GroupAssignee
from sentry.models.groupowner import GroupOwner, GroupOwnerType
@@ -44,7 +45,7 @@ class GroupValues:
def _get_attribute_snapshot_producer() -> KafkaProducer:
- cluster_name = get_topic_definition(settings.KAFKA_GROUP_ATTRIBUTES)["cluster"]
+ cluster_name = get_topic_definition(Topic.GROUP_ATTRIBUTES)["cluster"]
producer_config = get_kafka_producer_cluster_options(cluster_name)
producer_config.pop("compression.type", None)
producer_config.pop("message.max.bytes", None)
@@ -122,7 +123,7 @@ def produce_snapshot_to_kafka(snapshot: GroupAttributesSnapshot) -> None:
raise snuba.SnubaError(err)
else:
payload = KafkaPayload(None, json.dumps(snapshot).encode("utf-8"), [])
- _attribute_snapshot_producer.produce(Topic(settings.KAFKA_GROUP_ATTRIBUTES), payload)
+ _attribute_snapshot_producer.produce(ArroyoTopic(settings.KAFKA_GROUP_ATTRIBUTES), payload)
def _retrieve_group_values(group_id: int) -> GroupValues:
diff --git a/src/sentry/issues/occurrence_consumer.py b/src/sentry/issues/occurrence_consumer.py
index e9699900c3bbe9..e11bbc35c7f81f 100644
--- a/src/sentry/issues/occurrence_consumer.py
+++ b/src/sentry/issues/occurrence_consumer.py
@@ -8,7 +8,7 @@
import jsonschema
import sentry_sdk
from django.utils import timezone
-from sentry_sdk.tracing import NoOpSpan, Transaction
+from sentry_sdk.tracing import NoOpSpan, Span, Transaction
from sentry import nodestore
from sentry.event_manager import GroupInfo
@@ -52,7 +52,7 @@ def save_event_from_occurrence(
def lookup_event(project_id: int, event_id: str) -> Event:
- data = nodestore.get(Event.generate_node_id(project_id, event_id))
+ data = nodestore.backend.get(Event.generate_node_id(project_id, event_id))
if data is None:
raise EventLookupError(f"Failed to lookup event({event_id}) for project_id({project_id})")
event = Event(event_id=event_id, project_id=project_id)
@@ -214,8 +214,8 @@ def _get_kwargs(payload: Mapping[str, Any]) -> Mapping[str, Any]:
def process_occurrence_message(
- message: Mapping[str, Any], txn: Transaction | NoOpSpan
-) -> tuple[IssueOccurrence, GroupInfo | None]:
+ message: Mapping[str, Any], txn: Transaction | NoOpSpan | Span
+) -> tuple[IssueOccurrence, GroupInfo | None] | None:
with metrics.timer("occurrence_consumer._process_message._get_kwargs"):
kwargs = _get_kwargs(message)
occurrence_data = kwargs["occurrence_data"]
@@ -260,7 +260,9 @@ def process_occurrence_message(
return lookup_event_and_process_issue_occurrence(kwargs["occurrence_data"])
-def _process_message(message: Mapping[str, Any]) -> tuple[IssueOccurrence, GroupInfo | None] | None:
+def _process_message(
+ message: Mapping[str, Any]
+) -> tuple[IssueOccurrence | None, GroupInfo | None] | None:
"""
:raises InvalidEventPayloadError: when the message is invalid
:raises EventLookupError: when the provided event_id in the message couldn't be found.
@@ -275,6 +277,9 @@ def _process_message(message: Mapping[str, Any]) -> tuple[IssueOccurrence, Group
payload_type = message.get("payload_type", PayloadType.OCCURRENCE.value)
if payload_type == PayloadType.STATUS_CHANGE.value:
group = process_status_change_message(message, txn)
+ if not group:
+ return None
+
return None, GroupInfo(group=group, is_new=False, is_regression=False)
elif payload_type == PayloadType.OCCURRENCE.value:
return process_occurrence_message(message, txn)
@@ -287,4 +292,4 @@ def _process_message(message: Mapping[str, Any]) -> tuple[IssueOccurrence, Group
except (ValueError, KeyError) as e:
txn.set_tag("result", "error")
raise InvalidEventPayloadError(e)
- return
+ return None
diff --git a/src/sentry/issues/producer.py b/src/sentry/issues/producer.py
index 5acfef85adcf18..3933af8cc19ae0 100644
--- a/src/sentry/issues/producer.py
+++ b/src/sentry/issues/producer.py
@@ -1,14 +1,17 @@
from __future__ import annotations
import logging
+import sys
from collections.abc import MutableMapping
from typing import Any, cast
-from arroyo import Topic
+from arroyo import Topic as ArroyoTopic
from arroyo.backends.kafka import KafkaPayload, KafkaProducer, build_kafka_configuration
from arroyo.types import Message, Value
+from confluent_kafka import KafkaException
from django.conf import settings
+from sentry.conf.types.kafka_definition import Topic
from sentry.issues.issue_occurrence import IssueOccurrence
from sentry.issues.run import process_message
from sentry.issues.status_change_message import StatusChangeMessage
@@ -33,7 +36,7 @@ class PayloadType(ValueEqualityEnum):
def _get_occurrence_producer() -> KafkaProducer:
- cluster_name = get_topic_definition(settings.KAFKA_INGEST_OCCURRENCES)["cluster"]
+ cluster_name = get_topic_definition(Topic.INGEST_OCCURRENCES)["cluster"]
producer_config = get_kafka_producer_cluster_options(cluster_name)
producer_config.pop("compression.type", None)
producer_config.pop("message.max.bytes", None)
@@ -68,7 +71,17 @@ def produce_occurrence_to_kafka(
process_message(Message(Value(payload=payload, committable={})))
return
- _occurrence_producer.produce(Topic(settings.KAFKA_INGEST_OCCURRENCES), payload)
+ try:
+ _occurrence_producer.produce(ArroyoTopic(settings.KAFKA_INGEST_OCCURRENCES), payload)
+ except KafkaException:
+ logger.exception(
+ "Failed to send occurrence to issue platform",
+ extra={
+ "total_payload_size": sys.getsizeof(payload),
+ "total_payload_data_size": sys.getsizeof(payload_data),
+ "payload_data_key_sizes": {k: sys.getsizeof(v) for k, v in payload_data.items()},
+ },
+ )
def _prepare_occurrence_message(
diff --git a/src/sentry/issues/status_change.py b/src/sentry/issues/status_change.py
index 3f2b669a01353d..20595ab88cd651 100644
--- a/src/sentry/issues/status_change.py
+++ b/src/sentry/issues/status_change.py
@@ -31,18 +31,21 @@ def handle_status_update(
is_bulk: bool,
status_details: dict[str, Any],
acting_user: User | None,
- activity_type: str | None,
sender: Any,
) -> ActivityInfo:
"""
Update the status for a list of groups and create entries for Activity and GroupHistory.
+ This currently handles unresolving or ignoring groups.
Returns a tuple of (activity_type, activity_data) for the activity that was created.
"""
activity_data = {}
+ activity_type = (
+ ActivityType.SET_IGNORED.value
+ if new_status == GroupStatus.IGNORED
+ else ActivityType.SET_UNRESOLVED.value
+ )
if new_status == GroupStatus.UNRESOLVED:
- activity_type = ActivityType.SET_UNRESOLVED.value
-
for group in group_list:
if group.status == GroupStatus.IGNORED:
issue_unignored.send_robust(
@@ -64,7 +67,6 @@ def handle_status_update(
ignore_duration = (
status_details.pop("ignoreDuration", None) or status_details.pop("snoozeDuration", None)
) or None
- activity_type = ActivityType.SET_IGNORED.value
activity_data = {
"ignoreCount": status_details.get("ignoreCount", None),
"ignoreDuration": ignore_duration,
diff --git a/src/sentry/issues/status_change_consumer.py b/src/sentry/issues/status_change_consumer.py
index f5606f4d4d998b..0b2230ca1f67ff 100644
--- a/src/sentry/issues/status_change_consumer.py
+++ b/src/sentry/issues/status_change_consumer.py
@@ -5,7 +5,7 @@
from collections.abc import Iterable, Mapping, Sequence
from typing import Any
-from sentry_sdk.tracing import NoOpSpan, Transaction
+from sentry_sdk.tracing import NoOpSpan, Span, Transaction
from sentry.issues.escalating import manage_issue_states
from sentry.issues.status_change_message import StatusChangeMessageData
@@ -174,7 +174,7 @@ def _get_status_change_kwargs(payload: Mapping[str, Any]) -> Mapping[str, Any]:
def process_status_change_message(
- message: Mapping[str, Any], txn: Transaction | NoOpSpan
+ message: Mapping[str, Any], txn: Transaction | NoOpSpan | Span
) -> Group | None:
with metrics.timer("occurrence_consumer._process_message.status_change._get_kwargs"):
kwargs = _get_status_change_kwargs(message)
diff --git a/src/sentry/lang/native/symbolicator.py b/src/sentry/lang/native/symbolicator.py
index 6915afb5867f52..cc668e112fcda5 100644
--- a/src/sentry/lang/native/symbolicator.py
+++ b/src/sentry/lang/native/symbolicator.py
@@ -16,6 +16,7 @@
from sentry import options
from sentry.lang.native.sources import (
get_internal_artifact_lookup_source,
+ get_internal_source,
get_scraping_config,
sources_for_symbolication,
)
@@ -150,7 +151,10 @@ def process_minidump(self, minidump):
}
res = self._process(
- "process_minidump", "minidump", data=data, files={"upload_file_minidump": minidump}
+ "process_minidump",
+ "minidump",
+ data=data,
+ files={"upload_file_minidump": minidump},
)
return process_response(res)
@@ -176,7 +180,10 @@ def process_payload(self, stacktraces, modules, signal=None, apply_source_contex
scraping_config = get_scraping_config(self.project)
json = {
"sources": sources,
- "options": {"dif_candidates": True, "apply_source_context": apply_source_context},
+ "options": {
+ "dif_candidates": True,
+ "apply_source_context": apply_source_context,
+ },
"stacktraces": stacktraces,
"modules": modules,
"scraping": scraping_config,
@@ -207,6 +214,39 @@ def process_js(self, stacktraces, modules, release, dist, apply_source_context=T
return self._process("symbolicate_js_stacktraces", "symbolicate-js", json=json)
+ def process_jvm(
+ self,
+ exceptions,
+ stacktraces,
+ modules,
+ release_package,
+ apply_source_context=True,
+ ):
+ """
+ Process a JVM event by remapping its frames and exceptions with
+ ProGuard.
+
+ :param exceptions: The event's exceptions. These must contain a `type` and a `module`.
+ :param stacktraces: The event's stacktraces. Frames must contain a `function` and a `module`.
+ :param modules: ProGuard modules to use for deobfuscation. They must contain a `uuid`.
+ :param release_package: The name of the release's package. This is optional.
+ :param apply_source_context: Whether to add source context to frames.
+ """
+ source = get_internal_source(self.project)
+
+ json = {
+ "sources": [source],
+ "exceptions": exceptions,
+ "stacktraces": stacktraces,
+ "modules": modules,
+ "options": {"apply_source_context": apply_source_context},
+ }
+
+ if release_package is not None:
+ json["release_package"] = release_package
+
+ return self._process("symbolicate_jvm_stacktraces", "symbolicate-jvm", json=json)
+
class TaskIdNotFound(Exception):
pass
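A hedged usage sketch of the new process_jvm method, assuming `symbolicator` is an instance of the class the method is defined on. The required keys (type/module for exceptions, function/module for frames, uuid for modules) come from the docstring above; the surrounding frame structure and the concrete values are illustrative only:

    symbolicator.process_jvm(
        exceptions=[{"type": "RuntimeException", "module": "java.lang"}],
        stacktraces=[{"frames": [{"function": "onCreate", "module": "com.example.MainActivity"}]}],
        modules=[{"uuid": "6b05e7e2-2a9c-4a31-9b77-3ef1aafe0f0d"}],
        release_package="com.example.app",  # optional; omitted from the payload when None
        apply_source_context=True,
    )
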
diff --git a/src/sentry/migrations/0001_squashed_0484_break_org_member_user_fk.py b/src/sentry/migrations/0001_squashed_0484_break_org_member_user_fk.py
index 70454ff2713be0..f0f9eded8e110b 100644
--- a/src/sentry/migrations/0001_squashed_0484_break_org_member_user_fk.py
+++ b/src/sentry/migrations/0001_squashed_0484_break_org_member_user_fk.py
@@ -9158,7 +9158,9 @@ class Migration(CheckedMigration):
(
"environment",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
- on_delete=django.db.models.deletion.CASCADE, to="sentry.Environment"
+ on_delete=django.db.models.deletion.CASCADE,
+ to="sentry.Environment",
+ db_constraint=False,
),
),
(
diff --git a/src/sentry/migrations/0361_monitor_environment.py b/src/sentry/migrations/0361_monitor_environment.py
index c5bb4e0ea00c29..829c4cee7fe621 100644
--- a/src/sentry/migrations/0361_monitor_environment.py
+++ b/src/sentry/migrations/0361_monitor_environment.py
@@ -43,7 +43,9 @@ class Migration(CheckedMigration):
(
"environment",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
- on_delete=django.db.models.deletion.CASCADE, to="sentry.Environment"
+ on_delete=django.db.models.deletion.CASCADE,
+ to="sentry.Environment",
+ db_constraint=False,
),
),
(
diff --git a/src/sentry/migrations/0651_enable_activated_alert_rules.py b/src/sentry/migrations/0651_enable_activated_alert_rules.py
index 04968bb479fcda..043020cb96ab70 100644
--- a/src/sentry/migrations/0651_enable_activated_alert_rules.py
+++ b/src/sentry/migrations/0651_enable_activated_alert_rules.py
@@ -6,7 +6,7 @@
import sentry.db.models.fields.bounded
import sentry.db.models.fields.foreignkey
-import sentry.incidents.models
+import sentry.incidents.models.alert_rule
from sentry.new_migrations.migrations import CheckedMigration
@@ -45,7 +45,7 @@ class Migration(CheckedMigration):
model_name="alertrule",
name="monitor_type",
field=models.IntegerField(
- default=sentry.incidents.models.AlertRuleMonitorType.CONTINUOUS.value
+ default=sentry.incidents.models.alert_rule.AlertRuleMonitorType.CONTINUOUS.value
),
),
],
diff --git a/src/sentry/migrations/0660_fix_cron_monitor_invalid_orgs.py b/src/sentry/migrations/0660_fix_cron_monitor_invalid_orgs.py
index b7b49e554bf5b0..a23e9a672ef6a1 100644
--- a/src/sentry/migrations/0660_fix_cron_monitor_invalid_orgs.py
+++ b/src/sentry/migrations/0660_fix_cron_monitor_invalid_orgs.py
@@ -17,8 +17,13 @@ def fix_cron_monitor_invalid_orgs(apps, schema_editor) -> None:
continue
if project.organization_id != monitor.organization_id:
- monitor.organization_id = project.organization_id
- monitor.save(update_fields=["organization_id"])
+ if Monitor.objects.filter(organization_id=project.organization_id, slug=monitor.slug):
+            # There are only a small number of these and, due to the way ingest works,
+            # they can't receive check-ins, so they're effectively broken. Just delete them.
+ monitor.delete()
+ else:
+ monitor.organization_id = project.organization_id
+ monitor.save(update_fields=["organization_id", "slug"])
class Migration(CheckedMigration):
diff --git a/src/sentry/migrations/0661_artifactbundleindex_cleanup_step2.py b/src/sentry/migrations/0661_artifactbundleindex_cleanup_step2.py
new file mode 100644
index 00000000000000..e5c5f50d959fe4
--- /dev/null
+++ b/src/sentry/migrations/0661_artifactbundleindex_cleanup_step2.py
@@ -0,0 +1,61 @@
+# Generated by Django 5.0.2 on 2024-03-04 10:43
+
+from django.db import migrations
+
+from sentry.new_migrations.migrations import CheckedMigration
+
+
+class Migration(CheckedMigration):
+ # This flag is used to mark that a migration shouldn't be automatically run in production. For
+ # the most part, this should only be used for operations where it's safe to run the migration
+ # after your code has deployed. So this should not be used for most operations that alter the
+ # schema of a table.
+ # Here are some things that make sense to mark as dangerous:
+ # - Large data migrations. Typically we want these to be run manually by ops so that they can
+ # be monitored and not block the deploy for a long period of time while they run.
+ # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
+ # have ops run this and not block the deploy. Note that while adding an index is a schema
+ # change, it's completely safe to run the operation after the code has deployed.
+ is_dangerous = False
+
+ dependencies = [
+ ("sentry", "0660_fix_cron_monitor_invalid_orgs"),
+ ]
+
+ operations = [
+ migrations.SeparateDatabaseAndState(
+ database_operations=[],
+ state_operations=[
+ migrations.RemoveField(
+ model_name="flatfileindexstate",
+ name="flat_file_index",
+ ),
+ migrations.AlterUniqueTogether(
+ name="flatfileindexstate",
+ unique_together=None,
+ ),
+ migrations.RemoveField(
+ model_name="flatfileindexstate",
+ name="artifact_bundle",
+ ),
+ migrations.RemoveField(
+ model_name="artifactbundleindex",
+ name="date_last_modified",
+ ),
+ migrations.RemoveField(
+ model_name="artifactbundleindex",
+ name="dist_name",
+ ),
+ migrations.RemoveField(
+ model_name="artifactbundleindex",
+ name="release_name",
+ ),
+ migrations.DeleteModel(
+ name="ArtifactBundleFlatFileIndex",
+ ),
+ migrations.DeleteModel(
+ name="FlatFileIndexState",
+ ),
+ ],
+ )
+ ]
diff --git a/src/sentry/migrations/0662_monitor_drop_last_state_change.py b/src/sentry/migrations/0662_monitor_drop_last_state_change.py
new file mode 100644
index 00000000000000..46974b99824d80
--- /dev/null
+++ b/src/sentry/migrations/0662_monitor_drop_last_state_change.py
@@ -0,0 +1,32 @@
+# Generated by Django 5.0.2 on 2024-03-05 21:47
+
+from django.db import migrations
+
+from sentry.new_migrations.migrations import CheckedMigration
+
+
+class Migration(CheckedMigration):
+ # This flag is used to mark that a migration shouldn't be automatically run in production. For
+ # the most part, this should only be used for operations where it's safe to run the migration
+ # after your code has deployed. So this should not be used for most operations that alter the
+ # schema of a table.
+ # Here are some things that make sense to mark as dangerous:
+ # - Large data migrations. Typically we want these to be run manually by ops so that they can
+ # be monitored and not block the deploy for a long period of time while they run.
+ # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
+ # have ops run this and not block the deploy. Note that while adding an index is a schema
+ # change, it's completely safe to run the operation after the code has deployed.
+ is_dangerous = False
+
+ dependencies = [
+ ("sentry", "0661_artifactbundleindex_cleanup_step2"),
+ ]
+
+ operations = [
+ migrations.SeparateDatabaseAndState(
+ database_operations=[],
+ state_operations=[
+ migrations.RemoveField(model_name="monitorenvironment", name="last_state_change"),
+ ],
+ )
+ ]
diff --git a/src/sentry/migrations/0663_artifactbundleindex_cleanup_step3.py b/src/sentry/migrations/0663_artifactbundleindex_cleanup_step3.py
new file mode 100644
index 00000000000000..895c1324341777
--- /dev/null
+++ b/src/sentry/migrations/0663_artifactbundleindex_cleanup_step3.py
@@ -0,0 +1,55 @@
+# Generated by Django 5.0.2 on 2024-03-04 10:50
+
+from django.db import migrations
+
+from sentry.new_migrations.migrations import CheckedMigration
+
+
+class Migration(CheckedMigration):
+ # This flag is used to mark that a migration shouldn't be automatically run in production. For
+ # the most part, this should only be used for operations where it's safe to run the migration
+ # after your code has deployed. So this should not be used for most operations that alter the
+ # schema of a table.
+ # Here are some things that make sense to mark as dangerous:
+ # - Large data migrations. Typically we want these to be run manually by ops so that they can
+ # be monitored and not block the deploy for a long period of time while they run.
+ # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
+ # have ops run this and not block the deploy. Note that while adding an index is a schema
+ # change, it's completely safe to run the operation after the code has deployed.
+ is_dangerous = False
+
+ dependencies = [
+ ("sentry", "0662_monitor_drop_last_state_change"),
+ ]
+
+ operations = [
+ migrations.RunSQL(
+ """
+ DROP TABLE "sentry_artifactbundleflatfileindex";
+ """,
+ # We just create a fake table here so that the DROP will work if we roll back the migration.
+ reverse_sql="CREATE TABLE sentry_artifactbundleflatfileindex (fake_col int);",
+ hints={"tables": ["sentry_artifactbundleflatfileindex"]},
+ ),
+ migrations.RunSQL(
+ """
+ DROP TABLE "sentry_flatfileindexstate";
+ """,
+ # We just create a fake table here so that the DROP will work if we roll back the migration.
+ reverse_sql="CREATE TABLE sentry_flatfileindexstate (fake_col int);",
+ hints={"tables": ["sentry_flatfileindexstate"]},
+ ),
+ migrations.RunSQL(
+ """
+ ALTER TABLE "sentry_artifactbundleindex" DROP COLUMN "release_name";
+ ALTER TABLE "sentry_artifactbundleindex" DROP COLUMN "dist_name";
+ ALTER TABLE "sentry_artifactbundleindex" DROP COLUMN "date_last_modified";
+ """,
+ reverse_sql="""
+ ALTER TABLE "sentry_artifactbundleindex" ADD COLUMN "release_name" varchar(250) NULL;
+ ALTER TABLE "sentry_artifactbundleindex" ADD COLUMN "dist_name" varchar(64) NULL;
+ ALTER TABLE "sentry_artifactbundleindex" ADD COLUMN "date_last_modified" timestamptz NULL;
+ """,
+ hints={"tables": ["sentry_artifactbundleindex"]},
+ ),
+ ]
diff --git a/src/sentry/migrations/0664_create_new_broken_monitor_detection_table.py b/src/sentry/migrations/0664_create_new_broken_monitor_detection_table.py
new file mode 100644
index 00000000000000..8ba0f377138103
--- /dev/null
+++ b/src/sentry/migrations/0664_create_new_broken_monitor_detection_table.py
@@ -0,0 +1,51 @@
+# Generated by Django 5.0.2 on 2024-03-06 18:54
+
+import django.db.models.deletion
+from django.db import migrations, models
+
+import sentry.db.models.fields.bounded
+import sentry.db.models.fields.foreignkey
+from sentry.new_migrations.migrations import CheckedMigration
+
+
+class Migration(CheckedMigration):
+ # This flag is used to mark that a migration shouldn't be automatically run in production. For
+ # the most part, this should only be used for operations where it's safe to run the migration
+ # after your code has deployed. So this should not be used for most operations that alter the
+ # schema of a table.
+ # Here are some things that make sense to mark as dangerous:
+ # - Large data migrations. Typically we want these to be run manually by ops so that they can
+ # be monitored and not block the deploy for a long period of time while they run.
+ # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
+ # have ops run this and not block the deploy. Note that while adding an index is a schema
+ # change, it's completely safe to run the operation after the code has deployed.
+ is_dangerous = False
+
+ dependencies = [
+ ("sentry", "0663_artifactbundleindex_cleanup_step3"),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name="MonitorEnvBrokenDetection",
+ fields=[
+ (
+ "id",
+ sentry.db.models.fields.bounded.BoundedBigAutoField(
+ primary_key=True, serialize=False
+ ),
+ ),
+ ("detection_timestamp", models.DateTimeField(auto_now_add=True)),
+ ("user_notified_timestamp", models.DateTimeField(db_index=True, null=True)),
+ (
+ "monitor_incident",
+ sentry.db.models.fields.foreignkey.FlexibleForeignKey(
+ on_delete=django.db.models.deletion.CASCADE, to="sentry.monitorincident"
+ ),
+ ),
+ ],
+ options={
+ "db_table": "sentry_monitorenvbrokendetection",
+ },
+ ),
+ ]
diff --git a/src/sentry/migrations/0665_monitor_drop_last_state_change_db.py b/src/sentry/migrations/0665_monitor_drop_last_state_change_db.py
new file mode 100644
index 00000000000000..290e08308f35b3
--- /dev/null
+++ b/src/sentry/migrations/0665_monitor_drop_last_state_change_db.py
@@ -0,0 +1,40 @@
+# Generated by Django 5.0.2 on 2024-03-06 18:06
+
+from django.db import migrations
+
+from sentry.new_migrations.migrations import CheckedMigration
+
+
+class Migration(CheckedMigration):
+ # This flag is used to mark that a migration shouldn't be automatically run in production. For
+ # the most part, this should only be used for operations where it's safe to run the migration
+ # after your code has deployed. So this should not be used for most operations that alter the
+ # schema of a table.
+ # Here are some things that make sense to mark as dangerous:
+ # - Large data migrations. Typically we want these to be run manually by ops so that they can
+ # be monitored and not block the deploy for a long period of time while they run.
+ # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
+ # have ops run this and not block the deploy. Note that while adding an index is a schema
+ # change, it's completely safe to run the operation after the code has deployed.
+ is_dangerous = False
+
+ dependencies = [
+ ("sentry", "0664_create_new_broken_monitor_detection_table"),
+ ]
+
+ operations = [
+ migrations.SeparateDatabaseAndState(
+ database_operations=[
+ migrations.RunSQL(
+ """
+ ALTER TABLE "sentry_monitorenvironment" DROP COLUMN "last_state_change";
+ """,
+ reverse_sql="""
+ ALTER TABLE "sentry_monitorenvironment" ADD COLUMN "last_state_change" timestamptz;
+ """,
+ hints={"tables": ["sentry_monitorenvironment"]},
+ )
+ ],
+ state_operations=[],
+ )
+ ]
diff --git a/src/sentry/migrations/0666_monitor_incident_default_grouphash.py b/src/sentry/migrations/0666_monitor_incident_default_grouphash.py
new file mode 100644
index 00000000000000..053a8135aedb07
--- /dev/null
+++ b/src/sentry/migrations/0666_monitor_incident_default_grouphash.py
@@ -0,0 +1,32 @@
+# Generated by Django 5.0.2 on 2024-03-06 21:13
+
+from django.db import migrations, models
+
+import sentry.monitors.models
+from sentry.new_migrations.migrations import CheckedMigration
+
+
+class Migration(CheckedMigration):
+ # This flag is used to mark that a migration shouldn't be automatically run in production. For
+ # the most part, this should only be used for operations where it's safe to run the migration
+ # after your code has deployed. So this should not be used for most operations that alter the
+ # schema of a table.
+ # Here are some things that make sense to mark as dangerous:
+ # - Large data migrations. Typically we want these to be run manually by ops so that they can
+ # be monitored and not block the deploy for a long period of time while they run.
+ # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
+ # have ops run this and not block the deploy. Note that while adding an index is a schema
+ # change, it's completely safe to run the operation after the code has deployed.
+ is_dangerous = False
+
+ dependencies = [
+ ("sentry", "0665_monitor_drop_last_state_change_db"),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name="monitorincident",
+ name="grouphash",
+ field=models.CharField(default=sentry.monitors.models.default_grouphash, max_length=32),
+ ),
+ ]
diff --git a/src/sentry/models/artifactbundle.py b/src/sentry/models/artifactbundle.py
index 7eea9de67834ee..93a4bc253fd01f 100644
--- a/src/sentry/models/artifactbundle.py
+++ b/src/sentry/models/artifactbundle.py
@@ -134,40 +134,6 @@ def delete_file_for_artifact_bundle(instance, **kwargs):
post_delete.connect(delete_file_for_artifact_bundle, sender=ArtifactBundle)
-@region_silo_only_model
-class ArtifactBundleFlatFileIndex(Model):
- __relocation_scope__ = RelocationScope.Excluded
-
- project_id = BoundedBigIntegerField(db_index=True)
- release_name = models.CharField(max_length=250)
- dist_name = models.CharField(max_length=64, default=NULL_STRING)
- date_added = models.DateTimeField(default=timezone.now)
-
- class Meta:
- app_label = "sentry"
- db_table = "sentry_artifactbundleflatfileindex"
-
- unique_together = (("project_id", "release_name", "dist_name"),)
-
-
-@region_silo_only_model
-class FlatFileIndexState(Model):
- __relocation_scope__ = RelocationScope.Excluded
-
- flat_file_index = FlexibleForeignKey("sentry.ArtifactBundleFlatFileIndex", db_constraint=False)
- artifact_bundle = FlexibleForeignKey("sentry.ArtifactBundle", db_constraint=False)
- indexing_state = models.IntegerField(
- choices=ArtifactBundleIndexingState.choices(), db_index=True
- )
- date_added = models.DateTimeField(default=timezone.now)
-
- class Meta:
- app_label = "sentry"
- db_table = "sentry_flatfileindexstate"
-
- unique_together = (("flat_file_index", "artifact_bundle"),)
-
-
@region_silo_only_model
class ArtifactBundleIndex(Model):
__relocation_scope__ = RelocationScope.Excluded
@@ -177,13 +143,6 @@ class ArtifactBundleIndex(Model):
url = models.TextField()
date_added = models.DateTimeField(default=timezone.now)
- # TODO: legacy fields:
- # These will eventually be removed in a migration, as they can be joined
- # via the `{Release,}ArtifactBundle` tables.
- release_name = models.CharField(max_length=250, null=True)
- dist_name = models.CharField(max_length=64, null=True, default=NULL_STRING)
- date_last_modified = models.DateTimeField(null=True, default=timezone.now)
-
class Meta:
app_label = "sentry"
db_table = "sentry_artifactbundleindex"
diff --git a/src/sentry/models/files/abstractfileblob.py b/src/sentry/models/files/abstractfileblob.py
index 6f320c4345b2de..6598e1da17a4c5 100644
--- a/src/sentry/models/files/abstractfileblob.py
+++ b/src/sentry/models/files/abstractfileblob.py
@@ -15,10 +15,9 @@
from sentry.db.models import BoundedPositiveIntegerField, Model
from sentry.models.files.abstractfileblobowner import AbstractFileBlobOwner
from sentry.models.files.utils import (
+ get_and_optionally_update_blob,
get_size_and_checksum,
get_storage,
- lock_blob,
- locked_blob,
nooplogger,
)
from sentry.utils import metrics
@@ -68,10 +67,9 @@ def from_files(cls, files, organization=None, logger=nooplogger):
checksums_seen = set()
blobs_to_save = []
- locks = set()
semaphore = Semaphore(value=MULTI_BLOB_UPLOAD_CONCURRENCY)
- def _upload_and_pend_chunk(fileobj, size, checksum, lock):
+ def _upload_and_pend_chunk(fileobj, size, checksum):
logger.debug(
"FileBlob.from_files._upload_and_pend_chunk.start",
extra={"checksum": checksum, "size": size},
@@ -80,7 +78,7 @@ def _upload_and_pend_chunk(fileobj, size, checksum, lock):
blob.path = cls.generate_unique_path()
storage = get_storage(cls._storage_config())
storage.save(blob.path, fileobj)
- blobs_to_save.append((blob, lock))
+ blobs_to_save.append(blob)
metrics.distribution(
"filestore.blob-size", size, tags={"function": "from_files"}, unit="byte"
)
@@ -123,13 +121,11 @@ def _save_blob(blob):
def _flush_blobs():
while True:
try:
- blob, lock = blobs_to_save.pop()
+ blob = blobs_to_save.pop()
except IndexError:
break
_save_blob(blob)
- lock.__exit__(None, None, None)
- locks.discard(lock)
semaphore.release()
try:
@@ -152,19 +148,13 @@ def _flush_blobs():
continue
checksums_seen.add(checksum)
- # Check if we need to lock the blob. If we get a result back
+ # Check if we need to upload the blob. If we get a result back
# here it means the blob already exists.
- lock = locked_blob(cls, size, checksum, logger=logger)
- existing = lock.__enter__()
+ existing = get_and_optionally_update_blob(cls, checksum)
if existing is not None:
- lock.__exit__(None, None, None)
_ensure_blob_owned(existing)
continue
- # Remember the lock to force unlock all at the end if we
- # encounter any difficulties.
- locks.add(lock)
-
- # Otherwise we leave the blob locked and submit the task.
+ # Otherwise the blob does not exist yet and we submit the upload task.
# We use the semaphore to ensure we never schedule too
# many. The upload will be done with a certain amount
@@ -172,16 +162,11 @@ def _flush_blobs():
# `_flush_blobs` call will take all those uploaded
# blobs and associate them with the database.
semaphore.acquire()
- exe.submit(_upload_and_pend_chunk(fileobj, size, checksum, lock))
+ exe.submit(_upload_and_pend_chunk(fileobj, size, checksum))
logger.debug("FileBlob.from_files.end", extra={"checksum": reference_checksum})
_flush_blobs()
finally:
- for lock in locks:
- try:
- lock.__exit__(None, None, None)
- except Exception:
- pass
logger.debug("FileBlob.from_files.end")
@classmethod
@@ -194,24 +179,22 @@ def from_file(cls, fileobj, logger=nooplogger) -> Self:
size, checksum = get_size_and_checksum(fileobj)
- # TODO(dcramer): the database here is safe, but if this lock expires
- # and duplicate files are uploaded then we need to prune one
- with locked_blob(cls, size, checksum, logger=logger) as existing:
- if existing is not None:
- return existing
+ existing = get_and_optionally_update_blob(cls, checksum)
+ if existing is not None:
+ return existing
- blob = cls(size=size, checksum=checksum)
- blob.path = cls.generate_unique_path()
- storage = get_storage(cls._storage_config())
- storage.save(blob.path, fileobj)
- try:
- blob.save()
- except IntegrityError:
- # see `_save_blob` above
- metrics.incr("filestore.upload_race", sample_rate=1.0)
- saved_path = blob.path
- blob = cls.objects.get(checksum=checksum)
- storage.delete(saved_path)
+ blob = cls(size=size, checksum=checksum)
+ blob.path = cls.generate_unique_path()
+ storage = get_storage(cls._storage_config())
+ storage.save(blob.path, fileobj)
+ try:
+ blob.save()
+ except IntegrityError:
+ # see `_save_blob` above
+ metrics.incr("filestore.upload_race", sample_rate=1.0)
+ saved_path = blob.path
+ blob = cls.objects.get(checksum=checksum)
+ storage.delete(saved_path)
metrics.distribution("filestore.blob-size", size, unit="byte")
logger.debug("FileBlob.from_file.end")
@@ -235,11 +218,7 @@ def delete(self, *args, **kwargs):
self.DELETE_FILE_TASK.apply_async(
kwargs={"path": self.path, "checksum": self.checksum}, countdown=60
)
- lock = lock_blob(
- self.checksum, "fileblob_upload_delete", metric_instance="lock.fileblob.delete"
- )
- with lock:
- super().delete(*args, **kwargs)
+ super().delete(*args, **kwargs)
def getfile(self):
"""
diff --git a/src/sentry/models/files/utils.py b/src/sentry/models/files/utils.py
index 93255c3db764af..e163199521fab2 100644
--- a/src/sentry/models/files/utils.py
+++ b/src/sentry/models/files/utils.py
@@ -2,29 +2,20 @@
import os
import time
-from contextlib import contextmanager
from datetime import timedelta
from hashlib import sha1
+from typing import Any
from django.conf import settings
from django.utils import timezone
-from rediscluster import RedisCluster
-from sentry import options
-from sentry.locks import locks
-from sentry.utils import redis
from sentry.utils.imports import import_string
-from sentry.utils.retries import TimedRetryPolicy
ONE_DAY = 60 * 60 * 24
ONE_DAY_AND_A_HALF = int(ONE_DAY * 1.5)
HALF_DAY = timedelta(hours=12)
-UPLOAD_RETRY_TIME = getattr(settings, "SENTRY_UPLOAD_RETRY_TIME", 60) # 1min
-
DEFAULT_BLOB_SIZE = 1024 * 1024 # one mb
-CHUNK_STATE_HEADER = "__state"
-
MAX_FILE_SIZE = 2**31 # 2GB is the maximum offset supported by fileblob
@@ -54,65 +45,23 @@ def get_size_and_checksum(fileobj, logger=nooplogger):
return size, checksum.hexdigest()
-@contextmanager
-def lock_blob(checksum: str, name: str, metric_instance: str | None = None):
- if not options.get("fileblob.upload.use_lock"):
- yield
- return
-
- lock = locks.get(f"fileblob:upload:{checksum}", duration=UPLOAD_RETRY_TIME, name=name)
- with TimedRetryPolicy(UPLOAD_RETRY_TIME, metric_instance=metric_instance)(lock.acquire):
- yield
-
-
-def _get_redis_for_blobs() -> RedisCluster:
- cluster_key = settings.SENTRY_DEBUG_FILES_REDIS_CLUSTER
- return redis.redis_clusters.get(cluster_key) # type: ignore[return-value]
-
-
-def _redis_key_for_blob(file_blob_model, checksum):
- return f"fileblob:{file_blob_model.__name__}:{checksum}"
-
-
-def _get_cached_blob_id(file_blob_model, checksum):
- if not options.get("fileblob.upload.use_blobid_cache"):
- return None
- redis = _get_redis_for_blobs()
- if id := redis.get(_redis_key_for_blob(file_blob_model, checksum)):
- return int(id)
- return None
-
-
-def cache_blob_id(file_blob_model, checksum, id):
- if not options.get("fileblob.upload.use_blobid_cache"):
- return
- redis = _get_redis_for_blobs()
- redis.set(_redis_key_for_blob(file_blob_model, checksum), str(id), ex=HALF_DAY.seconds)
-
+def get_and_optionally_update_blob(file_blob_model: Any, checksum: str):
+ """
+ Returns the `FileBlob` (actually generic `file_blob_model`) identified by its `checksum`.
+ This will also bump its `timestamp` in a debounced fashion,
+ in order to prevent it from being cleaned up.
+ """
+ try:
+ existing = file_blob_model.objects.get(checksum=checksum)
-@contextmanager
-def locked_blob(file_blob_model, size, checksum, logger=nooplogger):
- if cached_id := _get_cached_blob_id(file_blob_model, checksum):
- yield file_blob_model(id=cached_id, size=size, checksum=checksum)
- return
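+ # Debounce: only bump the timestamp when it is at least HALF_DAY old,
+ # so frequent lookups of the same blob do not write on every call.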
+ now = timezone.now()
+ threshold = now - HALF_DAY
+ if existing.timestamp <= threshold:
+ existing.update(timestamp=now)
+ except file_blob_model.DoesNotExist:
+ existing = None
- logger.debug("locked_blob.start", extra={"checksum": checksum})
- lock = lock_blob(checksum, "fileblob_upload_model", metric_instance="lock.fileblob.upload")
- with lock:
- logger.debug("locked_blob.acquired", extra={"checksum": checksum})
- # test for presence
- try:
- existing = file_blob_model.objects.get(checksum=checksum)
- cache_blob_id(file_blob_model, checksum, existing.id)
-
- now = timezone.now()
- threshold = now - HALF_DAY
- if existing.timestamp <= threshold:
- existing.update(timestamp=now)
- except file_blob_model.DoesNotExist:
- existing = None
- yield existing
- logger.debug("locked_blob.end", extra={"checksum": checksum})
+ return existing
class AssembleChecksumMismatch(Exception):
diff --git a/src/sentry/models/project.py b/src/sentry/models/project.py
index f532388d3fc89d..b09e78cbdd25dd 100644
--- a/src/sentry/models/project.py
+++ b/src/sentry/models/project.py
@@ -312,8 +312,9 @@ def __str__(self):
def next_short_id(self, delta: int = 1) -> int:
from sentry.models.counter import Counter
- with sentry_sdk.start_span(op="project.next_short_id") as span, metrics.timer(
- "project.next_short_id"
+ with (
+ sentry_sdk.start_span(op="project.next_short_id") as span,
+ metrics.timer("project.next_short_id"),
):
span.set_data("project_id", self.id)
span.set_data("project_slug", self.slug)
@@ -412,7 +413,7 @@ def get_full_name(self):
return self.slug
def transfer_to(self, organization):
- from sentry.incidents.models import AlertRule
+ from sentry.incidents.models.alert_rule import AlertRule
from sentry.models.actor import ACTOR_TYPES
from sentry.models.environment import Environment, EnvironmentProject
from sentry.models.integrations.external_issue import ExternalIssue
@@ -547,7 +548,7 @@ def add_team(self, team):
return True
def remove_team(self, team):
- from sentry.incidents.models import AlertRule
+ from sentry.incidents.models.alert_rule import AlertRule
from sentry.models.projectteam import ProjectTeam
from sentry.models.rule import Rule
diff --git a/src/sentry/models/releases/release_project.py b/src/sentry/models/releases/release_project.py
index 43bc412a0ed269..4e3a38a50148dc 100644
--- a/src/sentry/models/releases/release_project.py
+++ b/src/sentry/models/releases/release_project.py
@@ -49,7 +49,7 @@ def _subscribe_project_to_alert_rule(
NOTE: import AlertRule model here to avoid circular dependency
TODO: move once AlertRule has been split into separate subdirectory files
"""
- from sentry.incidents.models import AlertRule
+ from sentry.incidents.models.alert_rule import AlertRule
query_extra = f"release:{release.version} AND event.timestamp:>{timezone.now().isoformat()}"
return AlertRule.objects.conditionally_subscribe_project_to_alert_rules(
diff --git a/src/sentry/monitors/endpoints/base.py b/src/sentry/monitors/endpoints/base.py
index 7fad81aa44ccc7..7a219b63cb10f0 100644
--- a/src/sentry/monitors/endpoints/base.py
+++ b/src/sentry/monitors/endpoints/base.py
@@ -24,6 +24,8 @@
from sentry.monitors.models import CheckInStatus, Monitor, MonitorCheckIn, MonitorEnvironment
from sentry.utils.sdk import bind_organization_context, configure_scope
+DEPRECATED_INGEST_API_MESSAGE = "We have removed this deprecated API. Please migrate to using DSN instead: https://docs.sentry.io/product/crons/legacy-endpoint-migration/#am-i-using-legacy-endpoints"
+
class OrganizationMonitorPermission(OrganizationPermission):
scope_map = {
diff --git a/src/sentry/monitors/endpoints/monitor_ingest_checkin_details.py b/src/sentry/monitors/endpoints/monitor_ingest_checkin_details.py
index da7486bb298845..3918e78013dd1d 100644
--- a/src/sentry/monitors/endpoints/monitor_ingest_checkin_details.py
+++ b/src/sentry/monitors/endpoints/monitor_ingest_checkin_details.py
@@ -27,7 +27,7 @@
from ... import features
from ...api.exceptions import ResourceDoesNotExist
-from .base import MonitorIngestEndpoint
+from .base import DEPRECATED_INGEST_API_MESSAGE, MonitorIngestEndpoint
@region_silo_endpoint
@@ -72,7 +72,7 @@ def put(
the most recent (by creation date) check-in which is still mutable (not marked as finished).
"""
if features.has("organizations:crons-disable-ingest-endpoints", project.organization):
- raise ResourceDoesNotExist
+ raise ResourceDoesNotExist(detail=DEPRECATED_INGEST_API_MESSAGE)
if checkin.status in CheckInStatus.FINISHED_VALUES:
return self.respond(status=400)
diff --git a/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py b/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py
index fff161979939e0..9c4898790e77f1 100644
--- a/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py
+++ b/src/sentry/monitors/endpoints/monitor_ingest_checkin_index.py
@@ -39,7 +39,7 @@
from sentry.utils import metrics
from ...api.exceptions import ResourceDoesNotExist
-from .base import MonitorIngestEndpoint
+from .base import DEPRECATED_INGEST_API_MESSAGE, MonitorIngestEndpoint
logger = logging.getLogger(__name__)
@@ -104,7 +104,7 @@ def post(
Note: If a DSN is utilized for authentication, the response will be limited in details.
"""
if features.has("organizations:crons-disable-ingest-endpoints", project.organization):
- raise ResourceDoesNotExist
+ raise ResourceDoesNotExist(detail=DEPRECATED_INGEST_API_MESSAGE)
if monitor and monitor.status in [
ObjectStatus.PENDING_DELETION,
diff --git a/src/sentry/monitors/logic/mark_failed.py b/src/sentry/monitors/logic/mark_failed.py
index 839aa1a6dc2a42..bb4479d5ce88eb 100644
--- a/src/sentry/monitors/logic/mark_failed.py
+++ b/src/sentry/monitors/logic/mark_failed.py
@@ -7,7 +7,6 @@
from django.db.models import Q
from sentry import features
-from sentry.grouping.utils import hash_from_values
from sentry.issues.grouptype import (
MonitorCheckInFailure,
MonitorCheckInMissed,
@@ -70,15 +69,6 @@ def mark_failed(
"next_checkin_latest": next_checkin_latest,
}
- # Additionally update status when not using thresholds. The threshold based
- # failure will only update status once it has passed the threshold.
- if not failure_issue_threshold:
- failed_status_map = {
- CheckInStatus.MISSED: MonitorStatus.MISSED_CHECKIN,
- CheckInStatus.TIMEOUT: MonitorStatus.TIMEOUT,
- }
- field_updates["status"] = failed_status_map.get(failed_checkin.status, MonitorStatus.ERROR)
-
affected = monitors_to_update.update(**field_updates)
# If we did not update the monitor environment it means there was a newer
@@ -95,12 +85,11 @@ def mark_failed(
monitor_env.refresh_from_db()
# Create incidents + issues
- use_issue_platform = False
try:
organization = Organization.objects.get_from_cache(id=monitor_env.monitor.organization_id)
use_issue_platform = features.has("organizations:issue-platform", organization=organization)
except Organization.DoesNotExist:
- pass
+ use_issue_platform = False
if use_issue_platform:
return mark_failed_threshold(failed_checkin, failure_issue_threshold)
@@ -119,24 +108,7 @@ def mark_failed_threshold(failed_checkin: MonitorCheckIn, failure_issue_threshol
# check to see if we need to update the status
if monitor_env.status in [MonitorStatus.OK, MonitorStatus.ACTIVE]:
- # evaluation logic for multiple check-ins
- if failure_issue_threshold > 1:
- # reverse the list after slicing in order to start with oldest check-in
- # use .values() to speed up query
- previous_checkins = list(
- reversed(
- MonitorCheckIn.objects.filter(
- monitor_environment=monitor_env, date_added__lte=failed_checkin.date_added
- )
- .order_by("-date_added")
- .values("id", "date_added", "status")[:failure_issue_threshold]
- )
- )
- # check for any successful previous check-in
- if any([checkin["status"] == CheckInStatus.OK for checkin in previous_checkins]):
- return False
- # if threshold is 1, just use the most recent check-in
- else:
+ if failure_issue_threshold == 1:
previous_checkins = [
{
"id": failed_checkin.id,
@@ -144,26 +116,47 @@ def mark_failed_threshold(failed_checkin: MonitorCheckIn, failure_issue_threshol
"status": failed_checkin.status,
}
]
+ else:
+ previous_checkins = (
+ # Using .values for performance reasons
+ MonitorCheckIn.objects.filter(
+ monitor_environment=monitor_env, date_added__lte=failed_checkin.date_added
+ )
+ .order_by("-date_added")
+ .values("id", "date_added", "status")
+ )
+
+ # reverse the list after slicing in order to start with oldest check-in
+ previous_checkins = list(reversed(previous_checkins[:failure_issue_threshold]))
+
+ # If any check-in within the failure threshold was successful we have
+ # NOT reached an incident state
+ if any([checkin["status"] == CheckInStatus.OK for checkin in previous_checkins]):
+ return False
# change monitor status + update fingerprint timestamp
monitor_env.status = MonitorStatus.ERROR
- monitor_env.last_state_change = monitor_env.last_checkin
- monitor_env.save(update_fields=("status", "last_state_change"))
-
- # Do not create incident if monitor is muted
- if not monitor_muted:
- starting_checkin = previous_checkins[0]
-
- # for new incidents, generate a new hash from a uuid to use
- fingerprint = hash_from_values([uuid.uuid4()])
-
- MonitorIncident.objects.create(
- monitor=monitor_env.monitor,
- monitor_environment=monitor_env,
- starting_checkin_id=starting_checkin["id"],
- starting_timestamp=starting_checkin["date_added"],
- grouphash=fingerprint,
- )
+ monitor_env.save(update_fields=("status",))
+
+ # Do not create incident if monitor is muted. This check happens late
+ # as we still want the status to have been updated
+ if monitor_muted:
+ return True
+
+ starting_checkin = previous_checkins[0]
+
+ # For new incidents, generate a uuid as the fingerprint. This is
+ # not derived from any property of the incident and is simply
+ # used to associate the incident with its event occurrences
+ fingerprint = uuid.uuid4().hex
+
+ MonitorIncident.objects.create(
+ monitor=monitor_env.monitor,
+ monitor_environment=monitor_env,
+ starting_checkin_id=starting_checkin["id"],
+ starting_timestamp=starting_checkin["date_added"],
+ grouphash=fingerprint,
+ )
elif monitor_env.status in [
MonitorStatus.ERROR,
MonitorStatus.MISSED_CHECKIN,
@@ -189,9 +182,11 @@ def mark_failed_threshold(failed_checkin: MonitorCheckIn, failure_issue_threshol
if monitor_muted:
return True
- for previous_checkin in previous_checkins:
- checkin_from_db = MonitorCheckIn.objects.get(id=previous_checkin["id"])
- create_issue_platform_occurrence(checkin_from_db, fingerprint)
+ # Do not create event/occurrence if we don't have a fingerprint
+ if fingerprint:
+ checkins = MonitorCheckIn.objects.filter(id__in=[c["id"] for c in previous_checkins])
+ for previous_checkin in checkins:
+ create_issue_platform_occurrence(previous_checkin, fingerprint)
monitor_environment_failed.send(monitor_environment=monitor_env, sender=type(monitor_env))
@@ -257,7 +252,7 @@ def create_legacy_event(failed_checkin: MonitorCheckIn):
def create_issue_platform_occurrence(
failed_checkin: MonitorCheckIn,
- fingerprint=None,
+ fingerprint: str,
):
from sentry.issues.issue_occurrence import IssueEvidence, IssueOccurrence
from sentry.issues.producer import PayloadType, produce_occurrence_to_kafka
@@ -278,13 +273,7 @@ def create_issue_platform_occurrence(
resource_id=None,
project_id=monitor_env.monitor.project_id,
event_id=uuid.uuid4().hex,
- fingerprint=[
- fingerprint
- if fingerprint
- else hash_from_values(
- ["monitor", str(monitor_env.monitor.guid), occurrence_data["reason"]]
- )
- ],
+ fingerprint=[fingerprint],
type=occurrence_data["group_type"],
issue_title=f"Monitor failure: {monitor_env.monitor.name}",
subtitle=occurrence_data["subtitle"],
@@ -314,13 +303,7 @@ def create_issue_platform_occurrence(
"contexts": {"monitor": get_monitor_environment_context(monitor_env)},
"environment": monitor_env.get_environment().name,
"event_id": occurrence.event_id,
- "fingerprint": [fingerprint]
- if fingerprint
- else [
- "monitor",
- str(monitor_env.monitor.guid),
- occurrence_data["reason"],
- ],
+ "fingerprint": [fingerprint],
"platform": "other",
"project_id": monitor_env.monitor.project_id,
"received": current_timestamp.isoformat(),
diff --git a/src/sentry/monitors/logic/mark_ok.py b/src/sentry/monitors/logic/mark_ok.py
index e113220a550186..f331d21b4590f2 100644
--- a/src/sentry/monitors/logic/mark_ok.py
+++ b/src/sentry/monitors/logic/mark_ok.py
@@ -62,11 +62,11 @@ def mark_ok(checkin: MonitorCheckIn, ts: datetime):
# Only send an occurrence if we have an active incident
for grouphash in active_incidents.values_list("grouphash", flat=True):
resolve_incident_group(grouphash, checkin.monitor.project_id)
- if active_incidents.update(
+
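+ # Resolve every active incident with this successful check-in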
+ active_incidents.update(
resolving_checkin=checkin,
resolving_timestamp=checkin.date_added,
- ):
- params["last_state_change"] = ts
+ )
else:
# Don't update status if incident isn't recovered
params.pop("status", None)
diff --git a/src/sentry/monitors/models.py b/src/sentry/monitors/models.py
index 3b9b5009b28e4d..686bc318cdc055 100644
--- a/src/sentry/monitors/models.py
+++ b/src/sentry/monitors/models.py
@@ -1,6 +1,7 @@
from __future__ import annotations
import logging
+import uuid
import zoneinfo
from collections.abc import Sequence
from datetime import datetime
@@ -32,7 +33,6 @@
)
from sentry.db.models.fields.slug import SentrySlugField
from sentry.db.models.utils import slugify_instance
-from sentry.grouping.utils import hash_from_values
from sentry.locks import locks
from sentry.models.environment import Environment
from sentry.models.rule import Rule, RuleSource
@@ -606,11 +606,6 @@ class MonitorEnvironment(Model):
auto-generated missed check-ins.
"""
- last_state_change = models.DateTimeField(null=True)
- """
- The last time that the monitor changed state. Used for issue fingerprinting.
- """
-
objects: ClassVar[MonitorEnvironmentManager] = MonitorEnvironmentManager()
class Meta:
@@ -642,8 +637,10 @@ def get_last_successful_checkin(self):
@property
def incident_grouphash(self):
- # TODO(rjo100): Check to see if there's an active incident
- # if not, use last_state_change as fallback
+ """
+ Retrieve the grouphash for the current active incident. If there is no
+ active incident, None will be returned.
+ """
active_incident = (
MonitorIncident.objects.filter(
monitor_environment_id=self.id, resolving_checkin__isnull=True
@@ -654,18 +651,7 @@ def incident_grouphash(self):
if active_incident:
return active_incident.grouphash
- # XXX(rjo100): While we migrate monitor issues to using the
- # Incident stored grouphash we still may have some active issues
- # that are using the old hashes. We can remove this in the
- # future once all existing issues are resolved.
- return hash_from_values(
- [
- "monitor",
- str(self.monitor.guid),
- self.get_environment().name,
- str(self.last_state_change),
- ]
- )
+ return None
@receiver(pre_save, sender=MonitorEnvironment)
@@ -680,6 +666,13 @@ def check_monitor_environment_limits(sender, instance, **kwargs):
)
+def default_grouphash():
+ """
+ Generate a unique 32-character grouphash for a monitor incident
+ """
+ return uuid.uuid4().hex
+
+
@region_silo_only_model
class MonitorIncident(Model):
__relocation_scope__ = RelocationScope.Excluded
@@ -702,10 +695,33 @@ class MonitorIncident(Model):
This represents the final OK check-in that we receive
"""
- grouphash = models.CharField(max_length=32)
+ grouphash = models.CharField(max_length=32, default=default_grouphash)
+ """
+ Used for issue occurrence generation. Failed check-ins produce occurrences
+ associated with this grouphash.
+ """
+
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_monitorincident"
indexes = [models.Index(fields=["monitor_environment", "resolving_checkin"])]
+
+
+@region_silo_only_model
+class MonitorEnvBrokenDetection(Model):
+ """
+ Records an instance where we have detected a monitor environment to be
+ broken based on a long duration of failure and consecutive failing check-ins
+ """
+
+ __relocation_scope__ = RelocationScope.Excluded
+
+ monitor_incident = FlexibleForeignKey("sentry.MonitorIncident")
+ detection_timestamp = models.DateTimeField(auto_now_add=True)
+ user_notified_timestamp = models.DateTimeField(null=True, db_index=True)
+
+ class Meta:
+ app_label = "sentry"
+ db_table = "sentry_monitorenvbrokendetection"
diff --git a/src/sentry/monitors/tasks.py b/src/sentry/monitors/tasks.py
index 79f86b62a7f404..82fd558235d5a5 100644
--- a/src/sentry/monitors/tasks.py
+++ b/src/sentry/monitors/tasks.py
@@ -7,11 +7,13 @@
import msgpack
import sentry_sdk
-from arroyo import Partition, Topic
+from arroyo import Partition
+from arroyo import Topic as ArroyoTopic
from arroyo.backends.kafka import KafkaPayload, KafkaProducer, build_kafka_configuration
from confluent_kafka.admin import AdminClient, PartitionMetadata
from django.conf import settings
+from sentry.conf.types.kafka_definition import Topic
from sentry.constants import ObjectStatus
from sentry.monitors.logic.mark_failed import mark_failed
from sentry.monitors.schedule import get_prev_schedule
@@ -50,7 +52,7 @@
def _get_producer() -> KafkaProducer:
- cluster_name = get_topic_definition(settings.KAFKA_INGEST_MONITORS)["cluster"]
+ cluster_name = get_topic_definition(Topic.INGEST_MONITORS)["cluster"]
producer_config = get_kafka_producer_cluster_options(cluster_name)
producer_config.pop("compression.type", None)
producer_config.pop("message.max.bytes", None)
@@ -62,10 +64,10 @@ def _get_producer() -> KafkaProducer:
@lru_cache(maxsize=None)
def _get_partitions() -> Mapping[int, PartitionMetadata]:
- topic = settings.KAFKA_INGEST_MONITORS
- cluster_name = get_topic_definition(topic)["cluster"]
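+ # Resolve the logical topic to its configured cluster and physical topic name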
+ topic_defn = get_topic_definition(Topic.INGEST_MONITORS)
+ topic = topic_defn["real_topic_name"]
- conf = get_kafka_admin_cluster_options(cluster_name)
+ conf = get_kafka_admin_cluster_options(topic_defn["cluster"])
admin_client = AdminClient(conf)
result = admin_client.list_topics(topic)
topic_metadata = result.topics.get(topic)
@@ -203,7 +205,7 @@ def clock_pulse(current_datetime=None):
# topic. This is a requirement to ensure that none of the partitions stall,
# since the global clock is tied to the slowest partition.
for partition in _get_partitions().values():
- dest = Partition(Topic(settings.KAFKA_INGEST_MONITORS), partition.id)
+ dest = Partition(ArroyoTopic(settings.KAFKA_INGEST_MONITORS), partition.id)
_checkin_producer.produce(dest, payload)
diff --git a/src/sentry/notifications/utils/__init__.py b/src/sentry/notifications/utils/__init__.py
index 3829556a5809a0..58aca93029dcfd 100644
--- a/src/sentry/notifications/utils/__init__.py
+++ b/src/sentry/notifications/utils/__init__.py
@@ -16,7 +16,7 @@
from sentry import integrations
from sentry.eventstore.models import Event, GroupEvent
-from sentry.incidents.models import AlertRuleTriggerAction
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
from sentry.integrations import IntegrationFeatures, IntegrationProvider
from sentry.issues.grouptype import (
PerformanceConsecutiveDBQueriesGroupType,
@@ -458,9 +458,9 @@ def to_dict(self) -> dict[str, str | float | list[str]]:
"transaction_name": self.transaction,
"parent_span": get_span_evidence_value(self.parent_span),
"repeating_spans": get_span_evidence_value(self.repeating_spans),
- "num_repeating_spans": str(len(self.problem.offender_span_ids))
- if self.problem.offender_span_ids
- else "",
+ "num_repeating_spans": (
+ str(len(self.problem.offender_span_ids)) if self.problem.offender_span_ids else ""
+ ),
}
@property
@@ -531,9 +531,9 @@ def to_dict(self) -> dict[str, str | float | list[str]]:
"transaction_name": self.transaction,
"repeating_spans": self.path_prefix,
"parameters": self.parameters,
- "num_repeating_spans": str(len(self.problem.offender_span_ids))
- if self.problem.offender_span_ids
- else "",
+ "num_repeating_spans": (
+ str(len(self.problem.offender_span_ids)) if self.problem.offender_span_ids else ""
+ ),
}
@property
diff --git a/src/sentry/options/defaults.py b/src/sentry/options/defaults.py
index 4300c4168dca7f..e0dc7921309729 100644
--- a/src/sentry/options/defaults.py
+++ b/src/sentry/options/defaults.py
@@ -964,6 +964,9 @@
# Drop delete_old_primary_hash messages for a particular project.
register("reprocessing2.drop-delete-old-primary-hash", default=[], flags=FLAG_AUTOMATOR_MODIFIABLE)
+# Switch to use service wrapper for reprocessing redis operations
+register("reprocessing.use_store", default=False, flags=FLAG_AUTOMATOR_MODIFIABLE)
+
# BEGIN ABUSE QUOTAS
# Example:
@@ -1650,6 +1653,10 @@
register("hybrid_cloud.region-domain-allow-list", default=[], flags=FLAG_AUTOMATOR_MODIFIABLE)
register("hybrid_cloud.region-user-allow-list", default=[], flags=FLAG_AUTOMATOR_MODIFIABLE)
+register(
+ "hybrid_cloud.use_region_specific_upload_url", default=False, flags=FLAG_AUTOMATOR_MODIFIABLE
+)
+
# Retry controls
register("hybridcloud.regionsiloclient.retries", default=5, flags=FLAG_AUTOMATOR_MODIFIABLE)
register("hybridcloud.rpc.retries", default=5, flags=FLAG_AUTOMATOR_MODIFIABLE)
diff --git a/src/sentry/pipeline/base.py b/src/sentry/pipeline/base.py
index 7613b8d5bf8131..487286c09af2e8 100644
--- a/src/sentry/pipeline/base.py
+++ b/src/sentry/pipeline/base.py
@@ -85,7 +85,9 @@ def unpack_state(cls, request: HttpRequest) -> PipelineRequestState | None:
organization: RpcOrganization | None = None
if state.org_id:
- org_context = organization_service.get_organization_by_id(id=state.org_id)
+ org_context = organization_service.get_organization_by_id(
+ id=state.org_id, include_teams=False
+ )
if org_context:
organization = org_context.organization
diff --git a/src/sentry/profiles/task.py b/src/sentry/profiles/task.py
index 7d31ff7055dab7..86c53714c608a8 100644
--- a/src/sentry/profiles/task.py
+++ b/src/sentry/profiles/task.py
@@ -3,6 +3,7 @@
from collections.abc import Mapping, MutableMapping
from copy import deepcopy
from datetime import datetime, timezone
+from functools import lru_cache
from time import time
from typing import Any
@@ -21,6 +22,7 @@
from sentry.models.eventerror import EventError
from sentry.models.organization import Organization
from sentry.models.project import Project
+from sentry.models.projectkey import ProjectKey, UseCase
from sentry.profiles.device import classify_device
from sentry.profiles.java import deobfuscate_signature, format_signature
from sentry.profiles.utils import get_from_profiling_service
@@ -136,6 +138,20 @@ def process_profile_task(
set_measurement("profile.stacks.processed", len(profile["profile"]["stacks"]))
set_measurement("profile.frames.processed", len(profile["profile"]["frames"]))
+ if options.get(
+ "profiling.generic_metrics.functions_ingestion.enabled"
+ ) and project.organization_id in options.get(
+ "profiling.generic_metrics.functions_ingestion.allowed_org_ids"
+ ):
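+ # For allow-listed orgs, attach the project's profiling DSN to the payload
+ # before it is pushed to vroom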
+ try:
+ with metrics.timer("process_profile.get_metrics_dsn"):
+ dsn = get_metrics_dsn(project.id)
+ profile["options"] = {
+ "dsn": dsn,
+ }
+ except Exception as e:
+ sentry_sdk.capture_exception(e)
+
if not _push_profile_to_vroom(profile, project):
return
@@ -898,3 +914,11 @@ def clean_android_js_profile(profile: Profile):
del p["event_id"]
del p["release"]
del p["dist"]
+
+
+@lru_cache(maxsize=100)
+def get_metrics_dsn(project_id: int) -> str:
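+ # Fetches (or creates) the project's PROFILING key; memoized per project by
+ # the lru_cache above to avoid a database round trip for every profile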
+ project_key, _ = ProjectKey.objects.get_or_create(
+ project_id=project_id, use_case=UseCase.PROFILING.value
+ )
+ return project_key.get_dsn(public=True)
diff --git a/src/sentry/ratelimits/redis.py b/src/sentry/ratelimits/redis.py
index 90c3a834085398..613a1911cb5d45 100644
--- a/src/sentry/ratelimits/redis.py
+++ b/src/sentry/ratelimits/redis.py
@@ -106,8 +106,11 @@ def is_limited_with_value(
# Reset Time = next time bucket's start time
reset_time = _bucket_start_time(_time_bucket(request_time, window) + 1, window)
try:
- result = self.client.incr(redis_key)
- self.client.expire(redis_key, expiration)
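+ # Send INCR and EXPIRE together in a single pipeline round trip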
+ pipe = self.client.pipeline()
+ pipe.incr(redis_key)
+ pipe.expire(redis_key, expiration)
+ pipeline_result = pipe.execute()
+ result = pipeline_result[0]
except RedisError:
# We don't want rate limited endpoints to fail when ratelimits
# can't be updated. We do want to know when that happens.
diff --git a/src/sentry/receivers/features.py b/src/sentry/receivers/features.py
index e653644ed170ed..db56bf7f2c7f88 100644
--- a/src/sentry/receivers/features.py
+++ b/src/sentry/receivers/features.py
@@ -122,7 +122,7 @@ def record_event_processed(project, event, **kwargs):
# Check to make sure more than the ip address is being sent.
# testing for this in test_no_user_tracking_for_ip_address_only
# list(d.keys()) pattern is to make this python3 safe
- if user_context and list(user_context.keys()) != ["ip_address"]:
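+ # Only count it as user tracking when keys beyond "ip_address" and
+ # "sentry_user" are present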
+ if user_context and len(user_context.keys() - {"ip_address", "sentry_user"}) > 0:
feature_slugs.append("user_tracking")
# Custom Tags
diff --git a/src/sentry/relay/config/metric_extraction.py b/src/sentry/relay/config/metric_extraction.py
index dccb25aa28241b..4723904522f0a9 100644
--- a/src/sentry/relay/config/metric_extraction.py
+++ b/src/sentry/relay/config/metric_extraction.py
@@ -14,7 +14,7 @@
from sentry import features, options
from sentry.api.endpoints.project_transaction_threshold import DEFAULT_THRESHOLD
from sentry.api.utils import get_date_range_from_params
-from sentry.incidents.models import AlertRule, AlertRuleStatus
+from sentry.incidents.models.alert_rule import AlertRule, AlertRuleStatus
from sentry.models.dashboard_widget import (
ON_DEMAND_ENABLED_KEY,
DashboardWidgetQuery,
diff --git a/src/sentry/replays/lib/kafka.py b/src/sentry/replays/lib/kafka.py
index 26ab2368e649cc..2bde967b5faf01 100644
--- a/src/sentry/replays/lib/kafka.py
+++ b/src/sentry/replays/lib/kafka.py
@@ -1,5 +1,4 @@
-from django.conf import settings
-
+from sentry.conf.types.kafka_definition import Topic
from sentry.utils.kafka_config import get_kafka_producer_cluster_options, get_topic_definition
from sentry.utils.pubsub import KafkaPublisher
@@ -10,7 +9,7 @@ def initialize_replays_publisher(is_async=False) -> KafkaPublisher:
global replay_publisher
if replay_publisher is None:
- config = get_topic_definition(settings.KAFKA_INGEST_REPLAY_EVENTS)
+ config = get_topic_definition(Topic.INGEST_REPLAY_EVENTS)
replay_publisher = KafkaPublisher(
get_kafka_producer_cluster_options(config["cluster"]),
asynchronous=is_async,
diff --git a/src/sentry/replays/usecases/ingest/dom_index.py b/src/sentry/replays/usecases/ingest/dom_index.py
index 80b626bb20c987..d97babe7532a8a 100644
--- a/src/sentry/replays/usecases/ingest/dom_index.py
+++ b/src/sentry/replays/usecases/ingest/dom_index.py
@@ -6,17 +6,12 @@
import uuid
from collections.abc import Generator
from hashlib import md5
-from typing import Any, Literal, TypedDict, cast
-
-from django.conf import settings
+from typing import Any, Literal, TypedDict
from sentry import features
+from sentry.conf.types.kafka_definition import Topic
from sentry.models.project import Project
-from sentry.replays.usecases.ingest.events import SentryEvent
-from sentry.replays.usecases.ingest.issue_creation import (
- report_rage_click_issue,
- report_rage_click_issue_with_replay_event,
-)
+from sentry.replays.usecases.ingest.issue_creation import report_rage_click_issue_with_replay_event
from sentry.utils import json, kafka_config, metrics
from sentry.utils.pubsub import KafkaPublisher
@@ -219,7 +214,7 @@ def _initialize_publisher() -> KafkaPublisher:
global replay_publisher
if replay_publisher is None:
- config = kafka_config.get_topic_definition(settings.KAFKA_INGEST_REPLAY_EVENTS)
+ config = kafka_config.get_topic_definition(Topic.INGEST_REPLAY_EVENTS)
replay_publisher = KafkaPublisher(
kafka_config.get_kafka_producer_cluster_options(config["cluster"])
)
@@ -403,10 +398,6 @@ def _handle_breadcrumb(
payload["data"]["node"],
replay_event,
)
- else:
- report_rage_click_issue.delay(
- project_id, replay_id, cast(SentryEvent, event)
- )
# Log the event for tracking.
log = event["data"].get("payload", {}).copy()
log["project_id"] = project_id
diff --git a/src/sentry/replays/usecases/ingest/issue_creation.py b/src/sentry/replays/usecases/ingest/issue_creation.py
index 0985253646429a..c2ec8fc642c220 100644
--- a/src/sentry/replays/usecases/ingest/issue_creation.py
+++ b/src/sentry/replays/usecases/ingest/issue_creation.py
@@ -5,12 +5,7 @@
from sentry.constants import MAX_CULPRIT_LENGTH
from sentry.issues.grouptype import ReplayRageClickType
from sentry.issues.issue_occurrence import IssueEvidence
-from sentry.models.project import Project
-from sentry.replays.query import query_replay_instance
-from sentry.replays.usecases.ingest.events import SentryEvent
from sentry.replays.usecases.issue import new_issue_occurrence
-from sentry.silo.base import SiloMode
-from sentry.tasks.base import instrumented_task
from sentry.utils import metrics
logger = logging.getLogger()
@@ -19,73 +14,6 @@
RAGE_CLICK_LEVEL = "error"
-@instrumented_task(
- name="sentry.replays.usecases.ingest.issue_creation.report_rage_click_issue",
- queue="replays.ingest_replay",
- default_retry_delay=5,
- max_retries=5,
- silo_mode=SiloMode.REGION,
-)
-def report_rage_click_issue(project_id: int, replay_id: str, event: SentryEvent):
- metrics.incr("replay.rage_click_issue_creation")
- payload = event["data"]["payload"]
-
- project = Project.objects.get(id=project_id)
-
- # Seconds since epoch is UTC.
- timestamp = datetime.datetime.fromtimestamp(payload["timestamp"])
- timestamp = timestamp.replace(tzinfo=datetime.UTC)
-
- replay_info_list = query_replay_instance(
- project_id=project_id,
- replay_id=replay_id,
- start=timestamp - datetime.timedelta(hours=1),
- end=timestamp,
- organization=project.organization,
- )
- if not replay_info_list or len(replay_info_list) == 0:
- metrics.incr("replay.rage_click_issue_creation.no_replay_info")
- return
-
- replay_info = replay_info_list[0]
-
- selector = payload["message"]
- clicked_element = selector.split(" > ")[-1]
- new_issue_occurrence(
- culprit=payload["data"]["url"][:MAX_CULPRIT_LENGTH],
- environment=replay_info["agg_environment"],
- fingerprint=[selector],
- issue_type=ReplayRageClickType,
- level=RAGE_CLICK_LEVEL,
- platform="javascript",
- project_id=project_id,
- subtitle=selector,
- timestamp=timestamp,
- title=RAGE_CLICK_TITLE,
- evidence_data={
- # RRWeb node data of clicked element.
- "node": payload["data"]["node"],
- # CSS selector path to clicked element.
- "selector": selector,
- },
- evidence_display=[
- IssueEvidence(name="Clicked Element", value=clicked_element, important=True),
- IssueEvidence(name="Selector Path", value=selector, important=True),
- ],
- extra_event_data={
- "contexts": {"replay": {"replay_id": replay_id}},
- "level": RAGE_CLICK_LEVEL,
- "tags": {"replayId": replay_id, "url": payload["data"]["url"]},
- "user": {
- "id": replay_info["user_id"],
- "username": replay_info["user_username"],
- "email": replay_info["user_email"],
- "ip_address": replay_info["user_ip"],
- },
- },
- )
-
-
def report_rage_click_issue_with_replay_event(
project_id: int,
replay_id: str,
diff --git a/src/sentry/reprocessing2.py b/src/sentry/reprocessing2.py
index 75586690ea6bb5..368fa6d173903d 100644
--- a/src/sentry/reprocessing2.py
+++ b/src/sentry/reprocessing2.py
@@ -97,6 +97,7 @@
from sentry.deletions.defaults.group import DIRECT_GROUP_RELATED_MODELS
from sentry.eventstore.models import Event
from sentry.eventstore.processing import event_processing_store
+from sentry.eventstore.reprocessing import reprocessing_store
from sentry.models.eventattachment import EventAttachment
from sentry.snuba.dataset import Dataset
from sentry.utils import json, metrics, snuba
@@ -140,6 +141,8 @@
Literal["attachment.not_found"],
]
+use_store_option = "reprocessing.use_store"
+
class CannotReprocess(Exception):
def __init__(self, reason: CannotReprocessReason):
@@ -265,10 +268,15 @@ def _send_delete_old_primary_hash_messages(
# Events for a group are split and bucketed by their primary hashes. If flushing is to be
# performed on a per-group basis, the event count needs to be summed up across all buckets
# belonging to a single group.
- event_count = 0
- for primary_hash in old_primary_hashes:
- key = _get_old_primary_hash_subset_key(project_id, group_id, primary_hash)
- event_count += client.llen(key)
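+ # When enabled, route the count through the reprocessing store service
+ # instead of querying Redis directly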
+ if options.get(use_store_option):
+ event_count = reprocessing_store.event_count_for_hashes(
+ project_id, group_id, old_primary_hashes
+ )
+ else:
+ event_count = 0
+ for primary_hash in old_primary_hashes:
+ key = _get_old_primary_hash_subset_key(project_id, group_id, primary_hash)
+ event_count += client.llen(key)
if (
not force_flush_batch
@@ -277,8 +285,13 @@ def _send_delete_old_primary_hash_messages(
return
for primary_hash in old_primary_hashes:
- event_key = _get_old_primary_hash_subset_key(project_id, group_id, primary_hash)
- event_ids, from_date, to_date = pop_batched_events_from_redis(event_key)
+ if options.get(use_store_option):
+ event_ids, from_date, to_date = reprocessing_store.pop_batched_events(
+ project_id, group_id, primary_hash
+ )
+ else:
+ event_key = _get_old_primary_hash_subset_key(project_id, group_id, primary_hash)
+ event_ids, from_date, to_date = pop_batched_events_from_redis(event_key)
# Racing might be happening between two different tasks. Give up on the
# task that's lagging behind by prematurely terminating flushing.
@@ -366,21 +379,33 @@ def buffered_delete_old_primary_hash(
client = _get_sync_redis_client()
- # This is a meta key that contains old primary hashes. These hashes are then
- # combined with other values to construct a key that points to a list of
- # tombstonable events.
- primary_hash_set_key = f"re2:tombstone-primary-hashes:{project_id}:{group_id}"
- old_primary_hashes = client.smembers(primary_hash_set_key)
+ if options.get(use_store_option):
+ old_primary_hashes = reprocessing_store.get_old_primary_hashes(project_id, group_id)
+ else:
+ # This is a meta key that contains old primary hashes. These hashes are then
+ # combined with other values to construct a key that points to a list of
+ # tombstonable events.
+ primary_hash_set_key = f"re2:tombstone-primary-hashes:{project_id}:{group_id}"
+ old_primary_hashes = client.smembers(primary_hash_set_key)
if old_primary_hash is not None and old_primary_hash != current_primary_hash:
- event_key = _get_old_primary_hash_subset_key(project_id, group_id, old_primary_hash)
- client.lpush(event_key, f"{to_timestamp(datetime)};{event_id}")
- client.expire(event_key, settings.SENTRY_REPROCESSING_TOMBSTONES_TTL)
+ if options.get(use_store_option):
+ reprocessing_store.expire_hash(
+ project_id, group_id, event_id, datetime, old_primary_hash
+ )
+ else:
+ event_key = _get_old_primary_hash_subset_key(project_id, group_id, old_primary_hash)
+ client.lpush(event_key, f"{to_timestamp(datetime)};{event_id}")
+ client.expire(event_key, settings.SENTRY_REPROCESSING_TOMBSTONES_TTL)
if old_primary_hash not in old_primary_hashes:
old_primary_hashes.add(old_primary_hash)
- client.sadd(primary_hash_set_key, old_primary_hash)
- client.expire(primary_hash_set_key, settings.SENTRY_REPROCESSING_TOMBSTONES_TTL)
+ if options.get(use_store_option):
+ reprocessing_store.add_hash(project_id, group_id, old_primary_hash)
+ else:
+ primary_hash_set_key = f"re2:tombstone-primary-hashes:{project_id}:{group_id}"
+ client.sadd(primary_hash_set_key, old_primary_hash)
+ client.expire(primary_hash_set_key, settings.SENTRY_REPROCESSING_TOMBSTONES_TTL)
with sentry_sdk.configure_scope() as scope:
scope.set_tag("project_id", project_id)
@@ -474,32 +499,49 @@ def buffered_handle_remaining_events(
more than counters.
"""
+ key = None
client = _get_sync_redis_client()
- # We explicitly cluster by only project_id and group_id here such that our
- # RENAME command later succeeds.
- key = f"re2:remaining:{{{project_id}:{old_group_id}}}"
-
- if datetime_to_event:
- llen = client.lpush(
- key,
- *(f"{to_timestamp(datetime)};{event_id}" for datetime, event_id in datetime_to_event),
+
+ if options.get(use_store_option):
+ llen = reprocessing_store.get_remaining_event_count(
+ project_id, old_group_id, datetime_to_event
)
- client.expire(key, settings.SENTRY_REPROCESSING_SYNC_TTL)
else:
- llen = client.llen(key)
+ # We explicitly cluster by only project_id and group_id here such that our
+ # RENAME command later succeeds.
+ key = f"re2:remaining:{{{project_id}:{old_group_id}}}"
+
+ if datetime_to_event:
+ llen = client.lpush(
+ key,
+ *(
+ f"{to_timestamp(datetime)};{event_id}"
+ for datetime, event_id in datetime_to_event
+ ),
+ )
+ client.expire(key, settings.SENTRY_REPROCESSING_SYNC_TTL)
+ else:
+ llen = client.llen(key)
if force_flush_batch or llen > settings.SENTRY_REPROCESSING_REMAINING_EVENTS_BUF_SIZE:
- new_key = f"{key}:{uuid.uuid4().hex}"
-
- try:
- # Rename `key` to a new temp key that is passed to celery task. We
- # use `renamenx` instead of `rename` only to detect UUID collisions.
- assert client.renamenx(key, new_key), "UUID collision for new_key?"
- except redis.exceptions.ResponseError:
- # `key` does not exist in Redis. `ResponseError` is a bit too broad
- # but it seems we'd have to do string matching on error message
- # otherwise.
- return
+
+ if options.get(use_store_option):
+ new_key = reprocessing_store.rename_key(project_id, old_group_id)
+ if not new_key:
+ return
+ else:
+ assert key, "Key must exist in this branch"
+ new_key = f"{key}:{uuid.uuid4().hex}"
+
+ try:
+ # Rename `key` to a new temp key that is passed to celery task. We
+ # use `renamenx` instead of `rename` only to detect UUID collisions.
+ assert client.renamenx(key, new_key), "UUID collision for new_key?"
+ except redis.exceptions.ResponseError:
+ # `key` does not exist in Redis. `ResponseError` is a bit too broad
+ # but it seems we'd have to do string matching on error message
+ # otherwise.
+ return
from sentry.tasks.reprocessing2 import handle_remaining_events
@@ -555,15 +597,22 @@ def mark_event_reprocessed(data=None, group_id=None, project_id=None, num_events
project_id = data["project"]
- client = _get_sync_redis_client()
- # refresh the TTL of the metadata:
- client.expire(_get_info_reprocessed_key(group_id), settings.SENTRY_REPROCESSING_SYNC_TTL)
- key = _get_sync_counter_key(group_id)
- client.expire(key, settings.SENTRY_REPROCESSING_SYNC_TTL)
- if client.decrby(key, num_events) == 0:
- from sentry.tasks.reprocessing2 import finish_reprocessing
+ if options.get(use_store_option):
+ result = reprocessing_store.mark_event_reprocessed(group_id, num_events)
+ if result:
+ from sentry.tasks.reprocessing2 import finish_reprocessing
- finish_reprocessing.delay(project_id=project_id, group_id=group_id)
+ finish_reprocessing.delay(project_id=project_id, group_id=group_id)
+ else:
+ client = _get_sync_redis_client()
+ # refresh the TTL of the metadata:
+ client.expire(_get_info_reprocessed_key(group_id), settings.SENTRY_REPROCESSING_SYNC_TTL)
+ key = _get_sync_counter_key(group_id)
+ client.expire(key, settings.SENTRY_REPROCESSING_SYNC_TTL)
+ if client.decrby(key, num_events) == 0:
+ from sentry.tasks.reprocessing2 import finish_reprocessing
+
+ finish_reprocessing.delay(project_id=project_id, group_id=group_id)
def start_group_reprocessing(
@@ -647,15 +696,20 @@ def start_group_reprocessing(
# New Activity Timestamp
date_created = new_activity.datetime
- client = _get_sync_redis_client()
- client.setex(_get_sync_counter_key(group_id), settings.SENTRY_REPROCESSING_SYNC_TTL, sync_count)
- client.setex(
- _get_info_reprocessed_key(group_id),
- settings.SENTRY_REPROCESSING_SYNC_TTL,
- json.dumps(
- {"dateCreated": date_created, "syncCount": sync_count, "totalEvents": event_count}
- ),
- )
+ if options.get(use_store_option):
+ reprocessing_store.start_reprocessing(group_id, date_created, sync_count, event_count)
+ else:
+ client = _get_sync_redis_client()
+ client.setex(
+ _get_sync_counter_key(group_id), settings.SENTRY_REPROCESSING_SYNC_TTL, sync_count
+ )
+ client.setex(
+ _get_info_reprocessed_key(group_id),
+ settings.SENTRY_REPROCESSING_SYNC_TTL,
+ json.dumps(
+ {"dateCreated": date_created, "syncCount": sync_count, "totalEvents": event_count}
+ ),
+ )
return new_group.id
@@ -670,11 +724,15 @@ def is_group_finished(group_id):
def get_progress(group_id, project_id=None):
- client = _get_sync_redis_client()
- pending_key = _get_sync_counter_key(group_id)
- pending = client.get(pending_key)
- ttl = client.ttl(pending_key)
- info = client.get(_get_info_reprocessed_key(group_id))
+ if options.get(use_store_option):
+ pending, ttl = reprocessing_store.get_pending(group_id)
+ info = reprocessing_store.get_progress(group_id)
+ else:
+ client = _get_sync_redis_client()
+ pending_key = _get_sync_counter_key(group_id)
+ pending = client.get(pending_key)
+ ttl = client.ttl(pending_key)
+ info = client.get(_get_info_reprocessed_key(group_id))
if pending is None:
logger.error("reprocessing2.missing_counter")
return 0, None
diff --git a/src/sentry/rules/actions/notify_event_service.py b/src/sentry/rules/actions/notify_event_service.py
index 757d0b5a26d639..035a793bffa920 100644
--- a/src/sentry/rules/actions/notify_event_service.py
+++ b/src/sentry/rules/actions/notify_event_service.py
@@ -10,7 +10,8 @@
from sentry.api.serializers.models.app_platform_event import AppPlatformEvent
from sentry.api.serializers.models.incident import IncidentSerializer
from sentry.eventstore.models import GroupEvent
-from sentry.incidents.models import AlertRuleTriggerAction, Incident, IncidentStatus
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
+from sentry.incidents.models.incident import Incident, IncidentStatus
from sentry.integrations.metric_alerts import incident_attachment_info
from sentry.plugins.base import plugins
from sentry.rules import EventState
diff --git a/src/sentry/runner/commands/createuser.py b/src/sentry/runner/commands/createuser.py
index 7d6f0662776968..906d41ad98f5d5 100644
--- a/src/sentry/runner/commands/createuser.py
+++ b/src/sentry/runner/commands/createuser.py
@@ -151,7 +151,9 @@ def createuser(emails, org_id, password, superuser, staff, no_password, no_input
# Get the org if specified, otherwise use the default.
if org_id:
- org_context = organization_service.get_organization_by_id(id=org_id)
+ org_context = organization_service.get_organization_by_id(
+ id=org_id, include_teams=False, include_projects=False
+ )
if org_context is None:
raise Exception("Organization ID not found")
org = org_context.organization
diff --git a/src/sentry/runner/commands/devserver.py b/src/sentry/runner/commands/devserver.py
index 038a66bfa72238..6e00c6890af293 100644
--- a/src/sentry/runner/commands/devserver.py
+++ b/src/sentry/runner/commands/devserver.py
@@ -366,12 +366,11 @@ def devserver(
from sentry.conf.types.kafka_definition import Topic
from sentry.utils.batching_kafka_consumer import create_topics
+ from sentry.utils.kafka_config import get_topic_definition
for topic in Topic:
- default_name = topic.value
- physical_name = settings.KAFKA_TOPIC_OVERRIDES.get(default_name, default_name)
- cluster_name = settings.KAFKA_TOPIC_TO_CLUSTER[default_name]
- create_topics(cluster_name, [physical_name])
+ topic_defn = get_topic_definition(topic)
+ create_topics(topic_defn["cluster"], [topic_defn["real_topic_name"]])
if dev_consumer:
daemons.append(
diff --git a/src/sentry/search/events/builder/metrics.py b/src/sentry/search/events/builder/metrics.py
index 4ff57eb5e3f541..aa57b8c871ab5c 100644
--- a/src/sentry/search/events/builder/metrics.py
+++ b/src/sentry/search/events/builder/metrics.py
@@ -1029,18 +1029,23 @@ def run_query(self, referrer: str, use_cache: bool = False) -> Any:
groupbys = self.groupby
if not groupbys and self.use_on_demand:
# Need this otherwise top_events returns only 1 item
- groupbys = [Column(col) for col in self._get_group_bys()]
- groupby_aliases = [
- (
- groupby.alias
- if isinstance(groupby, (AliasedExpression, CurriedFunction))
- else groupby.name
- )
- for groupby in groupbys
- if not (
- isinstance(groupby, CurriedFunction) and groupby.function == "team_key_transaction"
- )
- ]
+ groupbys = [self.resolve_column(col) for col in self._get_group_bys()]
+            # Later the query is made by passing these columns to the metrics layer, so the aliases can just be the
+            # raw groupbys
+ groupby_aliases = self._get_group_bys()
+ else:
+ groupby_aliases = [
+ (
+ groupby.alias
+ if isinstance(groupby, (AliasedExpression, CurriedFunction))
+ else groupby.name
+ )
+ for groupby in groupbys
+ if not (
+ isinstance(groupby, CurriedFunction)
+ and groupby.function == "team_key_transaction"
+ )
+ ]
# The typing for these are weak (all using Any) since the results from snuba can contain an assortment of types
value_map: dict[str, Any] = defaultdict(dict)
groupby_values: list[Any] = []
diff --git a/src/sentry/search/events/constants.py b/src/sentry/search/events/constants.py
index 4194f2cc08a451..ebf18f77d23fa7 100644
--- a/src/sentry/search/events/constants.py
+++ b/src/sentry/search/events/constants.py
@@ -282,7 +282,7 @@ class ThresholdDict(TypedDict):
"measurements.score.weight.cls": "d:transactions/measurements.score.weight.cls@ratio",
"measurements.score.weight.fcp": "d:transactions/measurements.score.weight.fcp@ratio",
"measurements.score.weight.ttfb": "d:transactions/measurements.score.weight.ttfb@ratio",
- "measurements.inp": "d:spans/webvital.inp@ratio",
+ "measurements.inp": "d:spans/webvital.inp@millisecond",
"measurements.score.inp": "d:spans/webvital.score.inp@ratio",
"measurements.score.weight.inp": "d:spans/webvital.score.weight.inp@ratio",
"spans.browser": "d:transactions/breakdowns.span_ops.ops.browser@millisecond",
@@ -316,6 +316,7 @@ class ThresholdDict(TypedDict):
"transaction.method",
"transaction.op",
"transaction.status",
+ "span.op",
}
SPAN_METRICS_MAP = {
"user": "s:spans/user@none",
diff --git a/src/sentry/search/events/datasets/discover.py b/src/sentry/search/events/datasets/discover.py
index 52a084d44967cf..b7c9673a5b202a 100644
--- a/src/sentry/search/events/datasets/discover.py
+++ b/src/sentry/search/events/datasets/discover.py
@@ -1007,9 +1007,7 @@ def function_converter(self) -> Mapping[str, SnQLFunction]:
SnQLFunction(
"example",
required_args=[NumericColumn("column")],
- snql_aggregate=lambda args, alias: function_aliases.resolve_random_sample(
- ["timestamp", "span_id", args["column"].name], alias
- ),
+ snql_aggregate=self._resolve_random_sample,
private=True,
),
SnQLFunction(
@@ -1813,6 +1811,20 @@ def _resolve_count_scores_function(self, args: Mapping[str, Column], alias: str)
alias,
)
+ def _resolve_random_sample(
+ self,
+ args: Mapping[str, str | Column | SelectType | int | float],
+ alias: str,
+ ) -> SelectType:
+ offset = 0 if self.builder.offset is None else self.builder.offset.offset
+ limit = 0 if self.builder.limit is None else self.builder.limit.limit
+ return function_aliases.resolve_random_sample(
+ ["timestamp", "span_id", args["column"].name],
+ alias,
+ offset,
+ limit,
+ )
+
# Query Filters
def _project_slug_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
return filter_aliases.project_slug_converter(self.builder, search_filter)
diff --git a/src/sentry/search/events/datasets/function_aliases.py b/src/sentry/search/events/datasets/function_aliases.py
index f6d604c92286db..b6280f519211e8 100644
--- a/src/sentry/search/events/datasets/function_aliases.py
+++ b/src/sentry/search/events/datasets/function_aliases.py
@@ -13,6 +13,7 @@
from sentry.search.events.types import SelectType
from sentry.sentry_metrics.configuration import UseCaseKey
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
+from sentry.utils.hashlib import fnv1a_32
def resolve_project_threshold_config(
@@ -344,7 +345,9 @@ def resolve_rounded_timestamp(interval: int, alias: str, timestamp_column: str =
)
-def resolve_random_sample(columns: list[str], alias: str, seed: int = 1):
+def resolve_random_sample(columns: list[str], alias: str, offset: int, limit: int):
+ seed_str = f"{offset}-{limit}"
+ seed = fnv1a_32(seed_str.encode("utf-8"))
return Function(
"arrayElement",
[
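
A minimal sketch of how the new sampling seed is derived, assuming only what is visible in this diff: the seed is the FNV-1a 32-bit hash of the "offset-limit" string, so each page of results gets a deterministic but page-specific sample. The fnv1a_32 body mirrors the helper this change relocates into sentry.utils.hashlib; the offset/limit values below are illustrative.

```python
def fnv1a_32(data: bytes) -> int:
    # Fowler-Noll-Vo 32-bit hash, matching the helper moved to sentry.utils.hashlib.
    result_hash = 0x811C9DC5
    for byte in data:
        result_hash ^= byte
        result_hash = (result_hash * 0x01000193) % 2**32
    return result_hash


def sample_seed(offset: int, limit: int) -> int:
    # The same (offset, limit) page always yields the same seed, so repeated
    # requests for a page return a stable random sample.
    return fnv1a_32(f"{offset}-{limit}".encode("utf-8"))


assert sample_seed(0, 50) == sample_seed(0, 50)   # deterministic per page
print(sample_seed(0, 50), sample_seed(50, 50))    # different pages generally produce different seeds
```
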
diff --git a/src/sentry/search/events/datasets/metrics_summaries.py b/src/sentry/search/events/datasets/metrics_summaries.py
index f6ce6160d0bc06..a52e0c04eb3851 100644
--- a/src/sentry/search/events/datasets/metrics_summaries.py
+++ b/src/sentry/search/events/datasets/metrics_summaries.py
@@ -2,7 +2,7 @@
from collections.abc import Callable, Mapping
-from snuba_sdk import And, Condition, Direction, Function, Op, OrderBy
+from snuba_sdk import And, Column, Condition, Direction, Function, Op, OrderBy
from sentry.api.event_search import SearchFilter
from sentry.search.events import builder, constants
@@ -31,6 +31,7 @@ def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]:
return {
constants.PROJECT_ALIAS: self._resolve_project_slug_alias,
constants.PROJECT_NAME_ALIAS: self._resolve_project_slug_alias,
+ "avg_metric": self._resolve_avg_alias,
}
@property
@@ -40,18 +41,7 @@ def function_converter(self) -> Mapping[str, SnQLFunction]:
for function in [
SnQLFunction(
"example",
- snql_aggregate=lambda args, alias: function_aliases.resolve_random_sample(
- [
- "group",
- "end_timestamp",
- "span_id",
- "min",
- "max",
- "sum",
- "count",
- ],
- alias,
- ),
+ snql_aggregate=self._resolve_random_sample,
private=True,
),
SnQLFunction(
@@ -91,3 +81,32 @@ def _metric_filter_converter(self, search_filter: SearchFilter) -> WhereType | N
def _resolve_project_slug_alias(self, alias: str) -> SelectType:
return field_aliases.resolve_project_slug_alias(self.builder, alias)
+
+ def _resolve_avg_alias(self, alias: str) -> SelectType:
+ return Function(
+ "divide",
+ [self.builder.column("sum_metric"), self.builder.column("count_metric")],
+ alias,
+ )
+
+ def _resolve_random_sample(
+ self,
+ args: Mapping[str, str | Column | SelectType | int | float],
+ alias: str,
+ ) -> SelectType:
+ offset = 0 if self.builder.offset is None else self.builder.offset.offset
+ limit = 0 if self.builder.limit is None else self.builder.limit.limit
+ return function_aliases.resolve_random_sample(
+ [
+ "group",
+ "end_timestamp",
+ "span_id",
+ "min",
+ "max",
+ "sum",
+ "count",
+ ],
+ alias,
+ offset,
+ limit,
+ )
diff --git a/src/sentry/search/events/datasets/spans_indexed.py b/src/sentry/search/events/datasets/spans_indexed.py
index 5a8c6b5286a72c..857774a86fd0c7 100644
--- a/src/sentry/search/events/datasets/spans_indexed.py
+++ b/src/sentry/search/events/datasets/spans_indexed.py
@@ -196,9 +196,7 @@ def function_converter(self) -> Mapping[str, SnQLFunction]:
),
SnQLFunction(
"example",
- snql_aggregate=lambda args, alias: function_aliases.resolve_random_sample(
- ["group", "timestamp", "span_id"], alias
- ),
+ snql_aggregate=self._resolve_random_sample,
private=True,
),
SnQLFunction(
@@ -340,3 +338,17 @@ def _resolve_percentile(
alias,
)
)
+
+ def _resolve_random_sample(
+ self,
+ args: Mapping[str, str | Column | SelectType | int | float],
+ alias: str,
+ ) -> SelectType:
+ offset = 0 if self.builder.offset is None else self.builder.offset.offset
+ limit = 0 if self.builder.limit is None else self.builder.limit.limit
+ return function_aliases.resolve_random_sample(
+ ["group", "timestamp", "span_id"],
+ alias,
+ offset,
+ limit,
+ )
diff --git a/src/sentry/sentry_metrics/aggregation_option_registry.py b/src/sentry/sentry_metrics/aggregation_option_registry.py
index b67afda1b9dac1..634fa416e8b735 100644
--- a/src/sentry/sentry_metrics/aggregation_option_registry.py
+++ b/src/sentry/sentry_metrics/aggregation_option_registry.py
@@ -9,20 +9,27 @@ class AggregationOption(Enum):
TEN_SECOND = "ten_second"
+class TimeWindow(Enum):
+ SEVEN_DAYS = "7d"
+ FOURTEEN_DAYS = "14d"
+ THIRTY_DAYS = "30d"
+ NINETY_DAYS = "90d"
+
+
METRIC_ID_AGG_OPTION = {
- "d:transactions/measurements.fcp@millisecond": AggregationOption.HIST,
- "d:transactions/measurements.lcp@millisecond": AggregationOption.HIST,
+ "d:transactions/measurements.fcp@millisecond": {AggregationOption.HIST: TimeWindow.NINETY_DAYS},
+ "d:transactions/measurements.lcp@millisecond": {AggregationOption.HIST: TimeWindow.NINETY_DAYS},
}
-USE_CASE_AGG_OPTION = {UseCaseID.CUSTOM: AggregationOption.TEN_SECOND}
+USE_CASE_AGG_OPTION = {UseCaseID.CUSTOM: {AggregationOption.TEN_SECOND: TimeWindow.SEVEN_DAYS}}
-def get_aggregation_option(metric_id: str) -> AggregationOption | None:
- use_case_id: UseCaseID = extract_use_case_id(metric_id)
+def get_aggregation_options(mri: str) -> dict[AggregationOption, TimeWindow] | None:
+ use_case_id: UseCaseID = extract_use_case_id(mri)
# We check first if the particular metric ID has a specified aggregation
- if metric_id in METRIC_ID_AGG_OPTION:
- return METRIC_ID_AGG_OPTION.get(metric_id)
+ if mri in METRIC_ID_AGG_OPTION:
+ return METRIC_ID_AGG_OPTION.get(mri)
# And move to the use case if not
elif options.get("sentry-metrics.10s-granularity") and (use_case_id in USE_CASE_AGG_OPTION):
return USE_CASE_AGG_OPTION[use_case_id]
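
A simplified sketch of the new lookup contract: a metric or use case now maps to a dict of AggregationOption to TimeWindow rather than a single option, and a metric-specific entry still takes precedence over the use-case-wide one. The HIST enum value and the plain string use-case key are assumptions for illustration, and the option flag guarding the use-case path is omitted.

```python
from enum import Enum


class AggregationOption(Enum):
    HIST = "hist"  # value assumed; only TEN_SECOND's value is visible in this diff
    TEN_SECOND = "ten_second"


class TimeWindow(Enum):
    SEVEN_DAYS = "7d"
    NINETY_DAYS = "90d"


METRIC_ID_AGG_OPTION = {
    "d:transactions/measurements.fcp@millisecond": {AggregationOption.HIST: TimeWindow.NINETY_DAYS},
}
USE_CASE_AGG_OPTION = {"custom": {AggregationOption.TEN_SECOND: TimeWindow.SEVEN_DAYS}}


def get_aggregation_options(mri: str, use_case: str) -> dict[AggregationOption, TimeWindow] | None:
    # A metric-specific entry wins over the use-case-wide entry.
    if mri in METRIC_ID_AGG_OPTION:
        return METRIC_ID_AGG_OPTION[mri]
    return USE_CASE_AGG_OPTION.get(use_case)


assert get_aggregation_options("d:transactions/measurements.fcp@millisecond", "transactions") == {
    AggregationOption.HIST: TimeWindow.NINETY_DAYS
}
```
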
diff --git a/src/sentry/sentry_metrics/configuration.py b/src/sentry/sentry_metrics/configuration.py
index eddebed13a3220..a885712f379d64 100644
--- a/src/sentry/sentry_metrics/configuration.py
+++ b/src/sentry/sentry_metrics/configuration.py
@@ -10,6 +10,8 @@
import sentry_sdk
+from sentry.conf.types.kafka_definition import Topic
+
# The maximum length of a column that is indexed in postgres. It is important to keep this in
# sync between the consumers and the models defined in src/sentry/sentry_metrics/models.py
MAX_INDEXED_COLUMN_LENGTH = 200
@@ -46,7 +48,7 @@ class MetricsIngestConfiguration:
db_backend: IndexerStorage
db_backend_options: Mapping[str, Any]
input_topic: str
- output_topic: str
+ output_topic: Topic
use_case_id: UseCaseKey
internal_metrics_tag: str | None
writes_limiter_cluster_options: Mapping[str, Any]
@@ -79,7 +81,7 @@ def get_ingest_config(
db_backend=IndexerStorage.POSTGRES,
db_backend_options={},
input_topic=settings.KAFKA_INGEST_METRICS,
- output_topic=settings.KAFKA_SNUBA_METRICS,
+ output_topic=Topic.SNUBA_METRICS,
use_case_id=UseCaseKey.RELEASE_HEALTH,
internal_metrics_tag="release-health",
writes_limiter_cluster_options=settings.SENTRY_METRICS_INDEXER_WRITES_LIMITER_OPTIONS,
@@ -96,7 +98,7 @@ def get_ingest_config(
db_backend=IndexerStorage.POSTGRES,
db_backend_options={},
input_topic=settings.KAFKA_INGEST_PERFORMANCE_METRICS,
- output_topic=settings.KAFKA_SNUBA_GENERIC_METRICS,
+ output_topic=Topic.SNUBA_GENERIC_METRICS,
use_case_id=UseCaseKey.PERFORMANCE,
internal_metrics_tag="perf",
writes_limiter_cluster_options=settings.SENTRY_METRICS_INDEXER_WRITES_LIMITER_OPTIONS_PERFORMANCE,
diff --git a/src/sentry/sentry_metrics/consumers/indexer/batch.py b/src/sentry/sentry_metrics/consumers/indexer/batch.py
index 9bc1960df40bc2..3d3b1251d9b2d5 100644
--- a/src/sentry/sentry_metrics/consumers/indexer/batch.py
+++ b/src/sentry/sentry_metrics/consumers/indexer/batch.py
@@ -17,7 +17,7 @@
from sentry_kafka_schemas.schema_types.snuba_metrics_v1 import Metric
from sentry import options
-from sentry.sentry_metrics.aggregation_option_registry import get_aggregation_option
+from sentry.sentry_metrics.aggregation_option_registry import get_aggregation_options
from sentry.sentry_metrics.configuration import MAX_INDEXED_COLUMN_LENGTH
from sentry.sentry_metrics.consumers.indexer.common import (
BrokerMeta,
@@ -66,7 +66,7 @@ class IndexerBatchMetrics:
max_tags_len: int = 0
max_value_len: int = 0
- def add_metric(self, num_bytes: int, tags_len: int, value_len: int):
+ def add_metric(self, num_bytes: int, tags_len: int, value_len: int) -> None:
self.message_count += 1
self.total_bytes += num_bytes
self.total_tags_len += tags_len
@@ -75,13 +75,13 @@ def add_metric(self, num_bytes: int, tags_len: int, value_len: int):
self.max_tags_len = max(self.max_tags_len, tags_len)
self.max_value_len = max(self.max_value_len, value_len)
- def avg_bytes(self):
+ def avg_bytes(self) -> float:
return self.total_bytes / self.message_count
- def avg_tags_len(self):
+ def avg_tags_len(self) -> float:
return self.total_tags_len / self.message_count
- def avg_value_len(self):
+ def avg_value_len(self) -> float:
return self.total_value_len / self.message_count
@@ -487,8 +487,11 @@ def reconstruct_messages(
"value": old_payload_value["value"],
"sentry_received_timestamp": sentry_received_timestamp,
}
- if aggregation_option := get_aggregation_option(old_payload_value["name"]):
- new_payload_v2["aggregation_option"] = aggregation_option.value
+ if aggregation_options := get_aggregation_options(old_payload_value["name"]):
+ # TODO: This should eventually handle multiple aggregation options
+ option = list(aggregation_options.items())[0][0]
+ assert option is not None
+ new_payload_v2["aggregation_option"] = option.value
new_payload_value = new_payload_v2
@@ -517,6 +520,8 @@ def reconstruct_messages(
with metrics.timer("metrics_consumer.reconstruct_messages.emit_payload_metrics"):
for use_case_id, metrics_by_type in self._message_metrics.items():
for metric_type, batch_metric in metrics_by_type.items():
+ if batch_metric.message_count == 0:
+ continue
metrics.incr(
"metrics_consumer.process_message.messages_seen",
amount=batch_metric.message_count,
@@ -563,33 +568,34 @@ def reconstruct_messages(
for use_case_metrics in self._message_metrics.values()
for type_metrics in use_case_metrics.values()
)
- metrics.gauge(
- "metrics_consumer.process_message.message.avg_size_in_batch",
- sum(
- type_metrics.total_bytes
- for use_case_metrics in self._message_metrics.values()
- for type_metrics in use_case_metrics.values()
+            if num_messages != 0:
+ metrics.gauge(
+ "metrics_consumer.process_message.message.avg_size_in_batch",
+ sum(
+ type_metrics.total_bytes
+ for use_case_metrics in self._message_metrics.values()
+ for type_metrics in use_case_metrics.values()
+ )
+ / num_messages,
)
- / num_messages,
- )
- metrics.gauge(
- "metrics_consumer.process_message.message.avg_tags_len_in_batch",
- sum(
- type_metrics.total_tags_len
- for use_case_metrics in self._message_metrics.values()
- for type_metrics in use_case_metrics.values()
+ metrics.gauge(
+ "metrics_consumer.process_message.message.avg_tags_len_in_batch",
+ sum(
+ type_metrics.total_tags_len
+ for use_case_metrics in self._message_metrics.values()
+ for type_metrics in use_case_metrics.values()
+ )
+ / num_messages,
)
- / num_messages,
- )
- metrics.gauge(
- "metrics_consumer.process_message.message.avg_value_len_in_batch",
- sum(
- type_metrics.total_value_len
- for use_case_metrics in self._message_metrics.values()
- for type_metrics in use_case_metrics.values()
+ metrics.gauge(
+ "metrics_consumer.process_message.message.avg_value_len_in_batch",
+ sum(
+ type_metrics.total_value_len
+ for use_case_metrics in self._message_metrics.values()
+ for type_metrics in use_case_metrics.values()
+ )
+ / num_messages,
)
- / num_messages,
- )
return IndexerOutputMessageBatch(
new_messages,
diff --git a/src/sentry/sentry_metrics/consumers/indexer/multiprocess.py b/src/sentry/sentry_metrics/consumers/indexer/multiprocess.py
index dd56520a20f521..4dbd6a27f54d01 100644
--- a/src/sentry/sentry_metrics/consumers/indexer/multiprocess.py
+++ b/src/sentry/sentry_metrics/consumers/indexer/multiprocess.py
@@ -10,6 +10,7 @@
from arroyo.types import Commit, FilteredPayload, Message, Partition
from confluent_kafka import Producer
+from sentry.conf.types.kafka_definition import Topic
from sentry.utils import kafka_config, metrics
logger = logging.getLogger(__name__)
@@ -18,7 +19,7 @@
class SimpleProduceStep(ProcessingStep[KafkaPayload]):
def __init__(
self,
- output_topic: str,
+ output_topic: Topic,
commit_function: Commit,
producer: AbstractProducer[KafkaPayload] | None = None,
) -> None:
@@ -26,7 +27,7 @@ def __init__(
self.__producer = Producer(
kafka_config.get_kafka_producer_cluster_options(snuba_metrics["cluster"]),
)
- self.__producer_topic = output_topic
+ self.__producer_topic = snuba_metrics["real_topic_name"]
self.__commit_function = commit_function
self.__closed = False
diff --git a/src/sentry/sentry_metrics/querying/data_v2/execution.py b/src/sentry/sentry_metrics/querying/data_v2/execution.py
index 8f92c03a990dc8..e8d40fcf73e98d 100644
--- a/src/sentry/sentry_metrics/querying/data_v2/execution.py
+++ b/src/sentry/sentry_metrics/querying/data_v2/execution.py
@@ -12,9 +12,9 @@
from sentry.models.project import Project
from sentry.sentry_metrics.querying.common import SNUBA_QUERY_LIMIT
from sentry.sentry_metrics.querying.data_v2.preparation import IntermediateQuery
-from sentry.sentry_metrics.querying.data_v2.units import MeasurementUnit, UnitFamily
from sentry.sentry_metrics.querying.errors import MetricsQueryExecutionError
from sentry.sentry_metrics.querying.types import GroupKey, GroupsCollection, QueryOrder
+from sentry.sentry_metrics.querying.units import MeasurementUnit, UnitFamily
from sentry.sentry_metrics.querying.visitors import (
QueriedMetricsVisitor,
TimeseriesConditionInjectionVisitor,
diff --git a/src/sentry/sentry_metrics/querying/data_v2/plan.py b/src/sentry/sentry_metrics/querying/data_v2/plan.py
index 4ca94865bda144..5fd89690670b7f 100644
--- a/src/sentry/sentry_metrics/querying/data_v2/plan.py
+++ b/src/sentry/sentry_metrics/querying/data_v2/plan.py
@@ -2,7 +2,7 @@
from dataclasses import dataclass, replace
from sentry.sentry_metrics.querying.data_v2.execution import QueryResult
-from sentry.sentry_metrics.querying.data_v2.transformation.base import (
+from sentry.sentry_metrics.querying.data_v2.transformation import (
QueryTransformer,
QueryTransformerResult,
)
diff --git a/src/sentry/sentry_metrics/querying/data_v2/preparation.py b/src/sentry/sentry_metrics/querying/data_v2/preparation.py
deleted file mode 100644
index 7b929fdfcef45c..00000000000000
--- a/src/sentry/sentry_metrics/querying/data_v2/preparation.py
+++ /dev/null
@@ -1,85 +0,0 @@
-from abc import ABC, abstractmethod
-from dataclasses import dataclass, replace
-
-from snuba_sdk import MetricsQuery, Timeseries
-
-from sentry.sentry_metrics.querying.data_v2.units import (
- MeasurementUnit,
- UnitFamily,
- get_unit_family_and_unit,
-)
-from sentry.sentry_metrics.querying.types import QueryOrder
-from sentry.snuba.metrics import parse_mri
-
-
-@dataclass(frozen=True)
-class IntermediateQuery:
- metrics_query: MetricsQuery
- order: QueryOrder | None = None
- limit: int | None = None
- unit_family: UnitFamily | None = None
- unit: MeasurementUnit | None = None
- scaling_factor: float | None = None
-
-
-class PreparationStep(ABC):
- @abstractmethod
- def run(self, intermediate_queries: list[IntermediateQuery]) -> list[IntermediateQuery]:
- raise NotImplementedError
-
-
-def run_preparation_steps(
- intermediate_queries: list[IntermediateQuery], *steps
-) -> list[IntermediateQuery]:
- for step in steps:
- if isinstance(step, PreparationStep):
- intermediate_queries = step.run(intermediate_queries=intermediate_queries)
-
- return intermediate_queries
-
-
-class UnitNormalizationStep(PreparationStep):
-
- EXCLUDED_AGGREGATES = {"count", "count_unique"}
-
- def _extract_unit(self, timeseries: Timeseries) -> str | None:
- # If the aggregate doesn't support unit normalization, we will skip it.
- if timeseries.aggregate in self.EXCLUDED_AGGREGATES:
- return None
-
- parsed_mri = parse_mri(timeseries.metric.mri)
- if parsed_mri is not None:
- return parsed_mri.unit
-
- return None
-
- def run(self, intermediate_queries: list[IntermediateQuery]) -> list[IntermediateQuery]:
- normalized_intermediate_queries = []
-
- for intermediate_query in intermediate_queries:
- normalized_intermediate_query = intermediate_query
- metrics_query = intermediate_query.metrics_query
- # For now, we want to perform units coercion only if the query is a timeseries.
- if isinstance(metrics_query.query, Timeseries):
- extracted_unit = self._extract_unit(timeseries=metrics_query.query)
- if extracted_unit is not None:
- unit_family_and_unit = get_unit_family_and_unit(extracted_unit)
- if unit_family_and_unit is not None:
- (
- unit_family,
- reference_unit,
- unit,
- ) = unit_family_and_unit
- normalized_intermediate_query = replace(
- intermediate_query,
- metrics_query=metrics_query.set_query(
- unit.apply_on_timeseries(metrics_query.query)
- ),
- unit_family=unit_family,
- unit=reference_unit,
- scaling_factor=unit.scaling_factor,
- )
-
- normalized_intermediate_queries.append(normalized_intermediate_query)
-
- return normalized_intermediate_queries
diff --git a/src/sentry/sentry_metrics/querying/data_v2/preparation/__init__.py b/src/sentry/sentry_metrics/querying/data_v2/preparation/__init__.py
new file mode 100644
index 00000000000000..a898bf5e93d7f5
--- /dev/null
+++ b/src/sentry/sentry_metrics/querying/data_v2/preparation/__init__.py
@@ -0,0 +1,4 @@
+from .base import IntermediateQuery, PreparationStep, run_preparation_steps
+from .units_normalization import UnitNormalizationStep
+
+__all__ = ["PreparationStep", "IntermediateQuery", "run_preparation_steps", "UnitNormalizationStep"]
diff --git a/src/sentry/sentry_metrics/querying/data_v2/preparation/base.py b/src/sentry/sentry_metrics/querying/data_v2/preparation/base.py
new file mode 100644
index 00000000000000..72e91eed571a1c
--- /dev/null
+++ b/src/sentry/sentry_metrics/querying/data_v2/preparation/base.py
@@ -0,0 +1,33 @@
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+
+from snuba_sdk import MetricsQuery
+
+from sentry.sentry_metrics.querying.types import QueryOrder
+from sentry.sentry_metrics.querying.units import MeasurementUnit, UnitFamily
+
+
+@dataclass(frozen=True)
+class IntermediateQuery:
+ metrics_query: MetricsQuery
+ order: QueryOrder | None = None
+ limit: int | None = None
+ unit_family: UnitFamily | None = None
+ unit: MeasurementUnit | None = None
+ scaling_factor: float | None = None
+
+
+class PreparationStep(ABC):
+ @abstractmethod
+ def run(self, intermediate_queries: list[IntermediateQuery]) -> list[IntermediateQuery]:
+ raise NotImplementedError
+
+
+def run_preparation_steps(
+ intermediate_queries: list[IntermediateQuery], *steps
+) -> list[IntermediateQuery]:
+ for step in steps:
+ if isinstance(step, PreparationStep):
+ intermediate_queries = step.run(intermediate_queries=intermediate_queries)
+
+ return intermediate_queries
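
A self-contained usage sketch of the preparation pipeline contract defined above: each step receives the full list of intermediate queries and returns a new list, and run_preparation_steps silently skips anything that is not a PreparationStep. IntermediateQuery is reduced to a toy dataclass here, and DoubleLimitStep is a made-up stand-in for a real step such as UnitNormalizationStep.

```python
from abc import ABC, abstractmethod
from dataclasses import dataclass, replace


@dataclass(frozen=True)
class IntermediateQuery:
    mql: str
    limit: int | None = None


class PreparationStep(ABC):
    @abstractmethod
    def run(self, intermediate_queries: list[IntermediateQuery]) -> list[IntermediateQuery]:
        raise NotImplementedError


class DoubleLimitStep(PreparationStep):
    # Hypothetical step: returns a new list and leaves the inputs untouched.
    def run(self, intermediate_queries):
        return [replace(q, limit=(q.limit or 0) * 2) for q in intermediate_queries]


def run_preparation_steps(intermediate_queries, *steps):
    for step in steps:
        if isinstance(step, PreparationStep):
            intermediate_queries = step.run(intermediate_queries=intermediate_queries)
    return intermediate_queries


out = run_preparation_steps([IntermediateQuery("sum(d:custom/foo@none)", limit=10)], DoubleLimitStep())
assert out[0].limit == 20
```
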
diff --git a/src/sentry/sentry_metrics/querying/data_v2/preparation/units_normalization.py b/src/sentry/sentry_metrics/querying/data_v2/preparation/units_normalization.py
new file mode 100644
index 00000000000000..5f5ab1d8a001f2
--- /dev/null
+++ b/src/sentry/sentry_metrics/querying/data_v2/preparation/units_normalization.py
@@ -0,0 +1,41 @@
+from dataclasses import replace
+
+from sentry.sentry_metrics.querying.data_v2.preparation import IntermediateQuery, PreparationStep
+from sentry.sentry_metrics.querying.errors import NonNormalizableUnitsError
+from sentry.sentry_metrics.querying.visitors import UnitsNormalizationVisitor
+
+
+class UnitNormalizationStep(PreparationStep):
+ def _get_normalized_intermediate_query(
+ self, intermediate_query: IntermediateQuery
+ ) -> IntermediateQuery:
+ try:
+ units_normalization = UnitsNormalizationVisitor()
+ # We compute the new normalized query by visiting and mutating the expression tree.
+ normalized_query = units_normalization.visit(intermediate_query.metrics_query.query)
+ # We obtain the units that have been used by the visitor.
+ (
+ unit_family,
+ reference_unit,
+ scaling_factor,
+ ) = units_normalization.get_units_metadata()
+
+ return replace(
+ intermediate_query,
+ metrics_query=intermediate_query.metrics_query.set_query(normalized_query),
+ unit_family=unit_family,
+ unit=reference_unit,
+ scaling_factor=scaling_factor,
+ )
+ except NonNormalizableUnitsError:
+ return intermediate_query
+
+ def run(self, intermediate_queries: list[IntermediateQuery]) -> list[IntermediateQuery]:
+ normalized_intermediate_queries = []
+
+ for intermediate_query in intermediate_queries:
+ normalized_intermediate_queries.append(
+ self._get_normalized_intermediate_query(intermediate_query)
+ )
+
+ return normalized_intermediate_queries
diff --git a/src/sentry/sentry_metrics/querying/data_v2/transformation/__init__.py b/src/sentry/sentry_metrics/querying/data_v2/transformation/__init__.py
index 951baeb6f93f99..f90fa951e6a17f 100644
--- a/src/sentry/sentry_metrics/querying/data_v2/transformation/__init__.py
+++ b/src/sentry/sentry_metrics/querying/data_v2/transformation/__init__.py
@@ -1,3 +1,4 @@
+from .base import QueryTransformer, QueryTransformerResult
from .metrics_api import MetricsAPIQueryTransformer
-__all__ = ["MetricsAPIQueryTransformer"]
+__all__ = ["QueryTransformerResult", "QueryTransformer", "MetricsAPIQueryTransformer"]
diff --git a/src/sentry/sentry_metrics/querying/data_v2/transformation/metrics_api.py b/src/sentry/sentry_metrics/querying/data_v2/transformation/metrics_api.py
index a4461f575dc09c..916a7721fea3e7 100644
--- a/src/sentry/sentry_metrics/querying/data_v2/transformation/metrics_api.py
+++ b/src/sentry/sentry_metrics/querying/data_v2/transformation/metrics_api.py
@@ -6,8 +6,8 @@
from sentry.search.utils import parse_datetime_string
from sentry.sentry_metrics.querying.data_v2.execution import QueryResult
-from sentry.sentry_metrics.querying.data_v2.transformation.base import QueryTransformer
-from sentry.sentry_metrics.querying.data_v2.utils import nan_to_none
+from sentry.sentry_metrics.querying.data_v2.transformation import QueryTransformer
+from sentry.sentry_metrics.querying.data_v2.utils import undefined_value_to_none
from sentry.sentry_metrics.querying.errors import MetricsQueryExecutionError
from sentry.sentry_metrics.querying.types import GroupKey, ResultValue, Series, Totals
@@ -86,7 +86,7 @@ def _generate_full_series(
for time, value in series:
time_seconds = parse_datetime_string(time).timestamp()
index = int((time_seconds - start_seconds) / interval)
- full_series[index] = nan_to_none(value)
+ full_series[index] = undefined_value_to_none(value)
return full_series
@@ -213,7 +213,7 @@ def transform(self, query_results: list[QueryResult]) -> Mapping[str, Any]:
"series": _generate_full_series(
int(start.timestamp()), len(intervals), interval, group_value.series
),
- "totals": nan_to_none(group_value.totals),
+ "totals": undefined_value_to_none(group_value.totals),
}
)
diff --git a/src/sentry/sentry_metrics/querying/data_v2/utils.py b/src/sentry/sentry_metrics/querying/data_v2/utils.py
index 0b367089051f3c..f1661782fbd680 100644
--- a/src/sentry/sentry_metrics/querying/data_v2/utils.py
+++ b/src/sentry/sentry_metrics/querying/data_v2/utils.py
@@ -3,26 +3,30 @@
from sentry.sentry_metrics.querying.types import ResultValue
-def nan_to_none(value: ResultValue) -> ResultValue:
+def undefined_value_to_none(value: ResultValue) -> ResultValue:
"""
- Converts a nan value to None or returns the original value.
+ Converts an undefined value to None or returns the original value.
"""
if value is None:
return None
- if is_nan(value):
+ if is_undefined(value):
return None
return value
-def is_nan(value: ResultValue) -> bool:
+def is_undefined(value: ResultValue) -> bool:
"""
- Returns whether the result of a query is nan.
+ Returns whether the result of a query is undefined.
"""
if value is None:
return False
- elif isinstance(value, list):
- return any(map(lambda e: e is not None and math.isnan(e), value))
- return math.isnan(value)
+ def _is_undefined(inner_value: int | float) -> bool:
+ return math.isnan(inner_value) or math.isinf(inner_value)
+
+ if isinstance(value, list):
+ return any(map(lambda e: e is not None and _is_undefined(e), value))
+
+ return _is_undefined(value)
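
A short standalone check of the new scalar behaviour (list handling omitted): besides NaN, infinities, which can come out of divisions inside formulas, are now also mapped to None in the response.

```python
import math


def _is_undefined(value: float) -> bool:
    return math.isnan(value) or math.isinf(value)


def undefined_value_to_none(value):
    # None passes through unchanged; NaN and +/-inf are normalized to None.
    if value is None or _is_undefined(value):
        return None
    return value


assert undefined_value_to_none(float("inf")) is None
assert undefined_value_to_none(float("nan")) is None
assert undefined_value_to_none(1.5) == 1.5
```
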
diff --git a/src/sentry/sentry_metrics/querying/errors.py b/src/sentry/sentry_metrics/querying/errors.py
index a8fcc10431eb59..a9742a89e76db3 100644
--- a/src/sentry/sentry_metrics/querying/errors.py
+++ b/src/sentry/sentry_metrics/querying/errors.py
@@ -14,5 +14,5 @@ class CorrelationsQueryExecutionError(Exception):
pass
-class TooManyCodeLocationsRequestedError(Exception):
+class NonNormalizableUnitsError(Exception):
pass
diff --git a/src/sentry/sentry_metrics/querying/metadata/metrics_code_locations.py b/src/sentry/sentry_metrics/querying/metadata/metrics_code_locations.py
index baf3d71b437848..ffa9bdfedba33d 100644
--- a/src/sentry/sentry_metrics/querying/metadata/metrics_code_locations.py
+++ b/src/sentry/sentry_metrics/querying/metadata/metrics_code_locations.py
@@ -1,12 +1,13 @@
+import math
from collections.abc import Generator, Sequence
from dataclasses import dataclass
from datetime import datetime
from sentry.models.organization import Organization
from sentry.models.project import Project
-from sentry.sentry_metrics.querying.errors import TooManyCodeLocationsRequestedError
-from sentry.sentry_metrics.querying.utils import fnv1a_32, get_redis_client_for_metrics_meta
+from sentry.sentry_metrics.querying.utils import get_redis_client_for_metrics_meta
from sentry.utils import json, metrics
+from sentry.utils.hashlib import fnv1a_32
DAY_IN_SECONDS = 86400
@@ -99,26 +100,28 @@ def __init__(
self._redis_client = get_redis_client_for_metrics_meta()
- self._validate()
-
- def _validate(self):
- total_combinations = len(self._projects) * len(self._metric_mris) * len(self._timestamps)
- if total_combinations > self.MAXIMUM_KEYS:
- raise TooManyCodeLocationsRequestedError(
- "The request results in too many code locations to be fetched, try to reduce the number of "
- "metrics, projects or the time interval"
- )
-
def _code_location_queries(self) -> Generator[CodeLocationQuery, None, None]:
+ total_count = len(self._projects) * len(self._metric_mris) * len(self._timestamps)
+ step_size = (
+ 1 if total_count <= self.MAXIMUM_KEYS else math.ceil(total_count / self.MAXIMUM_KEYS)
+ )
+
+        # We want to sample the set of combinations evenly and deterministically. For example, if the total number
+        # of code location queries is 100 and our maximum is 50, we keep every 2nd query so that we stay within
+        # the limit of 50.
+ current_step = 0
for project in self._projects:
for metric_mri in self._metric_mris:
for timestamp in self._timestamps:
- yield CodeLocationQuery(
- organization_id=self._organization.id,
- project_id=project.id,
- metric_mri=metric_mri,
- timestamp=timestamp,
- )
+ if current_step % step_size == 0:
+ yield CodeLocationQuery(
+ organization_id=self._organization.id,
+ project_id=project.id,
+ metric_mri=metric_mri,
+ timestamp=timestamp,
+ )
+
+ current_step += 1
def _parse_code_location_payload(self, encoded_location: str) -> CodeLocationPayload:
decoded_location = json.loads(encoded_location)
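
A standalone sketch of the sampling strategy that replaces the hard TooManyCodeLocationsRequestedError: when the number of (project, mri, timestamp) combinations exceeds MAXIMUM_KEYS, only every step_size-th combination is kept, giving an even and deterministic spread over the inputs.

```python
import math


def sample_combinations(combinations: list, maximum_keys: int) -> list:
    # step_size of 1 keeps everything; otherwise keep every step_size-th element.
    total_count = len(combinations)
    step_size = 1 if total_count <= maximum_keys else math.ceil(total_count / maximum_keys)
    return [c for i, c in enumerate(combinations) if i % step_size == 0]


queries = list(range(100))
assert len(sample_combinations(queries, 50)) == 50   # every 2nd element
assert sample_combinations(queries, 200) == queries  # under the limit: keep all
```
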
diff --git a/src/sentry/sentry_metrics/querying/samples_list.py b/src/sentry/sentry_metrics/querying/samples_list.py
index d18821c0c4c8ff..1ce0f7f0c1ec00 100644
--- a/src/sentry/sentry_metrics/querying/samples_list.py
+++ b/src/sentry/sentry_metrics/querying/samples_list.py
@@ -39,6 +39,7 @@ def __init__(
params: ParamsType,
snuba_params: SnubaParams,
fields: list[str],
+ operation: str | None,
query: str | None,
min: float | None,
max: float | None,
@@ -50,6 +51,7 @@ def __init__(
self.params = params
self.snuba_params = snuba_params
self.fields = fields
+ self.operation = operation
self.query = query
self.min = min
self.max = max
@@ -596,6 +598,12 @@ class CustomSamplesListExecutor(AbstractSamplesListExecutor):
"timestamp": "timestamp",
}
+ MIN_MAX_CONDITION_COLUMN = {
+ "min": "min_metric",
+ "max": "max_metric",
+ "count": "count_metric",
+ }
+
@classmethod
def convert_sort(cls, sort) -> tuple[Literal["", "-"], str] | None:
direction: Literal["", "-"] = ""
@@ -660,7 +668,7 @@ def get_sorted_span_keys(
)
additional_conditions = self.get_additional_conditions(builder)
- min_max_conditions = self.get_min_max_conditions()
+ min_max_conditions = self.get_min_max_conditions(builder)
builder.add_conditions([*additional_conditions, *min_max_conditions])
query_results = builder.run_query(self.referrer.value)
@@ -720,7 +728,7 @@ def get_unsorted_span_keys(
)
additional_conditions = self.get_additional_conditions(builder)
- min_max_conditions = self.get_min_max_conditions()
+ min_max_conditions = self.get_min_max_conditions(builder)
builder.add_conditions([*additional_conditions, *min_max_conditions])
query_results = builder.run_query(self.referrer.value)
@@ -762,13 +770,17 @@ def get_additional_conditions(self, builder: QueryBuilder) -> list[Condition]:
)
]
- def get_min_max_conditions(self) -> list[Condition]:
+ def get_min_max_conditions(self, builder: QueryBuilder) -> list[Condition]:
conditions = []
+ column = builder.resolve_column(
+ self.MIN_MAX_CONDITION_COLUMN.get(self.operation or "", "avg_metric")
+ )
+
if self.min is not None:
- conditions.append(Condition(Column("min"), Op.GTE, self.min))
+ conditions.append(Condition(column, Op.GTE, self.min))
if self.max is not None:
- conditions.append(Condition(Column("max"), Op.LTE, self.max))
+ conditions.append(Condition(column, Op.LTE, self.max))
return conditions
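
A small sketch of the new column choice for min/max filters, using the mapping from this diff: the filtered column now depends on the requested operation and falls back to avg_metric for unknown or missing operations. In the real code the resulting name is additionally resolved through the query builder.

```python
MIN_MAX_CONDITION_COLUMN = {
    "min": "min_metric",
    "max": "max_metric",
    "count": "count_metric",
}


def condition_column(operation: str | None) -> str:
    # Unknown or missing operations fall back to filtering on the average.
    return MIN_MAX_CONDITION_COLUMN.get(operation or "", "avg_metric")


assert condition_column("min") == "min_metric"
assert condition_column(None) == "avg_metric"
```
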
diff --git a/src/sentry/sentry_metrics/querying/data_v2/units.py b/src/sentry/sentry_metrics/querying/units.py
similarity index 96%
rename from src/sentry/sentry_metrics/querying/data_v2/units.py
rename to src/sentry/sentry_metrics/querying/units.py
index 0be0d69c37672a..cdc99c6387aaa7 100644
--- a/src/sentry/sentry_metrics/querying/data_v2/units.py
+++ b/src/sentry/sentry_metrics/querying/units.py
@@ -45,6 +45,7 @@ class UnitFamily(Enum):
DURATION = "duration"
INFORMATION = "information"
+ UNKNOWN = "unknown"
@dataclass(frozen=True)
@@ -122,10 +123,10 @@ class UnitsSpec:
def get_unit_family_and_unit(
unit: MeasurementUnit,
-) -> tuple[UnitFamily, MeasurementUnit, Unit] | None:
+) -> tuple[UnitFamily, MeasurementUnit | None, Unit | None]:
for unit_family, units_spec in FAMILY_TO_UNITS.items():
for inner_unit in units_spec.units:
if inner_unit.name == unit:
return unit_family, units_spec.reference_unit, inner_unit
- return None
+ return UnitFamily.UNKNOWN, None, None
diff --git a/src/sentry/sentry_metrics/querying/utils.py b/src/sentry/sentry_metrics/querying/utils.py
index 552b7f9c505721..527cf26721bef8 100644
--- a/src/sentry/sentry_metrics/querying/utils.py
+++ b/src/sentry/sentry_metrics/querying/utils.py
@@ -14,22 +14,6 @@ def get_redis_client_for_metrics_meta() -> RedisCluster:
return redis.redis_clusters.get(cluster_key) # type: ignore[return-value]
-def fnv1a_32(data: bytes) -> int:
- """
- Fowler–Noll–Vo hash function 32 bit implementation.
- """
- fnv_init = 0x811C9DC5
- fnv_prime = 0x01000193
- fnv_size = 2**32
-
- result_hash = fnv_init
- for byte in data:
- result_hash ^= byte
- result_hash = (result_hash * fnv_prime) % fnv_size
-
- return result_hash
-
-
def remove_if_match(pattern, string: str) -> str:
"""
Removes a pattern from a string.
diff --git a/src/sentry/sentry_metrics/querying/visitors/__init__.py b/src/sentry/sentry_metrics/querying/visitors/__init__.py
index 5ffb47a5393871..b4af4db58fbeae 100644
--- a/src/sentry/sentry_metrics/querying/visitors/__init__.py
+++ b/src/sentry/sentry_metrics/querying/visitors/__init__.py
@@ -11,6 +11,7 @@
QueryValidationV2Visitor,
QueryValidationVisitor,
TimeseriesConditionInjectionVisitor,
+ UnitsNormalizationVisitor,
UsedGroupBysVisitor,
)
@@ -28,4 +29,5 @@
"QueryConditionsCompositeVisitor",
"QueriedMetricsVisitor",
"UsedGroupBysVisitor",
+ "UnitsNormalizationVisitor",
]
diff --git a/src/sentry/sentry_metrics/querying/visitors/query_expression.py b/src/sentry/sentry_metrics/querying/visitors/query_expression.py
index 9b03aefdfcbb9d..3602e62b0317ed 100644
--- a/src/sentry/sentry_metrics/querying/visitors/query_expression.py
+++ b/src/sentry/sentry_metrics/querying/visitors/query_expression.py
@@ -1,11 +1,27 @@
from collections.abc import Sequence
-from snuba_sdk import AliasedExpression, Column, Condition, Formula, Op, Timeseries
+from snuba_sdk import (
+ AliasedExpression,
+ ArithmeticOperator,
+ Column,
+ Condition,
+ Formula,
+ Op,
+ Timeseries,
+)
from snuba_sdk.conditions import ConditionGroup
from sentry.models.environment import Environment
-from sentry.sentry_metrics.querying.errors import InvalidMetricsQueryError
+from sentry.sentry_metrics.querying.errors import (
+ InvalidMetricsQueryError,
+ NonNormalizableUnitsError,
+)
from sentry.sentry_metrics.querying.types import QueryExpression
+from sentry.sentry_metrics.querying.units import (
+ MeasurementUnit,
+ UnitFamily,
+ get_unit_family_and_unit,
+)
from sentry.sentry_metrics.querying.visitors.base import (
QueryConditionVisitor,
QueryExpressionVisitor,
@@ -258,3 +274,94 @@ def _group_bys_as_string(self, group_bys: list[Column | AliasedExpression] | Non
string_group_bys.add(group_by.name)
return string_group_bys
+
+
+class UnitsNormalizationVisitor(QueryExpressionVisitor[QueryExpression]):
+ """
+ Visitor that recursively transforms the `QueryExpression` components to have the same unit. Throws an error in
+ case units are incompatible.
+ """
+
+ UNITLESS_FORMULA_FUNCTIONS = {
+ ArithmeticOperator.DIVIDE.value,
+ ArithmeticOperator.MULTIPLY.value,
+ }
+ UNITLESS_AGGREGATES = {"count", "count_unique"}
+
+ def __init__(self):
+ self._unit_family = None
+ self._reference_unit = None
+ self._scaling_factor = None
+
+ self._is_formula = False
+
+ def _visit_formula(self, formula: Formula) -> QueryExpression:
+ self._is_formula = True
+
+ has_all_timeseries_params = True
+ parameters = []
+ for parameter in formula.parameters:
+ if not isinstance(parameter, Timeseries):
+ has_all_timeseries_params = False
+
+ parameters.append(self.visit(parameter))
+
+        # If all of a formula's parameters are timeseries and the function belongs to `*` or `/`, we will
+        # not perform any units normalization.
+        # TODO: we might want to implement units normalization with a more mathematical approach, e.g. `ms^2` or
+        # `byte/s`, but that will come at a later point.
+ if formula.function_name in self.UNITLESS_FORMULA_FUNCTIONS and has_all_timeseries_params:
+ raise NonNormalizableUnitsError(
+ "A unitless formula function is being used and has at least one "
+ "timeseries in one of its operands"
+ )
+
+ return formula.set_parameters(parameters)
+
+ def _visit_timeseries(self, timeseries: Timeseries) -> QueryExpression:
+ extracted_unit = self._extract_unit(timeseries=timeseries)
+ if extracted_unit is not None:
+ unit_family, reference_unit, unit = get_unit_family_and_unit(extracted_unit)
+ # If we encounter multiple unit families in a `QueryExpression`, we want to unwind and not apply any
+ # units normalization.
+ if self._unit_family is not None and unit_family != self._unit_family:
+ raise NonNormalizableUnitsError("Multiple unit families are found in the formula")
+
+            # We record the first unit family we see, irrespective of whether a unit is found, since if no unit is
+            # found the family will be unknown.
+ self._unit_family = unit_family
+
+ if reference_unit is not None and unit is not None:
+ self._reference_unit = reference_unit
+ self._scaling_factor = unit.scaling_factor
+ return unit.apply_on_timeseries(timeseries)
+
+ return timeseries
+
+ def _extract_unit(self, timeseries: Timeseries) -> str | None:
+ # If the aggregate doesn't support unit normalization, we will skip it.
+ if timeseries.aggregate in self.UNITLESS_AGGREGATES:
+ raise NonNormalizableUnitsError(
+ f"The aggregate {timeseries.aggregate} doesn't need unit normalization"
+ )
+
+ parsed_mri = parse_mri(timeseries.metric.mri)
+ if parsed_mri is not None:
+ return parsed_mri.unit
+
+ raise NonNormalizableUnitsError(
+ "Units normalization can't be run if not all components have a metric mri"
+ )
+
+ def get_units_metadata(
+ self,
+ ) -> tuple[UnitFamily | None, MeasurementUnit | None, float | int | None]:
+ """
+ Returns metadata of the units that were encountered during the traversal.
+ """
+ # If we have a formula, we do not return the scaling factor, since a formula technically has multiple scaling
+ # factors, but they won't be of use to the frontend.
+ if self._is_formula:
+ return self._unit_family, self._reference_unit, None
+
+ return self._unit_family, self._reference_unit, self._scaling_factor
diff --git a/src/sentry/services/hybrid_cloud/integration/impl.py b/src/sentry/services/hybrid_cloud/integration/impl.py
index 5a02f9c913cdd1..4b7864a10495a6 100644
--- a/src/sentry/services/hybrid_cloud/integration/impl.py
+++ b/src/sentry/services/hybrid_cloud/integration/impl.py
@@ -10,7 +10,7 @@
from sentry.api.paginator import OffsetPaginator
from sentry.api.serializers import AppPlatformEvent
from sentry.constants import SentryAppInstallationStatus
-from sentry.incidents.models import INCIDENT_STATUS, IncidentStatus
+from sentry.incidents.models.incident import INCIDENT_STATUS, IncidentStatus
from sentry.integrations.mixins import NotifyBasicMixin
from sentry.integrations.msteams import MsTeamsClient
from sentry.models.integrations import Integration, OrganizationIntegration
@@ -32,7 +32,6 @@
serialize_integration_external_project,
serialize_organization_integration,
)
-from sentry.services.hybrid_cloud.organization import RpcOrganizationSummary
from sentry.services.hybrid_cloud.pagination import RpcPaginationArgs, RpcPaginationResult
from sentry.shared_integrations.exceptions import ApiError
from sentry.utils import json, metrics
@@ -357,15 +356,10 @@ def send_incident_alert_notification(
incident_id: int,
new_status: int,
incident_attachment_json: str,
- organization: RpcOrganizationSummary | None = None, # deprecated
- organization_id: int | None = None,
+ organization_id: int,
metric_value: str | None = None,
notification_uuid: str | None = None,
) -> bool:
- if organization_id is None and organization is not None:
- organization_id = organization.id
- assert organization_id is not None, "organization or organization_id is required"
-
sentry_app = SentryApp.objects.get(id=sentry_app_id)
metrics.incr("notifications.sent", instance=sentry_app.slug, skip_internal=False)
diff --git a/src/sentry/services/hybrid_cloud/integration/service.py b/src/sentry/services/hybrid_cloud/integration/service.py
index fbe7932ab7be39..6776e7814d8ec0 100644
--- a/src/sentry/services/hybrid_cloud/integration/service.py
+++ b/src/sentry/services/hybrid_cloud/integration/service.py
@@ -12,7 +12,6 @@
RpcIntegrationExternalProject,
RpcIntegrationIdentityContext,
)
-from sentry.services.hybrid_cloud.organization import RpcOrganizationSummary
from sentry.services.hybrid_cloud.pagination import RpcPaginationArgs, RpcPaginationResult
from sentry.services.hybrid_cloud.rpc import RpcService, rpc_method
from sentry.silo import SiloMode
@@ -238,8 +237,7 @@ def send_incident_alert_notification(
incident_id: int,
new_status: int,
incident_attachment_json: str,
- organization: RpcOrganizationSummary | None = None,
- organization_id: int | None = None,
+ organization_id: int,
metric_value: str | None = None,
notification_uuid: str | None = None,
) -> bool:
diff --git a/src/sentry/services/hybrid_cloud/rpc.py b/src/sentry/services/hybrid_cloud/rpc.py
index 285d69a9ac1847..13deb3cf10cd1f 100644
--- a/src/sentry/services/hybrid_cloud/rpc.py
+++ b/src/sentry/services/hybrid_cloud/rpc.py
@@ -518,8 +518,9 @@ def _remote_exception(self, message: str) -> RpcRemoteException:
return RpcRemoteException(self.service_name, self.method_name, message)
def _raise_from_response_status_error(self, response: requests.Response) -> NoReturn:
+ rpc_method = f"{self.service_name}.{self.method_name}"
with sentry_sdk.configure_scope() as scope:
- scope.set_tag("rpc_method", f"{self.service_name}.{self.method_name}")
+ scope.set_tag("rpc_method", rpc_method)
scope.set_tag("rpc_status_code", response.status_code)
if in_test_environment():
@@ -535,6 +536,13 @@ def _raise_from_response_status_error(self, response: requests.Response) -> NoRe
if response.status_code == 403:
raise self._remote_exception("Unauthorized service access")
if response.status_code == 400:
+ logger.warning(
+ "rpc.bad_request",
+ extra={
+ "rpc_method": rpc_method,
+ "error": response.content.decode("utf8"),
+ },
+ )
raise self._remote_exception("Invalid service request")
raise self._remote_exception(f"Service unavailable ({response.status_code} status)")
diff --git a/src/sentry/snuba/metrics/datasource.py b/src/sentry/snuba/metrics/datasource.py
index fbb69ed83b6517..72e48eab8927df 100644
--- a/src/sentry/snuba/metrics/datasource.py
+++ b/src/sentry/snuba/metrics/datasource.py
@@ -46,6 +46,7 @@
from sentry.snuba.metrics.fields import run_metrics_query
from sentry.snuba.metrics.fields.base import (
SnubaDataType,
+ build_metrics_query,
get_derived_metrics,
org_id_from_projects,
)
@@ -85,7 +86,7 @@
get_intervals,
to_intervals,
)
-from sentry.utils.snuba import raw_snql_query
+from sentry.utils.snuba import bulk_snql_query, raw_snql_query
logger = logging.getLogger(__name__)
@@ -112,6 +113,27 @@ def _get_metrics_for_entity(
)
+def _get_metrics_by_project_for_entity_query(
+ entity_key: EntityKey,
+ project_ids: Sequence[int],
+ org_id: int,
+ use_case_id: UseCaseID,
+ start: datetime | None = None,
+ end: datetime | None = None,
+) -> Request:
+ return build_metrics_query(
+ entity_key=entity_key,
+ select=[Column("project_id"), Column("metric_id")],
+ groupby=[Column("project_id"), Column("metric_id")],
+ where=[Condition(Column("use_case_id"), Op.EQ, use_case_id.value)],
+ project_ids=project_ids,
+ org_id=org_id,
+ use_case_id=use_case_id,
+ start=start,
+ end=end,
+ )
+
+
def _get_metrics_by_project_for_entity(
entity_key: EntityKey,
project_ids: Sequence[int],
@@ -186,12 +208,8 @@ def get_available_derived_metrics(
def get_metrics_blocking_state_of_projects(
- projects: Sequence[Project], use_case_id: UseCaseID
+ projects: Sequence[Project],
) -> dict[str, Sequence[tuple[bool, Sequence[str], int]]]:
- # Blocked metrics are only supported for custom metrics.
- if use_case_id != UseCaseID.CUSTOM:
- return {}
-
metrics_blocking_state_by_project = get_metrics_blocking_state(projects)
metrics_blocking_state_by_mri = {}
@@ -220,15 +238,17 @@ def _build_metric_meta(
def get_metrics_meta(
projects: Sequence[Project],
- use_case_id: UseCaseID,
+ use_case_ids: Sequence[UseCaseID],
start: datetime | None = None,
end: datetime | None = None,
) -> Sequence[MetricMeta]:
if not projects:
return []
- stored_metrics = get_stored_metrics_of_projects(projects, use_case_id, start, end)
- metrics_blocking_state = get_metrics_blocking_state_of_projects(projects, use_case_id)
+ stored_metrics = get_stored_metrics_of_projects(projects, use_case_ids, start, end)
+ metrics_blocking_state = (
+ get_metrics_blocking_state_of_projects(projects) if UseCaseID.CUSTOM in use_case_ids else {}
+ )
metrics_metas = []
for metric_mri, project_ids in stored_metrics.items():
@@ -276,38 +296,64 @@ def get_metrics_meta(
def get_stored_metrics_of_projects(
projects: Sequence[Project],
- use_case_id: UseCaseID,
+ use_case_ids: Sequence[UseCaseID],
start: datetime | None = None,
end: datetime | None = None,
) -> Mapping[str, Sequence[int]]:
org_id = projects[0].organization_id
project_ids = [project.id for project in projects]
- stored_metrics = []
- entity_keys = get_entity_keys_of_use_case_id(use_case_id=use_case_id)
- for entity_key in entity_keys or ():
- stored_metrics += _get_metrics_by_project_for_entity(
- entity_key=entity_key,
- project_ids=project_ids,
- org_id=org_id,
- use_case_id=use_case_id,
- start=start,
- end=end,
- )
+ # We compute a list of all the queries that we want to run in parallel across entities and use cases.
+ requests = []
+ use_case_id_to_index = defaultdict(list)
+ for use_case_id in use_case_ids:
+ entity_keys = get_entity_keys_of_use_case_id(use_case_id=use_case_id)
+ for entity_key in entity_keys:
+ requests.append(
+ _get_metrics_by_project_for_entity_query(
+ entity_key=entity_key,
+ project_ids=project_ids,
+ org_id=org_id,
+ use_case_id=use_case_id,
+ start=start,
+ end=end,
+ )
+ )
+ use_case_id_to_index[use_case_id].append(len(requests) - 1)
- grouped_stored_metrics = {}
- for stored_metric in stored_metrics:
- grouped_stored_metrics.setdefault(stored_metric["metric_id"], []).append(
- stored_metric["project_id"]
+ # We run the queries all in parallel.
+ results = bulk_snql_query(
+ requests=requests,
+ referrer="snuba.metrics.datasource.get_stored_metrics_of_projects",
+ use_cache=True,
+ )
+
+    # We reverse resolve all the metric ids, batching together all resolutions for the same use case id to maximize
+    # parallelism.
+ resolved_metric_ids = defaultdict(dict)
+ for use_case_id, results_indexes in use_case_id_to_index.items():
+ metrics_ids = []
+ for result_index in results_indexes:
+ data = results[result_index]["data"]
+ for row in data or ():
+ metrics_ids.append(row["metric_id"])
+
+ # We have to partition the resolved metric ids per use case id, since the indexer values might clash across
+ # use cases.
+ resolved_metric_ids[use_case_id].update(
+ bulk_reverse_resolve(use_case_id, org_id, [metric_id for metric_id in metrics_ids])
)
- resolved_mris = bulk_reverse_resolve(
- use_case_id, org_id, [metric_id for metric_id in grouped_stored_metrics.keys()]
- )
+    # We iterate over each result and build a map of `resolved metric mri -> project ids`.
+ grouped_stored_metrics = defaultdict(list)
+ for use_case_id, results_indexes in use_case_id_to_index.items():
+ for result_index in results_indexes:
+ data = results[result_index]["data"]
+ for row in data or ():
+ resolved_metric_id = resolved_metric_ids[use_case_id][row["metric_id"]]
+ grouped_stored_metrics[resolved_metric_id].append(row["project_id"])
- return {
- resolved_mris[metric_id]: projects for metric_id, projects in grouped_stored_metrics.items()
- }
+ return grouped_stored_metrics
def get_custom_measurements(
diff --git a/src/sentry/snuba/metrics/fields/base.py b/src/sentry/snuba/metrics/fields/base.py
index bb489f2f81b502..d2dc6d95b9ea47 100644
--- a/src/sentry/snuba/metrics/fields/base.py
+++ b/src/sentry/snuba/metrics/fields/base.py
@@ -105,7 +105,7 @@
MetricOperationParams = Mapping[str, Union[str, int, float]]
-def run_metrics_query(
+def build_metrics_query(
*,
entity_key: EntityKey,
select: list[Column],
@@ -113,18 +113,15 @@ def run_metrics_query(
groupby: list[Column],
project_ids: Sequence[int],
org_id: int,
- referrer: str,
use_case_id: UseCaseID,
start: datetime | None = None,
end: datetime | None = None,
-) -> list[SnubaDataType]:
+) -> Request:
if end is None:
end = datetime.now()
if start is None:
start = end - timedelta(hours=24)
- # Round timestamp to minute to get cache efficiency:
- # Also floor start to match the daily granularity
end = end.replace(second=0, microsecond=0)
start = start.replace(hour=0, minute=0, second=0, microsecond=0)
@@ -141,12 +138,43 @@ def run_metrics_query(
+ where,
granularity=Granularity(GRANULARITY),
)
+
request = Request(
- dataset=Dataset.Metrics.value,
+ dataset=Dataset.Metrics.value
+ if use_case_id == UseCaseID.SESSIONS
+ else Dataset.PerformanceMetrics.value,
app_id="metrics",
query=query,
tenant_ids={"organization_id": org_id, "use_case_id": use_case_id.value},
)
+
+ return request
+
+
+def run_metrics_query(
+ *,
+ entity_key: EntityKey,
+ select: list[Column],
+ where: list[Condition],
+ groupby: list[Column],
+ project_ids: Sequence[int],
+ org_id: int,
+ referrer: str,
+ use_case_id: UseCaseID,
+ start: datetime | None = None,
+ end: datetime | None = None,
+) -> list[SnubaDataType]:
+ request = build_metrics_query(
+ entity_key=entity_key,
+ select=select,
+ where=where,
+ groupby=groupby,
+ project_ids=project_ids,
+ org_id=org_id,
+ use_case_id=use_case_id,
+ start=start,
+ end=end,
+ )
result = raw_snql_query(request, referrer, use_cache=True)
return result["data"]
diff --git a/src/sentry/snuba/metrics_layer/query.py b/src/sentry/snuba/metrics_layer/query.py
index 8dc85d1498f532..e7b541137f968a 100644
--- a/src/sentry/snuba/metrics_layer/query.py
+++ b/src/sentry/snuba/metrics_layer/query.py
@@ -18,6 +18,7 @@
Timeseries,
)
from snuba_sdk.formula import FormulaParameterGroup
+from snuba_sdk.mql.mql import parse_mql
from sentry.exceptions import InvalidParams
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
@@ -137,6 +138,10 @@ def _setup_metrics_query(request: Request) -> tuple[Request, datetime, datetime]
metrics_query = request.query
assert isinstance(metrics_query, MetricsQuery)
+ # We allow users to pass in a string instead of a Formula/Timeseries object. Handle that case here.
+ if isinstance(metrics_query.query, str):
+ metrics_query = metrics_query.set_query(parse_mql(metrics_query.query))
+
assert len(metrics_query.scope.org_ids) == 1 # Initially only allow 1 org id
organization_id = metrics_query.scope.org_ids[0]
tenant_ids = request.tenant_ids or {"organization_id": organization_id}
@@ -265,7 +270,14 @@ def _resolve_query_metadata(
assert metrics_query.query is not None
org_id = metrics_query.scope.org_ids[0]
- use_case_id_str = _resolve_use_case_id_str(metrics_query.query)
+ use_case_ids = _resolve_use_case_ids(metrics_query.query)
+
+ if not use_case_ids:
+ raise InvalidParams("No use case found in formula parameters")
+ if len(use_case_ids) > 1:
+ raise InvalidParams("Formula parameters must all be from the same use case")
+ use_case_id_str = use_case_ids.pop()
+
if metrics_query.scope.use_case_id is None:
metrics_query = metrics_query.set_scope(
metrics_query.scope.set_use_case_id(use_case_id_str)
@@ -331,7 +343,7 @@ def _resolve_timeseries_metadata(
return series, mappings
-def _resolve_use_case_id_str(exp: Formula | Timeseries) -> str:
+def _resolve_use_case_ids(exp: Formula | Timeseries) -> set[str]:
def fetch_namespace(metric: Metric) -> str:
if metric.mri is None:
mri = get_mri(metric.public_name)
@@ -344,20 +356,15 @@ def fetch_namespace(metric: Metric) -> str:
return parsed_mri.namespace
if isinstance(exp, Timeseries):
- return fetch_namespace(exp.metric)
+ return {fetch_namespace(exp.metric)}
assert isinstance(exp, Formula), exp
namespaces = set()
for p in exp.parameters:
if isinstance(p, (Formula, Timeseries)):
- namespaces.add(_resolve_use_case_id_str(p))
-
- if not namespaces:
- raise InvalidParams("No use case found in formula parameters")
- if len(namespaces) > 1:
- raise InvalidParams("Formula parameters must all be from the same use case")
+ namespaces |= _resolve_use_case_ids(p)
- return namespaces.pop()
+ return namespaces
def _lookup_indexer_resolve(
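
A self-contained sketch of the refactored use-case resolution: namespace collection is now purely recursive and returns a set, while the "exactly one use case" validation happens once at the call site. The Formula and Timeseries classes below are toy stand-ins; the real code derives the namespace from each metric's MRI.

```python
from dataclasses import dataclass


@dataclass
class Timeseries:
    namespace: str


@dataclass
class Formula:
    parameters: list


def resolve_use_case_ids(exp) -> set[str]:
    # A timeseries contributes its own namespace; a formula is the union of its parameters'.
    if isinstance(exp, Timeseries):
        return {exp.namespace}
    namespaces: set[str] = set()
    for p in exp.parameters:
        if isinstance(p, (Formula, Timeseries)):
            namespaces |= resolve_use_case_ids(p)
    return namespaces


query = Formula([Timeseries("transactions"), Formula([Timeseries("transactions")])])
assert resolve_use_case_ids(query) == {"transactions"}  # caller raises if empty or len > 1
```
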
diff --git a/src/sentry/snuba/models.py b/src/sentry/snuba/models.py
index c22137be580c78..bd9377695a7e72 100644
--- a/src/sentry/snuba/models.py
+++ b/src/sentry/snuba/models.py
@@ -52,7 +52,7 @@ def event_types(self):
@classmethod
def query_for_relocation_export(cls, q: models.Q, pk_map: PrimaryKeyMap) -> models.Q:
- from sentry.incidents.models import AlertRule
+ from sentry.incidents.models.alert_rule import AlertRule
from sentry.models.actor import Actor
from sentry.models.organization import Organization
from sentry.models.project import Project
diff --git a/src/sentry/tasks/check_am2_compatibility.py b/src/sentry/tasks/check_am2_compatibility.py
index 0ab5e20c1c3ff0..abc135146b8fd8 100644
--- a/src/sentry/tasks/check_am2_compatibility.py
+++ b/src/sentry/tasks/check_am2_compatibility.py
@@ -9,7 +9,7 @@
from sentry.dynamic_sampling import get_redis_client_for_ds
from sentry.exceptions import IncompatibleMetricsQuery
-from sentry.incidents.models import AlertRule
+from sentry.incidents.models.alert_rule import AlertRule
from sentry.models.dashboard_widget import (
ON_DEMAND_ENABLED_KEY,
DashboardWidgetQuery,
diff --git a/src/sentry/tasks/derive_code_mappings.py b/src/sentry/tasks/derive_code_mappings.py
index 746ba13bbff193..0b7b9f93175925 100644
--- a/src/sentry/tasks/derive_code_mappings.py
+++ b/src/sentry/tasks/derive_code_mappings.py
@@ -22,7 +22,7 @@
from sentry.utils.locking import UnableToAcquireLock
from sentry.utils.safe import get_path
-SUPPORTED_LANGUAGES = ["javascript", "python", "node", "ruby"]
+SUPPORTED_LANGUAGES = ["javascript", "python", "node", "ruby", "php"]
logger = logging.getLogger(__name__)
@@ -92,13 +92,19 @@ def derive_code_mappings(
"organization.slug": org.slug,
}
- if (
- not features.has("organizations:derive-code-mappings", org)
- or not data["platform"] in SUPPORTED_LANGUAGES
+ if not (
+ features.has("organizations:derive-code-mappings", org)
+ and data.get("platform") in SUPPORTED_LANGUAGES
):
logger.info("Event should not be processed.", extra=extra)
return
+    # PHP automatic code mappings are currently in limited availability (LA)
+ if data["platform"].startswith("php") and not features.has(
+ "organizations:derive-code-mappings-php", org
+ ):
+ return
+
stacktrace_paths: list[str] = identify_stacktrace_paths(data)
if not stacktrace_paths:
return
diff --git a/src/sentry/tasks/files.py b/src/sentry/tasks/files.py
index 868ef90df186bd..fa1f12b58cc69f 100644
--- a/src/sentry/tasks/files.py
+++ b/src/sentry/tasks/files.py
@@ -42,15 +42,13 @@ def delete_file_control(path, checksum, **kwargs):
def delete_file(file_blob_model, path, checksum, **kwargs):
- from sentry.models.files.utils import get_storage, lock_blob
-
- lock = lock_blob(checksum, "fileblob_upload")
- with lock:
- # check that the fileblob with *this* path exists, as its possible
- # that a concurrent re-upload added the same chunk once again, with a
- # different path that time
- if not file_blob_model.objects.filter(checksum=checksum, path=path).exists():
- get_storage().delete(path)
+ from sentry.models.files.utils import get_storage
+
+    # check that the fileblob with *this* path exists, as it's possible
+    # that a concurrent re-upload added the same chunk once again, with a
+    # different path that time
+ if not file_blob_model.objects.filter(checksum=checksum, path=path).exists():
+ get_storage().delete(path)
@instrumented_task(
diff --git a/src/sentry/tasks/groupowner.py b/src/sentry/tasks/groupowner.py
index aa0796af38ab21..a89838eb097c58 100644
--- a/src/sentry/tasks/groupowner.py
+++ b/src/sentry/tasks/groupowner.py
@@ -92,6 +92,15 @@ def _process_suspect_commits(
pass
else:
owner.delete()
+ logger.info(
+ "process_suspect_commits.group_owner_removed",
+ extra={
+ "event": event_id,
+ "group": group_id,
+ "owner_id": owner.user_id,
+ "project": project_id,
+ },
+ )
except GroupOwner.MultipleObjectsReturned:
GroupOwner.objects.filter(
group_id=group_id,
@@ -100,6 +109,15 @@ def _process_suspect_commits(
project=project,
organization_id=project.organization_id,
)[0].delete()
+ logger.info(
+ "process_suspect_commits.multiple_owners_removed",
+ extra={
+ "event": event_id,
+ "group": group_id,
+ "owner_id": owner_id,
+ "project": project_id,
+ },
+ )
cache.set(
cache_key, True, PREFERRED_GROUP_OWNER_AGE.total_seconds()
diff --git a/src/sentry/tasks/integrations/slack/find_channel_id_for_alert_rule.py b/src/sentry/tasks/integrations/slack/find_channel_id_for_alert_rule.py
index cedcf9fd9fddfa..6fe4316cdb4974 100644
--- a/src/sentry/tasks/integrations/slack/find_channel_id_for_alert_rule.py
+++ b/src/sentry/tasks/integrations/slack/find_channel_id_for_alert_rule.py
@@ -11,7 +11,7 @@
InvalidTriggerActionError,
get_slack_channel_ids,
)
-from sentry.incidents.models import AlertRule
+from sentry.incidents.models.alert_rule import AlertRule
from sentry.incidents.serializers import AlertRuleSerializer
from sentry.integrations.slack.utils import SLACK_RATE_LIMITED_MESSAGE, RedisRuleStatus
from sentry.models.organization import Organization
diff --git a/src/sentry/tasks/integrations/slack/find_channel_id_for_rule.py b/src/sentry/tasks/integrations/slack/find_channel_id_for_rule.py
index 168c7dfdc3843e..66847e5b3cb54a 100644
--- a/src/sentry/tasks/integrations/slack/find_channel_id_for_rule.py
+++ b/src/sentry/tasks/integrations/slack/find_channel_id_for_rule.py
@@ -2,7 +2,7 @@
from collections.abc import Sequence
from typing import Any
-from sentry.incidents.models import AlertRuleTriggerAction
+from sentry.incidents.models.alert_rule import AlertRuleTriggerAction
from sentry.integrations.slack.utils import (
SLACK_RATE_LIMITED_MESSAGE,
RedisRuleStatus,
diff --git a/src/sentry/tasks/post_process.py b/src/sentry/tasks/post_process.py
index e559d929f2e33d..70087b9dfff336 100644
--- a/src/sentry/tasks/post_process.py
+++ b/src/sentry/tasks/post_process.py
@@ -339,6 +339,10 @@ def handle_invalid_group_owners(group):
)
for owner in invalid_group_owners:
owner.delete()
+ logger.info(
+ "handle_invalid_group_owners.delete_group_owner",
+ extra={"group": group.id, "group_owner_id": owner.id, "project": group.project_id},
+ )
def handle_group_owners(
@@ -358,9 +362,11 @@ def handle_group_owners(
lock = locks.get(f"groupowner-bulk:{group.id}", duration=10, name="groupowner_bulk")
try:
- with metrics.timer("post_process.handle_group_owners"), sentry_sdk.start_span(
- op="post_process.handle_group_owners"
- ), lock.acquire():
+ with (
+ metrics.timer("post_process.handle_group_owners"),
+ sentry_sdk.start_span(op="post_process.handle_group_owners"),
+ lock.acquire(),
+ ):
current_group_owners = GroupOwner.objects.filter(
group=group,
type__in=[GroupOwnerType.OWNERSHIP_RULE.value, GroupOwnerType.CODEOWNERS.value],
@@ -377,6 +383,12 @@ def handle_group_owners(
# Owners already in the database that we'll keep
keeping_owners = set()
for group_owner in current_group_owners:
+ logging_params = {
+ "group": group.id,
+ "project": project.id,
+ "organization": project.organization_id,
+ "group_owner_id": group_owner.id,
+ }
owner_rule_type = (
OwnerRuleType.CODEOWNERS.value
if group_owner.type == GroupOwnerType.CODEOWNERS.value
@@ -391,6 +403,10 @@ def handle_group_owners(
lookup_key_value = None
if lookup_key not in new_owners:
group_owner.delete()
+ logger.info(
+ "handle_group_owners.delete_group_owner",
+ extra={**logging_params, "reason": "assignment_deleted"},
+ )
else:
lookup_key_value = new_owners.get(lookup_key)
# Old groupowner assignment from outdated rules get deleted
@@ -399,6 +415,10 @@ def handle_group_owners(
and (group_owner.context or {}).get("rule") not in lookup_key_value
):
group_owner.delete()
+ logger.info(
+ "handle_group_owners.delete_group_owner",
+ extra={**logging_params, "reason": "outdated_rule"},
+ )
else:
keeping_owners.add(lookup_key)
@@ -439,6 +459,15 @@ def handle_group_owners(
instance=go,
created=True,
)
+ logger.info(
+ "group_owners.bulk_create",
+ extra={
+ "group_id": group.id,
+ "project_id": project.id,
+ "organization_id": project.organization_id,
+ "count": len(new_group_owners),
+ },
+ )
except UnableToAcquireLock:
pass
@@ -737,14 +766,17 @@ def run_post_process_job(job: PostProcessJob):
for pipeline_step in pipeline:
try:
- with metrics.timer(
- "tasks.post_process.run_post_process_job.pipeline.duration",
- tags={
- "pipeline": pipeline_step.__name__,
- "issue_category": issue_category_metric,
- "is_reprocessed": job["is_reprocessed"],
- },
- ), sentry_sdk.start_span(op=f"tasks.post_process_group.{pipeline_step.__name__}"):
+ with (
+ metrics.timer(
+ "tasks.post_process.run_post_process_job.pipeline.duration",
+ tags={
+ "pipeline": pipeline_step.__name__,
+ "issue_category": issue_category_metric,
+ "is_reprocessed": job["is_reprocessed"],
+ },
+ ),
+ sentry_sdk.start_span(op=f"tasks.post_process_group.{pipeline_step.__name__}"),
+ ):
pipeline_step(job)
except Exception:
metrics.incr(
@@ -1008,12 +1040,6 @@ def _get_replay_id(event):
if job["is_reprocessed"]:
return
- if not features.has(
- "organizations:session-replay-event-linking", job["event"].project.organization
- ):
- metrics.incr("post_process.process_replay_link.feature_not_enabled")
- return
-
metrics.incr("post_process.process_replay_link.id_sampled")
group_event = job["event"]
@@ -1100,7 +1126,7 @@ def process_code_mappings(job: PostProcessJob) -> None:
with metrics.timer("post_process.process_code_mappings.duration"):
# Supported platforms
- if event.data["platform"] not in SUPPORTED_LANGUAGES:
+ if event.data.get("platform") not in SUPPORTED_LANGUAGES:
return
# To limit the overall number of tasks, only process one issue per project per hour. In
@@ -1180,10 +1206,7 @@ def process_commits(job: PostProcessJob) -> None:
# Cache the integrations check for 4 hours
cache.set(integration_cache_key, has_integrations, 14400)
- if (
- features.has("organizations:commit-context", event.project.organization)
- and has_integrations
- ):
+ if has_integrations:
if not job["group_state"]["is_new"]:
return
@@ -1386,7 +1409,7 @@ def should_postprocess_feedback(job: PostProcessJob) -> bool:
return True
should_notify_on_old_feedbacks = job["event"].project.get_option(
- "sentry:replay_rage_click_issues"
+ "sentry:feedback_user_report_notifications"
)
if (
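Several hunks in this file only reformat stacked context managers into the parenthesized form introduced in Python 3.10; behaviour is unchanged. A tiny standalone illustration of that syntax, with nullcontext standing in for metrics.timer, sentry_sdk.start_span and lock.acquire:

from contextlib import nullcontext

with (
    nullcontext("timer") as timer,
    nullcontext("span") as span,
    nullcontext("lock") as lock,
):
    assert (timer, span, lock) == ("timer", "span", "lock")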
diff --git a/src/sentry/tasks/spans.py b/src/sentry/tasks/spans.py
index 5c1588d9132ff9..d3c596decc341a 100644
--- a/src/sentry/tasks/spans.py
+++ b/src/sentry/tasks/spans.py
@@ -97,6 +97,7 @@ def _update_occurrence_group_type(jobs: Sequence[Job], projects: ProjectsMapping
performance_problems = job.pop("performance_problems")
for performance_problem in performance_problems:
performance_problem.type = PerformanceStreamedSpansGroupTypeExperimental
+ performance_problem.fingerprint = f"{performance_problem.fingerprint}-{PerformanceStreamedSpansGroupTypeExperimental.type_id}"
updated_problems.append(performance_problem)
job["performance_problems"] = updated_problems
@@ -178,7 +179,10 @@ def _process_segment(project_id, segment_id):
_pull_out_data(jobs, projects)
_calculate_span_grouping(jobs, projects)
- _detect_performance_problems(jobs, projects)
+ _detect_performance_problems(jobs, projects, is_standalone_spans=True)
+
+    # Update the group type and fingerprint of all performance problems
+    # so they don't double-write occurrences while we test standalone spans.
_update_occurrence_group_type(jobs, projects)
return jobs
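The comment added above states the intent: while standalone spans are being tested, occurrences produced by this pipeline must not collide with the ones written by the regular transaction pipeline. A minimal sketch of the fingerprint rewrite performed in _update_occurrence_group_type (the type id used here is a stand-in, not the real PerformanceStreamedSpansGroupTypeExperimental.type_id):

EXPERIMENTAL_TYPE_ID = 9999  # stand-in value for illustration only


def suffix_fingerprint(fingerprint: str) -> str:
    # Append the experimental group type id so the occurrence gets its own identity.
    return f"{fingerprint}-{EXPERIMENTAL_TYPE_ID}"


assert suffix_fingerprint("abc123") == "abc123-9999"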
diff --git a/src/sentry/tasks/summaries/daily_summary.py b/src/sentry/tasks/summaries/daily_summary.py
index d94a115113bbce..3f8cfa1d7086fb 100644
--- a/src/sentry/tasks/summaries/daily_summary.py
+++ b/src/sentry/tasks/summaries/daily_summary.py
@@ -76,9 +76,14 @@ def schedule_organizations(timestamp: float | None = None, duration: int | None
user_ids = {
user_id
for user_id in OrganizationMember.objects.filter(
- organization_id=organization.id, teams__projectteam__project__isnull=False
+ organization_id=organization.id,
+ teams__projectteam__project__isnull=False,
+ user_id__isnull=False,
).values_list("user_id", flat=True)
}
+ if not user_ids:
+ continue
+
# TODO: convert timezones to UTC offsets and group
users_by_tz = defaultdict(list)
users_with_tz = user_option_service.get_many(
@@ -199,7 +204,7 @@ def build_summary_data(
project=project, substatus__in=(GroupSubStatus.ESCALATING, GroupSubStatus.REGRESSED)
).using_replica()
regressed_or_escalated_groups_today = Activity.objects.filter(
- group__in=(regressed_or_escalated_groups),
+ group__in=([group for group in regressed_or_escalated_groups]),
type__in=(ActivityType.SET_REGRESSION.value, ActivityType.SET_ESCALATING.value),
)
if regressed_or_escalated_groups_today:
@@ -262,6 +267,10 @@ def deliver_summary(ctx: OrganizationReportContext, users: list[int]):
for user_id in user_ids:
top_projects_context_map = build_top_projects_map(ctx, user_id)
user = cast(RpcActor, user_service.get_user(user_id=user_id))
+ logger.info(
+ "daily_summary.delivering_summary",
+ extra={"user": user_id, "organization": ctx.organization.id},
+ )
DailySummaryNotification(
organization=ctx.organization,
recipient=user,
diff --git a/src/sentry/testutils/cases.py b/src/sentry/testutils/cases.py
index cbec06b057cf61..0d383cf7d862a8 100644
--- a/src/sentry/testutils/cases.py
+++ b/src/sentry/testutils/cases.py
@@ -637,8 +637,12 @@ def create_performance_issue(
perf_event_manager = EventManager(event_data)
perf_event_manager.normalize()
- def detect_performance_problems_interceptor(data: Event, project: Project):
- perf_problems = detect_performance_problems(data, project)
+ def detect_performance_problems_interceptor(
+ data: Event, project: Project, is_standalone_spans: bool = False
+ ):
+ perf_problems = detect_performance_problems(
+ data, project, is_standalone_spans=is_standalone_spans
+ )
if fingerprint:
for perf_problem in perf_problems:
perf_problem.fingerprint = fingerprint
diff --git a/src/sentry/testutils/factories.py b/src/sentry/testutils/factories.py
index 51b3f83edc2812..72651aa8fa8a7c 100644
--- a/src/sentry/testutils/factories.py
+++ b/src/sentry/testutils/factories.py
@@ -35,10 +35,12 @@
create_alert_rule_trigger_action,
query_datasets_to_type,
)
-from sentry.incidents.models import (
+from sentry.incidents.models.alert_rule import (
AlertRuleMonitorType,
AlertRuleThresholdType,
AlertRuleTriggerAction,
+)
+from sentry.incidents.models.incident import (
Incident,
IncidentActivity,
IncidentProject,
diff --git a/src/sentry/testutils/fixtures.py b/src/sentry/testutils/fixtures.py
index c4f7145ef28005..bebcaebe4c429d 100644
--- a/src/sentry/testutils/fixtures.py
+++ b/src/sentry/testutils/fixtures.py
@@ -8,7 +8,8 @@
from django.utils.functional import cached_property
from sentry.eventstore.models import Event
-from sentry.incidents.models import AlertRuleMonitorType, IncidentActivityType
+from sentry.incidents.models.alert_rule import AlertRuleMonitorType
+from sentry.incidents.models.incident import IncidentActivityType
from sentry.models.activity import Activity
from sentry.models.actor import Actor, get_actor_id_for_user
from sentry.models.grouprelease import GroupRelease
diff --git a/src/sentry/testutils/helpers/apigateway.py b/src/sentry/testutils/helpers/apigateway.py
index 215e0650a73e78..6684b6aa4de3c2 100644
--- a/src/sentry/testutils/helpers/apigateway.py
+++ b/src/sentry/testutils/helpers/apigateway.py
@@ -4,6 +4,7 @@
import responses
from django.conf import settings
+from django.http import HttpResponseRedirect
from django.test import override_settings
from django.urls import re_path
from rest_framework.permissions import AllowAny
@@ -32,6 +33,9 @@ class RegionEndpoint(OrganizationEndpoint):
def get(self, request, organization):
return Response({"proxy": False})
+ def post(self, request, organization):
+ return HttpResponseRedirect("https://zombo.com")
+
@region_silo_endpoint
class NoOrgRegionEndpoint(Endpoint):
diff --git a/src/sentry/testutils/helpers/backups.py b/src/sentry/testutils/helpers/backups.py
index 159127336b49c5..c9839daaef7f5f 100644
--- a/src/sentry/testutils/helpers/backups.py
+++ b/src/sentry/testutils/helpers/backups.py
@@ -42,8 +42,8 @@
from sentry.backup.validate import validate
from sentry.db.models.fields.bounded import BoundedBigAutoField
from sentry.db.models.paranoia import ParanoidModel
-from sentry.incidents.models import (
- AlertRuleMonitorType,
+from sentry.incidents.models.alert_rule import AlertRuleMonitorType
+from sentry.incidents.models.incident import (
IncidentActivity,
IncidentSnapshot,
IncidentSubscription,
diff --git a/src/sentry/testutils/pytest/kafka.py b/src/sentry/testutils/pytest/kafka.py
index 151349f3bf6a9e..aaaa8029b16ce3 100644
--- a/src/sentry/testutils/pytest/kafka.py
+++ b/src/sentry/testutils/pytest/kafka.py
@@ -63,32 +63,6 @@ def inner(settings):
return inner
-@pytest.fixture
-def kafka_topics_setter():
- """
- Returns a function that given a Django settings objects will setup the
- kafka topics names to test names.
-
- :return: a function that given a settings object changes all kafka topic names
- to "test-"
- """
-
- def set_test_kafka_settings(settings):
- settings.KAFKA_INGEST_EVENTS = "ingest-events"
- settings.KAFKA_TOPICS[settings.KAFKA_INGEST_EVENTS] = {"cluster": "default"}
-
- settings.INGEST_TRANSACTIONS = "ingest-transactions"
- settings.KAFKA_TOPICS[settings.INGEST_TRANSACTIONS] = {"cluster": "default"}
-
- settings.KAFKA_INGEST_ATTACHMENTS = "ingest-attachments"
- settings.KAFKA_TOPICS[settings.KAFKA_INGEST_ATTACHMENTS] = {"cluster": "default"}
-
- settings.KAFKA_OUTCOMES = "outcomes"
- settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES] = {"cluster": "default"}
-
- return set_test_kafka_settings
-
-
@pytest.fixture(scope="session")
def scope_consumers():
"""
diff --git a/src/sentry/types/region.py b/src/sentry/types/region.py
index 40b67ffe8a664a..80b3db7d11f777 100644
--- a/src/sentry/types/region.py
+++ b/src/sentry/types/region.py
@@ -62,6 +62,9 @@ class Region:
category: RegionCategory
"""The region's category."""
+ visible: bool = True
+ """Whether the region is visible in API responses"""
+
def validate(self) -> None:
from sentry.utils.snowflake import REGION_ID
@@ -135,6 +138,9 @@ def regions(self) -> frozenset[Region]:
def get_by_name(self, region_name: str) -> Region | None:
return self._by_name.get(region_name)
+ def get_regions(self, category: RegionCategory | None = None) -> Iterable[Region]:
+ return (r for r in self.regions if (category is None or r.category == category))
+
def get_region_names(self, category: RegionCategory | None = None) -> Iterable[str]:
return (r.name for r in self.regions if (category is None or r.category == category))
@@ -335,7 +341,11 @@ def find_all_region_names() -> Iterable[str]:
def find_all_multitenant_region_names() -> list[str]:
- return list(get_global_directory().get_region_names(RegionCategory.MULTI_TENANT))
+ """
+    Return the names of all visible multi-tenant regions.
+ """
+ regions = get_global_directory().get_regions(RegionCategory.MULTI_TENANT)
+ return list([r.name for r in regions if r.visible])
def find_all_region_addresses() -> Iterable[str]:
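The new visible flag and get_regions helper let a region exist in configuration without being advertised. A self-contained sketch of the filtering that find_all_multitenant_region_names now performs, using a stand-in dataclass instead of the real Region type:

from dataclasses import dataclass
from enum import Enum


class Category(Enum):
    MULTI_TENANT = "multi_tenant"
    SINGLE_TENANT = "single_tenant"


@dataclass(frozen=True)
class StubRegion:
    name: str
    category: Category
    visible: bool = True


regions = [
    StubRegion("us", Category.MULTI_TENANT),
    StubRegion("de", Category.MULTI_TENANT, visible=False),  # configured but hidden
    StubRegion("acme", Category.SINGLE_TENANT),
]

# Mirrors find_all_multitenant_region_names(): only visible multi-tenant regions.
assert [
    r.name for r in regions if r.category == Category.MULTI_TENANT and r.visible
] == ["us"]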
diff --git a/src/sentry/usage_accountant/accountant.py b/src/sentry/usage_accountant/accountant.py
index 2ecf3c49f75c03..ee1e98a8c9cc8f 100644
--- a/src/sentry/usage_accountant/accountant.py
+++ b/src/sentry/usage_accountant/accountant.py
@@ -12,9 +12,9 @@
from arroyo.backends.abstract import Producer
from arroyo.backends.kafka import KafkaPayload, KafkaProducer, build_kafka_configuration
-from django.conf import settings
from usageaccountant import UsageAccumulator, UsageUnit
+from sentry.conf.types.kafka_definition import Topic
from sentry.options import get
from sentry.utils.kafka_config import get_kafka_producer_cluster_options, get_topic_definition
@@ -71,7 +71,7 @@ def record(
if _accountant_backend is None:
cluster_name = get_topic_definition(
- settings.KAFKA_SHARED_RESOURCES_USAGE,
+ Topic.SHARED_RESOURCES_USAGE,
)["cluster"]
producer_config = get_kafka_producer_cluster_options(cluster_name)
producer = KafkaProducer(
diff --git a/src/sentry/utils/hashlib.py b/src/sentry/utils/hashlib.py
index 608c290bd06b2c..72371bedc81fa8 100644
--- a/src/sentry/utils/hashlib.py
+++ b/src/sentry/utils/hashlib.py
@@ -73,3 +73,19 @@ def hash_values(
for value in values:
hash_value(_hash, value)
return _hash.hexdigest()
+
+
+def fnv1a_32(data: bytes) -> int:
+ """
+    Fowler–Noll–Vo (FNV-1a) hash function, 32-bit implementation.
+ """
+ fnv_init = 0x811C9DC5
+ fnv_prime = 0x01000193
+ fnv_size = 2**32
+
+ result_hash = fnv_init
+ for byte in data:
+ result_hash ^= byte
+ result_hash = (result_hash * fnv_prime) % fnv_size
+
+ return result_hash
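A quick sanity check for the new helper, assuming the hunk above is applied so fnv1a_32 is importable from sentry.utils.hashlib. The only hard-coded expectation is the FNV-1a 32-bit offset basis, which is what an empty input hashes to by construction:

from sentry.utils.hashlib import fnv1a_32

assert fnv1a_32(b"") == 0x811C9DC5  # empty input returns the offset basis
for sample in (b"a", b"sentry", b"snuba-spans"):
    h = fnv1a_32(sample)
    assert 0 <= h < 2**32  # always fits in 32 bits
    assert fnv1a_32(sample) == h  # deterministic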
diff --git a/src/sentry/utils/kafka_config.py b/src/sentry/utils/kafka_config.py
index 2ca53a67bf3a47..93e3c4fc87a126 100644
--- a/src/sentry/utils/kafka_config.py
+++ b/src/sentry/utils/kafka_config.py
@@ -3,6 +3,7 @@
from django.conf import settings
+from sentry.conf.types.kafka_definition import Topic
from sentry.conf.types.topic_definition import TopicDefinition
SUPPORTED_KAFKA_CONFIGURATION = (
@@ -96,9 +97,8 @@ def get_kafka_admin_cluster_options(
)
-def get_topic_definition(topic: str) -> TopicDefinition:
- defn = settings.KAFKA_TOPICS.get(topic)
- if defn is not None:
- return defn
- else:
- raise ValueError(f"Unknown {topic=}")
+def get_topic_definition(topic: Topic) -> TopicDefinition:
+ return {
+ "cluster": settings.KAFKA_TOPIC_TO_CLUSTER[topic.value],
+ "real_topic_name": settings.KAFKA_TOPIC_OVERRIDES.get(topic.value, topic.value),
+ }
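With this change a topic is addressed by the logical Topic enum: the cluster comes from settings.KAFKA_TOPIC_TO_CLUSTER and the physical name from settings.KAFKA_TOPIC_OVERRIDES, falling back to the enum value. A sketch of the producer-side call pattern, assuming a configured Sentry settings module (see the outcomes and usage_accountant hunks for the real call sites):

from sentry.conf.types.kafka_definition import Topic
from sentry.utils.kafka_config import (
    get_kafka_producer_cluster_options,
    get_topic_definition,
)

topic_defn = get_topic_definition(Topic.OUTCOMES)
producer_config = get_kafka_producer_cluster_options(topic_defn["cluster"])
topic_name = topic_defn["real_topic_name"]  # may be overridden per deployment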
diff --git a/src/sentry/utils/mockdata/core.py b/src/sentry/utils/mockdata/core.py
index 3484d6d0d3131f..e2cff8e4bfbb55 100644
--- a/src/sentry/utils/mockdata/core.py
+++ b/src/sentry/utils/mockdata/core.py
@@ -20,7 +20,8 @@
from sentry.constants import ObjectStatus
from sentry.exceptions import HashDiscarded
from sentry.incidents.logic import create_alert_rule, create_alert_rule_trigger, create_incident
-from sentry.incidents.models import AlertRuleThresholdType, IncidentType
+from sentry.incidents.models.alert_rule import AlertRuleThresholdType
+from sentry.incidents.models.incident import IncidentType
from sentry.models.activity import Activity
from sentry.models.broadcast import Broadcast
from sentry.models.commit import Commit
diff --git a/src/sentry/utils/outcomes.py b/src/sentry/utils/outcomes.py
index 4aa2951b4bc0aa..f7f1947797d354 100644
--- a/src/sentry/utils/outcomes.py
+++ b/src/sentry/utils/outcomes.py
@@ -4,8 +4,7 @@
from datetime import datetime
from enum import IntEnum
-from django.conf import settings
-
+from sentry.conf.types.kafka_definition import Topic
from sentry.constants import DataCategory
from sentry.utils import json, kafka_config, metrics
from sentry.utils.dates import to_datetime
@@ -72,8 +71,8 @@ def track_outcome(
assert isinstance(category, (type(None), DataCategory))
assert isinstance(quantity, int)
- outcomes_config = kafka_config.get_topic_definition(settings.KAFKA_OUTCOMES)
- billing_config = kafka_config.get_topic_definition(settings.KAFKA_OUTCOMES_BILLING)
+ outcomes_config = kafka_config.get_topic_definition(Topic.OUTCOMES)
+ billing_config = kafka_config.get_topic_definition(Topic.OUTCOMES_BILLING)
use_billing = outcome.is_billing()
@@ -97,14 +96,10 @@ def track_outcome(
timestamp = timestamp or to_datetime(time.time())
- # Send billing outcomes to a dedicated topic if there is a separate
- # configuration for it. Otherwise, fall back to the regular outcomes topic.
- # This does NOT switch the producer, if both topics are on the same cluster.
- #
- # In Sentry, there is no significant difference between the classes of
- # outcome. In Sentry SaaS, they have elevated stability requirements as they
- # are used for spike protection and quota enforcement.
- topic_name = settings.KAFKA_OUTCOMES_BILLING if use_billing else settings.KAFKA_OUTCOMES
+ # Send billing outcomes to a dedicated topic.
+ topic_name = (
+ billing_config["real_topic_name"] if use_billing else outcomes_config["real_topic_name"]
+ )
# Send a snuba metrics payload.
publisher.publish(
diff --git a/src/sentry/utils/performance_issues/performance_detection.py b/src/sentry/utils/performance_issues/performance_detection.py
index 7f5a3afdff00da..8eb20d550a2e9a 100644
--- a/src/sentry/utils/performance_issues/performance_detection.py
+++ b/src/sentry/utils/performance_issues/performance_detection.py
@@ -110,7 +110,9 @@ def fetch_multi(
# Facade in front of performance detection to limit impact of detection on our events ingestion
-def detect_performance_problems(data: dict[str, Any], project: Project) -> list[PerformanceProblem]:
+def detect_performance_problems(
+ data: dict[str, Any], project: Project, is_standalone_spans: bool = False
+) -> list[PerformanceProblem]:
try:
rate = options.get("performance.issues.all.problem-detection")
if rate and rate > random.random():
@@ -121,7 +123,9 @@ def detect_performance_problems(data: dict[str, Any], project: Project) -> list[
), sentry_sdk.start_span(
op="py.detect_performance_issue", description="none"
) as sdk_span:
- return _detect_performance_problems(data, sdk_span, project)
+ return _detect_performance_problems(
+ data, sdk_span, project, is_standalone_spans=is_standalone_spans
+ )
except Exception:
logging.exception("Failed to detect performance problems")
return []
@@ -325,7 +329,7 @@ def get_detection_settings(project_id: int | None = None) -> dict[DetectorType,
def _detect_performance_problems(
- data: dict[str, Any], sdk_span: Any, project: Project
+ data: dict[str, Any], sdk_span: Any, project: Project, is_standalone_spans: bool = False
) -> list[PerformanceProblem]:
event_id = data.get("event_id", None)
@@ -340,7 +344,14 @@ def _detect_performance_problems(
run_detector_on_data(detector, data)
# Metrics reporting only for detection, not created issues.
- report_metrics_for_detectors(data, event_id, detectors, sdk_span, project.organization)
+ report_metrics_for_detectors(
+ data,
+ event_id,
+ detectors,
+ sdk_span,
+ project.organization,
+ is_standalone_spans=is_standalone_spans,
+ )
organization = project.organization
if project is None or organization is None:
@@ -396,6 +407,7 @@ def report_metrics_for_detectors(
detectors: Sequence[PerformanceDetector],
sdk_span: Any,
organization: Organization,
+ is_standalone_spans: bool = False,
):
all_detected_problems = [i for d in detectors for i in d.stored_problems]
has_detected_problems = bool(all_detected_problems)
@@ -410,10 +422,11 @@ def report_metrics_for_detectors(
if has_detected_problems:
set_tag("_pi_all_issue_count", len(all_detected_problems))
set_tag("_pi_sdk_name", sdk_name or "")
+ set_tag("is_standalone_spans", is_standalone_spans)
metrics.incr(
"performance.performance_issue.aggregate",
len(all_detected_problems),
- tags={"sdk_name": sdk_name},
+ tags={"sdk_name": sdk_name, "is_standalone_spans": is_standalone_spans},
)
if event_id:
set_tag("_pi_transaction", event_id)
@@ -444,6 +457,7 @@ def report_metrics_for_detectors(
detected_tags = {
"sdk_name": sdk_name,
"is_early_adopter": organization.flags.early_adopter.is_set,
+ "is_standalone_spans": is_standalone_spans,
}
event_integrations = event.get("sdk", {}).get("integrations", []) or []
diff --git a/src/sentry/utils/sdk.py b/src/sentry/utils/sdk.py
index 5dbba56f16ba72..02622eef964854 100644
--- a/src/sentry/utils/sdk.py
+++ b/src/sentry/utils/sdk.py
@@ -217,8 +217,11 @@ def before_send_transaction(event, _):
def before_send(event, _):
- if event.get("tags") and settings.SILO_MODE:
- event["tags"]["silo_mode"] = settings.SILO_MODE
+ if event.get("tags"):
+ if settings.SILO_MODE:
+ event["tags"]["silo_mode"] = settings.SILO_MODE
+ if settings.SENTRY_REGION:
+ event["tags"]["sentry_region"] = settings.SENTRY_REGION
return event
diff --git a/src/sentry/utils/snuba.py b/src/sentry/utils/snuba.py
index 38fcd47748cd0a..337e3d25eecf30 100644
--- a/src/sentry/utils/snuba.py
+++ b/src/sentry/utils/snuba.py
@@ -134,7 +134,8 @@ def log_snuba_info(content):
"segment.id": "segment_id",
"transaction.op": "transaction_op",
"user": "user",
- "profile_id": "profile_id",
+ "profile_id": "profile_id", # deprecated in favour of `profile.id`
+ "profile.id": "profile_id",
"transaction.method": "sentry_tags[transaction.method]",
"system": "sentry_tags[system]",
"raw_domain": "sentry_tags[raw_domain]",
diff --git a/src/sentry/web/client_config.py b/src/sentry/web/client_config.py
index 42b0c7e97c75f3..7b9823f0410880 100644
--- a/src/sentry/web/client_config.py
+++ b/src/sentry/web/client_config.py
@@ -328,16 +328,23 @@ def regions(self) -> list[Mapping[str, Any]]:
has membership on any single-tenant regions those will also be included.
"""
user = self.user
+
+        # Only expose visible regions.
+        # Newly added regions can take some work before they are fully operational,
+        # and we need a way to bring parts of a region online without exposing
+        # the region to customers.
region_names = find_all_multitenant_region_names()
+
if not region_names:
return [{"name": "default", "url": options.get("system.url-prefix")}]
- # No logged in user.
+    # Show all visible multi-tenant regions to unauthenticated users, as they
+    # could create a new account
if not user or not user.id:
- return [get_region_by_name(region).api_serialize() for region in region_names]
+ return [get_region_by_name(name).api_serialize() for name in region_names]
# Ensure all regions the current user is in are included as there
- # could be single tenants as well.
+ # could be single tenants or hidden regions
memberships = user_service.get_organizations(user_id=user.id)
unique_regions = set(region_names) | {membership.region_name for membership in memberships}
diff --git a/src/sentry/web/frontend/debug/debug_incident_activity_email.py b/src/sentry/web/frontend/debug/debug_incident_activity_email.py
index 5338713358a884..1fbd288625ae6d 100644
--- a/src/sentry/web/frontend/debug/debug_incident_activity_email.py
+++ b/src/sentry/web/frontend/debug/debug_incident_activity_email.py
@@ -1,7 +1,7 @@
from django.http import HttpRequest, HttpResponse
from django.views.generic import View
-from sentry.incidents.models import Incident, IncidentActivity, IncidentActivityType
+from sentry.incidents.models.incident import Incident, IncidentActivity, IncidentActivityType
from sentry.incidents.tasks import generate_incident_activity_email
from sentry.models.organization import Organization
from sentry.models.user import User
diff --git a/src/sentry/web/frontend/debug/debug_incident_trigger_email.py b/src/sentry/web/frontend/debug/debug_incident_trigger_email.py
index 5b22cff07ba5c3..cdb6893afb6b40 100644
--- a/src/sentry/web/frontend/debug/debug_incident_trigger_email.py
+++ b/src/sentry/web/frontend/debug/debug_incident_trigger_email.py
@@ -4,13 +4,8 @@
from django.utils import timezone
from sentry.incidents.action_handlers import generate_incident_trigger_email_context
-from sentry.incidents.models import (
- AlertRule,
- AlertRuleTrigger,
- Incident,
- IncidentStatus,
- TriggerStatus,
-)
+from sentry.incidents.models.alert_rule import AlertRule, AlertRuleTrigger
+from sentry.incidents.models.incident import Incident, IncidentStatus, TriggerStatus
from sentry.models.organization import Organization
from sentry.models.project import Project
from sentry.models.user import User
diff --git a/static/app/components/actions/resolve.spec.tsx b/static/app/components/actions/resolve.spec.tsx
index caba10b4e8fae6..59130c9b8f8c61 100644
--- a/static/app/components/actions/resolve.spec.tsx
+++ b/static/app/components/actions/resolve.spec.tsx
@@ -1,4 +1,3 @@
-import selectEvent from 'react-select-event';
import {ReleaseFixture} from 'sentry-fixture/release';
import {
@@ -8,6 +7,7 @@ import {
userEvent,
within,
} from 'sentry-test/reactTestingLibrary';
+import selectEvent from 'sentry-test/selectEvent';
import ResolveActions from 'sentry/components/actions/resolve';
import ModalStore from 'sentry/stores/modalStore';
@@ -149,7 +149,7 @@ describe('ResolveActions', function () {
await userEvent.click(screen.getByLabelText('More resolve options'));
await userEvent.click(screen.getByText('Another existing release…'));
- selectEvent.openMenu(screen.getByText('e.g. 1.0.4'));
+ await selectEvent.openMenu(screen.getByText('e.g. 1.0.4'));
expect(await screen.findByText('1.2.0')).toBeInTheDocument();
await userEvent.click(screen.getByText('1.2.0'));
diff --git a/static/app/components/contextPickerModal.spec.tsx b/static/app/components/contextPickerModal.spec.tsx
index bfd943d0023be0..36069db3b63227 100644
--- a/static/app/components/contextPickerModal.spec.tsx
+++ b/static/app/components/contextPickerModal.spec.tsx
@@ -1,10 +1,10 @@
-import selectEvent from 'react-select-event';
import {GitHubIntegrationFixture} from 'sentry-fixture/githubIntegration';
import {OrganizationFixture} from 'sentry-fixture/organization';
import {ProjectFixture} from 'sentry-fixture/project';
import {UserFixture} from 'sentry-fixture/user';
import {render, screen, waitFor} from 'sentry-test/reactTestingLibrary';
+import selectEvent from 'sentry-test/selectEvent';
import ContextPickerModal from 'sentry/components/contextPickerModal';
import {
diff --git a/static/app/components/customCommitsResolutionModal.spec.tsx b/static/app/components/customCommitsResolutionModal.spec.tsx
index 8cbafed1ff3f66..686a5373141699 100644
--- a/static/app/components/customCommitsResolutionModal.spec.tsx
+++ b/static/app/components/customCommitsResolutionModal.spec.tsx
@@ -1,8 +1,8 @@
-import selectEvent from 'react-select-event';
import styled from '@emotion/styled';
import {CommitFixture} from 'sentry-fixture/commit';
import {render, screen, userEvent, waitFor} from 'sentry-test/reactTestingLibrary';
+import selectEvent from 'sentry-test/selectEvent';
import CustomCommitsResolutionModal from 'sentry/components/customCommitsResolutionModal';
import {makeCloseButton} from 'sentry/components/globalModal/components';
diff --git a/static/app/components/customResolutionModal.spec.tsx b/static/app/components/customResolutionModal.spec.tsx
index d018f4d5648eb2..edc89c7cab5e1e 100644
--- a/static/app/components/customResolutionModal.spec.tsx
+++ b/static/app/components/customResolutionModal.spec.tsx
@@ -1,10 +1,10 @@
-import selectEvent from 'react-select-event';
import styled from '@emotion/styled';
import {OrganizationFixture} from 'sentry-fixture/organization';
import {ReleaseFixture} from 'sentry-fixture/release';
import {UserFixture} from 'sentry-fixture/user';
import {render, screen, userEvent} from 'sentry-test/reactTestingLibrary';
+import selectEvent from 'sentry-test/selectEvent';
import CustomResolutionModal from 'sentry/components/customResolutionModal';
import {makeCloseButton} from 'sentry/components/globalModal/components';
@@ -43,7 +43,7 @@ describe('CustomResolutionModal', () => {
);
expect(releasesMock).toHaveBeenCalled();
- selectEvent.openMenu(screen.getByText('e.g. 1.0.4'));
+ await selectEvent.openMenu(screen.getByText('e.g. 1.0.4'));
expect(await screen.findByText('1.2.0')).toBeInTheDocument();
await userEvent.click(screen.getByText('1.2.0'));
@@ -70,7 +70,7 @@ describe('CustomResolutionModal', () => {
);
expect(releasesMock).toHaveBeenCalled();
- selectEvent.openMenu(screen.getByText('e.g. 1.0.4'));
+ await selectEvent.openMenu(screen.getByText('e.g. 1.0.4'));
expect(await screen.findByText(/You committed/)).toBeInTheDocument();
});
@@ -120,7 +120,7 @@ describe('CustomResolutionModal', () => {
/>
);
- selectEvent.openMenu(screen.getByText('e.g. 1.0.4'));
+ await selectEvent.openMenu(screen.getByText('e.g. 1.0.4'));
expect(
await screen.findByRole('menuitemradio', {name: 'abcdef (non-semver)'})
).toBeInTheDocument();
diff --git a/static/app/components/ddm/metricSamplesTable.tsx b/static/app/components/ddm/metricSamplesTable.tsx
index 9a985f333380cd..07c8151b3632e9 100644
--- a/static/app/components/ddm/metricSamplesTable.tsx
+++ b/static/app/components/ddm/metricSamplesTable.tsx
@@ -3,7 +3,7 @@ import styled from '@emotion/styled';
import type {LocationDescriptorObject} from 'history';
import debounce from 'lodash/debounce';
-import {LinkButton} from 'sentry/components/button';
+import {Button, LinkButton} from 'sentry/components/button';
import EmptyStateWarning from 'sentry/components/emptyStateWarning';
import GridEditable, {
COL_WIDTH_UNDEFINED,
@@ -525,7 +525,13 @@ function ProfileId({projectSlug, profileId}: {projectSlug: string; profileId?: s
const organization = useOrganization();
if (!defined(profileId)) {
- return {t('(no value)')};
+ return (
+
+
+
+ );
}
const target = generateProfileFlamechartRoute({
@@ -543,11 +549,6 @@ function ProfileId({projectSlug, profileId}: {projectSlug: string; profileId?: s
);
}
-const EmptyValueContainer = styled('span')`
- color: ${p => p.theme.gray300};
- ${p => p.theme.overflowEllipsis};
-`;
-
const SearchBar = styled(SmartSearchBar)`
margin-bottom: ${space(2)};
`;
diff --git a/static/app/components/deprecatedforms/selectAsyncField.spec.tsx b/static/app/components/deprecatedforms/selectAsyncField.spec.tsx
index 697bf7a1034c89..e26b5e37953a5d 100644
--- a/static/app/components/deprecatedforms/selectAsyncField.spec.tsx
+++ b/static/app/components/deprecatedforms/selectAsyncField.spec.tsx
@@ -1,14 +1,14 @@
-import selectEvent from 'react-select-event';
-
import {render, screen, userEvent} from 'sentry-test/reactTestingLibrary';
+import selectEvent from 'sentry-test/selectEvent';
import Form from 'sentry/components/deprecatedforms/form';
import SelectAsyncField from 'sentry/components/deprecatedforms/selectAsyncField';
describe('SelectAsyncField', function () {
- let api;
+ let api: jest.Mock;
beforeEach(function () {
+ MockApiClient.clearMockResponses();
api = MockApiClient.addMockResponse({
url: '/foo/bar/',
body: {
@@ -26,7 +26,7 @@ describe('SelectAsyncField', function () {
it('supports autocomplete arguments from an integration', async function () {
render();
- selectEvent.openMenu(screen.getByText('Select me'));
+ await selectEvent.openMenu(screen.getByText('Select me'));
await userEvent.type(screen.getByRole('textbox'), 'baz');
expect(api).toHaveBeenCalled();
@@ -43,10 +43,9 @@ describe('SelectAsyncField', function () {
);
- selectEvent.openMenu(screen.getByText('Select me'));
+ await selectEvent.openMenu(screen.getByText('Select me'));
await userEvent.type(screen.getByRole('textbox'), 'baz');
-
- await selectEvent.select(screen.getByText('Select me'), 'Baz Label');
+ await userEvent.click(screen.getByText('Baz Label'));
expect(screen.getByLabelText('form')).toHaveFormValues({
fieldName: 'baz',
diff --git a/static/app/components/deprecatedforms/selectField.spec.tsx b/static/app/components/deprecatedforms/selectField.spec.tsx
index 70795ca9801380..722c71c333f7b1 100644
--- a/static/app/components/deprecatedforms/selectField.spec.tsx
+++ b/static/app/components/deprecatedforms/selectField.spec.tsx
@@ -1,6 +1,5 @@
-import selectEvent from 'react-select-event';
-
import {render, screen, userEvent} from 'sentry-test/reactTestingLibrary';
+import selectEvent from 'sentry-test/selectEvent';
import Form from 'sentry/components/deprecatedforms/form';
import SelectField from 'sentry/components/deprecatedforms/selectField';
diff --git a/static/app/components/dropdownAutoComplete/types.tsx b/static/app/components/dropdownAutoComplete/types.tsx
index 41fbe6d4f7f6ae..d3e9c80c66e80e 100644
--- a/static/app/components/dropdownAutoComplete/types.tsx
+++ b/static/app/components/dropdownAutoComplete/types.tsx
@@ -1,6 +1,6 @@
export type Item = {
index: number;
- label: ((value: any) => React.ReactNode) | React.ReactNode;
+ label: React.ReactNode;
value: any;
'data-test-id'?: string;
disabled?: boolean;
diff --git a/static/app/components/dropdownMenu/footer.tsx b/static/app/components/dropdownMenu/footer.tsx
new file mode 100644
index 00000000000000..de1c7d14cd5e54
--- /dev/null
+++ b/static/app/components/dropdownMenu/footer.tsx
@@ -0,0 +1,15 @@
+import styled from '@emotion/styled';
+
+import {space} from 'sentry/styles/space';
+
+/**
+ * Provides default styling for custom footer content in a `DropdownMenu`.
+ */
+export const DropdownMenuFooter = styled('div')`
+ border-top: solid 1px ${p => p.theme.innerBorder};
+ padding: ${space(1)} ${space(1.5)};
+ font-size: ${p => p.theme.fontSizeSmall};
+ color: ${p => p.theme.subText};
+ display: flex;
+ align-items: center;
+`;
diff --git a/static/app/components/dropdownMenu/list.tsx b/static/app/components/dropdownMenu/list.tsx
index 95f68b54d20b95..d1907b352230bc 100644
--- a/static/app/components/dropdownMenu/list.tsx
+++ b/static/app/components/dropdownMenu/list.tsx
@@ -57,6 +57,10 @@ export interface DropdownMenuListProps
* Whether the menu should close when an item has been clicked/selected
*/
closeOnSelect?: boolean;
+ /**
+ * To be displayed below the menu items
+ */
+ menuFooter?: React.ReactChild;
/**
* Title to display on top of the menu
*/
@@ -74,6 +78,7 @@ function DropdownMenuList({
minMenuWidth,
size,
menuTitle,
+ menuFooter,
overlayState,
overlayPositionProps,
...props
@@ -249,6 +254,7 @@ function DropdownMenuList({
>
{renderCollection(stateCollection)}
+ {menuFooter}
diff --git a/static/app/components/events/eventExtraData/index.spec.tsx b/static/app/components/events/eventExtraData/index.spec.tsx
index d724e4fa70f8a6..0038ce1445fa49 100644
--- a/static/app/components/events/eventExtraData/index.spec.tsx
+++ b/static/app/components/events/eventExtraData/index.spec.tsx
@@ -178,6 +178,7 @@ describe('EventExtraData', function () {
},
});
+ await userEvent.click(screen.getByRole('button', {name: 'Expand'}));
expect(await screen.findAllByText(/redacted/)).toHaveLength(10);
await userEvent.hover(screen.getAllByText(/redacted/)[0]);
diff --git a/static/app/components/events/eventReplay/constants.tsx b/static/app/components/events/eventReplay/constants.tsx
new file mode 100644
index 00000000000000..0764635529a0ba
--- /dev/null
+++ b/static/app/components/events/eventReplay/constants.tsx
@@ -0,0 +1 @@
+export const REPLAY_LOADING_HEIGHT = 480;
diff --git a/static/app/components/events/eventReplay/index.tsx b/static/app/components/events/eventReplay/index.tsx
index 96ae7f273c1329..51a1949b68d113 100644
--- a/static/app/components/events/eventReplay/index.tsx
+++ b/static/app/components/events/eventReplay/index.tsx
@@ -4,6 +4,7 @@ import styled from '@emotion/styled';
import NegativeSpaceContainer from 'sentry/components/container/negativeSpaceContainer';
import ErrorBoundary from 'sentry/components/errorBoundary';
+import {REPLAY_LOADING_HEIGHT} from 'sentry/components/events/eventReplay/constants';
import {EventReplaySection} from 'sentry/components/events/eventReplay/eventReplaySection';
import LazyLoad from 'sentry/components/lazyLoad';
import LoadingIndicator from 'sentry/components/loadingIndicator';
@@ -109,6 +110,7 @@ function EventReplayContent({
{...commonProps}
component={replayClipPreview}
clipOffsets={CLIP_OFFSETS}
+ issueCategory={group?.issueCategory}
/>
) : (
@@ -144,10 +146,10 @@ export default function EventReplay({event, group, projectSlug}: Props) {
// The min-height here is due to max-height that is set in replayPreview.tsx
const ReplaySectionMinHeight = styled(EventReplaySection)`
- min-height: 508px;
+ min-height: 557px;
`;
const StyledNegativeSpaceContainer = styled(NegativeSpaceContainer)`
- height: 400px;
+ height: ${REPLAY_LOADING_HEIGHT}px;
margin-bottom: ${space(2)};
`;
diff --git a/static/app/components/events/eventReplay/replayClipPreview.tsx b/static/app/components/events/eventReplay/replayClipPreview.tsx
index 82d4b9bf2fbe5d..de444df33da8c1 100644
--- a/static/app/components/events/eventReplay/replayClipPreview.tsx
+++ b/static/app/components/events/eventReplay/replayClipPreview.tsx
@@ -7,6 +7,7 @@ import {LinkButton} from 'sentry/components/button';
import ButtonBar from 'sentry/components/buttonBar';
import NegativeSpaceContainer from 'sentry/components/container/negativeSpaceContainer';
import ErrorBoundary from 'sentry/components/errorBoundary';
+import {REPLAY_LOADING_HEIGHT} from 'sentry/components/events/eventReplay/constants';
import {StaticReplayPreview} from 'sentry/components/events/eventReplay/staticReplayPreview';
import LoadingIndicator from 'sentry/components/loadingIndicator';
import Panel from 'sentry/components/panels/panel';
@@ -27,11 +28,14 @@ import TimeAndScrubberGrid from 'sentry/components/replays/timeAndScrubberGrid';
import {IconDelete} from 'sentry/icons';
import {t} from 'sentry/locale';
import {space} from 'sentry/styles/space';
+import {IssueCategory} from 'sentry/types';
+import EventView from 'sentry/utils/discover/eventView';
import getRouteStringFromRoutes from 'sentry/utils/getRouteStringFromRoutes';
import {TabKey} from 'sentry/utils/replays/hooks/useActiveReplayTab';
import useReplayReader from 'sentry/utils/replays/hooks/useReplayReader';
import type RequestError from 'sentry/utils/requestError/requestError';
import useRouteAnalyticsParams from 'sentry/utils/routeAnalytics/useRouteAnalyticsParams';
+import {useLocation} from 'sentry/utils/useLocation';
import useOrganization from 'sentry/utils/useOrganization';
import {useRoutes} from 'sentry/utils/useRoutes';
import useFullscreen from 'sentry/utils/window/useFullscreen';
@@ -40,6 +44,7 @@ import {normalizeUrl} from 'sentry/utils/withDomainRequired';
import Breadcrumbs from 'sentry/views/replays/detail/breadcrumbs';
import BrowserOSIcons from 'sentry/views/replays/detail/browserOSIcons';
import FluidHeight from 'sentry/views/replays/detail/layout/fluidHeight';
+import {ReplayCell} from 'sentry/views/replays/replayTable/tableCell';
import type {ReplayRecord} from 'sentry/views/replays/types';
type Props = {
@@ -53,6 +58,7 @@ type Props = {
replaySlug: string;
focusTab?: TabKey;
fullReplayButtonProps?: Partial>;
+ issueCategory?: IssueCategory;
};
function getReplayAnalyticsStatus({
@@ -80,14 +86,20 @@ function getReplayAnalyticsStatus({
function ReplayPreviewPlayer({
replayId,
fullReplayButtonProps,
+ replayRecord,
+ issueCategory,
}: {
replayId: string;
+ replayRecord: ReplayRecord;
fullReplayButtonProps?: Partial>;
+ issueCategory?: IssueCategory;
}) {
const routes = useRoutes();
+ const location = useLocation();
const organization = useOrganization();
const [isSidebarOpen, setIsSidebarOpen] = useState(true);
const {replay, currentTime} = useReplayContext();
+ const eventView = EventView.fromLocation(location);
const fullscreenRef = useRef(null);
const {toggle: toggleFullscreen} = useFullscreen({
@@ -96,17 +108,29 @@ function ReplayPreviewPlayer({
const isFullscreen = useIsFullscreen();
const startOffsetMs = replay?.getStartOffsetMs() ?? 0;
+ const isRageClickIssue = issueCategory === IssueCategory.REPLAY;
+
const fullReplayUrl = {
pathname: normalizeUrl(`/organizations/${organization.slug}/replays/${replayId}/`),
query: {
referrer: getRouteStringFromRoutes(routes),
- t_main: TabKey.ERRORS,
+ t_main: isRageClickIssue ? TabKey.BREADCRUMBS : TabKey.ERRORS,
t: (currentTime + startOffsetMs) / 1000,
+ f_b_type: isRageClickIssue ? 'rageOrDead' : undefined,
},
};
return (
+ {replayRecord && (
+
+ )}
@@ -152,6 +176,7 @@ function ReplayClipPreview({
orgSlug,
replaySlug,
fullReplayButtonProps,
+ issueCategory,
}: Props) {
const clipWindow = useMemo(
() => ({
@@ -221,6 +246,8 @@ function ReplayClipPreview({
)}
@@ -261,7 +288,7 @@ const PreviewPlayerContainer = styled(FluidHeight)<{isSidebarOpen: boolean}>`
const PlayerContainer = styled(FluidHeight)`
position: relative;
- max-height: 448px;
+ max-height: ${REPLAY_LOADING_HEIGHT + 16}px;
`;
const PlayerContextContainer = styled(FluidHeight)`
@@ -276,7 +303,7 @@ const StaticPanel = styled(FluidHeight)`
`;
const StyledNegativeSpaceContainer = styled(NegativeSpaceContainer)`
- height: 400px;
+ height: ${REPLAY_LOADING_HEIGHT}px;
margin-bottom: ${space(2)};
`;
@@ -303,4 +330,8 @@ const ContextContainer = styled('div')`
gap: ${space(1)};
`;
+const ReplayCellNoPadding = styled(ReplayCell)`
+ padding: 0 0 ${space(1)};
+`;
+
export default ReplayClipPreview;
diff --git a/static/app/components/events/eventReplay/replayPreview.tsx b/static/app/components/events/eventReplay/replayPreview.tsx
index dee70c91421702..33ca9389c22e67 100644
--- a/static/app/components/events/eventReplay/replayPreview.tsx
+++ b/static/app/components/events/eventReplay/replayPreview.tsx
@@ -5,6 +5,7 @@ import styled from '@emotion/styled';
import {Alert} from 'sentry/components/alert';
import type {LinkButton} from 'sentry/components/button';
import NegativeSpaceContainer from 'sentry/components/container/negativeSpaceContainer';
+import {REPLAY_LOADING_HEIGHT} from 'sentry/components/events/eventReplay/constants';
import {StaticReplayPreview} from 'sentry/components/events/eventReplay/staticReplayPreview';
import LoadingIndicator from 'sentry/components/loadingIndicator';
import {Flex} from 'sentry/components/profiling/flex';
@@ -112,7 +113,7 @@ function ReplayPreview({
}
const StyledNegativeSpaceContainer = styled(NegativeSpaceContainer)`
- height: 400px;
+ height: ${REPLAY_LOADING_HEIGHT}px;
margin-bottom: ${space(2)};
`;
diff --git a/static/app/components/events/eventReplay/staticReplayPreview.tsx b/static/app/components/events/eventReplay/staticReplayPreview.tsx
index fac32545b58069..0156e4110f7a0c 100644
--- a/static/app/components/events/eventReplay/staticReplayPreview.tsx
+++ b/static/app/components/events/eventReplay/staticReplayPreview.tsx
@@ -2,6 +2,7 @@ import {type ComponentProps, Fragment, useMemo} from 'react';
import styled from '@emotion/styled';
import {LinkButton} from 'sentry/components/button';
+import {REPLAY_LOADING_HEIGHT} from 'sentry/components/events/eventReplay/constants';
import {StaticReplayPreferences} from 'sentry/components/replays/preferences/replayPreferences';
import {Provider as ReplayContextProvider} from 'sentry/components/replays/replayContext';
import ReplayPlayer from 'sentry/components/replays/replayPlayer';
@@ -91,7 +92,7 @@ const PlayerContainer = styled(FluidHeight)`
position: relative;
background: ${p => p.theme.background};
gap: ${space(1)};
- max-height: 448px;
+ max-height: ${REPLAY_LOADING_HEIGHT + 16}px;
`;
const StaticPanel = styled(FluidHeight)`
diff --git a/static/app/components/events/interfaces/performance/spanEvidenceKeyValueList.tsx b/static/app/components/events/interfaces/performance/spanEvidenceKeyValueList.tsx
index 4374d8cebc4158..c6cc119d566a05 100644
--- a/static/app/components/events/interfaces/performance/spanEvidenceKeyValueList.tsx
+++ b/static/app/components/events/interfaces/performance/spanEvidenceKeyValueList.tsx
@@ -475,7 +475,7 @@ const makeTransactionNameRow = (event: Event, orgSlug: string, projectSlug?: str
const makeRow = (
subject: KeyValueListDataItem['subject'],
- value: KeyValueListDataItem['value'] | KeyValueListDataItem['value'][],
+ value: KeyValueListDataItem['value'],
actionButton?: ReactNode
): KeyValueListDataItem => {
const itemKey = kebabCase(subject);
diff --git a/static/app/components/events/interfaces/request/index.spec.tsx b/static/app/components/events/interfaces/request/index.spec.tsx
index 349775b91cb910..df27c7b78df44f 100644
--- a/static/app/components/events/interfaces/request/index.spec.tsx
+++ b/static/app/components/events/interfaces/request/index.spec.tsx
@@ -174,6 +174,8 @@ describe('Request entry', function () {
expect(screen.getAllByText(/redacted/)).toHaveLength(5);
+ // Expand two levels down
+ await userEvent.click(await screen.findByLabelText('Expand'));
await userEvent.click(await screen.findByLabelText('Expand'));
expect(screen.getAllByText(/redacted/)).toHaveLength(7);
diff --git a/static/app/components/feedback/feedbackOnboarding/feedbackOnboardingLayout.tsx b/static/app/components/feedback/feedbackOnboarding/feedbackOnboardingLayout.tsx
index 6190e87172044c..1e404e0d59a489 100644
--- a/static/app/components/feedback/feedbackOnboarding/feedbackOnboardingLayout.tsx
+++ b/static/app/components/feedback/feedbackOnboarding/feedbackOnboardingLayout.tsx
@@ -23,8 +23,8 @@ export function FeedbackOnboardingLayout({
}: OnboardingLayoutProps) {
const organization = useOrganization();
- const [email, setEmail] = useState(true);
- const [name, setName] = useState(true);
+ const [email, setEmail] = useState(false);
+ const [name, setName] = useState(false);
const {isLoading: isLoadingRegistry, data: registryData} =
useSourcePackageRegistries(organization);
diff --git a/static/app/components/feedback/feedbackOnboarding/sidebar.tsx b/static/app/components/feedback/feedbackOnboarding/sidebar.tsx
index f96669948b06ff..8882ab74f54b1f 100644
--- a/static/app/components/feedback/feedbackOnboarding/sidebar.tsx
+++ b/static/app/components/feedback/feedbackOnboarding/sidebar.tsx
@@ -8,12 +8,13 @@ import HighlightTopRightPattern from 'sentry-images/pattern/highlight-top-right.
import {Button} from 'sentry/components/button';
import {CompactSelect} from 'sentry/components/compactSelect';
import {FeedbackOnboardingLayout} from 'sentry/components/feedback/feedbackOnboarding/feedbackOnboardingLayout';
-import useCurrentProjectState from 'sentry/components/feedback/feedbackOnboarding/useCurrentProjectState';
import useLoadFeedbackOnboardingDoc from 'sentry/components/feedback/feedbackOnboarding/useLoadFeedbackOnboardingDoc';
+import {CRASH_REPORT_HASH} from 'sentry/components/feedback/useFeedbackOnboarding';
import RadioGroup from 'sentry/components/forms/controls/radioGroup';
import IdBadge from 'sentry/components/idBadge';
import LoadingIndicator from 'sentry/components/loadingIndicator';
import {FeedbackOnboardingWebApiBanner} from 'sentry/components/onboarding/gettingStartedDoc/utils/feedbackOnboarding';
+import useCurrentProjectState from 'sentry/components/onboarding/gettingStartedDoc/utils/useCurrentProjectState';
import {PlatformOptionDropdown} from 'sentry/components/replaysOnboarding/platformOptionDropdown';
import {replayJsFrameworkOptions} from 'sentry/components/replaysOnboarding/utils';
import SidebarPanel from 'sentry/components/sidebar/sidebarPanel';
@@ -34,6 +35,7 @@ import {t, tct} from 'sentry/locale';
import {space} from 'sentry/styles/space';
import type {PlatformKey, Project, SelectValue} from 'sentry/types';
import useOrganization from 'sentry/utils/useOrganization';
+import {useRouteContext} from 'sentry/utils/useRouteContext';
import useUrlParams from 'sentry/utils/useUrlParams';
function FeedbackOnboardingSidebar(props: CommonSidebarProps) {
@@ -43,12 +45,15 @@ function FeedbackOnboardingSidebar(props: CommonSidebarProps) {
const isActive = currentPanel === SidebarPanelKey.FEEDBACK_ONBOARDING;
const hasProjectAccess = organization.access.includes('project:read');
- const {projects, currentProject, setCurrentProject} = useCurrentProjectState({
+ const {allProjects, currentProject, setCurrentProject} = useCurrentProjectState({
currentPanel,
+ targetPanel: SidebarPanelKey.FEEDBACK_ONBOARDING,
+ onboardingPlatforms: feedbackOnboardingPlatforms,
+ allPlatforms: feedbackOnboardingPlatforms,
});
const projectSelectOptions = useMemo(() => {
- const supportedProjectItems: SelectValue[] = projects
+ const supportedProjectItems: SelectValue[] = allProjects
.sort((aProject, bProject) => {
// if we're comparing two projects w/ or w/o feedback alphabetical sort
if (aProject.hasNewFeedbacks === bProject.hasNewFeedbacks) {
@@ -73,7 +78,7 @@ function FeedbackOnboardingSidebar(props: CommonSidebarProps) {
options: supportedProjectItems,
},
];
- }, [projects]);
+ }, [allProjects]);
if (!isActive || !hasProjectAccess || !currentProject) {
return null;
@@ -112,7 +117,9 @@ function FeedbackOnboardingSidebar(props: CommonSidebarProps) {
)
}
value={currentProject?.id}
- onChange={opt => setCurrentProject(projects.find(p => p.id === opt.value))}
+ onChange={opt =>
+ setCurrentProject(allProjects.find(p => p.id === opt.value))
+ }
triggerProps={{'aria-label': currentProject?.slug}}
options={projectSelectOptions}
position="bottom-end"
@@ -147,6 +154,8 @@ function OnboardingContent({currentProject}: {currentProject: Project}) {
}>(jsFrameworkSelectOptions[0]);
const defaultTab = 'npm';
+ const {location} = useRouteContext();
+ const crashReportOnboarding = location.hash === CRASH_REPORT_HASH;
const {getParamValue: setupMode, setParamValue: setSetupMode} = useUrlParams(
'mode',
@@ -168,9 +177,9 @@ function OnboardingContent({currentProject}: {currentProject: Project}) {
.filter(p => p !== 'javascript')
.includes(currentPlatform.id);
- const showRadioButtons = replayJsLoaderInstructionsPlatformList.includes(
- currentPlatform.id
- );
+ const showRadioButtons =
+ replayJsLoaderInstructionsPlatformList.includes(currentPlatform.id) &&
+ !crashReportOnboarding;
function getJsFramework() {
return (
@@ -197,7 +206,7 @@ function OnboardingContent({currentProject}: {currentProject: Project}) {
projectSlug: currentProject.slug,
});
- if (webApiPlatform) {
+ if (webApiPlatform && !crashReportOnboarding) {
return ;
}
@@ -245,7 +254,8 @@ function OnboardingContent({currentProject}: {currentProject: Project}) {
/>
) : (
newDocs?.platformOptions &&
- widgetPlatform && (
+ widgetPlatform &&
+ !crashReportOnboarding && (
{tct("I'm using [platformSelect]", {
platformSelect: (
@@ -295,6 +305,9 @@ function OnboardingContent({currentProject}: {currentProject: Project}) {
}
function getConfig() {
+ if (crashReportOnboarding) {
+ return 'crashReportOnboarding';
+ }
if (crashApiPlatform) {
return 'feedbackOnboardingCrashApi';
}
diff --git a/static/app/components/feedback/feedbackSetupPanel.tsx b/static/app/components/feedback/feedbackSetupPanel.tsx
index 9c9f1530c5b1d8..9c7bda62c43014 100644
--- a/static/app/components/feedback/feedbackSetupPanel.tsx
+++ b/static/app/components/feedback/feedbackSetupPanel.tsx
@@ -37,7 +37,13 @@ export default function FeedbackSetupPanel() {
)}
{hasNewOnboarding ? (
-