Compare commits
14 commits: v0.58.2-on ... v0.58.0-cl

| Author | SHA1 | Date |
|---|---|---|
| | 5005923ef4 | |
| | db4338be42 | |
| | c7d0598ec0 | |
| | 4978fb9599 | |
| | 7b18c3ba06 | |
| | 92cdb36879 | |
| | 580f0b816e | |
| | b770fc2457 | |
| | c177230cce | |
| | 2112047a02 | |
| | 03c193d5a1 | |
| | b83b295318 | |
| | fbe75cd057 | |
| | 860145fb1d | |
.github/workflows/staging-deployment.yaml (vendored, 2 changed lines)
@@ -31,7 +31,6 @@ jobs:
GCP_ZONE: ${{ secrets.GCP_ZONE }}
GCP_INSTANCE: ${{ secrets.GCP_INSTANCE }}
CLOUDSDK_CORE_DISABLE_PROMPTS: 1
KAFKA_SPAN_EVAL: true
run: |
read -r -d '' COMMAND <<EOF || true
echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"

@@ -39,6 +38,7 @@ jobs:
export DOCKER_TAG="${GITHUB_SHA:0:7}" # needed for child process to access it
export OTELCOL_TAG="main"
export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
export KAFKA_SPAN_EVAL="true"
docker system prune --force
docker pull signoz/signoz-otel-collector:main
docker pull signoz/signoz-schema-migrator:main
@@ -191,6 +191,7 @@ services:
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- KAFKA_SPAN_EVAL=${KAFKA_SPAN_EVAL:-false}
restart: on-failure
healthcheck:
test:
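The compose change above wires the flag through with a shell-style default (`${KAFKA_SPAN_EVAL:-false}`), while the staging workflow exports `KAFKA_SPAN_EVAL="true"`. As a rough illustration of the consuming side only, a minimal Go sketch of reading such a boolean environment variable with a fallback could look like this (hypothetical helper; the actual SigNoz services may resolve the flag differently):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// envBool mirrors the compose-style default ${KAFKA_SPAN_EVAL:-false}:
// it returns the parsed boolean when the variable is set and valid,
// otherwise the provided fallback. (Illustrative helper, not the
// actual SigNoz implementation.)
func envBool(key string, fallback bool) bool {
	raw, ok := os.LookupEnv(key)
	if !ok {
		return fallback
	}
	val, err := strconv.ParseBool(raw)
	if err != nil {
		return fallback
	}
	return val
}

func main() {
	kafkaSpanEval := envBool("KAFKA_SPAN_EVAL", false)
	fmt.Println("KAFKA_SPAN_EVAL:", kafkaSpanEval)
}
```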
@@ -1,6 +1,7 @@
package model

import (
"go.signoz.io/signoz/pkg/query-service/constants"
basemodel "go.signoz.io/signoz/pkg/query-service/model"
)

@@ -134,6 +135,13 @@ var BasicPlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.HostsInfraMonitoring,
Active: constants.EnableHostsInfraMonitoring(),
Usage: 0,
UsageLimit: -1,
Route: "",
},
}

var ProPlan = basemodel.FeatureSet{

@@ -249,6 +257,13 @@ var ProPlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.HostsInfraMonitoring,
Active: constants.EnableHostsInfraMonitoring(),
Usage: 0,
UsageLimit: -1,
Route: "",
},
}

var EnterprisePlan = basemodel.FeatureSet{

@@ -378,4 +393,11 @@ var EnterprisePlan = basemodel.FeatureSet{
UsageLimit: -1,
Route: "",
},
basemodel.Feature{
Name: basemodel.HostsInfraMonitoring,
Active: constants.EnableHostsInfraMonitoring(),
Usage: 0,
UsageLimit: -1,
Route: "",
},
}
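The hunks above register a `HostsInfraMonitoring` feature in the Basic, Pro, and Enterprise plans, with `Active` driven by `constants.EnableHostsInfraMonitoring()`. For orientation only, here is a minimal sketch of the feature-set lookup pattern using stand-in types; the real `Feature`/`FeatureSet` definitions live in `go.signoz.io/signoz/pkg/query-service/model` and may differ in shape:

```go
package main

import "fmt"

// Simplified stand-ins for basemodel.Feature / basemodel.FeatureSet,
// for illustration only.
type Feature struct {
	Name       string
	Active     bool
	Usage      int
	UsageLimit int
	Route      string
}

type FeatureSet []Feature

// IsActive reports whether a named feature is enabled in the plan.
func (fs FeatureSet) IsActive(name string) bool {
	for _, f := range fs {
		if f.Name == name {
			return f.Active
		}
	}
	return false
}

func main() {
	basicPlan := FeatureSet{
		{Name: "HOSTS_INFRA_MONITORING", Active: true, UsageLimit: -1},
	}
	fmt.Println(basicPlan.IsActive("HOSTS_INFRA_MONITORING")) // true
}
```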
@@ -34,7 +34,7 @@
"@dnd-kit/core": "6.1.0",
"@dnd-kit/modifiers": "7.0.0",
"@dnd-kit/sortable": "8.0.0",
"@grafana/data": "^9.5.2",
"@grafana/data": "^11.2.3",
"@mdx-js/loader": "2.3.0",
"@mdx-js/react": "2.3.0",
"@monaco-editor/react": "^4.3.1",

@@ -51,7 +51,7 @@
"ansi-to-html": "0.7.2",
"antd": "5.11.0",
"antd-table-saveas-excel": "2.2.1",
"axios": "1.7.4",
"axios": "1.7.7",
"babel-eslint": "^10.1.0",
"babel-jest": "^29.6.4",
"babel-loader": "9.1.3",

@@ -76,7 +76,7 @@
"fontfaceobserver": "2.3.0",
"history": "4.10.1",
"html-webpack-plugin": "5.5.0",
"http-proxy-middleware": "2.0.6",
"http-proxy-middleware": "2.0.7",
"i18next": "^21.6.12",
"i18next-browser-languagedetector": "^6.1.3",
"i18next-http-backend": "^1.3.2",

@@ -126,7 +126,7 @@
"uplot": "1.6.31",
"uuid": "^8.3.2",
"web-vitals": "^0.2.4",
"webpack": "5.88.2",
"webpack": "5.94.0",
"webpack-dev-server": "^4.15.1",
"webpack-retry-chunk-load-plugin": "3.1.1",
"xstate": "^4.31.0"
@@ -9,7 +9,7 @@ import ROUTES from 'constants/routes';
import useLicense from 'hooks/useLicense';
import { useNotifications } from 'hooks/useNotifications';
import history from 'lib/history';
import { isEmpty } from 'lodash-es';
import { isEmpty, isNull } from 'lodash-es';
import { ReactChild, useEffect, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useQuery } from 'react-query';

@@ -35,13 +35,18 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const location = useLocation();
const { pathname } = location;

const { org, orgPreferences } = useSelector<AppState, AppReducer>(
(state) => state.app,
);
const [isLoading, setIsLoading] = useState<boolean>(true);

const [isOnboardingComplete, setIsOnboardingComplete] = useState<
boolean | null
>(null);
const {
org,
orgPreferences,
user,
role,
isUserFetching,
isUserFetchingError,
isLoggedIn: isLoggedInState,
isFetchingOrgPreferences,
} = useSelector<AppState, AppReducer>((state) => state.app);

const mapRoutes = useMemo(
() =>

@@ -56,30 +61,19 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
[pathname],
);

useEffect(() => {
if (orgPreferences && !isEmpty(orgPreferences)) {
const onboardingPreference = orgPreferences?.find(
const isOnboardingComplete = useMemo(
() =>
orgPreferences?.find(
(preference: Record<string, any>) => preference.key === 'ORG_ONBOARDING',
);

if (onboardingPreference) {
setIsOnboardingComplete(onboardingPreference.value);
}
}
}, [orgPreferences]);
)?.value,
[orgPreferences],
);

const {
data: licensesData,
isFetching: isFetchingLicensesData,
} = useLicense();

const {
isUserFetching,
isUserFetchingError,
isLoggedIn: isLoggedInState,
isFetchingOrgPreferences,
} = useSelector<AppState, AppReducer>((state) => state.app);

const { t } = useTranslation(['common']);

const localStorageUserAuthToken = getInitialUserTokenRefreshToken();

@@ -135,7 +129,8 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
// Check if the onboarding should be shown based on the org users and onboarding completion status, wait for org users and preferences to load
const shouldShowOnboarding = (): boolean => {
// Only run this effect if the org users and preferences are loaded
if (!isLoadingOrgUsers) {

if (!isLoadingOrgUsers && !isFetchingOrgPreferences) {
const isFirstUser = checkFirstTimeUser();

// Redirect to get started if it's not the first user or if the onboarding is complete

@@ -145,6 +140,26 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
return false;
};

const handleRedirectForOrgOnboarding = (key: string): void => {
if (
isLoggedInState &&
!isFetchingOrgPreferences &&
!isLoadingOrgUsers &&
!isEmpty(orgUsers?.payload) &&
!isNull(orgPreferences)
) {
if (key === 'ONBOARDING' && isOnboardingComplete) {
history.push(ROUTES.APPLICATION);
}

const isFirstTimeUser = checkFirstTimeUser();

if (isFirstTimeUser && !isOnboardingComplete) {
history.push(ROUTES.ONBOARDING);
}
}
};

const handleUserLoginIfTokenPresent = async (
key: keyof typeof ROUTES,
): Promise<void> => {

@@ -166,15 +181,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
response.payload.refreshJwt,
);

const showOnboarding = shouldShowOnboarding();

if (
userResponse &&
showOnboarding &&
userResponse.payload.role === 'ADMIN'
) {
history.push(ROUTES.ONBOARDING);
}
handleRedirectForOrgOnboarding(key);

if (
userResponse &&

@@ -203,7 +210,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
) {
handleUserLoginIfTokenPresent(key);
} else {
// user does have localstorage values
handleRedirectForOrgOnboarding(key);

navigateToLoginIfNotLoggedIn(isLocalStorageLoggedIn);
}

@@ -241,9 +248,9 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
}, [org]);

const handleRouting = (): void => {
const showOnboarding = shouldShowOnboarding();
const showOrgOnboarding = shouldShowOnboarding();

if (showOnboarding) {
if (showOrgOnboarding && !isOnboardingComplete) {
history.push(ROUTES.ONBOARDING);
} else {
history.push(ROUTES.APPLICATION);

@@ -251,17 +258,27 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
};

useEffect(() => {
// Only run this effect if the org users and preferences are loaded
if (!isLoadingOrgUsers && isOnboardingComplete !== null) {
const isFirstUser = checkFirstTimeUser();
const { isPrivate } = currentRoute || {
isPrivate: false,
};

// Redirect to get started if it's not the first user or if the onboarding is complete
if (isFirstUser && !isOnboardingComplete) {
history.push(ROUTES.ONBOARDING);
}
if (isLoggedInState && role && role !== 'ADMIN') {
setIsLoading(false);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isLoadingOrgUsers, isOnboardingComplete, orgUsers]);

if (!isPrivate) {
setIsLoading(false);
}

if (
!isEmpty(user) &&
!isFetchingOrgPreferences &&
!isEmpty(orgUsers?.payload) &&
!isNull(orgPreferences)
) {
setIsLoading(false);
}
}, [currentRoute, user, role, orgUsers, orgPreferences]);

// eslint-disable-next-line sonarjs/cognitive-complexity
useEffect(() => {

@@ -284,7 +301,6 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
handlePrivateRoutes(key);
} else {
// no need to fetch the user and make user fetching false

if (getLocalStorageApi(LOCALSTORAGE.IS_LOGGED_IN) === 'true') {
handleRouting();
}

@@ -311,13 +327,20 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
history.push(ROUTES.SOMETHING_WENT_WRONG);
}
})();
}, [dispatch, isLoggedInState, currentRoute, licensesData]);
}, [
dispatch,
isLoggedInState,
currentRoute,
licensesData,
orgUsers,
orgPreferences,
]);

if (isUserFetchingError) {
return <Redirect to={ROUTES.SOMETHING_WENT_WRONG} />;
}

if (isUserFetching || (isLoggedInState && isFetchingOrgPreferences)) {
if (isUserFetching || isLoading) {
return <Spinner tip="Loading..." />;
}
@@ -37,6 +37,7 @@ import {
UPDATE_ORG_PREFERENCES,
} from 'types/actions/app';
import AppReducer, { User } from 'types/reducer/app';
import { USER_ROLES } from 'types/roles';
import { extractDomain, isCloudUser, isEECloudUser } from 'utils/app';

import PrivateRoute from './Private';

@@ -74,7 +75,7 @@ function App(): JSX.Element {
const { data: orgPreferences, isLoading: isLoadingOrgPreferences } = useQuery({
queryFn: () => getAllOrgPreferences(),
queryKey: ['getOrgPreferences'],
enabled: isLoggedInState,
enabled: isLoggedInState && role === USER_ROLES.ADMIN,
});

useEffect(() => {

@@ -95,6 +96,17 @@ function App(): JSX.Element {
}
}, [orgPreferences, dispatch, isLoadingOrgPreferences]);

useEffect(() => {
if (isLoggedInState && role !== USER_ROLES.ADMIN) {
dispatch({
type: UPDATE_IS_FETCHING_ORG_PREFERENCES,
payload: {
isFetchingOrgPreferences: false,
},
});
}
}, [isLoggedInState, role, dispatch]);

const featureResponse = useGetFeatureFlag((allFlags) => {
dispatch({
type: UPDATE_FEATURE_FLAG_RESPONSE,
@@ -386,32 +386,31 @@ function RuleOptions({
renderThresholdRuleOpts()}

<Space direction="vertical" size="large">
{queryCategory !== EQueryType.PROM &&
ruleType !== AlertDetectionTypes.ANOMALY_DETECTION_ALERT && (
<Space direction="horizontal" align="center">
<Form.Item noStyle name={['condition', 'target']}>
<InputNumber
addonBefore={t('field_threshold')}
value={alertDef?.condition?.target}
onChange={onChange}
type="number"
onWheel={(e): void => e.currentTarget.blur()}
/>
</Form.Item>
{ruleType !== AlertDetectionTypes.ANOMALY_DETECTION_ALERT && (
<Space direction="horizontal" align="center">
<Form.Item noStyle name={['condition', 'target']}>
<InputNumber
addonBefore={t('field_threshold')}
value={alertDef?.condition?.target}
onChange={onChange}
type="number"
onWheel={(e): void => e.currentTarget.blur()}
/>
</Form.Item>

<Form.Item noStyle>
<Select
getPopupContainer={popupContainer}
allowClear
showSearch
options={categorySelectOptions}
placeholder={t('field_unit')}
value={alertDef.condition.targetUnit}
onChange={onChangeAlertUnit}
/>
</Form.Item>
</Space>
)}
<Form.Item noStyle>
<Select
getPopupContainer={popupContainer}
allowClear
showSearch
options={categorySelectOptions}
placeholder={t('field_unit')}
value={alertDef.condition.targetUnit}
onChange={onChangeAlertUnit}
/>
</Form.Item>
</Space>
)}

<Collapse>
<Collapse.Panel header={t('More options')} key="1">
@@ -73,6 +73,19 @@ export enum AlertDetectionTypes {
ANOMALY_DETECTION_ALERT = 'anomaly_rule',
}

const ALERT_SETUP_GUIDE_URLS: Record<AlertTypes, string> = {
[AlertTypes.METRICS_BASED_ALERT]:
'https://signoz.io/docs/alerts-management/metrics-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
[AlertTypes.LOGS_BASED_ALERT]:
'https://signoz.io/docs/alerts-management/log-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
[AlertTypes.TRACES_BASED_ALERT]:
'https://signoz.io/docs/alerts-management/trace-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
[AlertTypes.EXCEPTIONS_BASED_ALERT]:
'https://signoz.io/docs/alerts-management/exceptions-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
[AlertTypes.ANOMALY_BASED_ALERT]:
'https://signoz.io/docs/alerts-management/anomaly-based-alerts/?utm_source=product&utm_medium=alert-creation-page',
};

// eslint-disable-next-line sonarjs/cognitive-complexity
function FormAlertRules({
alertType,

@@ -702,6 +715,29 @@ function FormAlertRules({
const isRuleCreated = !ruleId || ruleId === 0;

function handleRedirection(option: AlertTypes): void {
let url;
if (
option === AlertTypes.METRICS_BASED_ALERT &&
alertTypeFromURL === AlertDetectionTypes.ANOMALY_DETECTION_ALERT
) {
url = ALERT_SETUP_GUIDE_URLS[AlertTypes.ANOMALY_BASED_ALERT];
} else {
url = ALERT_SETUP_GUIDE_URLS[option];
}

if (url) {
logEvent('Alert: Check example alert clicked', {
dataSource: ALERTS_DATA_SOURCE_MAP[alertDef?.alertType as AlertTypes],
isNewRule: !ruleId || ruleId === 0,
ruleId,
queryType: currentQuery.queryType,
link: url,
});
window.open(url, '_blank');
}
}

useEffect(() => {
if (!isRuleCreated) {
logEvent('Alert: Edit page visited', {

@@ -752,7 +788,11 @@ function FormAlertRules({
)}
</div>

<Button className="periscope-btn" icon={<ExternalLink size={14} />}>
<Button
className="periscope-btn"
onClick={(): void => handleRedirection(alertDef.alertType as AlertTypes)}
icon={<ExternalLink size={14} />}
>
Alert Setup Guide
</Button>
</div>
@@ -312,7 +312,7 @@ export default function Onboarding(): JSX.Element {
<div
onClick={(): void => {
logEvent('Onboarding V2: Skip Button Clicked', {});
history.push('/');
history.push(ROUTES.APPLICATION);
}}
className="skip-to-console"
>
@@ -10,6 +10,7 @@ import {
ArrowLeft,
ArrowRight,
CheckCircle,
Loader2,
Plus,
TriangleAlert,
X,

@@ -33,6 +34,7 @@ interface TeamMember {
}

interface InviteTeamMembersProps {
isLoading: boolean;
teamMembers: TeamMember[] | null;
setTeamMembers: (teamMembers: TeamMember[]) => void;
onNext: () => void;

@@ -40,6 +42,7 @@ interface InviteTeamMembersProps {
}

function InviteTeamMembers({
isLoading,
teamMembers,
setTeamMembers,
onNext,

@@ -67,8 +70,6 @@ function InviteTeamMembers({

const [disableNextButton, setDisableNextButton] = useState<boolean>(false);

const [allInvitesSent, setAllInvitesSent] = useState<boolean>(false);

const defaultTeamMember: TeamMember = {
email: '',
role: 'EDITOR',

@@ -157,7 +158,6 @@ function InviteTeamMembers({
setError(null);
setHasErrors(false);
setInviteUsersErrorResponse(null);
setAllInvitesSent(true);

setInviteUsersSuccessResponse(successfulInvites);

@@ -358,33 +358,36 @@ function InviteTeamMembers({
</div>
)}

{inviteUsersSuccessResponse && (
<div className="success-message-container invite-users-success-message-container">
{inviteUsersSuccessResponse?.map((success, index) => (
<Typography.Text
className="success-message"
// eslint-disable-next-line react/no-array-index-key
key={`${success}-${index}`}
>
<CheckCircle size={14} /> {success}
</Typography.Text>
))}
</div>
)}

{hasErrors && (
<div className="error-message-container invite-users-error-message-container">
{inviteUsersErrorResponse?.map((error, index) => (
<Typography.Text
className="error-message"
type="danger"
// eslint-disable-next-line react/no-array-index-key
key={`${error}-${index}`}
>
<TriangleAlert size={14} /> {error}
</Typography.Text>
))}
</div>
<>
{/* show only when invites are sent successfully & partial error is present */}
{inviteUsersSuccessResponse && inviteUsersErrorResponse && (
<div className="success-message-container invite-users-success-message-container">
{inviteUsersSuccessResponse?.map((success, index) => (
<Typography.Text
className="success-message"
// eslint-disable-next-line react/no-array-index-key
key={`${success}-${index}`}
>
<CheckCircle size={14} /> {success}
</Typography.Text>
))}
</div>
)}

<div className="error-message-container invite-users-error-message-container">
{inviteUsersErrorResponse?.map((error, index) => (
<Typography.Text
className="error-message"
type="danger"
// eslint-disable-next-line react/no-array-index-key
key={`${error}-${index}`}
>
<TriangleAlert size={14} /> {error}
</Typography.Text>
))}
</div>
</>
)}
</div>

@@ -413,17 +416,23 @@ function InviteTeamMembers({
type="primary"
className="next-button"
onClick={handleNext}
loading={isSendingInvites || disableNextButton}
loading={isSendingInvites || isLoading || disableNextButton}
>
{allInvitesSent ? 'Invites Sent' : 'Send Invites'}

{allInvitesSent ? <CheckCircle size={14} /> : <ArrowRight size={14} />}
Send Invites
<ArrowRight size={14} />
</Button>
</div>

<div className="do-later-container">
<Button type="link" onClick={handleDoLater}>
I'll do this later
<Button
type="link"
className="do-later-button"
onClick={handleDoLater}
disabled={isSendingInvites || disableNextButton}
>
{isLoading && <Loader2 className="animate-spin" size={16} />}

<span>I'll do this later</span>
</Button>
</div>
</div>
@@ -189,6 +189,15 @@
justify-content: center;
align-items: center;
margin-top: 24px;

.do-later-button {
font-size: 12px;

display: flex;
justify-content: center;
align-items: center;
gap: 8px;
}
}

.question {
@@ -314,7 +314,7 @@ function OptimiseSignozNeeds({

<div className="do-later-container">
<Button type="link" onClick={handleWillDoLater}>
Skip for now
I'll do this later
</Button>
</div>
</div>
@@ -1,25 +1,19 @@
import './OnboardingQuestionaire.styles.scss';

import { Skeleton } from 'antd';
import { NotificationInstance } from 'antd/es/notification/interface';
import logEvent from 'api/common/logEvent';
import updateProfileAPI from 'api/onboarding/updateProfile';
import getAllOrgPreferences from 'api/preferences/getAllOrgPreferences';
import getOrgPreference from 'api/preferences/getOrgPreference';
import updateOrgPreferenceAPI from 'api/preferences/updateOrgPreference';
import getOrgUser from 'api/user/getOrgUser';
import { AxiosError } from 'axios';
import { SOMETHING_WENT_WRONG } from 'constants/api';
import ROUTES from 'constants/routes';
import { InviteTeamMembersProps } from 'container/OrganizationSettings/PendingInvitesContainer';
import { useNotifications } from 'hooks/useNotifications';
import history from 'lib/history';
import { isEmpty } from 'lodash-es';
import { Dispatch, useEffect, useState } from 'react';
import { useEffect, useState } from 'react';
import { useMutation, useQuery } from 'react-query';
import { useDispatch, useSelector } from 'react-redux';
import { AppState } from 'store/reducers';
import AppActions from 'types/actions';
import {
UPDATE_IS_FETCHING_ORG_PREFERENCES,
UPDATE_ORG_PREFERENCES,

@@ -70,11 +64,13 @@ const INITIAL_OPTIMISE_SIGNOZ_DETAILS: OptimiseSignozDetails = {
function OnboardingQuestionaire(): JSX.Element {
const { notifications } = useNotifications();
const { org } = useSelector<AppState, AppReducer>((state) => state.app);
const dispatch = useDispatch();
const [currentStep, setCurrentStep] = useState<number>(1);
const [orgDetails, setOrgDetails] = useState<OrgDetails>(INITIAL_ORG_DETAILS);
const [signozDetails, setSignozDetails] = useState<SignozDetails>(
INITIAL_SIGNOZ_DETAILS,
);

const [
optimiseSignozDetails,
setOptimiseSignozDetails,

@@ -83,101 +79,12 @@ function OnboardingQuestionaire(): JSX.Element {
InviteTeamMembersProps[] | null
>(null);

const { data: orgUsers, isLoading: isLoadingOrgUsers } = useQuery({
queryFn: () =>
getOrgUser({
orgId: (org || [])[0].id,
}),
queryKey: ['getOrgUser', org?.[0].id],
});

const dispatch = useDispatch<Dispatch<AppActions>>();
const [currentOrgData, setCurrentOrgData] = useState<OrgData | null>(null);
const [isOnboardingComplete, setIsOnboardingComplete] = useState<boolean>(
false,
);

const {
data: onboardingPreferenceData,
isLoading: isLoadingOnboardingPreference,
} = useQuery({
queryFn: () => getOrgPreference({ preferenceID: 'ORG_ONBOARDING' }),
queryKey: ['getOrgPreferences', 'ORG_ONBOARDING'],
});

const { data: orgPreferences, isLoading: isLoadingOrgPreferences } = useQuery({
queryFn: () => getAllOrgPreferences(),
queryKey: ['getOrgPreferences'],
enabled: isOnboardingComplete,
});

useEffect(() => {
if (orgPreferences && !isLoadingOrgPreferences) {
dispatch({
type: UPDATE_IS_FETCHING_ORG_PREFERENCES,
payload: {
isFetchingOrgPreferences: false,
},
});

dispatch({
type: UPDATE_ORG_PREFERENCES,
payload: {
orgPreferences: orgPreferences.payload?.data || null,
},
});
}
}, [orgPreferences, dispatch, isLoadingOrgPreferences]);

useEffect(() => {
if (
!isLoadingOnboardingPreference &&
!isEmpty(onboardingPreferenceData?.payload?.data)
) {
const preferenceId = onboardingPreferenceData?.payload?.data?.preference_id;
const preferenceValue =
onboardingPreferenceData?.payload?.data?.preference_value;

if (preferenceId === 'ORG_ONBOARDING') {
setIsOnboardingComplete(preferenceValue as boolean);
}
}
}, [onboardingPreferenceData, isLoadingOnboardingPreference]);

const checkFirstTimeUser = (): boolean => {
const users = orgUsers?.payload || [];

const remainingUsers = users.filter(
(user) => user.email !== 'admin@signoz.cloud',
);

return remainingUsers.length === 1;
};

useEffect(() => {
// Only run this effect if the org users and preferences are loaded
if (!isLoadingOrgUsers && !isLoadingOnboardingPreference) {
const isFirstUser = checkFirstTimeUser();

// Redirect to get started if it's not the first user or if the onboarding is complete
if (!isFirstUser || isOnboardingComplete) {
history.push(ROUTES.GET_STARTED);

logEvent('User Onboarding: Redirected to Get Started', {
isFirstUser,
isOnboardingComplete,
});
} else {
logEvent('User Onboarding: Started', {});
}
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [
isLoadingOrgUsers,
isLoadingOnboardingPreference,
isOnboardingComplete,
orgUsers,
]);
const [
updatingOrgOnboardingStatus,
setUpdatingOrgOnboardingStatus,
] = useState<boolean>(false);

useEffect(() => {
if (org) {

@@ -191,6 +98,35 @@ function OnboardingQuestionaire(): JSX.Element {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [org]);

const { refetch: refetchOrgPreferences } = useQuery({
queryFn: () => getAllOrgPreferences(),
queryKey: ['getOrgPreferences'],
enabled: false,
refetchOnWindowFocus: false,
onSuccess: (response) => {
dispatch({
type: UPDATE_IS_FETCHING_ORG_PREFERENCES,
payload: {
isFetchingOrgPreferences: false,
},
});

dispatch({
type: UPDATE_ORG_PREFERENCES,
payload: {
orgPreferences: response.payload?.data || null,
},
});

setUpdatingOrgOnboardingStatus(false);

history.push(ROUTES.GET_STARTED);
},
onError: () => {
setUpdatingOrgOnboardingStatus(false);
},
});

const isNextDisabled =
optimiseSignozDetails.logsPerDay === 0 &&
optimiseSignozDetails.hostsPerDay === 0 &&

@@ -210,10 +146,12 @@ function OnboardingQuestionaire(): JSX.Element {

const { mutate: updateOrgPreference } = useMutation(updateOrgPreferenceAPI, {
onSuccess: () => {
setIsOnboardingComplete(true);
refetchOrgPreferences();
},
onError: (error) => {
showErrorNotification(notifications, error as AxiosError);

setUpdatingOrgOnboardingStatus(false);
},
});

@@ -242,6 +180,7 @@ function OnboardingQuestionaire(): JSX.Element {
};

const handleOnboardingComplete = (): void => {
setUpdatingOrgOnboardingStatus(true);
updateOrgPreference({
preferenceID: 'ORG_ONBOARDING',
value: true,

@@ -255,55 +194,46 @@ function OnboardingQuestionaire(): JSX.Element {
</div>

<div className="onboarding-questionaire-content">
{(isLoadingOnboardingPreference || isLoadingOrgUsers) && (
<div className="onboarding-questionaire-loading-container">
<Skeleton />
</div>
{currentStep === 1 && (
<OrgQuestions
currentOrgData={currentOrgData}
orgDetails={orgDetails}
onNext={(orgDetails: OrgDetails): void => {
setOrgDetails(orgDetails);
setCurrentStep(2);
}}
/>
)}

{!isLoadingOnboardingPreference && !isLoadingOrgUsers && (
<>
{currentStep === 1 && (
<OrgQuestions
currentOrgData={currentOrgData}
orgDetails={orgDetails}
onNext={(orgDetails: OrgDetails): void => {
setOrgDetails(orgDetails);
setCurrentStep(2);
}}
/>
)}
{currentStep === 2 && (
<AboutSigNozQuestions
signozDetails={signozDetails}
setSignozDetails={setSignozDetails}
onBack={(): void => setCurrentStep(1)}
onNext={(): void => setCurrentStep(3)}
/>
)}

{currentStep === 2 && (
<AboutSigNozQuestions
signozDetails={signozDetails}
setSignozDetails={setSignozDetails}
onBack={(): void => setCurrentStep(1)}
onNext={(): void => setCurrentStep(3)}
/>
)}
{currentStep === 3 && (
<OptimiseSignozNeeds
isNextDisabled={isNextDisabled}
isUpdatingProfile={isUpdatingProfile}
optimiseSignozDetails={optimiseSignozDetails}
setOptimiseSignozDetails={setOptimiseSignozDetails}
onBack={(): void => setCurrentStep(2)}
onNext={handleUpdateProfile}
onWillDoLater={(): void => setCurrentStep(4)} // This is temporary, only to skip gateway api call as it's not setup on staging yet
/>
)}

{currentStep === 3 && (
<OptimiseSignozNeeds
isNextDisabled={isNextDisabled}
isUpdatingProfile={isUpdatingProfile}
optimiseSignozDetails={optimiseSignozDetails}
setOptimiseSignozDetails={setOptimiseSignozDetails}
onBack={(): void => setCurrentStep(2)}
onNext={handleUpdateProfile}
onWillDoLater={(): void => setCurrentStep(4)} // This is temporary, only to skip gateway api call as it's not setup on staging yet
/>
)}

{currentStep === 4 && (
<InviteTeamMembers
teamMembers={teamMembers}
setTeamMembers={setTeamMembers}
onBack={(): void => setCurrentStep(3)}
onNext={handleOnboardingComplete}
/>
)}
</>
{currentStep === 4 && (
<InviteTeamMembers
isLoading={updatingOrgOnboardingStatus}
teamMembers={teamMembers}
setTeamMembers={setTeamMembers}
onBack={(): void => setCurrentStep(3)}
onNext={handleOnboardingComplete}
/>
)}
</div>
</div>
@@ -163,7 +163,8 @@ export const getUPlotChartOptions = ({

const stackBarChart = stackChart && isUndefined(hiddenGraph);

const isAnomalyRule = apiResponse?.data?.newResult?.data?.result[0].isAnomaly;
const isAnomalyRule =
apiResponse?.data?.newResult?.data?.result[0]?.isAnomaly || false;

const series = getStackedSeries(apiResponse?.data?.result || []);
@@ -261,7 +261,7 @@ function SignUp({ version }: SignUpProps): JSX.Element {
values,
async (): Promise<void> => {
if (isOnboardingEnabled && isCloudUser()) {
history.push(ROUTES.ONBOARDING);
history.push(ROUTES.GET_STARTED);
} else {
history.push(ROUTES.APPLICATION);
}
File diff suppressed because it is too large.
@@ -1,5 +1,5 @@
# use a minimal alpine image
FROM alpine:3.18.5
FROM alpine:3.20.3

# Add Maintainer Info
LABEL maintainer="signoz"
@@ -2833,7 +2833,7 @@ func (aH *APIHandler) onboardKafka(
return
}

chq, err := mq.BuildClickHouseQuery(messagingQueue, mq.KafkaQueue, "onboard_kafka")
queryRangeParams, err := mq.BuildBuilderQueriesKafkaOnboarding(messagingQueue)

if err != nil {
zap.L().Error(err.Error())

@@ -2841,66 +2841,69 @@ func (aH *APIHandler) onboardKafka(
return
}

result, err := aH.reader.GetListResultV3(r.Context(), chq.Query)

results, errQueriesByName, err := aH.querierV2.QueryRange(r.Context(), queryRangeParams)
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErrObj, err)
RespondError(w, apiErrObj, errQueriesByName)
return
}

var entries []mq.OnboardingResponse

for _, result := range result {
for key, value := range result.Data {
var message, attribute, status string
var fetchLatencyState, consumerLagState bool

intValue := int(*value.(*uint8))

if key == "entries" {
attribute = "telemetry ingestion"
if intValue != 0 {
entries = nil
entry := mq.OnboardingResponse{
Attribute: attribute,
Message: "No data available in the given time range",
Status: "0",
for _, result := range results {
for _, series := range result.Series {
for _, point := range series.Points {
pointValue := point.Value
if pointValue > 0 {
if result.QueryName == "fetch_latency" {
fetchLatencyState = true
break
}
if result.QueryName == "consumer_lag" {
consumerLagState = true
break
}
entries = append(entries, entry)
break
} else {
status = "1"
}
} else if key == "fetchlatency" {
attribute = "kafka_consumer_fetch_latency_avg"
if intValue != 0 {
status = "0"
message = "Metric kafka_consumer_fetch_latency_avg is not present in the given time range."
} else {
status = "1"
}
} else if key == "grouplag" {
attribute = "kafka_consumer_group_lag"
if intValue != 0 {
status = "0"
message = "Metric kafka_consumer_group_lag is not present in the given time range."
} else {
status = "1"
}
}

entry := mq.OnboardingResponse{
Attribute: attribute,
Message: message,
Status: status,
}
entries = append(entries, entry)
}
}

sort.Slice(entries, func(i, j int) bool {
return entries[i].Attribute < entries[j].Attribute
})
if !fetchLatencyState && !consumerLagState {
entries = append(entries, mq.OnboardingResponse{
Attribute: "telemetry ingestion",
Message: "No data available in the given time range",
Status: "0",
})
}

if !fetchLatencyState {
entries = append(entries, mq.OnboardingResponse{
Attribute: "kafka_consumer_fetch_latency_avg",
Message: "Metric kafka_consumer_fetch_latency_avg is not present in the given time range.",
Status: "0",
})
} else {
entries = append(entries, mq.OnboardingResponse{
Attribute: "kafka_consumer_fetch_latency_avg",
Status: "1",
})
}

if !consumerLagState {
entries = append(entries, mq.OnboardingResponse{
Attribute: "kafka_consumer_group_lag",
Message: "Metric kafka_consumer_group_lag is not present in the given time range.",
Status: "0",
})
} else {
entries = append(entries, mq.OnboardingResponse{
Attribute: "kafka_consumer_group_lag",
Status: "1",
})
}

aH.Respond(w, entries)
}
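The rewritten onboarding check above walks the query-range results and flips a per-metric flag as soon as any series point is greater than zero, then emits one `OnboardingResponse` per metric. A compact, self-contained sketch of that scanning pattern, using simplified stand-in types rather than the query-service's actual ones:

```go
package main

import "fmt"

// Simplified stand-ins for the query-range result types.
type Point struct{ Value float64 }
type Series struct{ Points []Point }
type Result struct {
	QueryName string
	Series    []Series
}

// hasData reports whether the named query returned any point > 0,
// mirroring how fetch_latency / consumer_lag presence is detected.
func hasData(results []Result, queryName string) bool {
	for _, r := range results {
		if r.QueryName != queryName {
			continue
		}
		for _, s := range r.Series {
			for _, p := range s.Points {
				if p.Value > 0 {
					return true
				}
			}
		}
	}
	return false
}

func main() {
	results := []Result{
		{QueryName: "fetch_latency", Series: []Series{{Points: []Point{{Value: 3}}}}},
		{QueryName: "consumer_lag", Series: []Series{{Points: []Point{{Value: 0}}}}},
	}
	fmt.Println(hasData(results, "fetch_latency")) // true
	fmt.Println(hasData(results, "consumer_lag"))  // false
}
```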
@@ -53,6 +53,10 @@ func getParamsForTopHosts(req model.HostListRequest) (int64, string, string) {
return getParamsForTopItems(req.Start, req.End)
}

func getParamsForTopProcesses(req model.ProcessListRequest) (int64, string, string) {
return getParamsForTopItems(req.Start, req.End)
}

func getParamsForTopPods(req model.PodListRequest) (int64, string, string) {
return getParamsForTopItems(req.Start, req.End)
}
@@ -2,10 +2,12 @@ package inframetrics

import (
"context"
"math"
"sort"
"strings"
"time"

"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
"go.signoz.io/signoz/pkg/query-service/common"
"go.signoz.io/signoz/pkg/query-service/interfaces"
"go.signoz.io/signoz/pkg/query-service/model"

@@ -54,9 +56,16 @@ var (
// TODO(srikanthccv): remove hardcoded metric name and support keys from any system metric
metricToUseForHostAttributes = "system_cpu_load_average_15m"
hostNameAttrKey = "host_name"
// TODO(srikanthccv): remove k8s hacky logic from hosts repo after charts users are migrated
k8sNodeNameAttrKey = "k8s_node_name"
agentNameToIgnore = "k8s-infra-otel-agent"
agentNameToIgnore = "k8s-infra-otel-agent"
hostAttrsToEnrich = []string{
"os_type",
}
metricNamesForHosts = map[string]string{
"cpu": "system_cpu_time",
"memory": "system_memory_usage",
"load15": "system_cpu_load_average_15m",
"wait": "system_cpu_time",
}
)

func NewHostsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *HostsRepo {

@@ -112,29 +121,10 @@ func (h *HostsRepo) GetHostAttributeValues(ctx context.Context, req v3.FilterAtt
hostNames = append(hostNames, attributeValue)
}

req.FilterAttributeKey = k8sNodeNameAttrKey
req.DataSource = v3.DataSourceMetrics
req.AggregateAttribute = metricToUseForHostAttributes
if req.Limit == 0 {
req.Limit = 50
}

attributeValuesResponse, err = h.reader.GetMetricAttributeValues(ctx, &req)
if err != nil {
return nil, err
}
for _, attributeValue := range attributeValuesResponse.StringAttributeValues {
if strings.Contains(attributeValue, agentNameToIgnore) {
continue
}
hostNames = append(hostNames, attributeValue)
}

return &v3.FilterAttributeValueResponse{StringAttributeValues: hostNames}, nil
}

func (h *HostsRepo) getActiveHosts(ctx context.Context,
req model.HostListRequest, hostNameAttrKey string) (map[string]bool, error) {
func (h *HostsRepo) getActiveHosts(ctx context.Context, req model.HostListRequest) (map[string]bool, error) {
activeStatus := map[string]bool{}
step := common.MinAllowedStepInterval(req.Start, req.End)

@@ -192,12 +182,72 @@ func (h *HostsRepo) getActiveHosts(ctx context.Context,
return activeStatus, nil
}

// getTopHosts returns the top hosts for the given order by column name
func (h *HostsRepo) getTopHosts(ctx context.Context, req model.HostListRequest, q *v3.QueryRangeParamsV3, hostNameAttrKey string) ([]string, []string, error) {
func (h *HostsRepo) getMetadataAttributes(ctx context.Context, req model.HostListRequest) (map[string]map[string]string, error) {
hostAttrs := map[string]map[string]string{}

for _, key := range hostAttrsToEnrich {
hasKey := false
for _, groupByKey := range req.GroupBy {
if groupByKey.Key == key {
hasKey = true
break
}
}
if !hasKey {
req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key})
}
}

mq := v3.BuilderQuery{
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: metricToUseForHostAttributes,
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Unspecified,
GroupBy: req.GroupBy,
}

query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
if err != nil {
return nil, err
}

query = localQueryToDistributedQuery(query)

attrsListResponse, err := h.reader.GetListResultV3(ctx, query)
if err != nil {
return nil, err
}

for _, row := range attrsListResponse {
stringData := map[string]string{}
for key, value := range row.Data {
if str, ok := value.(string); ok {
stringData[key] = str
} else if strPtr, ok := value.(*string); ok {
stringData[key] = *strPtr
}
}

hostName := stringData[hostNameAttrKey]
if _, ok := hostAttrs[hostName]; !ok {
hostAttrs[hostName] = map[string]string{}
}

for _, key := range req.GroupBy {
hostAttrs[hostName][key.Key] = stringData[key.Key]
}
}

return hostAttrs, nil
}

func (h *HostsRepo) getTopHostGroups(ctx context.Context, req model.HostListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
step, timeSeriesTableName, samplesTableName := getParamsForTopHosts(req)

queryNames := queryNamesForTopHosts[req.OrderBy.ColumnName]
topHostsQueryRangeParams := &v3.QueryRangeParamsV3{
topHostGroupsQueryRangeParams := &v3.QueryRangeParamsV3{
Start: req.Start,
End: req.End,
Step: step,

@@ -216,19 +266,16 @@ func (h *HostsRepo) getTopHosts(ctx context.Context, req model.HostListRequest,
SamplesTableName: samplesTableName,
}
if req.Filters != nil && len(req.Filters.Items) > 0 {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
}
topHostsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
topHostGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
}

queryResponse, _, err := h.querierV2.QueryRange(ctx, topHostsQueryRangeParams)
queryResponse, _, err := h.querierV2.QueryRange(ctx, topHostGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}
formattedResponse, err := postprocess.PostProcessResult(queryResponse, topHostsQueryRangeParams)
formattedResponse, err := postprocess.PostProcessResult(queryResponse, topHostGroupsQueryRangeParams)
if err != nil {
return nil, nil, err
}

@@ -247,238 +294,150 @@ func (h *HostsRepo) getTopHosts(ctx context.Context, req model.HostListRequest,
})
}

paginatedTopHostsSeries := formattedResponse[0].Series[req.Offset : req.Offset+req.Limit]
limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))

topHosts := []string{}
for _, series := range paginatedTopHostsSeries {
topHosts = append(topHosts, series.Labels[hostNameAttrKey])
paginatedTopHostGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]

topHostGroups := []map[string]string{}
for _, series := range paginatedTopHostGroupsSeries {
topHostGroups = append(topHostGroups, series.Labels)
}
allHosts := []string{}
allHostGroups := []map[string]string{}
for _, series := range formattedResponse[0].Series {
allHosts = append(allHosts, series.Labels[hostNameAttrKey])
allHostGroups = append(allHostGroups, series.Labels)
}

return topHosts, allHosts, nil
return topHostGroups, allHostGroups, nil
}

func (h *HostsRepo) getHostsForQuery(ctx context.Context,
req model.HostListRequest, q *v3.QueryRangeParamsV3, hostNameAttrKey string) ([]model.HostListRecord, []string, error) {
func (h *HostsRepo) GetHostList(ctx context.Context, req model.HostListRequest) (model.HostListResponse, error) {
resp := model.HostListResponse{}

step := common.MinAllowedStepInterval(req.Start, req.End)
if req.Limit == 0 {
req.Limit = 10
}

query := q.Clone()
// default to cpu order by
if req.OrderBy == nil {
req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc}
}

// default to host name group by
if len(req.GroupBy) == 0 {
req.GroupBy = []v3.AttributeKey{{Key: hostNameAttrKey}}
resp.Type = model.ResponseTypeList
} else {
resp.Type = model.ResponseTypeGroupedList
}

step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60))

query := HostsTableListQuery.Clone()

query.Start = req.Start
query.End = req.End
query.Step = step

topHosts, allHosts, err := h.getTopHosts(ctx, req, q, hostNameAttrKey)
if err != nil {
return nil, nil, err
}

for _, query := range query.CompositeQuery.BuilderQueries {
query.StepInterval = step
// check if the filter has host_name and is either IN or EQUAL operator
// if so, we don't need to add the topHosts filter again
hasHostNameInOrEqual := false

if req.Filters != nil && len(req.Filters.Items) > 0 {
for _, item := range req.Filters.Items {
if item.Key.Key == hostNameAttrKey && (item.Operator == v3.FilterOperatorIn || item.Operator == v3.FilterOperatorEqual) {
hasHostNameInOrEqual = true
}
}
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
// what is happening here?
// if the filter has host_name and we are querying for k8s host metrics,
// we need to replace the host_name with k8s_node_name
if hostNameAttrKey == k8sNodeNameAttrKey {
for idx, item := range query.Filters.Items {
if item.Key.Key == hostNameAttrKey {
query.Filters.Items[idx].Key.Key = k8sNodeNameAttrKey
}
}
}
}
if !hasHostNameInOrEqual {
if query.Filters == nil {
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
}
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{
Key: hostNameAttrKey,
},
Value: topHosts,
Operator: v3.FilterOperatorIn,
})
query.GroupBy = req.GroupBy
}

hostAttrs, err := h.getMetadataAttributes(ctx, req)
if err != nil {
return resp, err
}

activeHosts, err := h.getActiveHosts(ctx, req)
if err != nil {
return resp, err
}

topHostGroups, allHostGroups, err := h.getTopHostGroups(ctx, req, query)
if err != nil {
return resp, err
}

groupFilters := map[string][]string{}
for _, topHostGroup := range topHostGroups {
for k, v := range topHostGroup {
groupFilters[k] = append(groupFilters[k], v)
}
}

activeHosts, err := h.getActiveHosts(ctx, req, hostNameAttrKey)
if err != nil {
return nil, nil, err
for groupKey, groupValues := range groupFilters {
hasGroupFilter := false
if req.Filters != nil && len(req.Filters.Items) > 0 {
for _, filter := range req.Filters.Items {
if filter.Key.Key == groupKey {
hasGroupFilter = true
break
}
}
}
if !hasGroupFilter {
for _, query := range query.CompositeQuery.BuilderQueries {
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
Key: v3.AttributeKey{Key: groupKey},
Value: groupValues,
Operator: v3.FilterOperatorIn,
})
}
}
}

queryResponse, _, err := h.querierV2.QueryRange(ctx, query)
if err != nil {
return nil, nil, err
return resp, err
}

type hostTSInfo struct {
cpuTimeSeries *v3.Series
memoryTimeSeries *v3.Series
waitTimeSeries *v3.Series
load15TimeSeries *v3.Series
}
hostTSInfoMap := map[string]*hostTSInfo{}

for _, result := range queryResponse {
for _, series := range result.Series {
hostName := series.Labels[hostNameAttrKey]
if _, ok := hostTSInfoMap[hostName]; !ok {
hostTSInfoMap[hostName] = &hostTSInfo{}
}
if result.QueryName == "G" {
loadSeries := *series
hostTSInfoMap[hostName].load15TimeSeries = &loadSeries
}
}
}

query.FormatForWeb = false
query.CompositeQuery.PanelType = v3.PanelTypeGraph

formulaResult, err := postprocess.PostProcessResult(queryResponse, query)
formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
if err != nil {
return nil, nil, err
return resp, err
}

for _, result := range formulaResult {
for _, series := range result.Series {
hostName := series.Labels[hostNameAttrKey]
if _, ok := hostTSInfoMap[hostName]; !ok {
hostTSInfoMap[hostName] = &hostTSInfo{}
}
if result.QueryName == "F1" {
hostTSInfoMap[hostName].cpuTimeSeries = series
} else if result.QueryName == "F2" {
hostTSInfoMap[hostName].memoryTimeSeries = series
} else if result.QueryName == "F3" {
hostTSInfoMap[hostName].waitTimeSeries = series
}
}
}

query.FormatForWeb = true
query.CompositeQuery.PanelType = v3.PanelTypeTable
formattedResponse, _ := postprocess.PostProcessResult(queryResponse, query)

records := []model.HostListRecord{}

// there should be only one result in the response
hostsInfo := formattedResponse[0]
// each row represents a host
for _, row := range hostsInfo.Table.Rows {
record := model.HostListRecord{
CPU: -1,
Memory: -1,
Wait: -1,
Load15: -1,
}
for _, result := range formattedResponse {
for _, row := range result.Table.Rows {
record := model.HostListRecord{
CPU: -1,
Memory: -1,
Wait: -1,
Load15: -1,
}

hostName, ok := row.Data[hostNameAttrKey].(string)
if ok {
record.HostName = hostName
}
if hostName, ok := row.Data[hostNameAttrKey].(string); ok {
record.HostName = hostName
}

osType, ok := row.Data["os_type"].(string)
if ok {
record.OS = osType
}

cpu, ok := row.Data["F1"].(float64)
if ok {
record.CPU = cpu
}
memory, ok := row.Data["F2"].(float64)
if ok {
record.Memory = memory
}
wait, ok := row.Data["F3"].(float64)
if ok {
record.Wait = wait
}
load15, ok := row.Data["G"].(float64)
if ok {
record.Load15 = load15
}
record.Active = activeHosts[record.HostName]
if hostTSInfoMap[record.HostName] != nil {
record.CPUTimeSeries = hostTSInfoMap[record.HostName].cpuTimeSeries
record.MemoryTimeSeries = hostTSInfoMap[record.HostName].memoryTimeSeries
record.WaitTimeSeries = hostTSInfoMap[record.HostName].waitTimeSeries
record.Load15TimeSeries = hostTSInfoMap[record.HostName].load15TimeSeries
}
records = append(records, record)
}

return records, allHosts, nil
}

func dedupRecords(records []model.HostListRecord) []model.HostListRecord {
seen := map[string]bool{}
deduped := []model.HostListRecord{}
for _, record := range records {
if !seen[record.HostName] {
seen[record.HostName] = true
deduped = append(deduped, record)
if cpu, ok := row.Data["F1"].(float64); ok {
record.CPU = cpu
}
if memory, ok := row.Data["F2"].(float64); ok {
record.Memory = memory
}
if wait, ok := row.Data["F3"].(float64); ok {
record.Wait = wait
}
if load15, ok := row.Data["G"].(float64); ok {
record.Load15 = load15
}
record.Meta = map[string]string{}
if _, ok := hostAttrs[record.HostName]; ok {
record.Meta = hostAttrs[record.HostName]
}
if osType, ok := record.Meta["os_type"]; ok {
record.OS = osType
}
record.Active = activeHosts[record.HostName]
records = append(records, record)
}
}
return deduped
}

func (h *HostsRepo) GetHostList(ctx context.Context, req model.HostListRequest) (model.HostListResponse, error) {
if req.Limit == 0 {
req.Limit = 10
}

if req.OrderBy == nil {
req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc}
}

resp := model.HostListResponse{
Type: "list",
}

vmRecords, vmAllHosts, err := h.getHostsForQuery(ctx, req, &NonK8STableListQuery, hostNameAttrKey)
if err != nil {
return resp, err
}
k8sRecords, k8sAllHosts, err := h.getHostsForQuery(ctx, req, &K8STableListQuery, k8sNodeNameAttrKey)
if err != nil {
return resp, err
}

uniqueHosts := map[string]bool{}
for _, host := range vmAllHosts {
uniqueHosts[host] = true
}
for _, host := range k8sAllHosts {
uniqueHosts[host] = true
}

records := append(vmRecords, k8sRecords...)

// since we added the fix for incorrect host name, it is possible that both host_name and k8s_node_name
// are present in the response. we need to dedup the results.
records = dedupRecords(records)

resp.Total = len(uniqueHosts)

resp.Total = len(allHostGroups)
resp.Records = records

return resp, nil
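One detail worth noting in `getTopHostGroups` above: the paginated slice is now clamped with `math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))` before slicing, so an offset/limit pointing past the end of the series no longer slices out of range. A small stand-alone sketch of that clamping idea (generic helper for illustration only; the production code slices the series slice directly):

```go
package main

import "fmt"

// paginate returns items[offset:offset+limit], clamped so that an
// out-of-range offset or limit never slices past the end of the slice.
func paginate(items []string, offset, limit int) []string {
	if offset >= len(items) {
		return nil
	}
	end := offset + limit
	if end > len(items) {
		end = len(items)
	}
	return items[offset:end]
}

func main() {
	hosts := []string{"host-a", "host-b", "host-c"}
	fmt.Println(paginate(hosts, 0, 10)) // [host-a host-b host-c]
	fmt.Println(paginate(hosts, 2, 10)) // [host-c]
	fmt.Println(paginate(hosts, 5, 10)) // []
}
```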
@@ -2,14 +2,14 @@ package inframetrics

import v3 "go.signoz.io/signoz/pkg/query-service/model/v3"

var NonK8STableListQuery = v3.QueryRangeParamsV3{
var HostsTableListQuery = v3.QueryRangeParamsV3{
CompositeQuery: &v3.CompositeQuery{
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
QueryName: "A",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: "system_cpu_time",
Key: metricNamesForHosts["cpu"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Cumulative,

@@ -27,23 +27,18 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
},
{
Key: v3.AttributeKey{
Key: "host_name",
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: v3.FilterOperatorNotContains,
Value: "k8s-infra-otel-agent",
Value: agentNameToIgnore,
},
},
},
GroupBy: []v3.AttributeKey{
{
Key: "host_name",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
{
Key: "os_type",
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},

@@ -58,7 +53,7 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
QueryName: "B",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: "system_cpu_time",
Key: metricNamesForHosts["cpu"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Cumulative,

@@ -67,23 +62,18 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{
Key: "host_name",
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: v3.FilterOperatorNotContains,
Value: "k8s-infra-otel-agent",
Value: agentNameToIgnore,
},
},
},
GroupBy: []v3.AttributeKey{
{
Key: "host_name",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
{
Key: "os_type",
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},

@@ -98,12 +88,16 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
QueryName: "F1",
Expression: "A/B",
Legend: "CPU Usage (%)",
Filters: &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{},
},
},
"C": {
QueryName: "C",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: "system_memory_usage",
Key: metricNamesForHosts["memory"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Cumulative,

@@ -121,23 +115,18 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
},
{
Key: v3.AttributeKey{
Key: "host_name",
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: v3.FilterOperatorNotContains,
Value: "k8s-infra-otel-agent",
Value: agentNameToIgnore,
},
},
},
GroupBy: []v3.AttributeKey{
{
Key: "host_name",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
{
Key: "os_type",
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},

@@ -152,7 +141,7 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
QueryName: "D",
DataSource: v3.DataSourceMetrics,
AggregateAttribute: v3.AttributeKey{
Key: "system_memory_usage",
Key: metricNamesForHosts["memory"],
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Cumulative,

@@ -161,23 +150,18 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{
Key: "host_name",
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: v3.FilterOperatorNotContains,
Value: "k8s-infra-otel-agent",
Value: agentNameToIgnore,
},
},
},
GroupBy: []v3.AttributeKey{
{
Key: "host_name",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
{
Key: "os_type",
Key: hostNameAttrKey,
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
@@ -192,12 +176,16 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
|
||||
QueryName: "F2",
|
||||
Expression: "C/D",
|
||||
Legend: "Memory Usage (%)",
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
},
|
||||
"E": {
|
||||
QueryName: "E",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "system_cpu_time",
|
||||
Key: metricNamesForHosts["wait"],
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
@@ -215,23 +203,18 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
|
||||
},
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: "host_name",
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
Operator: v3.FilterOperatorNotContains,
|
||||
Value: "k8s-infra-otel-agent",
|
||||
Value: agentNameToIgnore,
|
||||
},
|
||||
},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "host_name",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
{
|
||||
Key: "os_type",
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
@@ -246,7 +229,7 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
|
||||
QueryName: "F",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "system_cpu_time",
|
||||
Key: metricNamesForHosts["wait"],
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
@@ -255,23 +238,18 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: "host_name",
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
Operator: v3.FilterOperatorNotContains,
|
||||
Value: "k8s-infra-otel-agent",
|
||||
Value: agentNameToIgnore,
|
||||
},
|
||||
},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "host_name",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
{
|
||||
Key: "os_type",
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
@@ -286,12 +264,16 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
|
||||
QueryName: "F3",
|
||||
Expression: "E/F",
|
||||
Legend: "CPU Wait Time (%)",
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
},
|
||||
"G": {
|
||||
QueryName: "G",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "system_cpu_load_average_15m",
|
||||
Key: metricNamesForHosts["load15"],
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Unspecified,
|
||||
@@ -300,23 +282,18 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: "host_name",
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
Operator: v3.FilterOperatorNotContains,
|
||||
Value: "k8s-infra-otel-agent",
|
||||
Value: agentNameToIgnore,
|
||||
},
|
||||
},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "host_name",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
{
|
||||
Key: "os_type",
|
||||
Key: hostNameAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
@@ -335,69 +312,3 @@ var NonK8STableListQuery = v3.QueryRangeParamsV3{
|
||||
Version: "v4",
|
||||
FormatForWeb: true,
|
||||
}
|
||||
|
||||
var ProcessesTableListQuery = v3.QueryRangeParamsV3{
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
BuilderQueries: map[string]*v3.BuilderQuery{
|
||||
"A": {
|
||||
QueryName: "A",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "process_cpu_time",
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "process_pid",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "A",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationRate,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: true,
|
||||
},
|
||||
"F1": {
|
||||
QueryName: "F1",
|
||||
Expression: "A",
|
||||
Legend: "Process CPU Usage (%)",
|
||||
},
|
||||
"C": {
|
||||
QueryName: "C",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "process_memory_usage",
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "process_pid",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "C",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationAvg,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: false,
|
||||
},
|
||||
},
|
||||
PanelType: v3.PanelTypeTable,
|
||||
QueryType: v3.QueryTypeBuilder,
|
||||
},
|
||||
Version: "v4",
|
||||
FormatForWeb: true,
|
||||
}
|
@@ -178,7 +178,9 @@ func (p *NamespacesRepo) getTopNamespaceGroups(ctx context.Context, req model.Na
})
}

paginatedTopNamespaceGroupsSeries := formattedResponse[0].Series[req.Offset : req.Offset+req.Limit]
limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))

paginatedTopNamespaceGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]

topNamespaceGroups := []map[string]string{}
for _, series := range paginatedTopNamespaceGroupsSeries {

@@ -217,7 +217,9 @@ func (p *PodsRepo) getTopPodGroups(ctx context.Context, req model.PodListRequest
})
}

paginatedTopPodGroupsSeries := formattedResponse[0].Series[req.Offset : req.Offset+req.Limit]
limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))

paginatedTopPodGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]

topPodGroups := []map[string]string{}
for _, series := range paginatedTopPodGroupsSeries {

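Both hunks above replace a raw `req.Offset : req.Offset+req.Limit` slice with an upper bound clamped to the series length, which avoids an out-of-range panic when the requested page runs past the available groups. A standalone illustration of the clamping with made-up data:

package main

import (
	"fmt"
	"math"
)

func main() {
	series := []string{"ns-a", "ns-b", "ns-c"} // stand-in for formattedResponse[0].Series
	offset, reqLimit := 2, 10

	// Before the change, series[offset:offset+reqLimit] would slice past len(series) and panic.
	// The fix clamps the upper bound to the number of available groups first.
	limit := math.Min(float64(offset+reqLimit), float64(len(series)))
	paginated := series[offset:int(limit)]

	fmt.Println(paginated) // [ns-c]
}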
73
pkg/query-service/app/inframetrics/process_query.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package inframetrics
|
||||
|
||||
import v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
|
||||
|
||||
var ProcessesTableListQuery = v3.QueryRangeParamsV3{
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
BuilderQueries: map[string]*v3.BuilderQuery{
|
||||
"A": {
|
||||
QueryName: "A",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: metricNamesForProcesses["cpu"],
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: processPIDAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "A",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationRate,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: true,
|
||||
},
|
||||
"F1": {
|
||||
QueryName: "F1",
|
||||
Expression: "A",
|
||||
Legend: "Process CPU Usage (%)",
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
},
|
||||
"C": {
|
||||
QueryName: "C",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: metricNamesForProcesses["memory"],
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: processPIDAttrKey,
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "C",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationAvg,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: false,
|
||||
},
|
||||
},
|
||||
PanelType: v3.PanelTypeTable,
|
||||
QueryType: v3.QueryTypeBuilder,
|
||||
},
|
||||
Version: "v4",
|
||||
FormatForWeb: true,
|
||||
}
|
@@ -2,9 +2,8 @@ package inframetrics

import (
"context"
"fmt"
"math"
"strings"
"sort"

"go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
"go.signoz.io/signoz/pkg/query-service/common"
@@ -15,6 +14,23 @@ import (
"golang.org/x/exp/slices"
)

var (
queryNamesForTopProcesses = map[string][]string{
"cpu": {"A"},
"memory": {"C"},
}

processPIDAttrKey = "process_pid"
metricNamesForProcesses = map[string]string{
"cpu": "process_cpu_time",
"memory": "process_memory_usage",
}
metricToUseForProcessAttributes = "process_memory_usage"
processNameAttrKey = "process_executable_name"
processCMDAttrKey = "process_command"
processCMDLineAttrKey = "process_command_line"
)

type ProcessesRepo struct {
reader interfaces.Reader
querierV2 interfaces.Querier
@@ -64,14 +80,6 @@ func (p *ProcessesRepo) GetProcessAttributeValues(ctx context.Context, req v3.Fi
return attributeValuesResponse, nil
}

func getGroupKeyForProcesses(record model.ProcessListRecord, groupBy []v3.AttributeKey) string {
groupKey := ""
for _, key := range groupBy {
groupKey += fmt.Sprintf("%s=%s,", key.Key, record.Meta[key.Key])
}
return groupKey
}

func (p *ProcessesRepo) getMetadataAttributes(ctx context.Context,
req model.ProcessListRequest) (map[string]map[string]string, error) {
processAttrs := map[string]map[string]string{}
@@ -92,7 +100,7 @@ func (p *ProcessesRepo) getMetadataAttributes(ctx context.Context,

mq := v3.BuilderQuery{
AggregateAttribute: v3.AttributeKey{
Key: "process_memory_usage",
Key: metricToUseForProcessAttributes,
DataType: v3.AttributeKeyDataTypeFloat64,
},
Temporality: v3.Cumulative,
@@ -104,14 +112,7 @@ func (p *ProcessesRepo) getMetadataAttributes(ctx context.Context,
return nil, err
}

// TODO(srikanthccv): remove this
// What is happening here?
// The `PrepareTimeseriesFilterQuery` uses the local time series table for sub-query because each fingerprint
// goes to same shard.
// However, in this case, we are interested in the attributes values across all the shards.
// So, we replace the local time series table with the distributed time series table.
// See `PrepareTimeseriesFilterQuery` for more details.
query = strings.Replace(query, ".time_series_v4", ".distributed_time_series_v4", 1)
query = localQueryToDistributedQuery(query)

attrsListResponse, err := p.reader.GetListResultV3(ctx, query)
if err != nil {
@@ -128,36 +129,108 @@ func (p *ProcessesRepo) getMetadataAttributes(ctx context.Context,
}
}

pid := stringData["process_pid"]
if _, ok := processAttrs[pid]; !ok {
processAttrs[pid] = map[string]string{}
processID := stringData[processPIDAttrKey]
if _, ok := processAttrs[processID]; !ok {
processAttrs[processID] = map[string]string{}
}

for _, key := range req.GroupBy {
processAttrs[pid][key.Key] = stringData[key.Key]
processAttrs[processID][key.Key] = stringData[key.Key]
}
}

return processAttrs, nil
}

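The getMetadataAttributes hunk above swaps the inline table substitution (and the long comment explaining it) for a call to localQueryToDistributedQuery, whose body is not part of this compare. Based on the comment being removed, the helper presumably performs the same substitution; the sketch below is an assumption, not the upstream code.

package inframetrics

import "strings"

// localQueryToDistributedQuery rewrites a sub-query so the attribute lookup reads from the
// distributed time series table (data from all shards) instead of the shard-local table.
func localQueryToDistributedQuery(query string) string {
	return strings.Replace(query, ".time_series_v4", ".distributed_time_series_v4", 1)
}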
||||
func (p *ProcessesRepo) getTopProcessGroups(ctx context.Context, req model.ProcessListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
|
||||
step, timeSeriesTableName, samplesTableName := getParamsForTopProcesses(req)
|
||||
|
||||
queryNames := queryNamesForTopProcesses[req.OrderBy.ColumnName]
|
||||
topProcessGroupsQueryRangeParams := &v3.QueryRangeParamsV3{
|
||||
Start: req.Start,
|
||||
End: req.End,
|
||||
Step: step,
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
BuilderQueries: map[string]*v3.BuilderQuery{},
|
||||
QueryType: v3.QueryTypeBuilder,
|
||||
PanelType: v3.PanelTypeTable,
|
||||
},
|
||||
}
|
||||
|
||||
for _, queryName := range queryNames {
|
||||
query := q.CompositeQuery.BuilderQueries[queryName].Clone()
|
||||
query.StepInterval = step
|
||||
query.MetricTableHints = &v3.MetricTableHints{
|
||||
TimeSeriesTableName: timeSeriesTableName,
|
||||
SamplesTableName: samplesTableName,
|
||||
}
|
||||
if req.Filters != nil && len(req.Filters.Items) > 0 {
|
||||
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
|
||||
}
|
||||
topProcessGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
|
||||
}
|
||||
|
||||
queryResponse, _, err := p.querierV2.QueryRange(ctx, topProcessGroupsQueryRangeParams)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
formattedResponse, err := postprocess.PostProcessResult(queryResponse, topProcessGroupsQueryRangeParams)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
if req.OrderBy.Order == v3.DirectionDesc {
|
||||
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
|
||||
return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value
|
||||
})
|
||||
} else {
|
||||
sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
|
||||
return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value
|
||||
})
|
||||
}
|
||||
|
||||
limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))
|
||||
|
||||
paginatedTopProcessGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]
|
||||
|
||||
topProcessGroups := []map[string]string{}
|
||||
for _, series := range paginatedTopProcessGroupsSeries {
|
||||
topProcessGroups = append(topProcessGroups, series.Labels)
|
||||
}
|
||||
allProcessGroups := []map[string]string{}
|
||||
for _, series := range formattedResponse[0].Series {
|
||||
allProcessGroups = append(allProcessGroups, series.Labels)
|
||||
}
|
||||
|
||||
return topProcessGroups, allProcessGroups, nil
|
||||
}
|
||||
|
||||
func (p *ProcessesRepo) GetProcessList(ctx context.Context, req model.ProcessListRequest) (model.ProcessListResponse, error) {
|
||||
resp := model.ProcessListResponse{}
|
||||
if req.Limit == 0 {
|
||||
req.Limit = 10
|
||||
}
|
||||
|
||||
resp := model.ProcessListResponse{
|
||||
Type: "list",
|
||||
// default to cpu order by
|
||||
if req.OrderBy == nil {
|
||||
req.OrderBy = &v3.OrderBy{ColumnName: "cpu", Order: v3.DirectionDesc}
|
||||
}
|
||||
|
||||
step := common.MinAllowedStepInterval(req.Start, req.End)
|
||||
// default to process pid group by
|
||||
if len(req.GroupBy) == 0 {
|
||||
req.GroupBy = []v3.AttributeKey{{Key: processPIDAttrKey}}
|
||||
resp.Type = model.ResponseTypeList
|
||||
} else {
|
||||
resp.Type = model.ResponseTypeGroupedList
|
||||
}
|
||||
|
||||
step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60))
|
||||
|
||||
query := ProcessesTableListQuery.Clone()
|
||||
if req.OrderBy != nil {
|
||||
for _, q := range query.CompositeQuery.BuilderQueries {
|
||||
q.OrderBy = []v3.OrderBy{*req.OrderBy}
|
||||
}
|
||||
}
|
||||
|
||||
query.Start = req.Start
|
||||
query.End = req.End
|
||||
@@ -166,11 +239,9 @@ func (p *ProcessesRepo) GetProcessList(ctx context.Context, req model.ProcessLis
|
||||
for _, query := range query.CompositeQuery.BuilderQueries {
|
||||
query.StepInterval = step
|
||||
if req.Filters != nil && len(req.Filters.Items) > 0 {
|
||||
if query.Filters == nil {
|
||||
query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
|
||||
}
|
||||
query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
|
||||
}
|
||||
query.GroupBy = req.GroupBy
|
||||
}
|
||||
|
||||
processAttrs, err := p.getMetadataAttributes(ctx, req)
|
||||
@@ -178,157 +249,83 @@ func (p *ProcessesRepo) GetProcessList(ctx context.Context, req model.ProcessLis
|
||||
return resp, err
|
||||
}
|
||||
|
||||
topProcessGroups, allProcessGroups, err := p.getTopProcessGroups(ctx, req, query)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
groupFilters := map[string][]string{}
|
||||
for _, topProcessGroup := range topProcessGroups {
|
||||
for k, v := range topProcessGroup {
|
||||
groupFilters[k] = append(groupFilters[k], v)
|
||||
}
|
||||
}
|
||||
|
||||
for groupKey, groupValues := range groupFilters {
|
||||
hasGroupFilter := false
|
||||
if req.Filters != nil && len(req.Filters.Items) > 0 {
|
||||
for _, filter := range req.Filters.Items {
|
||||
if filter.Key.Key == groupKey {
|
||||
hasGroupFilter = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !hasGroupFilter {
|
||||
for _, query := range query.CompositeQuery.BuilderQueries {
|
||||
query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
|
||||
Key: v3.AttributeKey{Key: groupKey},
|
||||
Value: groupValues,
|
||||
Operator: v3.FilterOperatorIn,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
queryResponse, _, err := p.querierV2.QueryRange(ctx, query)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type processTSInfo struct {
|
||||
CpuTimeSeries *v3.Series `json:"cpu_time_series"`
|
||||
MemoryTimeSeries *v3.Series `json:"memory_time_series"`
|
||||
}
|
||||
processTSInfoMap := map[string]*processTSInfo{}
|
||||
|
||||
for _, result := range queryResponse {
|
||||
for _, series := range result.Series {
|
||||
pid := series.Labels["process_pid"]
|
||||
if _, ok := processTSInfoMap[pid]; !ok {
|
||||
processTSInfoMap[pid] = &processTSInfo{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
query.FormatForWeb = false
|
||||
query.CompositeQuery.PanelType = v3.PanelTypeGraph
|
||||
|
||||
formulaResult, err := postprocess.PostProcessResult(queryResponse, query)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
for _, result := range formulaResult {
|
||||
for _, series := range result.Series {
|
||||
pid := series.Labels["process_pid"]
|
||||
if _, ok := processTSInfoMap[pid]; !ok {
|
||||
processTSInfoMap[pid] = &processTSInfo{}
|
||||
}
|
||||
loadSeries := *series
|
||||
if result.QueryName == "F1" {
|
||||
processTSInfoMap[pid].CpuTimeSeries = &loadSeries
|
||||
} else if result.QueryName == "C" {
|
||||
processTSInfoMap[pid].MemoryTimeSeries = &loadSeries
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
query.FormatForWeb = true
|
||||
query.CompositeQuery.PanelType = v3.PanelTypeTable
|
||||
|
||||
formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
if len(formattedResponse) == 0 {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
records := []model.ProcessListRecord{}
|
||||
|
||||
// there should be only one result in the response
|
||||
processInfo := formattedResponse[0]
|
||||
for _, result := range formattedResponse {
|
||||
for _, row := range result.Table.Rows {
|
||||
record := model.ProcessListRecord{
|
||||
ProcessCPU: -1,
|
||||
ProcessMemory: -1,
|
||||
}
|
||||
|
||||
for _, row := range processInfo.Table.Rows {
|
||||
record := model.ProcessListRecord{
|
||||
ProcessCPU: -1,
|
||||
ProcessMemory: -1,
|
||||
}
|
||||
pid, ok := row.Data[processPIDAttrKey].(string)
|
||||
if ok {
|
||||
record.ProcessID = pid
|
||||
}
|
||||
|
||||
pid, ok := row.Data["process_pid"].(string)
|
||||
if ok {
|
||||
record.ProcessID = pid
|
||||
}
|
||||
processCPU, ok := row.Data["F1"].(float64)
|
||||
if ok {
|
||||
record.ProcessCPU = processCPU
|
||||
}
|
||||
|
||||
processCPU, ok := row.Data["F1"].(float64)
|
||||
if ok {
|
||||
record.ProcessCPU = processCPU
|
||||
processMemory, ok := row.Data["C"].(float64)
|
||||
if ok {
|
||||
record.ProcessMemory = processMemory
|
||||
}
|
||||
record.Meta = processAttrs[record.ProcessID]
|
||||
record.ProcessName = record.Meta[processNameAttrKey]
|
||||
record.ProcessCMD = record.Meta[processCMDAttrKey]
|
||||
record.ProcessCMDLine = record.Meta[processCMDLineAttrKey]
|
||||
records = append(records, record)
|
||||
}
|
||||
|
||||
processMemory, ok := row.Data["C"].(float64)
|
||||
if ok {
|
||||
record.ProcessMemory = processMemory
|
||||
}
|
||||
record.Meta = processAttrs[record.ProcessID]
|
||||
if processTSInfoMap[record.ProcessID] != nil {
|
||||
record.ProcessCPUTimeSeries = processTSInfoMap[record.ProcessID].CpuTimeSeries
|
||||
record.ProcessMemoryTimeSeries = processTSInfoMap[record.ProcessID].MemoryTimeSeries
|
||||
}
|
||||
record.ProcessName = record.Meta["process_executable_name"]
|
||||
record.ProcessCMD = record.Meta["process_command"]
|
||||
record.ProcessCMDLine = record.Meta["process_command_line"]
|
||||
records = append(records, record)
|
||||
}
|
||||
|
||||
resp.Total = len(records)
|
||||
|
||||
if req.Offset > 0 {
|
||||
records = records[req.Offset:]
|
||||
}
|
||||
if req.Limit > 0 && len(records) > req.Limit {
|
||||
records = records[:req.Limit]
|
||||
}
|
||||
resp.Total = len(allProcessGroups)
|
||||
resp.Records = records
|
||||
|
||||
if len(req.GroupBy) > 0 {
|
||||
groups := []model.ProcessListGroup{}
|
||||
|
||||
groupMap := make(map[string][]model.ProcessListRecord)
|
||||
for _, record := range records {
|
||||
groupKey := getGroupKeyForProcesses(record, req.GroupBy)
|
||||
if _, ok := groupMap[groupKey]; !ok {
|
||||
groupMap[groupKey] = []model.ProcessListRecord{record}
|
||||
} else {
|
||||
groupMap[groupKey] = append(groupMap[groupKey], record)
|
||||
}
|
||||
}
|
||||
|
||||
for _, records := range groupMap {
|
||||
var avgCPU, avgMemory float64
|
||||
var validCPU, validMemory int
|
||||
for _, record := range records {
|
||||
if !math.IsNaN(record.ProcessCPU) {
|
||||
avgCPU += record.ProcessCPU
|
||||
validCPU++
|
||||
}
|
||||
if !math.IsNaN(record.ProcessMemory) {
|
||||
avgMemory += record.ProcessMemory
|
||||
validMemory++
|
||||
}
|
||||
}
|
||||
avgCPU /= float64(validCPU)
|
||||
avgMemory /= float64(validMemory)
|
||||
|
||||
// take any record and make it as the group meta
|
||||
firstRecord := records[0]
|
||||
var groupValues []string
|
||||
for _, key := range req.GroupBy {
|
||||
groupValues = append(groupValues, firstRecord.Meta[key.Key])
|
||||
}
|
||||
processNames := []string{}
|
||||
for _, record := range records {
|
||||
processNames = append(processNames, record.ProcessName)
|
||||
}
|
||||
|
||||
groups = append(groups, model.ProcessListGroup{
|
||||
GroupValues: groupValues,
|
||||
GroupCPUAvg: avgCPU,
|
||||
GroupMemoryAvg: avgMemory,
|
||||
ProcessNames: processNames,
|
||||
})
|
||||
}
|
||||
resp.Groups = groups
|
||||
resp.Type = "grouped_list"
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -1,269 +0,0 @@
|
||||
package inframetrics
|
||||
|
||||
import v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
|
||||
|
||||
var K8STableListQuery = v3.QueryRangeParamsV3{
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
BuilderQueries: map[string]*v3.BuilderQuery{
|
||||
"A": {
|
||||
QueryName: "A",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "system_cpu_time",
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: "state",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeTag,
|
||||
},
|
||||
Operator: v3.FilterOperatorNotEqual,
|
||||
Value: "idle",
|
||||
},
|
||||
},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "k8s_node_name",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
{
|
||||
Key: "os_type",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "A",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationRate,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: true,
|
||||
},
|
||||
"B": {
|
||||
QueryName: "B",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "system_cpu_time",
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "k8s_node_name",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
{
|
||||
Key: "os_type",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "B",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationRate,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: true,
|
||||
},
|
||||
"F1": {
|
||||
QueryName: "F1",
|
||||
Expression: "A/B",
|
||||
Legend: "CPU Usage (%)",
|
||||
},
|
||||
"C": {
|
||||
QueryName: "C",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "system_memory_usage",
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: "state",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeTag,
|
||||
},
|
||||
Operator: v3.FilterOperatorIn,
|
||||
Value: []string{"used", "cached"},
|
||||
},
|
||||
},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "k8s_node_name",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
{
|
||||
Key: "os_type",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "C",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationAvg,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: true,
|
||||
},
|
||||
"D": {
|
||||
QueryName: "D",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "system_memory_usage",
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "k8s_node_name",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
{
|
||||
Key: "os_type",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "D",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationAvg,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: true,
|
||||
},
|
||||
"F2": {
|
||||
QueryName: "F2",
|
||||
Expression: "C/D",
|
||||
Legend: "Memory Usage (%)",
|
||||
},
|
||||
"E": {
|
||||
QueryName: "E",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "system_cpu_time",
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{
|
||||
{
|
||||
Key: v3.AttributeKey{
|
||||
Key: "state",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeTag,
|
||||
},
|
||||
Operator: v3.FilterOperatorEqual,
|
||||
Value: "wait",
|
||||
},
|
||||
},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "k8s_node_name",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
{
|
||||
Key: "os_type",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "E",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationRate,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: true,
|
||||
},
|
||||
"F": {
|
||||
QueryName: "F",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "system_cpu_time",
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Cumulative,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "k8s_node_name",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
{
|
||||
Key: "os_type",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "F",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationRate,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Disabled: true,
|
||||
},
|
||||
"F3": {
|
||||
QueryName: "F3",
|
||||
Expression: "E/F",
|
||||
Legend: "CPU Wait Time (%)",
|
||||
},
|
||||
"G": {
|
||||
QueryName: "G",
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "system_cpu_load_average_15m",
|
||||
DataType: v3.AttributeKeyDataTypeFloat64,
|
||||
},
|
||||
Temporality: v3.Unspecified,
|
||||
Filters: &v3.FilterSet{
|
||||
Operator: "AND",
|
||||
Items: []v3.FilterItem{},
|
||||
},
|
||||
GroupBy: []v3.AttributeKey{
|
||||
{
|
||||
Key: "k8s_node_name",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
{
|
||||
Key: "os_type",
|
||||
DataType: v3.AttributeKeyDataTypeString,
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
},
|
||||
Expression: "G",
|
||||
ReduceTo: v3.ReduceToOperatorAvg,
|
||||
TimeAggregation: v3.TimeAggregationAvg,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Legend: "CPU Load Average (15m)",
|
||||
},
|
||||
},
|
||||
PanelType: v3.PanelTypeTable,
|
||||
QueryType: v3.QueryTypeBuilder,
|
||||
},
|
||||
Version: "v4",
|
||||
FormatForWeb: true,
|
||||
}
|
@@ -381,18 +381,3 @@ WHERE
AND timestamp <= '%d';`, queueType, start, end)
return query
}

func onboardKafkaSQL(start, end int64) string {
query := fmt.Sprintf(`
SELECT
COUNT(*) = 0 AS entries,
COUNT(IF(metric_name = 'kafka_consumer_fetch_latency_avg', 1, NULL)) = 0 AS fetchlatency,
COUNT(IF(metric_name = 'kafka_consumer_group_lag', 1, NULL)) = 0 AS grouplag
FROM
signoz_metrics.time_series_v4_1day
WHERE
metric_name IN ('kafka_consumer_fetch_latency_avg', 'kafka_consumer_group_lag')
AND unix_milli >= '%d'
AND unix_milli < '%d';`, start/1000000, end/1000000)
return query
}

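The onboardKafkaSQL helper in this hunk compares against the unix_milli column while its start and end arguments carry nanosecond timestamps, hence the division by 1000000 in the Sprintf arguments. A quick check of that conversion with sample values only:

package main

import "fmt"

func main() {
	startNs := int64(1730000000000000000) // hypothetical request start, nanoseconds
	endNs := int64(1730000600000000000)   // hypothetical request end, nanoseconds

	// Dividing by 1e6 converts nanoseconds to the milliseconds stored in unix_milli.
	fmt.Printf("unix_milli >= '%d' AND unix_milli < '%d'\n", startNs/1000000, endNs/1000000)
	// unix_milli >= '1730000000000' AND unix_milli < '1730000600000'
}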
||||
@@ -185,6 +185,60 @@ func buildBuilderQueriesNetwork(unixMilliStart, unixMilliEnd int64, attributeCac
|
||||
return bq, nil
|
||||
}
|
||||
|
||||
func BuildBuilderQueriesKafkaOnboarding(messagingQueue *MessagingQueue) (*v3.QueryRangeParamsV3, error) {
|
||||
bq := make(map[string]*v3.BuilderQuery)
|
||||
|
||||
unixMilliStart := messagingQueue.Start / 1000000
|
||||
unixMilliEnd := messagingQueue.End / 1000000
|
||||
|
||||
buiderQuery := &v3.BuilderQuery{
|
||||
QueryName: "fetch_latency",
|
||||
StepInterval: common.MinAllowedStepInterval(unixMilliStart, unixMilliEnd),
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "kafka_consumer_fetch_latency_avg",
|
||||
},
|
||||
AggregateOperator: v3.AggregateOperatorCount,
|
||||
Temporality: v3.Unspecified,
|
||||
TimeAggregation: v3.TimeAggregationCount,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Expression: "fetch_latency",
|
||||
}
|
||||
bq["fetch_latency"] = buiderQuery
|
||||
|
||||
buiderQuery = &v3.BuilderQuery{
|
||||
QueryName: "consumer_lag",
|
||||
StepInterval: common.MinAllowedStepInterval(unixMilliStart, unixMilliEnd),
|
||||
DataSource: v3.DataSourceMetrics,
|
||||
AggregateAttribute: v3.AttributeKey{
|
||||
Key: "kafka_consumer_group_lag",
|
||||
},
|
||||
AggregateOperator: v3.AggregateOperatorCount,
|
||||
Temporality: v3.Unspecified,
|
||||
TimeAggregation: v3.TimeAggregationCount,
|
||||
SpaceAggregation: v3.SpaceAggregationSum,
|
||||
Expression: "consumer_lag",
|
||||
}
|
||||
bq["consumer_lag"] = buiderQuery
|
||||
|
||||
cq := &v3.CompositeQuery{
|
||||
QueryType: v3.QueryTypeBuilder,
|
||||
BuilderQueries: bq,
|
||||
PanelType: v3.PanelTypeTable,
|
||||
}
|
||||
|
||||
queryRangeParams := &v3.QueryRangeParamsV3{
|
||||
Start: unixMilliStart,
|
||||
End: unixMilliEnd,
|
||||
Step: defaultStepInterval,
|
||||
CompositeQuery: cq,
|
||||
Version: "v4",
|
||||
FormatForWeb: true,
|
||||
}
|
||||
|
||||
return queryRangeParams, nil
|
||||
}
|

func BuildQRParamsWithCache(messagingQueue *MessagingQueue, queryContext string, attributeCache *Clients) (*v3.QueryRangeParamsV3, error) {

queueType := KafkaQueue
@@ -302,8 +356,6 @@ func BuildClickHouseQuery(messagingQueue *MessagingQueue, queueType string, quer
query = onboardProducersSQL(start, end, queueType)
} else if queryContext == "onboard_consumers" {
query = onboardConsumerSQL(start, end, queueType)
} else if queryContext == "onboard_kafka" {
query = onboardKafkaSQL(start, end)
}
return &v3.ClickHouseQuery{
Query: query,
@@ -312,7 +364,7 @@ func BuildClickHouseQuery(messagingQueue *MessagingQueue, queueType string, quer

func buildCompositeQuery(chq *v3.ClickHouseQuery, queryContext string) (*v3.CompositeQuery, error) {

if queryContext == "producer-consumer-eva" {
if queryContext == "producer-consumer-eval" {
return &v3.CompositeQuery{
QueryType: v3.QueryTypeClickHouseSQL,
ClickHouseQueries: map[string]*v3.ClickHouseQuery{queryContext: chq},

@@ -149,7 +149,7 @@ func buildAttributeFilter(item v3.FilterItem) (string, error) {
return fmt.Sprintf(logsOp, keyName, fmtVal), nil
case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
// we also want to treat %, _ as literals for contains
val := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", item.Value))
val := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", item.Value), false)
// for body the contains is case insensitive
if keyName == BODY {
logsOp = strings.Replace(logsOp, "ILIKE", "LIKE", 1) // removing i from ilike and not ilike

@@ -49,7 +49,7 @@ func buildResourceFilter(logsOp string, key string, op v3.FilterOperator, value
case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
// this is required as clickhouseFormattedValue add's quotes to the string
// we also want to treat %, _ as literals for contains
escapedStringValue := utils.QuoteEscapedStringForContains(lowerValue)
escapedStringValue := utils.QuoteEscapedStringForContains(lowerValue, false)
return fmt.Sprintf("%s %s '%%%s%%'", lowerSearchKey, logsOp, escapedStringValue)
case v3.FilterOperatorLike, v3.FilterOperatorNotLike:
// this is required as clickhouseFormattedValue add's quotes to the string

@@ -92,7 +92,7 @@ func buildIndexFilterForInOperator(key string, op v3.FilterOperator, value inter
// if there are no values to filter on, return an empty string
if len(values) > 0 {
for _, v := range values {
value := utils.QuoteEscapedStringForContains(v)
value := utils.QuoteEscapedStringForContains(v, true)
conditions = append(conditions, fmt.Sprintf("labels %s '%%\"%s\":\"%s\"%%'", sqlOp, key, value))
}
return "(" + strings.Join(conditions, separator) + ")"
||||
@@ -110,24 +110,24 @@ func buildIndexFilterForInOperator(key string, op v3.FilterOperator, value inter
|
||||
func buildResourceIndexFilter(key string, op v3.FilterOperator, value interface{}) string {
|
||||
// not using clickhouseFormattedValue as we don't wan't the quotes
|
||||
strVal := fmt.Sprintf("%s", value)
|
||||
formattedValueEscapedForContains := strings.ToLower(utils.QuoteEscapedStringForContains(strVal))
|
||||
formattedValueEscaped := utils.QuoteEscapedString(strVal)
|
||||
formattedValueEscapedLower := strings.ToLower(formattedValueEscaped)
|
||||
fmtValEscapedForContains := utils.QuoteEscapedStringForContains(strVal, true)
|
||||
fmtValEscapedForContainsLower := strings.ToLower(fmtValEscapedForContains)
|
||||
fmtValEscapedLower := strings.ToLower(utils.QuoteEscapedString(strVal))
|
||||
|
||||
// add index filters
|
||||
switch op {
|
||||
case v3.FilterOperatorContains:
|
||||
return fmt.Sprintf("lower(labels) like '%%%s%%%s%%'", key, formattedValueEscapedForContains)
|
||||
return fmt.Sprintf("lower(labels) like '%%%s%%%s%%'", key, fmtValEscapedForContainsLower)
|
||||
case v3.FilterOperatorNotContains:
|
||||
return fmt.Sprintf("lower(labels) not like '%%%s%%%s%%'", key, formattedValueEscapedForContains)
|
||||
return fmt.Sprintf("lower(labels) not like '%%%s%%%s%%'", key, fmtValEscapedForContainsLower)
|
||||
case v3.FilterOperatorLike:
|
||||
return fmt.Sprintf("lower(labels) like '%%%s%%%s%%'", key, formattedValueEscapedLower)
|
||||
return fmt.Sprintf("lower(labels) like '%%%s%%%s%%'", key, fmtValEscapedLower)
|
||||
case v3.FilterOperatorNotLike:
|
||||
return fmt.Sprintf("lower(labels) not like '%%%s%%%s%%'", key, formattedValueEscapedLower)
|
||||
return fmt.Sprintf("lower(labels) not like '%%%s%%%s%%'", key, fmtValEscapedLower)
|
||||
case v3.FilterOperatorEqual:
|
||||
return fmt.Sprintf("labels like '%%%s%%%s%%'", key, formattedValueEscaped)
|
||||
return fmt.Sprintf("labels like '%%%s%%%s%%'", key, fmtValEscapedForContains)
|
||||
case v3.FilterOperatorNotEqual:
|
||||
return fmt.Sprintf("labels not like '%%%s%%%s%%'", key, formattedValueEscaped)
|
||||
return fmt.Sprintf("labels not like '%%%s%%%s%%'", key, fmtValEscapedForContains)
|
||||
case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex:
|
||||
// don't try to do anything for regex.
|
||||
return ""
|
||||
|
||||
@@ -138,9 +138,9 @@ func Test_buildIndexFilterForInOperator(t *testing.T) {
|
||||
args: args{
|
||||
key: "service.name",
|
||||
op: v3.FilterOperatorNotIn,
|
||||
value: "application'\"_s",
|
||||
value: `application'"_s`,
|
||||
},
|
||||
want: `(labels not like '%"service.name":"application\'"\_s"%')`,
|
||||
want: `(labels not like '%"service.name":"application\'\\\\"\_s"%')`,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
@@ -231,9 +231,9 @@ func Test_buildResourceIndexFilter(t *testing.T) {
|
||||
args: args{
|
||||
key: "service.name",
|
||||
op: v3.FilterOperatorEqual,
|
||||
value: "Application",
|
||||
value: `Application"`,
|
||||
},
|
||||
want: `labels like '%service.name%Application%'`,
|
||||
want: `labels like '%service.name%Application\\\\"%'`,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
@@ -319,7 +319,7 @@ func Test_buildResourceFiltersFromFilterItems(t *testing.T) {
|
||||
Type: v3.AttributeKeyTypeResource,
|
||||
},
|
||||
Operator: v3.FilterOperatorContains,
|
||||
Value: "test1",
|
||||
Value: `test1"`,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -327,8 +327,8 @@ func Test_buildResourceFiltersFromFilterItems(t *testing.T) {
|
||||
want: []string{
|
||||
"simpleJSONExtractString(labels, 'service.name') = 'test'",
|
||||
"labels like '%service.name%test%'",
|
||||
"simpleJSONExtractString(lower(labels), 'namespace') LIKE '%test1%'",
|
||||
"lower(labels) like '%namespace%test1%'",
|
||||
`simpleJSONExtractString(lower(labels), 'namespace') LIKE '%test1"%'`,
|
||||
`lower(labels) like '%namespace%test1\\\\"%'`,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
|
@@ -85,6 +85,10 @@ func UseMetricsPreAggregation() bool {
return GetOrDefaultEnv("USE_METRICS_PRE_AGGREGATION", "true") == "true"
}

func EnableHostsInfraMonitoring() bool {
return GetOrDefaultEnv("ENABLE_INFRA_METRICS", "true") == "true"
}

var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "false")

func IsDurationSortFeatureEnabled() bool {

@@ -23,6 +23,7 @@ const AlertChannelMsTeams = "ALERT_CHANNEL_MSTEAMS"
const AlertChannelOpsgenie = "ALERT_CHANNEL_OPSGENIE"
const AlertChannelEmail = "ALERT_CHANNEL_EMAIL"
const AnomalyDetection = "ANOMALY_DETECTION"
const HostsInfraMonitoring = "HOSTS_INFRA_MONITORING"

var BasicPlan = FeatureSet{
Feature{

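The flags in the constants hunk above are plain environment toggles: ENABLE_INFRA_METRICS defaults to "true" and KAFKA_SPAN_EVAL defaults to "false". GetOrDefaultEnv itself is not shown in this compare; a minimal sketch of its assumed behaviour (return the variable when set, otherwise the fallback) is below.

package constants

import "os"

// GetOrDefaultEnv is assumed to return the environment value when it is set and non-empty,
// and the supplied default otherwise; this matches how the flags above read as booleans.
func GetOrDefaultEnv(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return fallback
}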
||||
@@ -22,35 +22,19 @@ type HostListRequest struct {
|
||||
}
|
||||
|
||||
type HostListRecord struct {
|
||||
HostName string `json:"hostName"`
|
||||
Active bool `json:"active"`
|
||||
OS string `json:"os"`
|
||||
CPU float64 `json:"cpu"`
|
||||
CPUTimeSeries *v3.Series `json:"cpuTimeSeries"`
|
||||
Memory float64 `json:"memory"`
|
||||
MemoryTimeSeries *v3.Series `json:"memoryTimeSeries"`
|
||||
Wait float64 `json:"wait"`
|
||||
WaitTimeSeries *v3.Series `json:"waitTimeSeries"`
|
||||
Load15 float64 `json:"load15"`
|
||||
Load15TimeSeries *v3.Series `json:"load15TimeSeries"`
|
||||
Meta map[string]string `json:"-"`
|
||||
}
|
||||
|
||||
type HostListGroup struct {
|
||||
GroupValues []string `json:"groupValues"`
|
||||
Active int `json:"active"`
|
||||
Inactive int `json:"inactive"`
|
||||
GroupCPUAvg float64 `json:"groupCPUAvg"`
|
||||
GroupMemoryAvg float64 `json:"groupMemoryAvg"`
|
||||
GroupWaitAvg float64 `json:"groupWaitAvg"`
|
||||
GroupLoad15Avg float64 `json:"groupLoad15Avg"`
|
||||
HostNames []string `json:"hostNames"`
|
||||
HostName string `json:"hostName"`
|
||||
Active bool `json:"active"`
|
||||
OS string `json:"os"`
|
||||
CPU float64 `json:"cpu"`
|
||||
Memory float64 `json:"memory"`
|
||||
Wait float64 `json:"wait"`
|
||||
Load15 float64 `json:"load15"`
|
||||
Meta map[string]string `json:"meta"`
|
||||
}
|
||||
|
||||
type HostListResponse struct {
|
||||
Type string `json:"type"`
|
||||
Type ResponseType `json:"type"`
|
||||
Records []HostListRecord `json:"records"`
|
||||
Groups []HostListGroup `json:"groups"`
|
||||
Total int `json:"total"`
|
||||
}
|
||||
|
||||
@@ -65,29 +49,19 @@ type ProcessListRequest struct {
|
||||
}
|
||||
|
||||
type ProcessListResponse struct {
|
||||
Type string `json:"type"`
|
||||
Type ResponseType `json:"type"`
|
||||
Records []ProcessListRecord `json:"records"`
|
||||
Groups []ProcessListGroup `json:"groups"`
|
||||
Total int `json:"total"`
|
||||
}
|
||||
|
||||
type ProcessListRecord struct {
|
||||
ProcessName string `json:"processName"`
|
||||
ProcessID string `json:"processID"`
|
||||
ProcessCMD string `json:"processCMD"`
|
||||
ProcessCMDLine string `json:"processCMDLine"`
|
||||
ProcessCPU float64 `json:"processCPU"`
|
||||
ProcessCPUTimeSeries *v3.Series `json:"processCPUTimeSeries"`
|
||||
ProcessMemory float64 `json:"processMemory"`
|
||||
ProcessMemoryTimeSeries *v3.Series `json:"processMemoryTimeSeries"`
|
||||
Meta map[string]string `json:"-"`
|
||||
}
|
||||
|
||||
type ProcessListGroup struct {
|
||||
GroupValues []string `json:"groupValues"`
|
||||
GroupCPUAvg float64 `json:"groupCPUAvg"`
|
||||
GroupMemoryAvg float64 `json:"groupMemoryAvg"`
|
||||
ProcessNames []string `json:"processNames"`
|
||||
ProcessName string `json:"processName"`
|
||||
ProcessID string `json:"processID"`
|
||||
ProcessCMD string `json:"processCMD"`
|
||||
ProcessCMDLine string `json:"processCMDLine"`
|
||||
ProcessCPU float64 `json:"processCPU"`
|
||||
ProcessMemory float64 `json:"processMemory"`
|
||||
Meta map[string]string `json:"meta"`
|
||||
}
|
||||
|
||||
type PodListRequest struct {
|
||||
|
@@ -156,9 +156,18 @@ func QuoteEscapedString(str string) string {
return str
}

func QuoteEscapedStringForContains(str string) string {
func QuoteEscapedStringForContains(str string, isIndex bool) string {
// https://clickhouse.com/docs/en/sql-reference/functions/string-search-functions#like
str = QuoteEscapedString(str)

// we are adding this because if a string contains quote `"` it will be stored as \" in clickhouse
// to query that using like our query should be \\\\"
if isIndex {
// isIndex is true means that the extra slash is present
// [\"a\",\"b\",\"sdf\"]
str = strings.ReplaceAll(str, `"`, `\\\\"`)
}

str = strings.ReplaceAll(str, `%`, `\%`)
str = strings.ReplaceAll(str, `_`, `\_`)
return str
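The new isIndex parameter above exists because values in the labels index column are stored JSON-encoded, so a literal double quote is stored as \" and has to be matched through four backslashes in a LIKE pattern. The self-contained snippet below reproduces that behaviour and agrees with the NotIn test expectation earlier in this compare; QuoteEscapedString is assumed here to escape backslashes and single quotes.

package main

import (
	"fmt"
	"strings"
)

// quoteEscapedString mirrors the assumed behaviour of utils.QuoteEscapedString:
// escape backslashes and single quotes so the value is safe inside a quoted SQL string.
func quoteEscapedString(str string) string {
	str = strings.ReplaceAll(str, `\`, `\\`)
	str = strings.ReplaceAll(str, `'`, `\'`)
	return str
}

// quoteEscapedStringForContains follows the hunk above: optionally double-escape quotes for
// the JSON-encoded labels index column, then treat the LIKE wildcards % and _ as literals.
func quoteEscapedStringForContains(str string, isIndex bool) string {
	str = quoteEscapedString(str)
	if isIndex {
		// the index column stores values like [\"a\",\"b\"], so a literal quote must be
		// matched through the stored \" form, hence the four backslashes in the pattern
		str = strings.ReplaceAll(str, `"`, `\\\\"`)
	}
	str = strings.ReplaceAll(str, `%`, `\%`)
	str = strings.ReplaceAll(str, `_`, `\_`)
	return str
}

func main() {
	fmt.Println(quoteEscapedStringForContains(`application'"_s`, true))
	// prints the escaped value used inside the NotIn test expectation above
}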